fix: fixing some typos found
@@ -28,7 +28,7 @@ extension Ghostty {
 if (flags.contains(.capsLock)) { mods |= GHOSTTY_MODS_CAPS.rawValue }

 // Handle sided input. We can't tell that both are pressed in the
-// Ghostty structure but thats okay -- we don't use that information.
+// Ghostty structure but that's okay -- we don't use that information.
 let rawFlags = flags.rawValue
 if (rawFlags & UInt(NX_DEVICERSHIFTKEYMASK) != 0) { mods |= GHOSTTY_MODS_SHIFT_RIGHT.rawValue }
 if (rawFlags & UInt(NX_DEVICERCTLKEYMASK) != 0) { mods |= GHOSTTY_MODS_CTRL_RIGHT.rawValue }
@@ -3031,7 +3031,7 @@ struct kernel_statx {
  * Unfortunately, we cannot just reference the glibc version of this
  * function, as glibc goes out of its way to make it inaccessible.
  *
- * This is simular to __kernel_rt_sigreturn().
+ * This is similar to __kernel_rt_sigreturn().
  */
 long long res;
 __asm__ __volatile__("b 2f\n"

@@ -3040,7 +3040,7 @@ struct kernel_statx {
  * see aarch64's vdso/sigreturn.S in the kernel.
  */
 "nop\n"
-/* Some system softwares recognize this instruction
+/* Some system software recognize this instruction
  * sequence to unwind from * signal handlers. Do not
  * modify the next two instructions.
  */

@@ -4466,7 +4466,7 @@ struct kernel_statx {
 /* On aarch64, the kernel requires us to always set our own
  * SA_RESTORER in order to be able to return from a signal handler.
  * This function must have a known "magic" instruction sequence
- * that system softwares like a stack unwinder can recognize.
+ * that system software like a stack unwinder can recognize.
  */
 if (act != NULL && !(act->sa_flags & SA_RESTORER)) {
 struct kernel_sigaction a = *act;
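
Note: the last hunk above is the comment explaining why this vendored syscall shim always installs its own SA_RESTORER on aarch64. A rough, hypothetical illustration only (the struct layout, the SA_RESTORER constant, and the restore_rt trampoline below are assumptions made for the sketch, not the vendored code):

    #include <cstdint>

    // Hypothetical, simplified stand-in for the vendored kernel_sigaction;
    // field names and layout are assumptions made for this sketch.
    struct kernel_sigaction {
      void (*handler)(int);
      unsigned long sa_flags;
      void (*sa_restorer)();
      uint64_t sa_mask;
    };

    constexpr unsigned long kSaRestorer = 0x04000000; // SA_RESTORER flag value on Linux

    // The real trampoline must be assembly ending in the rt_sigreturn syscall,
    // using the "magic" instruction sequence unwinders look for; declared only.
    extern "C" void restore_rt();

    // Sketch: before handing the action to rt_sigaction, make sure the kernel
    // gets a restorer it (and stack unwinders) can recognize.
    static void force_restorer(kernel_sigaction *act) {
      if (act != nullptr && !(act->sa_flags & kSaRestorer)) {
        act->sa_flags |= kSaRestorer;
        act->sa_restorer = restore_rt;
      }
    }
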

pkg/simdutf/vendor/simdutf.cpp (vendored, 2 lines changed)

@@ -5690,7 +5690,7 @@ result base64_tail_decode(char *dst, const char_type *src, size_t length, base64
 }
 }

-// like base64_tail_decode, but it will not write past the end of the ouput buffer.
+// like base64_tail_decode, but it will not write past the end of the output buffer.
 // outlen is modified to reflect the number of bytes written.
 // This functions assumes that the padding (=) has been removed.
 template <class char_type>

pkg/simdutf/vendor/simdutf.h (vendored, 16 lines changed)

@@ -2328,7 +2328,7 @@ simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input,
 simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) noexcept;

 /**
- * Convert a base64 input to a binary ouput.
+ * Convert a base64 input to a binary output.
  *
  * This function follows the WHATWG forgiving-base64 format, which means that it will
  * ignore any ASCII spaces in the input. You may provide a padded input (with one or two

@@ -2365,7 +2365,7 @@ simdutf_warn_unused result base64_to_binary(const char * input, size_t length, c
 simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options = base64_default) noexcept;

 /**
- * Convert a binary input to a base64 ouput. The output is always padded with equal signs so that it is
+ * Convert a binary input to a base64 output. The output is always padded with equal signs so that it is
  * a multiple of 4 bytes long.
  *
  * This function always succeeds.

@@ -2379,7 +2379,7 @@ simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_optio
 size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options = base64_default) noexcept;

 /**
- * Convert a base64 input to a binary ouput.
+ * Convert a base64 input to a binary output.
  *
  * This function follows the WHATWG forgiving-base64 format, which means that it will
  * ignore any ASCII spaces in the input. You may provide a padded input (with one or two

@@ -2408,7 +2408,7 @@ size_t binary_to_base64(const char * input, size_t length, char* output, base64_
 simdutf_warn_unused result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options = base64_default) noexcept;

 /**
- * Convert a base64 input to a binary ouput.
+ * Convert a base64 input to a binary output.
  *
  * This function follows the WHATWG forgiving-base64 format, which means that it will
  * ignore any ASCII spaces in the input. You may provide a padded input (with one or two

@@ -3531,7 +3531,7 @@ public:
 simdutf_warn_unused virtual size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept = 0;

 /**
- * Convert a base64 input to a binary ouput.
+ * Convert a base64 input to a binary output.
  *
  * This function follows the WHATWG forgiving-base64 format, which means that it will
  * ignore any ASCII spaces in the input. You may provide a padded input (with one or two

@@ -3556,7 +3556,7 @@ public:
 simdutf_warn_unused virtual result base64_to_binary(const char * input, size_t length, char* output, base64_options options = base64_default) const noexcept = 0;

 /**
- * Convert a base64 input to a binary ouput.
+ * Convert a base64 input to a binary output.
  *
  * This function follows the WHATWG forgiving-base64 format, which means that it will
  * ignore any ASCII spaces in the input. You may provide a padded input (with one or two

@@ -3584,13 +3584,13 @@ public:
  * Provide the base64 length in bytes given the length of a binary input.
  *
  * @param length the length of the input in bytes
- * @parem options the base64 options to use, can be base64_default or base64_url, is base64_default by default.
+ * @param options the base64 options to use, can be base64_default or base64_url, is base64_default by default.
  * @return number of base64 bytes
  */
 simdutf_warn_unused virtual size_t base64_length_from_binary(size_t length, base64_options options = base64_default) const noexcept = 0;

 /**
- * Convert a binary input to a base64 ouput. The output is always padded with equal signs so that it is
+ * Convert a binary input to a base64 output. The output is always padded with equal signs so that it is
  * a multiple of 4 bytes long.
  *
  * This function always succeeds.
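
Note: the doc comments corrected above describe simdutf's base64 round-trip API. A minimal usage sketch built only from the signatures quoted in this diff (buffer sizing and error handling here are illustrative assumptions, not library recommendations):

    #include <cstddef>
    #include <string>
    #include <vector>
    #include "simdutf.h"

    int main() {
      const std::string data = "hello world";

      // Encode: base64_length_from_binary() sizes the output (padded to a multiple of 4).
      std::string b64(simdutf::base64_length_from_binary(data.size()), '\0');
      size_t written = simdutf::binary_to_base64(data.data(), data.size(), b64.data());
      b64.resize(written);

      // Decode: maximal_binary_length_from_base64() gives a safe upper bound for the output buffer.
      std::vector<char> decoded(simdutf::maximal_binary_length_from_base64(b64.data(), b64.size()));
      simdutf::result r = simdutf::base64_to_binary(b64.data(), b64.size(), decoded.data());
      if (r.error != simdutf::error_code::SUCCESS) return 1;
      decoded.resize(r.count); // on success, count is the number of bytes written

      return decoded.size() == data.size() ? 0 : 1;
    }
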
@@ -1610,7 +1610,7 @@ fn keyEvent(
 else
 // On Wayland, we have to use the GDK device because the mods sent
 // to this event do not have the modifier key applied if it was
-// presssed (i.e. left control).
+// pressed (i.e. left control).
 translateMods(c.gdk_device_get_modifier_state(device));

 mods.num_lock = c.gdk_device_get_num_lock_state(device) == 1;
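
Note: the hunk above reads modifier and Num Lock state straight off the GDK device rather than trusting the event's mods on Wayland. A small C-style sketch of that idea using the GDK 4 calls named in the diff (the surrounding setup is assumed, not Ghostty's code):

    #include <gdk/gdk.h>

    // Query the device that produced a key event directly: on Wayland the
    // event's own modifier field may not yet include the key being pressed.
    static void read_device_state(GdkDevice *device,
                                  GdkModifierType *mods_out,
                                  gboolean *num_lock_out) {
      *mods_out = gdk_device_get_modifier_state(device);      // live modifier bitmask
      *num_lock_out = gdk_device_get_num_lock_state(device);  // TRUE if Num Lock is on
    }
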
@@ -463,7 +463,7 @@ palette: Palette = .{},
 /// than 0.01 or greater than 10,000 will be clamped to the nearest valid
 /// value.
 ///
-/// A value of "1" (default) scrolls te default amount. A value of "2" scrolls
+/// A value of "1" (default) scrolls the default amount. A value of "2" scrolls
 /// double the default amount. A value of "0.5" scrolls half the default amount.
 /// Et cetera.
 @"mouse-scroll-multiplier": f64 = 1.0,
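
Note: the option documented above is set in Ghostty's configuration file as a plain key/value pair; assuming the usual "key = value" config syntax, halving the scroll speed would look like:

    mouse-scroll-multiplier = 0.5
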
@@ -1049,7 +1049,7 @@ keybind: Keybinds = .{},
 @"clipboard-paste-bracketed-safe": bool = true,

 /// The total amount of bytes that can be used for image data (i.e. the Kitty
-/// image protocol) per terminal scren. The maximum value is 4,294,967,295
+/// image protocol) per terminal screen. The maximum value is 4,294,967,295
 /// (4GiB). The default is 320MB. If this is set to zero, then all image
 /// protocols will be disabled.
 ///

@@ -2174,7 +2174,7 @@ pub fn loadCliArgs(self: *Config, alloc_gpa: Allocator) !void {
 self.@"config-default-files" = true;

 // Keep track of the replay steps up to this point so we
-// can replay if we are disgarding the default files.
+// can replay if we are discarding the default files.
 const replay_len_start = self._replay_steps.items.len;

 // Keep track of font families because if they are set from the CLI
@@ -158,7 +158,7 @@ pub fn open(

 // Unlikely scenario: the theme doesn't exist. In this case, we reset
 // our iterator, reiterate over in order to build a better error message.
-// This does double allocate some memory but for errors I think thats
+// This does double allocate some memory but for errors I think that's
 // fine.
 it.reset();
 while (try it.next()) |loc| {
@@ -102,7 +102,7 @@ fn kitty(

 // IME confirmation still sends an enter key so if we have enter
 // and UTF8 text we just send it directly since we assume that is
-// whats happening.
+// what's happening.
 if (self.event.key == .enter and
 self.event.utf8.len > 0)
 {
@@ -10,7 +10,7 @@ pub const ButtonState = enum(c_int) {
 press,
 };

-/// Possible mouse buttons. We only track up to 11 because thats the maximum
+/// Possible mouse buttons. We only track up to 11 because that's the maximum
 /// button input that terminal mouse tracking handles without becoming
 /// ambiguous.
 ///
@@ -35,7 +35,7 @@ pub const VTEvent = struct {
 const Kind = enum { print, execute, csi, esc, osc, dcs, apc };
 const Metadata = std.StringHashMap([:0]const u8);

-/// Initiaze the event information for the given parser action.
+/// Initialize the event information for the given parser action.
 pub fn init(
 alloc: Allocator,
 surface: *Surface,
@@ -2153,7 +2153,7 @@ stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n)
 unsigned int k;
 int sgn;
 if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
-if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing
+if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s instead of continuing

 sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative)
 k = stbi_lrot(j->code_buffer, n);

@@ -2168,7 +2168,7 @@ stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n)
 {
 unsigned int k;
 if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
-if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing
+if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s instead of continuing
 k = stbi_lrot(j->code_buffer, n);
 j->code_buffer = k & ~stbi__bmask[n];
 k &= stbi__bmask[n];

@@ -2180,7 +2180,7 @@ stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j)
 {
 unsigned int k;
 if (j->code_bits < 1) stbi__grow_buffer_unsafe(j);
-if (j->code_bits < 1) return 0; // ran out of bits from stream, return 0s intead of continuing
+if (j->code_bits < 1) return 0; // ran out of bits from stream, return 0s instead of continuing
 k = j->code_buffer;
 j->code_buffer <<= 1;
 --j->code_bits;
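
Note: the three hunks above all touch the same guard in stb_image's JPEG bit reader: if a refill still leaves fewer buffered bits than requested, the decoder returns zeros rather than reading past the stream. A simplified, self-contained sketch of that pattern (not stb_image's actual implementation; the names and refill source are assumptions):

    #include <cstdint>
    #include <vector>

    // Toy MSB-first bit reader mirroring the guard in the diff: if a refill
    // still leaves fewer than n valid bits, hand back 0 instead of overrunning.
    struct BitReader {
      const std::vector<uint8_t> &data;
      size_t pos = 0;
      uint32_t buffer = 0; // bits kept left-aligned (most significant first)
      int bits = 0;        // number of valid bits currently held in buffer

      void refill() {
        while (bits <= 24 && pos < data.size()) {
          buffer |= uint32_t(data[pos++]) << (24 - bits);
          bits += 8;
        }
      }

      // n must be in [1, 24] for this toy version.
      uint32_t get_bits(int n) {
        if (bits < n) refill();
        if (bits < n) return 0; // ran out of bits: return 0s instead of continuing
        uint32_t k = buffer >> (32 - n);
        buffer <<= n;
        bits -= n;
        return k;
      }
    };
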
@@ -4773,13 +4773,13 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r

 // we make a separate pass to expand bits to pixels; for performance,
 // this could run two scanlines behind the above code, so it won't
-// intefere with filtering but will still be in the cache.
+// interfere with filtering but will still be in the cache.
 if (depth < 8) {
 for (j=0; j < y; ++j) {
 stbi_uc *cur = a->out + stride*j;
 stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes;
 // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit
-// png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop
+// png guarantee byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop
 stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range

 // note that the final byte might overshoot and write more data than desired.

@@ -4962,7 +4962,7 @@ static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int
 p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0);
 if (p == NULL) return stbi__err("outofmem", "Out of memory");

-// between here and free(out) below, exitting would leak
+// between here and free(out) below, exiting would leak
 temp_out = p;

 if (pal_img_n == 3) {

@@ -5900,7 +5900,7 @@ static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req
 if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
 if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");

-// do a tiny bit of precessing
+// do a tiny bit of processing
 if ( tga_image_type >= 8 )
 {
 tga_image_type -= 8;

@@ -6831,7 +6831,7 @@ static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, i
 // 0: not specified.
 }

-// background is what out is after the undoing of the previou frame;
+// background is what out is after the undoing of the previous frame;
 memcpy( g->background, g->out, 4 * g->w * g->h );
 }

@@ -7864,7 +7864,7 @@ STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user
 1.31 (2011-06-20)
 a few more leak fixes, bug in PNG handling (SpartanJ)
 1.30 (2011-06-11)
-added ability to load files via callbacks to accomidate custom input streams (Ben Wenger)
+added ability to load files via callbacks to accommodate custom input streams (Ben Wenger)
 removed deprecated format-specific test/load functions
 removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway
 error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha)
@@ -595,7 +595,7 @@ fn printCell(

 // TODO: this case was not handled in the old terminal implementation
 // but it feels like we should do something. investigate other
-// terminals (xterm mainly) and see whats up.
+// terminals (xterm mainly) and see what's up.
 .spacer_head => {},
 }
 }
@@ -256,7 +256,7 @@ pub const Placement = struct {
 if (img_scale_source.y < img_scaled.y_offset) {
 // If our source rect y is within the offset area, we need to
 // adjust our source rect and destination since the source texture
-// doesnt actually have the offset area blank.
+// doesn't actually have the offset area blank.
 const offset: f64 = img_scaled.y_offset - img_scale_source.y;
 img_scale_source.height -= offset;
 y_offset = offset;

@@ -286,7 +286,7 @@ pub const Placement = struct {
 if (img_scale_source.x < img_scaled.x_offset) {
 // If our source rect x is within the offset area, we need to
 // adjust our source rect and destination since the source texture
-// doesnt actually have the offset area blank.
+// doesn't actually have the offset area blank.
 const offset: f64 = img_scaled.x_offset - img_scale_source.x;
 img_scale_source.width -= offset;
 x_offset = offset;
@@ -205,7 +205,7 @@ pub fn RefCountedSet(
 OutOfMemory,

 /// The set needs to be rehashed, as there are many dead
-/// items with lower IDs which are inaccessible for re-use.
+/// items with lower IDs which are inaccessible for reuse.
 NeedsRehash,
 };

@@ -557,7 +557,7 @@ pub fn RefCountedSet(
 const item = &items[id];

 // If there's a dead item then we resurrect it
-// for our value so that we can re-use its ID.
+// for our value so that we can reuse its ID.
 if (item.meta.ref == 0) {
 if (comptime @hasDecl(Context, "deleted")) {
 // Inform the context struct that we're
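
Note: the comment fixed above is about the RefCountedSet's slot recycling: an entry whose reference count has dropped to zero is dead, and its ID can be handed out again instead of growing the backing array. A toy sketch of that idea (hypothetical names, a linear scan instead of the real hashing, not Ghostty's Zig implementation):

    #include <cstdint>
    #include <vector>

    struct Slot { uint32_t ref = 0; int value = 0; };

    struct Pool {
      std::vector<Slot> items{Slot{}}; // index 0 reserved as a "null" ID

      uint32_t add(int value) {
        for (uint32_t id = 1; id < items.size(); ++id) {
          if (items[id].ref == 0) {      // dead item: resurrect it and reuse its ID
            items[id] = Slot{1, value};
            return id;
          }
        }
        items.push_back(Slot{1, value}); // no dead slot: append a fresh ID
        return uint32_t(items.size() - 1);
      }

      void release(uint32_t id) {
        if (items[id].ref > 0) --items[id].ref; // ref == 0 marks the slot dead
      }
    };
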
@@ -335,7 +335,7 @@ fn processExit(
 // exited flag is set to true. This allows the terminal window to remain
 // open.
 if (execdata.wait_after_command) {
-// We output a message so that the user knows whats going on and
+// We output a message so that the user knows what's going on and
 // doesn't think their terminal just froze.
 terminal: {
 td.renderer_state.mutex.lock();

vendor/pixman/CODING_STYLE (vendored, 2 lines changed)

@@ -107,7 +107,7 @@ Whitespace
 x / unit_x

 * Do not put spaces after the address-of operator, the * when used as
-a pointer derefernce or the ! and ~ operators:
+a pointer dereference or the ! and ~ operators:

 &foo;


vendor/pixman/meson.build (vendored, 2 lines changed)

@@ -464,7 +464,7 @@ if pthreads_found
 endif

 funcs = ['sigaction', 'alarm', 'mprotect', 'getpagesize', 'mmap', 'getisax', 'gettimeofday']
-# mingw claimes to have posix_memalign, but it doesn't
+# mingw claims to have posix_memalign, but it doesn't
 if host_machine.system() != 'windows'
 funcs += 'posix_memalign'
 endif
@@ -307,7 +307,7 @@

 /*
 * Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form.
-* Interleave should be done when maks is enabled or operator is 'over'.
+* Interleave should be done when mask is enabled or operator is 'over'.
 */
 .macro bilinear_interleave src0, src1, dst0, dst1
 vuzp.8 src0, src1

@@ -579,7 +579,7 @@
 /*
 * Main template macro for generating NEON optimized bilinear scanline functions.
 *
-* Bilinear scanline generator macro take folling arguments:
+* Bilinear scanline generator macro take following arguments:
 * fname - name of the function to generate
 * src_fmt - source color format (8888 or 0565)
 * dst_fmt - destination color format (8888 or 0565)

vendor/pixman/pixman/pixman-arm-neon-asm.S (vendored, 8 lines changed)

@@ -71,7 +71,7 @@
 * as NOP to workaround some HW bugs or for whatever other reason)
 *
 * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
-* advanced prefetch intruduces heavy overhead)
+* advanced prefetch introduces heavy overhead)
 *
 * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
 * which can run ARM and NEON instructions simultaneously so that extra ARM

@@ -100,7 +100,7 @@
 * handling unaligned leading/trailing pixels), so we only need to deal with
 * the data in NEON registers.
 *
-* NEON registers allocation in general is recommented to be the following:
+* NEON registers allocation in general is recommended to be the following:
 * d0, d1, d2, d3 - contain loaded source pixel data
 * d4, d5, d6, d7 - contain loaded destination pixels (if they are needed)
 * d24, d25, d26, d27 - contain loading mask pixel data (if mask is used)

@@ -117,7 +117,7 @@
 */

 /*
-* Step one. We need to have some code to do some arithmetics on pixel data.
+* Step one. We need to have some code to do some arithmetic on pixel data.
 * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
 * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
 * perform all the needed calculations and write the result to {d28, d29}.

@@ -218,7 +218,7 @@
 * cache_preload 8, 8
 *
 * Now it also got some VLD/VST instructions. We simply can't move from
-* processing one block of pixels to the other one with just arithmetics.
+* processing one block of pixels to the other one with just arithmetic.
 * The previously processed data needs to be written to memory and new
 * data needs to be fetched. Fortunately, this main loop does not deal
 * with partial leading/trailing pixels and can load/store a full block
@@ -315,7 +315,7 @@

 /*
 * Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form.
-* Interleave should be done when maks is enabled or operator is 'over'.
+* Interleave should be done when mask is enabled or operator is 'over'.
 */
 .macro bilinear_interleave src0, src1, src01, dst0, dst1, dst01
 vuzp &src0&.8b, &src1&.8b

@@ -605,7 +605,7 @@
 /*
 * Main template macro for generating NEON optimized bilinear scanline functions.
 *
-* Bilinear scanline generator macro take folling arguments:
+* Bilinear scanline generator macro take following arguments:
 * fname - name of the function to generate
 * src_fmt - source color format (8888 or 0565)
 * dst_fmt - destination color format (8888 or 0565)
@@ -67,7 +67,7 @@
 * as NOP to workaround some HW bugs or for whatever other reason)
 *
 * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
-* advanced prefetch intruduces heavy overhead)
+* advanced prefetch introduces heavy overhead)
 *
 * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
 * which can run ARM and NEON instructions simultaneously so that extra ARM

@@ -96,7 +96,7 @@
 * handling unaligned leading/trailing pixels), so we only need to deal with
 * the data in NEON registers.
 *
-* NEON registers allocation in general is recommented to be the following:
+* NEON registers allocation in general is recommended to be the following:
 * v0, v1, v2, v3 - contain loaded source pixel data
 * v4, v5, v6, v7 - contain loaded destination pixels (if they are needed)
 * v24, v25, v26, v27 - contain loading mask pixel data (if mask is used)

@@ -113,7 +113,7 @@
 */

 /*
-* Step one. We need to have some code to do some arithmetics on pixel data.
+* Step one. We need to have some code to do some arithmetic on pixel data.
 * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
 * back-to-back, they take pixel data from {v0, v1, v2, v3} and {v4, v5},
 * perform all the needed calculations and write the result to {v28, v29}.

@@ -222,7 +222,7 @@
 * cache_preload 8, 8
 *
 * Now it also got some VLD/VST instructions. We simply can't move from
-* processing one block of pixels to the other one with just arithmetics.
+* processing one block of pixels to the other one with just arithmetic.
 * The previously processed data needs to be written to memory and new
 * data needs to be fetched. Fortunately, this main loop does not deal
 * with partial leading/trailing pixels and can load/store a full block

vendor/pixman/pixman/pixman-filter.c (vendored, 2 lines changed)

@@ -155,7 +155,7 @@ static const filter_info_t filters[] =
 *
 * This function assumes that the intervals are within
 * the kernels in question. E.g., the caller must not
-* try to integrate a linear kernel ouside of [-1:1]
+* try to integrate a linear kernel outside of [-1:1]
 */
 static double
 integral (pixman_kernel_t kernel1, double x1,

vendor/pixman/pixman/pixman-image.c (vendored, 2 lines changed)

@@ -513,7 +513,7 @@ compute_image_info (pixman_image_t *image)
 }

 /* Alpha maps are only supported for BITS images, so it's always
-* safe to ignore their presense for non-BITS images
+* safe to ignore their presence for non-BITS images
 */
 if (!image->common.alpha_map || image->type != BITS)
 {

vendor/pixman/pixman/pixman-mips-dspr2-asm.h (vendored, 8 lines changed)

@@ -112,7 +112,7 @@ LEAF_MIPS32R2(symbol) \

 /*
 * Saves set of registers on stack. Maximum number of registers that
-* can be saved on stack is limitted to 14 (a0-a3, v0-v1 and s0-s7).
+* can be saved on stack is limited to 14 (a0-a3, v0-v1 and s0-s7).
 * Stack offset is number of bytes that are added to stack pointer (sp)
 * before registers are pushed in order to provide enough space on stack
 * (offset must be multiple of 4, and must be big enough, as described by

@@ -128,7 +128,7 @@ LEAF_MIPS32R2(symbol) \
 r11 = 0, r12 = 0, r13 = 0, \
 r14 = 0
 .if (\stack_offset < 0) || (\stack_offset - (\stack_offset / 4) * 4)
-.error "Stack offset must be pozitive and multiple of 4."
+.error "Stack offset must be positive and multiple of 4."
 .endif
 .if \stack_offset != 0
 addiu sp, sp, -\stack_offset

@@ -187,7 +187,7 @@ LEAF_MIPS32R2(symbol) \

 /*
 * Restores set of registers from stack. Maximum number of registers that
-* can be restored from stack is limitted to 14 (a0-a3, v0-v1 and s0-s7).
+* can be restored from stack is limited to 14 (a0-a3, v0-v1 and s0-s7).
 * Stack offset is number of bytes that are added to stack pointer (sp)
 * after registers are restored (offset must be multiple of 4, and must
 * be big enough, as described by CHECK_STACK_OFFSET macro). This macro is

@@ -203,7 +203,7 @@ LEAF_MIPS32R2(symbol) \
 r11 = 0, r12 = 0, r13 = 0, \
 r14 = 0
 .if (\stack_offset < 0) || (\stack_offset - (\stack_offset/4)*4)
-.error "Stack offset must be pozitive and multiple of 4."
+.error "Stack offset must be positive and multiple of 4."
 .endif
 lw \r1, 0(sp)
 .if \r2 != 0
@@ -61,7 +61,7 @@ LEAF_MIPS32R2(pixman_mips_fast_memcpy)

 andi a3, a3, 0x3 /* we need to copy a3 bytes to make a0/a1 aligned */
 beq a3, zero, $chk16w /* when a3=0 then the dst (a0) is word-aligned */
-subu a2, a2, a3 /* now a2 is the remining bytes count */
+subu a2, a2, a3 /* now a2 is the remaining bytes count */

 LWHI t8, 0(a1)
 addu a1, a1, a3

vendor/pixman/pixman/pixman-region.c (vendored, 4 lines changed)

@@ -627,7 +627,7 @@ pixman_coalesce (region_type_t * region, /* Region to coalesce */
 return prev_start;
 }

-/* Quicky macro to avoid trivial reject procedure calls to pixman_coalesce */
+/* Quickie macro to avoid trivial reject procedure calls to pixman_coalesce */

 #define COALESCE(new_reg, prev_band, cur_band) \
 do \

@@ -1544,7 +1544,7 @@ quick_sort_rects (
 *
 * Side Effects:
 * The passed-in ``region'' may be modified.
-* overlap set to TRUE if any retangles overlapped,
+* overlap set to TRUE if any rectangles overlapped,
 * else FALSE;
 *
 * Strategy:

vendor/pixman/pixman/pixman-sse2.c (vendored, 8 lines changed)

@@ -787,7 +787,7 @@ sse2_combine_over_reverse_u (pixman_implementation_t *imp,
 &xmm_alpha_lo, &xmm_alpha_hi,
 &xmm_src_lo, &xmm_src_hi);

-/* rebuid the 4 pixel data and save*/
+/* rebuild the 4 pixel data and save*/
 save_128_aligned ((__m128i*)pd,
 pack_2x128_128 (xmm_src_lo, xmm_src_hi));

@@ -2437,7 +2437,7 @@ sse2_composite_over_n_8888 (pixman_implementation_t *imp,
 &xmm_alpha, &xmm_alpha,
 &xmm_dst_lo, &xmm_dst_hi);

-/* rebuid the 4 pixel data and save*/
+/* rebuild the 4 pixel data and save*/
 save_128_aligned (
 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));

@@ -3155,7 +3155,7 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
 &xmm_alpha_lo, &xmm_alpha_hi);

 /* I'm loading next 4 pixels from memory
-* before to optimze the memory read.
+* before to optimize the memory read.
 */
 xmm_src = load_128_unaligned ((__m128i*) (src + 4));

@@ -5395,7 +5395,7 @@ scaled_nearest_scanline_sse2_8888_8888_OVER (uint32_t* pd,
 &xmm_alpha_lo, &xmm_alpha_hi,
 &xmm_dst_lo, &xmm_dst_hi);

-/* rebuid the 4 pixel data and save*/
+/* rebuild the 4 pixel data and save*/
 save_128_aligned ((__m128i*)pd,
 pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 }