diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2c24e7142..7cd20f28f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -97,7 +97,3 @@ jobs: - name: Test Dynamic Build run: nix develop -c zig build -Dstatic=false - - - name: Test Wasm Build - run: nix develop -c zig build wasm - diff --git a/TODO.md b/TODO.md index a98af3b68..a2d756f8e 100644 --- a/TODO.md +++ b/TODO.md @@ -27,4 +27,3 @@ Major Features: * Bell * Sixels: https://saitoha.github.io/libsixel/ -* Kitty graphics protocol: https://sw.kovidgoyal.net/kitty/graphics-protocol/ diff --git a/build.zig b/build.zig index 51cdaf0a3..5546b1117 100644 --- a/build.zig +++ b/build.zig @@ -24,7 +24,6 @@ const libpng = @import("pkg/libpng/build.zig"); const macos = @import("pkg/macos/build.zig"); const objc = @import("vendor/zig-objc/build.zig"); const pixman = @import("pkg/pixman/build.zig"); -const stb_image_resize = @import("pkg/stb_image_resize/build.zig"); const utf8proc = @import("pkg/utf8proc/build.zig"); const zlib = @import("pkg/zlib/build.zig"); const tracylib = @import("pkg/tracy/build.zig"); @@ -670,6 +669,11 @@ fn addDeps( step.addLibraryPath(.{ .path = b.fmt("/usr/lib/{s}", .{triple}) }); } + // C files + step.linkLibC(); + step.addIncludePath(.{ .path = "src/stb" }); + step.addCSourceFiles(&.{"src/stb/stb.c"}, &.{}); + // If we're building a lib we have some different deps const lib = step.kind == .lib; @@ -693,7 +697,6 @@ fn addDeps( })); step.addModule("xev", mod_libxev); step.addModule("pixman", pixman.module(b)); - step.addModule("stb_image_resize", stb_image_resize.module(b)); step.addModule("utf8proc", utf8proc.module(b)); // Mac Stuff @@ -713,10 +716,6 @@ fn addDeps( system_sdk.include(b, tracy_step, .{}); } - // stb_image_resize - const stb_image_resize_step = try stb_image_resize.link(b, step, .{}); - try static_libs.append(stb_image_resize_step.getEmittedBin()); - // utf8proc const utf8proc_step = try utf8proc.link(b, step); try 
static_libs.append(utf8proc_step.getEmittedBin()); diff --git a/pkg/stb_image_resize/build.zig b/pkg/stb_image_resize/build.zig deleted file mode 100644 index 00f0bec57..000000000 --- a/pkg/stb_image_resize/build.zig +++ /dev/null @@ -1,65 +0,0 @@ -const std = @import("std"); - -/// Directories with our includes. -const root = thisDir(); -pub const include_paths = [_][]const u8{ - root, -}; - -pub fn module(b: *std.Build) *std.build.Module { - return b.createModule(.{ - .source_file = .{ .path = (comptime thisDir()) ++ "/main.zig" }, - }); -} - -fn thisDir() []const u8 { - return std.fs.path.dirname(@src().file) orelse "."; -} - -pub const Options = struct {}; - -pub fn link( - b: *std.Build, - step: *std.build.LibExeObjStep, - opt: Options, -) !*std.build.LibExeObjStep { - const lib = try buildStbImageResize(b, step, opt); - step.linkLibrary(lib); - inline for (include_paths) |path| step.addIncludePath(.{ .path = path }); - return lib; -} - -pub fn buildStbImageResize( - b: *std.Build, - step: *std.build.LibExeObjStep, - opt: Options, -) !*std.build.LibExeObjStep { - _ = opt; - - const lib = b.addStaticLibrary(.{ - .name = "stb_image_resize", - .target = step.target, - .optimize = step.optimize, - }); - - // Include - inline for (include_paths) |path| lib.addIncludePath(.{ .path = path }); - - // Link - lib.linkLibC(); - - // Compile - var flags = std.ArrayList([]const u8).init(b.allocator); - defer flags.deinit(); - try flags.appendSlice(&.{ - //"-fno-sanitize=undefined", - }); - - // C files - lib.addCSourceFile(.{ - .file = .{ .path = root ++ "/stb_image_resize.c" }, - .flags = flags.items, - }); - - return lib; -} diff --git a/pkg/stb_image_resize/stb_image_resize.c b/pkg/stb_image_resize/stb_image_resize.c deleted file mode 100644 index 420cd5379..000000000 --- a/pkg/stb_image_resize/stb_image_resize.c +++ /dev/null @@ -1,2 +0,0 @@ -#define STB_IMAGE_RESIZE_IMPLEMENTATION -#include diff --git a/src/Surface.zig b/src/Surface.zig index 04705dfbd..004a2c987 
100644 --- a/src/Surface.zig +++ b/src/Surface.zig @@ -401,6 +401,7 @@ pub fn init( var io = try termio.Impl.init(alloc, .{ .grid_size = grid_size, .screen_size = screen_size, + .padding = padding, .full_config = config, .config = try termio.Impl.DerivedConfig.init(alloc, config), .resources_dir = app.resources_dir, @@ -894,14 +895,12 @@ pub fn sizeCallback(self: *Surface, size: apprt.SurfaceSize) !void { // Recalculate our grid size. Because Ghostty supports fluid resizing, // its possible the grid doesn't change at all even if the screen size changes. - const new_grid_size = renderer.GridSize.init( + // We have to update the IO thread no matter what because we send + // pixel-level sizing to the subprocess. + self.grid_size = renderer.GridSize.init( self.screen_size.subPadding(self.padding), self.cell_size, ); - if (self.grid_size.equals(new_grid_size)) return; - - // Grid size changed, update our grid size and notify the terminal - self.grid_size = new_grid_size; if (self.grid_size.columns < 5 and (self.padding.left > 0 or self.padding.right > 0)) { log.warn("WARNING: very small terminal grid detected with padding " ++ "set. Is your padding reasonable?", .{}); diff --git a/src/config.zig b/src/config.zig index b16ead7c1..d76dba115 100644 --- a/src/config.zig +++ b/src/config.zig @@ -186,6 +186,15 @@ pub const Config = struct { /// This does not affect data sent to the clipboard via "clipboard-write". @"clipboard-trim-trailing-spaces": bool = true, + /// The total amount of bytes that can be used for image data (i.e. + /// the Kitty image protocol) per terminal scren. The maximum value + /// is 4,294,967,295 (4GB). The default is 320MB. If this is set to zero, + /// then all image protocols will be disabled. + /// + /// This value is separate for primary and alternate screens so the + /// effective limit per surface is double. + @"image-storage-limit": u32 = 320 * 1000 * 1000, + /// Whether to automatically copy selected text to the clipboard. 
"true" /// will only copy on systems that support a selection clipboard. /// diff --git a/src/font/face/freetype.zig b/src/font/face/freetype.zig index e25b947c0..ac0c525f2 100644 --- a/src/font/face/freetype.zig +++ b/src/font/face/freetype.zig @@ -8,7 +8,7 @@ const std = @import("std"); const builtin = @import("builtin"); const freetype = @import("freetype"); const harfbuzz = @import("harfbuzz"); -const resize = @import("stb_image_resize"); +const stb = @import("../../stb/main.zig"); const assert = std.debug.assert; const testing = std.testing; const Allocator = std.mem.Allocator; @@ -204,7 +204,7 @@ pub const Face = struct { result.buffer = buf.ptr; errdefer alloc.free(buf); - if (resize.stbir_resize_uint8( + if (stb.stbir_resize_uint8( bm.buffer, @intCast(bm.width), @intCast(bm.rows), diff --git a/src/renderer/Metal.zig b/src/renderer/Metal.zig index b36824db5..083627d9c 100644 --- a/src/renderer/Metal.zig +++ b/src/renderer/Metal.zig @@ -21,6 +21,20 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Terminal = terminal.Terminal; +const mtl = @import("metal/api.zig"); +const mtl_buffer = @import("metal/buffer.zig"); +const mtl_image = @import("metal/image.zig"); +const mtl_shaders = @import("metal/shaders.zig"); +const Image = mtl_image.Image; +const ImageMap = mtl_image.ImageMap; +const Shaders = mtl_shaders.Shaders; + +const CellBuffer = mtl_buffer.Buffer(mtl_shaders.Cell); +const ImageBuffer = mtl_buffer.Buffer(mtl_shaders.Image); +const InstanceBuffer = mtl_buffer.Buffer(u16); + +const ImagePlacementList = std.ArrayListUnmanaged(mtl_image.Placement); + // Get native API access on certain platforms so we can do more customization. const glfwNative = glfw.Native(.{ .cocoa = builtin.os.tag == .macos, @@ -58,72 +72,35 @@ cursor_style: renderer.CursorStyle, /// The current set of cells to render. This is rebuilt on every frame /// but we keep this around so that we don't reallocate. Each set of /// cells goes into a separate shader. 
-cells_bg: std.ArrayListUnmanaged(GPUCell), -cells: std.ArrayListUnmanaged(GPUCell), +cells_bg: std.ArrayListUnmanaged(mtl_shaders.Cell), +cells: std.ArrayListUnmanaged(mtl_shaders.Cell), /// The current GPU uniform values. -uniforms: GPUUniforms, +uniforms: mtl_shaders.Uniforms, /// The font structures. font_group: *font.GroupCache, font_shaper: font.Shaper, +/// The images that we may render. +images: ImageMap = .{}, +image_placements: ImagePlacementList = .{}, +image_bg_end: u32 = 0, +image_text_end: u32 = 0, + +/// Metal state +shaders: Shaders, // Compiled shaders +buf_cells: CellBuffer, // Vertex buffer for cells +buf_cells_bg: CellBuffer, // Vertex buffer for background cells +buf_instance: InstanceBuffer, // MTLBuffer + /// Metal objects device: objc.Object, // MTLDevice queue: objc.Object, // MTLCommandQueue swapchain: objc.Object, // CAMetalLayer -buf_cells_bg: objc.Object, // MTLBuffer -buf_cells: objc.Object, // MTLBuffer -buf_instance: objc.Object, // MTLBuffer -pipeline: objc.Object, // MTLRenderPipelineState texture_greyscale: objc.Object, // MTLTexture texture_color: objc.Object, // MTLTexture -const GPUCell = extern struct { - mode: GPUCellMode, - grid_pos: [2]f32, - glyph_pos: [2]u32 = .{ 0, 0 }, - glyph_size: [2]u32 = .{ 0, 0 }, - glyph_offset: [2]i32 = .{ 0, 0 }, - color: [4]u8, - cell_width: u8, -}; - -// Intel macOS 13 doesn't like it when any field in a vertex buffer is not -// aligned on the alignment of the struct. I don't understand it, I think -// this must be some macOS 13 Metal GPU driver bug because it doesn't matter -// on macOS 12 or Apple Silicon macOS 13. -// -// To be safe, we put this test in here. 
-test "GPUCell offsets" { - const testing = std.testing; - const alignment = @alignOf(GPUCell); - inline for (@typeInfo(GPUCell).Struct.fields) |field| { - const offset = @offsetOf(GPUCell, field.name); - try testing.expectEqual(0, @mod(offset, alignment)); - } -} - -const GPUUniforms = extern struct { - /// The projection matrix for turning world coordinates to normalized. - /// This is calculated based on the size of the screen. - projection_matrix: math.Mat, - - /// Size of a single cell in pixels, unscaled. - cell_size: [2]f32, - - /// Metrics for underline/strikethrough - strikethrough_position: f32, - strikethrough_thickness: f32, -}; - -const GPUCellMode = enum(u8) { - bg = 1, - fg = 2, - fg_color = 7, - strikethrough = 8, -}; - /// The configuration for this renderer that is derived from the main /// configuration. This must be exported so that we don't need to /// pass around Config pointers which makes memory management a pain. @@ -197,7 +174,7 @@ pub fn surfaceInit(surface: *apprt.Surface) !void { pub fn init(alloc: Allocator, options: renderer.Options) !Metal { // Initialize our metal stuff - const device = objc.Object.fromId(MTLCreateSystemDefaultDevice()); + const device = objc.Object.fromId(mtl.MTLCreateSystemDefaultDevice()); const queue = device.msgSend(objc.Object, objc.sel("newCommandQueue"), .{}); const swapchain = swapchain: { const CAMetalLayer = objc.Class.getClass("CAMetalLayer").?; @@ -240,57 +217,22 @@ pub fn init(alloc: Allocator, options: renderer.Options) !Metal { }); errdefer font_shaper.deinit(); - // Initialize our Metal buffers - const buf_instance = buffer: { - const data = [6]u16{ - 0, 1, 3, // Top-left triangle - 1, 2, 3, // Bottom-right triangle - }; + // Vertex buffers + var buf_cells = try CellBuffer.init(device, 160 * 160); + errdefer buf_cells.deinit(); + var buf_cells_bg = try CellBuffer.init(device, 160 * 160); + errdefer buf_cells_bg.deinit(); + var buf_instance = try InstanceBuffer.initFill(device, &.{ + 0, 1, 3, // 
Top-left triangle + 1, 2, 3, // Bottom-right triangle + }); + errdefer buf_instance.deinit(); - break :buffer device.msgSend( - objc.Object, - objc.sel("newBufferWithBytes:length:options:"), - .{ - @as(*const anyopaque, @ptrCast(&data)), - @as(c_ulong, @intCast(data.len * @sizeOf(u16))), - MTLResourceStorageModeShared, - }, - ); - }; + // Initialize our shaders + var shaders = try Shaders.init(device); + errdefer shaders.deinit(); - const buf_cells = buffer: { - // Preallocate for 160x160 grid with 3 modes (bg, fg, text). This - // should handle most terminals well, and we can avoid a resize later. - const prealloc = 160 * 160 * 3; - - break :buffer device.msgSend( - objc.Object, - objc.sel("newBufferWithLength:options:"), - .{ - @as(c_ulong, @intCast(prealloc * @sizeOf(GPUCell))), - MTLResourceStorageModeShared, - }, - ); - }; - - const buf_cells_bg = buffer: { - // Preallocate for 160x160 grid with 3 modes (bg, fg, text). This - // should handle most terminals well, and we can avoid a resize later. 
- const prealloc = 160 * 160; - - break :buffer device.msgSend( - objc.Object, - objc.sel("newBufferWithLength:options:"), - .{ - @as(c_ulong, @intCast(prealloc * @sizeOf(GPUCell))), - MTLResourceStorageModeShared, - }, - ); - }; - - // Initialize our shader (MTLLibrary) - const library = try initLibrary(device, @embedFile("shaders/cell.metal")); - const pipeline_state = try initPipelineState(device, library); + // Font atlas textures const texture_greyscale = try initAtlasTexture(device, &options.font_group.atlas_greyscale); const texture_color = try initAtlasTexture(device, &options.font_group.atlas_color); @@ -319,14 +261,16 @@ pub fn init(alloc: Allocator, options: renderer.Options) !Metal { .font_group = options.font_group, .font_shaper = font_shaper, + // Shaders + .shaders = shaders, + .buf_cells = buf_cells, + .buf_cells_bg = buf_cells_bg, + .buf_instance = buf_instance, + // Metal stuff .device = device, .queue = queue, .swapchain = swapchain, - .buf_cells = buf_cells, - .buf_cells_bg = buf_cells_bg, - .buf_instance = buf_instance, - .pipeline = pipeline_state, .texture_greyscale = texture_greyscale, .texture_color = texture_color, }; @@ -341,13 +285,22 @@ pub fn deinit(self: *Metal) void { self.config.deinit(); - deinitMTLResource(self.buf_cells_bg); - deinitMTLResource(self.buf_cells); - deinitMTLResource(self.buf_instance); + { + var it = self.images.iterator(); + while (it.next()) |kv| kv.value_ptr.deinit(self.alloc); + self.images.deinit(self.alloc); + } + self.image_placements.deinit(self.alloc); + + self.buf_cells_bg.deinit(); + self.buf_cells.deinit(); + self.buf_instance.deinit(); deinitMTLResource(self.texture_greyscale); deinitMTLResource(self.texture_color); self.queue.msgSend(void, objc.sel("release"), .{}); + self.shaders.deinit(); + self.* = undefined; } @@ -550,7 +503,7 @@ pub fn render( // We used to share terminal state, but we've since learned through // analysis that it is faster to copy the terminal state than to - // hold the lock 
wile rebuilding GPU cells. + // hold the lock while rebuilding GPU cells. const viewport_bottom = state.terminal.screen.viewportIsBottom(); var screen_copy = if (viewport_bottom) try state.terminal.screen.clone( self.alloc, @@ -573,6 +526,13 @@ pub fn render( // Whether to draw our cursor or not. const draw_cursor = self.cursor_visible and state.terminal.screen.viewportIsBottom(); + // If we have Kitty graphics data, we enter a SLOW SLOW SLOW path. + // We only do this if the Kitty image state is dirty meaning only if + // it changes. + if (state.terminal.screen.kitty_images.dirty) { + try self.prepKittyGraphics(state.terminal); + } + break :critical .{ .bg = self.config.background, .selection = selection, @@ -608,6 +568,27 @@ pub fn render( self.font_group.atlas_color.modified = false; } + // Go through our images and see if we need to setup any textures. + { + var image_it = self.images.iterator(); + while (image_it.next()) |kv| { + switch (kv.value_ptr.*) { + .ready => {}, + + .pending_rgb, + .pending_rgba, + => try kv.value_ptr.upload(self.alloc, self.device), + + .unload_pending, + .unload_ready, + => { + kv.value_ptr.deinit(self.alloc); + self.images.removeByPtr(kv.key_ptr); + }, + } + } + } + // Command buffer (MTLCommandBuffer) const buffer = self.queue.msgSend(objc.Object, objc.sel("commandBuffer"), .{}); @@ -635,10 +616,10 @@ pub fn render( // which ironically doesn't implement CAMetalDrawable as a // property so we just send a message. 
const texture = drawable.msgSend(objc.c.id, objc.sel("texture"), .{}); - attachment.setProperty("loadAction", @intFromEnum(MTLLoadAction.clear)); - attachment.setProperty("storeAction", @intFromEnum(MTLStoreAction.store)); + attachment.setProperty("loadAction", @intFromEnum(mtl.MTLLoadAction.clear)); + attachment.setProperty("storeAction", @intFromEnum(mtl.MTLStoreAction.store)); attachment.setProperty("texture", texture); - attachment.setProperty("clearColor", MTLClearColor{ + attachment.setProperty("clearColor", mtl.MTLClearColor{ .red = @as(f32, @floatFromInt(critical.bg.r)) / 255, .green = @as(f32, @floatFromInt(critical.bg.g)) / 255, .blue = @as(f32, @floatFromInt(critical.bg.b)) / 255, @@ -657,48 +638,146 @@ pub fn render( ); defer encoder.msgSend(void, objc.sel("endEncoding"), .{}); - //do we need to do this? - //encoder.msgSend(void, objc.sel("setViewport:"), .{viewport}); + // Draw background images first + try self.drawImagePlacements(encoder, self.image_placements.items[0..self.image_bg_end]); - // Use our shader pipeline - encoder.msgSend(void, objc.sel("setRenderPipelineState:"), .{self.pipeline.value}); - - // Set our buffers - encoder.msgSend( - void, - objc.sel("setVertexBytes:length:atIndex:"), - .{ - @as(*const anyopaque, @ptrCast(&self.uniforms)), - @as(c_ulong, @sizeOf(@TypeOf(self.uniforms))), - @as(c_ulong, 1), - }, - ); - encoder.msgSend( - void, - objc.sel("setFragmentTexture:atIndex:"), - .{ - self.texture_greyscale.value, - @as(c_ulong, 0), - }, - ); - encoder.msgSend( - void, - objc.sel("setFragmentTexture:atIndex:"), - .{ - self.texture_color.value, - @as(c_ulong, 1), - }, - ); - - // Issue the draw calls for this shader + // Then draw background cells try self.drawCells(encoder, &self.buf_cells_bg, self.cells_bg); + + // Then draw images under text + try self.drawImagePlacements(encoder, self.image_placements.items[0..self.image_text_end]); + + // Then draw fg cells try self.drawCells(encoder, &self.buf_cells, self.cells); + + // Then 
draw remaining images + try self.drawImagePlacements(encoder, self.image_placements.items[self.image_text_end..]); } buffer.msgSend(void, objc.sel("presentDrawable:"), .{drawable.value}); buffer.msgSend(void, objc.sel("commit"), .{}); } +fn drawImagePlacements( + self: *Metal, + encoder: objc.Object, + placements: []const mtl_image.Placement, +) !void { + if (placements.len == 0) return; + + // Use our image shader pipeline + encoder.msgSend( + void, + objc.sel("setRenderPipelineState:"), + .{self.shaders.image_pipeline.value}, + ); + + // Set our uniform, which is the only shared buffer + encoder.msgSend( + void, + objc.sel("setVertexBytes:length:atIndex:"), + .{ + @as(*const anyopaque, @ptrCast(&self.uniforms)), + @as(c_ulong, @sizeOf(@TypeOf(self.uniforms))), + @as(c_ulong, 1), + }, + ); + + for (placements) |placement| { + try self.drawImagePlacement(encoder, placement); + } +} + +fn drawImagePlacement( + self: *Metal, + encoder: objc.Object, + p: mtl_image.Placement, +) !void { + // Look up the image + const image = self.images.get(p.image_id) orelse { + log.warn("image not found for placement image_id={}", .{p.image_id}); + return; + }; + + // Get the texture + const texture = switch (image) { + .ready => |t| t, + else => { + log.warn("image not ready for placement image_id={}", .{p.image_id}); + return; + }, + }; + + // Create our vertex buffer, which is always exactly one item. 
+ // future(mitchellh): we can group rendering multiple instances of a single image + const Buffer = mtl_buffer.Buffer(mtl_shaders.Image); + var buf = try Buffer.initFill(self.device, &.{.{ + .grid_pos = .{ + @as(f32, @floatFromInt(p.x)), + @as(f32, @floatFromInt(p.y)), + }, + + .cell_offset = .{ + @as(f32, @floatFromInt(p.cell_offset_x)), + @as(f32, @floatFromInt(p.cell_offset_y)), + }, + + .source_rect = .{ + @as(f32, @floatFromInt(p.source_x)), + @as(f32, @floatFromInt(p.source_y)), + @as(f32, @floatFromInt(p.source_width)), + @as(f32, @floatFromInt(p.source_height)), + }, + + .dest_size = .{ + @as(f32, @floatFromInt(p.width)), + @as(f32, @floatFromInt(p.height)), + }, + }}); + defer buf.deinit(); + + // Set our buffer + encoder.msgSend( + void, + objc.sel("setVertexBuffer:offset:atIndex:"), + .{ buf.buffer.value, @as(c_ulong, 0), @as(c_ulong, 0) }, + ); + + // Set our texture + encoder.msgSend( + void, + objc.sel("setVertexTexture:atIndex:"), + .{ + texture.value, + @as(c_ulong, 0), + }, + ); + encoder.msgSend( + void, + objc.sel("setFragmentTexture:atIndex:"), + .{ + texture.value, + @as(c_ulong, 0), + }, + ); + + // Draw! + encoder.msgSend( + void, + objc.sel("drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:instanceCount:"), + .{ + @intFromEnum(mtl.MTLPrimitiveType.triangle), + @as(c_ulong, 6), + @intFromEnum(mtl.MTLIndexType.uint16), + self.buf_instance.buffer.value, + @as(c_ulong, 0), + @as(c_ulong, 1), + }, + ); + + // log.debug("drawImagePlacement: {}", .{p}); +} + /// Loads some set of cell data into our buffer and issues a draw call. /// This expects all the Metal command encoder state to be setup. 
/// @@ -707,29 +786,215 @@ pub fn render( fn drawCells( self: *Metal, encoder: objc.Object, - buf: *objc.Object, - cells: std.ArrayListUnmanaged(GPUCell), + buf: *CellBuffer, + cells: std.ArrayListUnmanaged(mtl_shaders.Cell), ) !void { - try self.syncCells(buf, cells); + if (cells.items.len == 0) return; + + try buf.sync(self.device, cells.items); + + // Use our shader pipeline + encoder.msgSend( + void, + objc.sel("setRenderPipelineState:"), + .{self.shaders.cell_pipeline.value}, + ); + + // Set our buffers + encoder.msgSend( + void, + objc.sel("setVertexBytes:length:atIndex:"), + .{ + @as(*const anyopaque, @ptrCast(&self.uniforms)), + @as(c_ulong, @sizeOf(@TypeOf(self.uniforms))), + @as(c_ulong, 1), + }, + ); + encoder.msgSend( + void, + objc.sel("setFragmentTexture:atIndex:"), + .{ + self.texture_greyscale.value, + @as(c_ulong, 0), + }, + ); + encoder.msgSend( + void, + objc.sel("setFragmentTexture:atIndex:"), + .{ + self.texture_color.value, + @as(c_ulong, 1), + }, + ); encoder.msgSend( void, objc.sel("setVertexBuffer:offset:atIndex:"), - .{ buf.value, @as(c_ulong, 0), @as(c_ulong, 0) }, + .{ buf.buffer.value, @as(c_ulong, 0), @as(c_ulong, 0) }, ); - if (cells.items.len > 0) { - encoder.msgSend( - void, - objc.sel("drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:instanceCount:"), - .{ - @intFromEnum(MTLPrimitiveType.triangle), - @as(c_ulong, 6), - @intFromEnum(MTLIndexType.uint16), - self.buf_instance.value, - @as(c_ulong, 0), - @as(c_ulong, cells.items.len), - }, - ); + encoder.msgSend( + void, + objc.sel("drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:instanceCount:"), + .{ + @intFromEnum(mtl.MTLPrimitiveType.triangle), + @as(c_ulong, 6), + @intFromEnum(mtl.MTLIndexType.uint16), + self.buf_instance.buffer.value, + @as(c_ulong, 0), + @as(c_ulong, cells.items.len), + }, + ); +} + +/// This goes through the Kitty graphic placements and accumulates the +/// placements we need to render on our viewport. 
It also ensures that +/// the visible images are loaded on the GPU. +fn prepKittyGraphics( + self: *Metal, + t: *terminal.Terminal, +) !void { + const storage = &t.screen.kitty_images; + defer storage.dirty = false; + + // We always clear our previous placements no matter what because + // we rebuild them from scratch. + self.image_placements.clearRetainingCapacity(); + + // Go through our known images and if there are any that are no longer + // in use then mark them to be freed. + // + // This never conflicts with the below because a placement can't + // reference an image that doesn't exist. + { + var it = self.images.iterator(); + while (it.next()) |kv| { + if (storage.imageById(kv.key_ptr.*) == null) { + kv.value_ptr.markForUnload(); + } + } + } + + // The top-left and bottom-right corners of our viewport in screen + // points. This lets us determine offsets and containment of placements. + const top = (terminal.point.Viewport{}).toScreen(&t.screen); + const bot = (terminal.point.Viewport{ + .x = t.screen.cols - 1, + .y = t.screen.rows - 1, + }).toScreen(&t.screen); + + // Go through the placements and ensure the image is loaded on the GPU. + var it = storage.placements.iterator(); + while (it.next()) |kv| { + // Find the image in storage + const p = kv.value_ptr; + const image = storage.imageById(kv.key_ptr.image_id) orelse { + log.warn( + "missing image for placement, ignoring image_id={}", + .{kv.key_ptr.image_id}, + ); + continue; + }; + + // If the selection isn't within our viewport then skip it. + const rect = p.rect(image, t); + if (rect.top_left.y > bot.y) continue; + if (rect.bottom_right.y < top.y) continue; + + // If the top left is outside the viewport we need to calc an offset + // so that we render (0, 0) with some offset for the texture. 
+ const offset_y: u32 = if (rect.top_left.y < t.screen.viewport) offset_y: { + const offset_cells = t.screen.viewport - rect.top_left.y; + const offset_pixels = offset_cells * self.cell_size.height; + break :offset_y @intCast(offset_pixels); + } else 0; + + // If we already know about this image then do nothing + const gop = try self.images.getOrPut(self.alloc, kv.key_ptr.image_id); + if (!gop.found_existing) { + // Copy the data into the pending state. + const data = try self.alloc.dupe(u8, image.data); + errdefer self.alloc.free(data); + + // Store it in the map + const pending: Image.Pending = .{ + .width = image.width, + .height = image.height, + .data = data.ptr, + }; + + gop.value_ptr.* = switch (image.format) { + .rgb => .{ .pending_rgb = pending }, + .rgba => .{ .pending_rgba = pending }, + .png => unreachable, // should be decoded by now + }; + } + + // Convert our screen point to a viewport point + const viewport = p.point.toViewport(&t.screen); + + // Calculate the source rectangle + const source_x = @min(image.width, p.source_x); + const source_y = @min(image.height, p.source_y + offset_y); + const source_width = if (p.source_width > 0) + @min(image.width - source_x, p.source_width) + else + image.width; + const source_height = if (p.source_height > 0) + @min(image.height, p.source_height) + else + image.height -| offset_y; + + // Calculate the width/height of our image. 
+ const dest_width = if (p.columns > 0) p.columns * self.cell_size.width else source_width; + const dest_height = if (p.rows > 0) p.rows * self.cell_size.height else source_height; + + // Accumulate the placement + if (image.width > 0 and image.height > 0) { + try self.image_placements.append(self.alloc, .{ + .image_id = kv.key_ptr.image_id, + .x = @intCast(p.point.x), + .y = @intCast(viewport.y), + .z = p.z, + .width = dest_width, + .height = dest_height, + .cell_offset_x = p.x_offset, + .cell_offset_y = p.y_offset, + .source_x = source_x, + .source_y = source_y, + .source_width = source_width, + .source_height = source_height, + }); + } + } + + // Sort the placements by their Z value. + std.mem.sortUnstable( + mtl_image.Placement, + self.image_placements.items, + {}, + struct { + fn lessThan( + ctx: void, + lhs: mtl_image.Placement, + rhs: mtl_image.Placement, + ) bool { + _ = ctx; + return lhs.z < rhs.z or (lhs.z == rhs.z and lhs.image_id < rhs.image_id); + } + }.lessThan, + ); + + // Find our indices + self.image_bg_end = 0; + self.image_text_end = 0; + const bg_limit = std.math.minInt(i32) / 2; + for (self.image_placements.items, 0..) |p, i| { + if (self.image_bg_end == 0 and p.z >= bg_limit) { + self.image_bg_end = @intCast(i); + } + if (self.image_text_end == 0 and p.z >= 0) { + self.image_text_end = @intCast(i); + } } } @@ -838,7 +1103,7 @@ fn rebuildCells( // This is the cell that has [mode == .fg] and is underneath our cursor. // We keep track of it so that we can invert the colors so the character // remains visible. - var cursor_cell: ?GPUCell = null; + var cursor_cell: ?mtl_shaders.Cell = null; // Build each cell var rowIter = screen.rowIterator(.viewport); @@ -943,7 +1208,7 @@ fn rebuildCells( // We try to base on the cursor cell but if its not there // we use the actual cursor and if thats not there we give // up on preedit rendering. 
- var cell: GPUCell = cursor_cell orelse + var cell: mtl_shaders.Cell = cursor_cell orelse (real_cursor_cell orelse break :preedit).*; cell.color = .{ 0, 0, 0, 255 }; @@ -1091,7 +1356,7 @@ pub fn updateCell( // If we're rendering a color font, we use the color atlas const presentation = try self.font_group.group.presentationFromIndex(shaper_run.font_index); - const mode: GPUCellMode = switch (presentation) { + const mode: mtl_shaders.Cell.Mode = switch (presentation) { .text => .fg, .emoji => .fg_color, }; @@ -1149,7 +1414,7 @@ pub fn updateCell( return true; } -fn addCursor(self: *Metal, screen: *terminal.Screen) ?*const GPUCell { +fn addCursor(self: *Metal, screen: *terminal.Screen) ?*const mtl_shaders.Cell { // Add the cursor const cell = screen.getCell( .active, @@ -1197,7 +1462,7 @@ fn addCursor(self: *Metal, screen: *terminal.Screen) ?*const GPUCell { /// Updates cell with the the given character. This returns true if the /// cell was successfully updated. -fn updateCellChar(self: *Metal, cell: *GPUCell, cp: u21) bool { +fn updateCellChar(self: *Metal, cell: *mtl_shaders.Cell, cp: u21) bool { // Get the font index for this codepoint const font_index = if (self.font_group.indexForCodepoint( self.alloc, @@ -1233,52 +1498,6 @@ fn updateCellChar(self: *Metal, cell: *GPUCell, cp: u21) bool { return true; } -/// Sync the vertex buffer inputs to the GPU. This will attempt to reuse -/// the existing buffer (of course!) but will allocate a new buffer if -/// our cells don't fit in it. -fn syncCells( - self: *Metal, - target: *objc.Object, - cells: std.ArrayListUnmanaged(GPUCell), -) !void { - const req_bytes = cells.items.len * @sizeOf(GPUCell); - const avail_bytes = target.getProperty(c_ulong, "length"); - - // If we need more bytes than our buffer has, we need to reallocate. - if (req_bytes > avail_bytes) { - // Deallocate previous buffer - deinitMTLResource(target.*); - - // Allocate a new buffer with enough to hold double what we require. 
- const size = req_bytes * 2; - target.* = self.device.msgSend( - objc.Object, - objc.sel("newBufferWithLength:options:"), - .{ - @as(c_ulong, @intCast(size * @sizeOf(GPUCell))), - MTLResourceStorageModeShared, - }, - ); - } - - // We can fit within the vertex buffer so we can just replace bytes. - const dst = dst: { - const ptr = target.msgSend(?[*]u8, objc.sel("contents"), .{}) orelse { - log.warn("buf_cells contents ptr is null", .{}); - return error.MetalFailed; - }; - - break :dst ptr[0..req_bytes]; - }; - - const src = src: { - const ptr = @as([*]const u8, @ptrCast(cells.items.ptr)); - break :src ptr[0..req_bytes]; - }; - - @memcpy(dst, src); -} - /// Sync the atlas data to the given texture. This copies the bytes /// associated with the atlas to the given texture. If the atlas no longer /// fits into the texture, the texture will be resized. @@ -1296,7 +1515,7 @@ fn syncAtlasTexture(device: objc.Object, atlas: *const font.Atlas, texture: *obj void, objc.sel("replaceRegion:mipmapLevel:withBytes:bytesPerRow:"), .{ - MTLRegion{ + mtl.MTLRegion{ .origin = .{ .x = 0, .y = 0, .z = 0 }, .size = .{ .width = @intCast(atlas.size), @@ -1305,223 +1524,16 @@ fn syncAtlasTexture(device: objc.Object, atlas: *const font.Atlas, texture: *obj }, }, @as(c_ulong, 0), - atlas.data.ptr, + @as(*const anyopaque, atlas.data.ptr), @as(c_ulong, atlas.format.depth() * atlas.size), }, ); } -/// Initialize the shader library. -fn initLibrary(device: objc.Object, data: []const u8) !objc.Object { - const source = try macos.foundation.String.createWithBytes( - data, - .utf8, - false, - ); - defer source.release(); - - var err: ?*anyopaque = null; - const library = device.msgSend( - objc.Object, - objc.sel("newLibraryWithSource:options:error:"), - .{ - source, - @as(?*anyopaque, null), - &err, - }, - ); - try checkError(err); - - return library; -} - -/// Initialize the render pipeline for our shader library. 
-fn initPipelineState(device: objc.Object, library: objc.Object) !objc.Object { - // Get our vertex and fragment functions - const func_vert = func_vert: { - const str = try macos.foundation.String.createWithBytes( - "uber_vertex", - .utf8, - false, - ); - defer str.release(); - - const ptr = library.msgSend(?*anyopaque, objc.sel("newFunctionWithName:"), .{str}); - break :func_vert objc.Object.fromId(ptr.?); - }; - const func_frag = func_frag: { - const str = try macos.foundation.String.createWithBytes( - "uber_fragment", - .utf8, - false, - ); - defer str.release(); - - const ptr = library.msgSend(?*anyopaque, objc.sel("newFunctionWithName:"), .{str}); - break :func_frag objc.Object.fromId(ptr.?); - }; - - // Create the vertex descriptor. The vertex descriptor describves the - // data layout of the vertex inputs. We use indexed (or "instanced") - // rendering, so this makes it so that each instance gets a single - // GPUCell as input. - const vertex_desc = vertex_desc: { - const desc = init: { - const Class = objc.Class.getClass("MTLVertexDescriptor").?; - const id_alloc = Class.msgSend(objc.Object, objc.sel("alloc"), .{}); - const id_init = id_alloc.msgSend(objc.Object, objc.sel("init"), .{}); - break :init id_init; - }; - - // Our attributes are the fields of the input - const attrs = objc.Object.fromId(desc.getProperty(?*anyopaque, "attributes")); - { - const attr = attrs.msgSend( - objc.Object, - objc.sel("objectAtIndexedSubscript:"), - .{@as(c_ulong, 0)}, - ); - - attr.setProperty("format", @intFromEnum(MTLVertexFormat.uchar)); - attr.setProperty("offset", @as(c_ulong, @offsetOf(GPUCell, "mode"))); - attr.setProperty("bufferIndex", @as(c_ulong, 0)); - } - { - const attr = attrs.msgSend( - objc.Object, - objc.sel("objectAtIndexedSubscript:"), - .{@as(c_ulong, 1)}, - ); - - attr.setProperty("format", @intFromEnum(MTLVertexFormat.float2)); - attr.setProperty("offset", @as(c_ulong, @offsetOf(GPUCell, "grid_pos"))); - attr.setProperty("bufferIndex", @as(c_ulong, 
0)); - } - { - const attr = attrs.msgSend( - objc.Object, - objc.sel("objectAtIndexedSubscript:"), - .{@as(c_ulong, 2)}, - ); - - attr.setProperty("format", @intFromEnum(MTLVertexFormat.uint2)); - attr.setProperty("offset", @as(c_ulong, @offsetOf(GPUCell, "glyph_pos"))); - attr.setProperty("bufferIndex", @as(c_ulong, 0)); - } - { - const attr = attrs.msgSend( - objc.Object, - objc.sel("objectAtIndexedSubscript:"), - .{@as(c_ulong, 3)}, - ); - - attr.setProperty("format", @intFromEnum(MTLVertexFormat.uint2)); - attr.setProperty("offset", @as(c_ulong, @offsetOf(GPUCell, "glyph_size"))); - attr.setProperty("bufferIndex", @as(c_ulong, 0)); - } - { - const attr = attrs.msgSend( - objc.Object, - objc.sel("objectAtIndexedSubscript:"), - .{@as(c_ulong, 4)}, - ); - - attr.setProperty("format", @intFromEnum(MTLVertexFormat.int2)); - attr.setProperty("offset", @as(c_ulong, @offsetOf(GPUCell, "glyph_offset"))); - attr.setProperty("bufferIndex", @as(c_ulong, 0)); - } - { - const attr = attrs.msgSend( - objc.Object, - objc.sel("objectAtIndexedSubscript:"), - .{@as(c_ulong, 5)}, - ); - - attr.setProperty("format", @intFromEnum(MTLVertexFormat.uchar4)); - attr.setProperty("offset", @as(c_ulong, @offsetOf(GPUCell, "color"))); - attr.setProperty("bufferIndex", @as(c_ulong, 0)); - } - { - const attr = attrs.msgSend( - objc.Object, - objc.sel("objectAtIndexedSubscript:"), - .{@as(c_ulong, 6)}, - ); - - attr.setProperty("format", @intFromEnum(MTLVertexFormat.uchar)); - attr.setProperty("offset", @as(c_ulong, @offsetOf(GPUCell, "cell_width"))); - attr.setProperty("bufferIndex", @as(c_ulong, 0)); - } - - // The layout describes how and when we fetch the next vertex input. - const layouts = objc.Object.fromId(desc.getProperty(?*anyopaque, "layouts")); - { - const layout = layouts.msgSend( - objc.Object, - objc.sel("objectAtIndexedSubscript:"), - .{@as(c_ulong, 0)}, - ); - - // Access each GPUCell per instance, not per vertex. 
- layout.setProperty("stepFunction", @intFromEnum(MTLVertexStepFunction.per_instance)); - layout.setProperty("stride", @as(c_ulong, @sizeOf(GPUCell))); - } - - break :vertex_desc desc; - }; - - // Create our descriptor - const desc = init: { - const Class = objc.Class.getClass("MTLRenderPipelineDescriptor").?; - const id_alloc = Class.msgSend(objc.Object, objc.sel("alloc"), .{}); - const id_init = id_alloc.msgSend(objc.Object, objc.sel("init"), .{}); - break :init id_init; - }; - - // Set our properties - desc.setProperty("vertexFunction", func_vert); - desc.setProperty("fragmentFunction", func_frag); - desc.setProperty("vertexDescriptor", vertex_desc); - - // Set our color attachment - const attachments = objc.Object.fromId(desc.getProperty(?*anyopaque, "colorAttachments")); - { - const attachment = attachments.msgSend( - objc.Object, - objc.sel("objectAtIndexedSubscript:"), - .{@as(c_ulong, 0)}, - ); - - // Value is MTLPixelFormatBGRA8Unorm - attachment.setProperty("pixelFormat", @as(c_ulong, 80)); - - // Blending. This is required so that our text we render on top - // of our drawable properly blends into the bg. 
- attachment.setProperty("blendingEnabled", true); - attachment.setProperty("rgbBlendOperation", @intFromEnum(MTLBlendOperation.add)); - attachment.setProperty("alphaBlendOperation", @intFromEnum(MTLBlendOperation.add)); - attachment.setProperty("sourceRGBBlendFactor", @intFromEnum(MTLBlendFactor.one)); - attachment.setProperty("sourceAlphaBlendFactor", @intFromEnum(MTLBlendFactor.one)); - attachment.setProperty("destinationRGBBlendFactor", @intFromEnum(MTLBlendFactor.one_minus_source_alpha)); - attachment.setProperty("destinationAlphaBlendFactor", @intFromEnum(MTLBlendFactor.one_minus_source_alpha)); - } - - // Make our state - var err: ?*anyopaque = null; - const pipeline_state = device.msgSend( - objc.Object, - objc.sel("newRenderPipelineStateWithDescriptor:error:"), - .{ desc, &err }, - ); - try checkError(err); - - return pipeline_state; -} - /// Initialize a MTLTexture object for the given atlas. fn initAtlasTexture(device: objc.Object, atlas: *const font.Atlas) !objc.Object { // Determine our pixel format - const pixel_format: MTLPixelFormat = switch (atlas.format) { + const pixel_format: mtl.MTLPixelFormat = switch (atlas.format) { .greyscale => .r8unorm, .rgba => .bgra8unorm, else => @panic("unsupported atlas format for Metal texture"), @@ -1555,150 +1567,3 @@ fn initAtlasTexture(device: objc.Object, atlas: *const font.Atlas) !objc.Object fn deinitMTLResource(obj: objc.Object) void { obj.msgSend(void, objc.sel("release"), .{}); } - -fn checkError(err_: ?*anyopaque) !void { - if (err_) |err| { - const nserr = objc.Object.fromId(err); - const str = @as( - *macos.foundation.String, - @ptrCast(nserr.getProperty(?*anyopaque, "localizedDescription").?), - ); - - log.err("metal error={s}", .{str.cstringPtr(.ascii).?}); - return error.MetalFailed; - } -} - -/// https://developer.apple.com/documentation/metal/mtlloadaction?language=objc -const MTLLoadAction = enum(c_ulong) { - dont_care = 0, - load = 1, - clear = 2, -}; - -/// 
https://developer.apple.com/documentation/metal/mtlstoreaction?language=objc -const MTLStoreAction = enum(c_ulong) { - dont_care = 0, - store = 1, -}; - -/// https://developer.apple.com/documentation/metal/mtlstoragemode?language=objc -const MTLStorageMode = enum(c_ulong) { - shared = 0, - managed = 1, - private = 2, - memoryless = 3, -}; - -/// https://developer.apple.com/documentation/metal/mtlprimitivetype?language=objc -const MTLPrimitiveType = enum(c_ulong) { - point = 0, - line = 1, - line_strip = 2, - triangle = 3, - triangle_strip = 4, -}; - -/// https://developer.apple.com/documentation/metal/mtlindextype?language=objc -const MTLIndexType = enum(c_ulong) { - uint16 = 0, - uint32 = 1, -}; - -/// https://developer.apple.com/documentation/metal/mtlvertexformat?language=objc -const MTLVertexFormat = enum(c_ulong) { - uchar4 = 3, - float2 = 29, - int2 = 33, - uint2 = 37, - uchar = 45, -}; - -/// https://developer.apple.com/documentation/metal/mtlvertexstepfunction?language=objc -const MTLVertexStepFunction = enum(c_ulong) { - constant = 0, - per_vertex = 1, - per_instance = 2, -}; - -/// https://developer.apple.com/documentation/metal/mtlpixelformat?language=objc -const MTLPixelFormat = enum(c_ulong) { - r8unorm = 10, - bgra8unorm = 80, -}; - -/// https://developer.apple.com/documentation/metal/mtlpurgeablestate?language=objc -const MTLPurgeableState = enum(c_ulong) { - empty = 4, -}; - -/// https://developer.apple.com/documentation/metal/mtlblendfactor?language=objc -const MTLBlendFactor = enum(c_ulong) { - zero = 0, - one = 1, - source_color = 2, - one_minus_source_color = 3, - source_alpha = 4, - one_minus_source_alpha = 5, - dest_color = 6, - one_minus_dest_color = 7, - dest_alpha = 8, - one_minus_dest_alpha = 9, - source_alpha_saturated = 10, - blend_color = 11, - one_minus_blend_color = 12, - blend_alpha = 13, - one_minus_blend_alpha = 14, - source_1_color = 15, - one_minus_source_1_color = 16, - source_1_alpha = 17, - one_minus_source_1_alpha = 18, -}; - 
-/// https://developer.apple.com/documentation/metal/mtlblendoperation?language=objc -const MTLBlendOperation = enum(c_ulong) { - add = 0, - subtract = 1, - reverse_subtract = 2, - min = 3, - max = 4, -}; - -/// https://developer.apple.com/documentation/metal/mtlresourceoptions?language=objc -/// (incomplete, we only use this mode so we just hardcode it) -const MTLResourceStorageModeShared: c_ulong = @intFromEnum(MTLStorageMode.shared) << 4; - -const MTLClearColor = extern struct { - red: f64, - green: f64, - blue: f64, - alpha: f64, -}; - -const MTLViewport = extern struct { - x: f64, - y: f64, - width: f64, - height: f64, - znear: f64, - zfar: f64, -}; - -const MTLRegion = extern struct { - origin: MTLOrigin, - size: MTLSize, -}; - -const MTLOrigin = extern struct { - x: c_ulong, - y: c_ulong, - z: c_ulong, -}; - -const MTLSize = extern struct { - width: c_ulong, - height: c_ulong, - depth: c_ulong, -}; - -extern "c" fn MTLCreateSystemDefaultDevice() ?*anyopaque; diff --git a/src/renderer/metal/api.zig b/src/renderer/metal/api.zig new file mode 100644 index 000000000..f3dc2f835 --- /dev/null +++ b/src/renderer/metal/api.zig @@ -0,0 +1,138 @@ +//! This file contains the definitions of the Metal API that we use. 
+ +/// https://developer.apple.com/documentation/metal/mtlloadaction?language=objc +pub const MTLLoadAction = enum(c_ulong) { + dont_care = 0, + load = 1, + clear = 2, +}; + +/// https://developer.apple.com/documentation/metal/mtlstoreaction?language=objc +pub const MTLStoreAction = enum(c_ulong) { + dont_care = 0, + store = 1, +}; + +/// https://developer.apple.com/documentation/metal/mtlstoragemode?language=objc +pub const MTLStorageMode = enum(c_ulong) { + shared = 0, + managed = 1, + private = 2, + memoryless = 3, +}; + +/// https://developer.apple.com/documentation/metal/mtlprimitivetype?language=objc +pub const MTLPrimitiveType = enum(c_ulong) { + point = 0, + line = 1, + line_strip = 2, + triangle = 3, + triangle_strip = 4, +}; + +/// https://developer.apple.com/documentation/metal/mtlindextype?language=objc +pub const MTLIndexType = enum(c_ulong) { + uint16 = 0, + uint32 = 1, +}; + +/// https://developer.apple.com/documentation/metal/mtlvertexformat?language=objc +pub const MTLVertexFormat = enum(c_ulong) { + uchar4 = 3, + float2 = 29, + float4 = 31, + int2 = 33, + uint = 36, + uint2 = 37, + uchar = 45, +}; + +/// https://developer.apple.com/documentation/metal/mtlvertexstepfunction?language=objc +pub const MTLVertexStepFunction = enum(c_ulong) { + constant = 0, + per_vertex = 1, + per_instance = 2, +}; + +/// https://developer.apple.com/documentation/metal/mtlpixelformat?language=objc +pub const MTLPixelFormat = enum(c_ulong) { + r8unorm = 10, + rgba8uint = 73, + bgra8unorm = 80, +}; + +/// https://developer.apple.com/documentation/metal/mtlpurgeablestate?language=objc +pub const MTLPurgeableState = enum(c_ulong) { + empty = 4, +}; + +/// https://developer.apple.com/documentation/metal/mtlblendfactor?language=objc +pub const MTLBlendFactor = enum(c_ulong) { + zero = 0, + one = 1, + source_color = 2, + one_minus_source_color = 3, + source_alpha = 4, + one_minus_source_alpha = 5, + dest_color = 6, + one_minus_dest_color = 7, + dest_alpha = 8, + 
one_minus_dest_alpha = 9, + source_alpha_saturated = 10, + blend_color = 11, + one_minus_blend_color = 12, + blend_alpha = 13, + one_minus_blend_alpha = 14, + source_1_color = 15, + one_minus_source_1_color = 16, + source_1_alpha = 17, + one_minus_source_1_alpha = 18, +}; + +/// https://developer.apple.com/documentation/metal/mtlblendoperation?language=objc +pub const MTLBlendOperation = enum(c_ulong) { + add = 0, + subtract = 1, + reverse_subtract = 2, + min = 3, + max = 4, +}; + +/// https://developer.apple.com/documentation/metal/mtlresourceoptions?language=objc +/// (incomplete, we only use this mode so we just hardcode it) +pub const MTLResourceStorageModeShared: c_ulong = @intFromEnum(MTLStorageMode.shared) << 4; + +pub const MTLClearColor = extern struct { + red: f64, + green: f64, + blue: f64, + alpha: f64, +}; + +pub const MTLViewport = extern struct { + x: f64, + y: f64, + width: f64, + height: f64, + znear: f64, + zfar: f64, +}; + +pub const MTLRegion = extern struct { + origin: MTLOrigin, + size: MTLSize, +}; + +pub const MTLOrigin = extern struct { + x: c_ulong, + y: c_ulong, + z: c_ulong, +}; + +pub const MTLSize = extern struct { + width: c_ulong, + height: c_ulong, + depth: c_ulong, +}; + +pub extern "c" fn MTLCreateSystemDefaultDevice() ?*anyopaque; diff --git a/src/renderer/metal/buffer.zig b/src/renderer/metal/buffer.zig new file mode 100644 index 000000000..eb5c1d193 --- /dev/null +++ b/src/renderer/metal/buffer.zig @@ -0,0 +1,91 @@ +const std = @import("std"); +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const objc = @import("objc"); + +const mtl = @import("api.zig"); + +const log = std.log.scoped(.metal); + +/// Metal data storage for a certain set of equal types. This is usually +/// used for vertex buffers, etc. This helpful wrapper makes it easy to +/// prealloc, shrink, grow, sync, buffers with Metal. 
+pub fn Buffer(comptime T: type) type { + return struct { + const Self = @This(); + + buffer: objc.Object, // MTLBuffer + + /// Initialize a buffer with the given length pre-allocated. + pub fn init(device: objc.Object, len: usize) !Self { + const buffer = device.msgSend( + objc.Object, + objc.sel("newBufferWithLength:options:"), + .{ + @as(c_ulong, @intCast(len * @sizeOf(T))), + mtl.MTLResourceStorageModeShared, + }, + ); + + return .{ .buffer = buffer }; + } + + /// Init the buffer filled with the given data. + pub fn initFill(device: objc.Object, data: []const T) !Self { + const buffer = device.msgSend( + objc.Object, + objc.sel("newBufferWithBytes:length:options:"), + .{ + @as(*const anyopaque, @ptrCast(data.ptr)), + @as(c_ulong, @intCast(data.len * @sizeOf(T))), + mtl.MTLResourceStorageModeShared, + }, + ); + + return .{ .buffer = buffer }; + } + + pub fn deinit(self: *Self) void { + self.buffer.msgSend(void, objc.sel("release"), .{}); + } + + /// Sync new contents to the buffer. + pub fn sync(self: *Self, device: objc.Object, data: []const T) !void { + // If we need more bytes than our buffer has, we need to reallocate. + const req_bytes = data.len * @sizeOf(T); + const avail_bytes = self.buffer.getProperty(c_ulong, "length"); + if (req_bytes > avail_bytes) { + // Deallocate previous buffer + self.buffer.msgSend(void, objc.sel("release"), .{}); + + // Allocate a new buffer with enough to hold double what we require. + const size = req_bytes * 2; + self.buffer = device.msgSend( + objc.Object, + objc.sel("newBufferWithLength:options:"), + .{ + @as(c_ulong, @intCast(size * @sizeOf(T))), + mtl.MTLResourceStorageModeShared, + }, + ); + } + + // We can fit within the buffer so we can just replace bytes. 
+ const dst = dst: { + const ptr = self.buffer.msgSend(?[*]u8, objc.sel("contents"), .{}) orelse { + log.warn("buffer contents ptr is null", .{}); + return error.MetalFailed; + }; + + break :dst ptr[0..req_bytes]; + }; + + const src = src: { + const ptr = @as([*]const u8, @ptrCast(data.ptr)); + break :src ptr[0..req_bytes]; + }; + + @memcpy(dst, src); + } + }; +} diff --git a/src/renderer/metal/image.zig b/src/renderer/metal/image.zig new file mode 100644 index 000000000..88de3d27a --- /dev/null +++ b/src/renderer/metal/image.zig @@ -0,0 +1,241 @@ +const std = @import("std"); +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const objc = @import("objc"); + +const mtl = @import("api.zig"); + +/// Represents a single image placement on the grid. A placement is a +/// request to render an instance of an image. +pub const Placement = struct { + /// The image being rendered. This MUST be in the image map. + image_id: u32, + + /// The grid x/y where this placement is located. + x: u32, + y: u32, + z: i32, + + /// The width/height of the placed image. + width: u32, + height: u32, + + /// The offset in pixels from the top left of the cell. This is + /// clamped to the size of a cell. + cell_offset_x: u32, + cell_offset_y: u32, + + /// The source rectangle of the placement. + source_x: u32, + source_y: u32, + source_width: u32, + source_height: u32, +}; + +/// The map used for storing images. +pub const ImageMap = std.AutoHashMapUnmanaged(u32, Image); + +/// The state for a single image that is to be rendered. The image can be +/// pending upload or ready to use with a texture. +pub const Image = union(enum) { + /// The image is pending upload to the GPU. The different keys are + /// different formats since some formats aren't accepted by the GPU + /// and require conversion. + /// + /// This data is owned by this union so it must be freed once the + /// image is uploaded. 
+ pending_rgb: Pending, + pending_rgba: Pending, + + /// The image is uploaded and ready to be used. + ready: objc.Object, // MTLTexture + + /// The image is uploaded but is scheduled to be unloaded. + unload_pending: []u8, + unload_ready: objc.Object, // MTLTexture + + /// Pending image data that needs to be uploaded to the GPU. + pub const Pending = struct { + height: u32, + width: u32, + + /// Data is always expected to be (width * height * depth). Depth + /// is based on the union key. + data: [*]u8, + + pub fn dataSlice(self: Pending, d: u32) []u8 { + return self.data[0..self.len(d)]; + } + + pub fn len(self: Pending, d: u32) u32 { + return self.width * self.height * d; + } + }; + + pub fn deinit(self: Image, alloc: Allocator) void { + switch (self) { + .pending_rgb => |p| alloc.free(p.dataSlice(3)), + .pending_rgba => |p| alloc.free(p.dataSlice(4)), + .unload_pending => |data| alloc.free(data), + + .ready, + .unload_ready, + => |obj| obj.msgSend(void, objc.sel("release"), .{}), + } + } + + /// Mark this image for unload whatever state it is in. + pub fn markForUnload(self: *Image) void { + self.* = switch (self.*) { + .unload_pending, + .unload_ready, + => return, + + .ready => |obj| .{ .unload_ready = obj }, + .pending_rgb => |p| .{ .unload_pending = p.dataSlice(3) }, + .pending_rgba => |p| .{ .unload_pending = p.dataSlice(4) }, + }; + } + + /// Returns true if this image is pending upload. + pub fn isPending(self: Image) bool { + return self.pending() != null; + } + + /// Returns true if this image is pending an unload. + pub fn isUnloading(self: Image) bool { + return switch (self) { + .unload_pending, + .unload_ready, + => true, + + .ready, + .pending_rgb, + .pending_rgba, + => false, + }; + } + + /// Converts the image data to a format that can be uploaded to the GPU. + /// If the data is already in a format that can be uploaded, this is a + /// no-op. 
+ pub fn convert(self: *Image, alloc: Allocator) !void { + switch (self.*) { + .ready, + .unload_pending, + .unload_ready, + => unreachable, // invalid + + .pending_rgba => {}, // ready + + // RGB needs to be converted to RGBA because Metal textures + // don't support RGB. + .pending_rgb => |*p| { + // Note: this is the slowest possible way to do this... + const data = p.dataSlice(3); + const pixels = data.len / 3; + var rgba = try alloc.alloc(u8, pixels * 4); + errdefer alloc.free(rgba); + var i: usize = 0; + while (i < pixels) : (i += 1) { + const data_i = i * 3; + const rgba_i = i * 4; + rgba[rgba_i] = data[data_i]; + rgba[rgba_i + 1] = data[data_i + 1]; + rgba[rgba_i + 2] = data[data_i + 2]; + rgba[rgba_i + 3] = 255; + } + + alloc.free(data); + p.data = rgba.ptr; + self.* = .{ .pending_rgba = p.* }; + }, + } + } + + /// Upload the pending image to the GPU and change the state of this + /// image to ready. + pub fn upload( + self: *Image, + alloc: Allocator, + device: objc.Object, + ) !void { + // Convert our data if we have to + try self.convert(alloc); + + // Get our pending info + const p = self.pending().?; + + // Create our texture + const texture = try initTexture(p, device); + errdefer texture.msgSend(void, objc.sel("release"), .{}); + + // Upload our data + const d = self.depth(); + texture.msgSend( + void, + objc.sel("replaceRegion:mipmapLevel:withBytes:bytesPerRow:"), + .{ + mtl.MTLRegion{ + .origin = .{ .x = 0, .y = 0, .z = 0 }, + .size = .{ + .width = @intCast(p.width), + .height = @intCast(p.height), + .depth = 1, + }, + }, + @as(c_ulong, 0), + @as(*const anyopaque, p.data), + @as(c_ulong, d * p.width), + }, + ); + + // Uploaded. We can now clear our data and change our state. 
+ self.deinit(alloc); + self.* = .{ .ready = texture }; + } + + /// Our pixel depth + fn depth(self: Image) u32 { + return switch (self) { + .pending_rgb => 3, + .pending_rgba => 4, + else => unreachable, + }; + } + + /// Returns true if this image is in a pending state and requires upload. + fn pending(self: Image) ?Pending { + return switch (self) { + .pending_rgb, + .pending_rgba, + => |p| p, + + else => null, + }; + } + + fn initTexture(p: Pending, device: objc.Object) !objc.Object { + // Create our descriptor + const desc = init: { + const Class = objc.Class.getClass("MTLTextureDescriptor").?; + const id_alloc = Class.msgSend(objc.Object, objc.sel("alloc"), .{}); + const id_init = id_alloc.msgSend(objc.Object, objc.sel("init"), .{}); + break :init id_init; + }; + + // Set our properties + desc.setProperty("pixelFormat", @intFromEnum(mtl.MTLPixelFormat.rgba8uint)); + desc.setProperty("width", @as(c_ulong, @intCast(p.width))); + desc.setProperty("height", @as(c_ulong, @intCast(p.height))); + + // Initialize + const id = device.msgSend( + ?*anyopaque, + objc.sel("newTextureWithDescriptor:"), + .{desc}, + ) orelse return error.MetalFailed; + + return objc.Object.fromId(id); + } +}; diff --git a/src/renderer/metal/shaders.zig b/src/renderer/metal/shaders.zig new file mode 100644 index 000000000..9001ede8f --- /dev/null +++ b/src/renderer/metal/shaders.zig @@ -0,0 +1,465 @@ +const std = @import("std"); +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const macos = @import("macos"); +const objc = @import("objc"); +const math = @import("../../math.zig"); + +const mtl = @import("api.zig"); + +const log = std.log.scoped(.metal); + +/// This contains the state for the shaders used by the Metal renderer. 
+pub const Shaders = struct { + library: objc.Object, + cell_pipeline: objc.Object, + image_pipeline: objc.Object, + + pub fn init(device: objc.Object) !Shaders { + const library = try initLibrary(device); + errdefer library.msgSend(void, objc.sel("release"), .{}); + + const cell_pipeline = try initCellPipeline(device, library); + errdefer cell_pipeline.msgSend(void, objc.sel("release"), .{}); + + const image_pipeline = try initImagePipeline(device, library); + errdefer image_pipeline.msgSend(void, objc.sel("release"), .{}); + + return .{ + .library = library, + .cell_pipeline = cell_pipeline, + .image_pipeline = image_pipeline, + }; + } + + pub fn deinit(self: *Shaders) void { + self.cell_pipeline.msgSend(void, objc.sel("release"), .{}); + self.image_pipeline.msgSend(void, objc.sel("release"), .{}); + self.library.msgSend(void, objc.sel("release"), .{}); + } +}; + +/// This is a single parameter for the terminal cell shader. +pub const Cell = extern struct { + mode: Mode, + grid_pos: [2]f32, + glyph_pos: [2]u32 = .{ 0, 0 }, + glyph_size: [2]u32 = .{ 0, 0 }, + glyph_offset: [2]i32 = .{ 0, 0 }, + color: [4]u8, + cell_width: u8, + + pub const Mode = enum(u8) { + bg = 1, + fg = 2, + fg_color = 7, + strikethrough = 8, + }; +}; + +/// Single parameter for the image shader. See shader for field details. +pub const Image = extern struct { + grid_pos: [2]f32, + cell_offset: [2]f32, + source_rect: [4]f32, + dest_size: [2]f32, +}; + +/// The uniforms that are passed to the terminal cell shader. +pub const Uniforms = extern struct { + /// The projection matrix for turning world coordinates to normalized. + /// This is calculated based on the size of the screen. + projection_matrix: math.Mat, + + /// Size of a single cell in pixels, unscaled. + cell_size: [2]f32, + + /// Metrics for underline/strikethrough + strikethrough_position: f32, + strikethrough_thickness: f32, +}; + +/// Initialize the MTLLibrary. A MTLLibrary is a collection of shaders. 
+fn initLibrary(device: objc.Object) !objc.Object { + // Hardcoded since this file isn't meant to be reusable. + const data = @embedFile("../shaders/cell.metal"); + const source = try macos.foundation.String.createWithBytes( + data, + .utf8, + false, + ); + defer source.release(); + + var err: ?*anyopaque = null; + const library = device.msgSend( + objc.Object, + objc.sel("newLibraryWithSource:options:error:"), + .{ + source, + @as(?*anyopaque, null), + &err, + }, + ); + try checkError(err); + + return library; +} + +/// Initialize the cell render pipeline for our shader library. +fn initCellPipeline(device: objc.Object, library: objc.Object) !objc.Object { + // Get our vertex and fragment functions + const func_vert = func_vert: { + const str = try macos.foundation.String.createWithBytes( + "uber_vertex", + .utf8, + false, + ); + defer str.release(); + + const ptr = library.msgSend(?*anyopaque, objc.sel("newFunctionWithName:"), .{str}); + break :func_vert objc.Object.fromId(ptr.?); + }; + const func_frag = func_frag: { + const str = try macos.foundation.String.createWithBytes( + "uber_fragment", + .utf8, + false, + ); + defer str.release(); + + const ptr = library.msgSend(?*anyopaque, objc.sel("newFunctionWithName:"), .{str}); + break :func_frag objc.Object.fromId(ptr.?); + }; + + // Create the vertex descriptor. The vertex descriptor describes the + // data layout of the vertex inputs. We use indexed (or "instanced") + // rendering, so this makes it so that each instance gets a single + // Cell as input. 
+ const vertex_desc = vertex_desc: { + const desc = init: { + const Class = objc.Class.getClass("MTLVertexDescriptor").?; + const id_alloc = Class.msgSend(objc.Object, objc.sel("alloc"), .{}); + const id_init = id_alloc.msgSend(objc.Object, objc.sel("init"), .{}); + break :init id_init; + }; + + // Our attributes are the fields of the input + const attrs = objc.Object.fromId(desc.getProperty(?*anyopaque, "attributes")); + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 0)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.uchar)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Cell, "mode"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 1)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.float2)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Cell, "grid_pos"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 2)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.uint2)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Cell, "glyph_pos"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 3)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.uint2)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Cell, "glyph_size"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 4)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.int2)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Cell, "glyph_offset"))); + attr.setProperty("bufferIndex", 
@as(c_ulong, 0)); + } + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 5)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.uchar4)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Cell, "color"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 6)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.uchar)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Cell, "cell_width"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + + // The layout describes how and when we fetch the next vertex input. + const layouts = objc.Object.fromId(desc.getProperty(?*anyopaque, "layouts")); + { + const layout = layouts.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 0)}, + ); + + // Access each Cell per instance, not per vertex. + layout.setProperty("stepFunction", @intFromEnum(mtl.MTLVertexStepFunction.per_instance)); + layout.setProperty("stride", @as(c_ulong, @sizeOf(Cell))); + } + + break :vertex_desc desc; + }; + + // Create our descriptor + const desc = init: { + const Class = objc.Class.getClass("MTLRenderPipelineDescriptor").?; + const id_alloc = Class.msgSend(objc.Object, objc.sel("alloc"), .{}); + const id_init = id_alloc.msgSend(objc.Object, objc.sel("init"), .{}); + break :init id_init; + }; + + // Set our properties + desc.setProperty("vertexFunction", func_vert); + desc.setProperty("fragmentFunction", func_frag); + desc.setProperty("vertexDescriptor", vertex_desc); + + // Set our color attachment + const attachments = objc.Object.fromId(desc.getProperty(?*anyopaque, "colorAttachments")); + { + const attachment = attachments.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 0)}, + ); + + // Value is MTLPixelFormatBGRA8Unorm + attachment.setProperty("pixelFormat", @as(c_ulong, 
80)); + + // Blending. This is required so that our text we render on top + // of our drawable properly blends into the bg. + attachment.setProperty("blendingEnabled", true); + attachment.setProperty("rgbBlendOperation", @intFromEnum(mtl.MTLBlendOperation.add)); + attachment.setProperty("alphaBlendOperation", @intFromEnum(mtl.MTLBlendOperation.add)); + attachment.setProperty("sourceRGBBlendFactor", @intFromEnum(mtl.MTLBlendFactor.one)); + attachment.setProperty("sourceAlphaBlendFactor", @intFromEnum(mtl.MTLBlendFactor.one)); + attachment.setProperty("destinationRGBBlendFactor", @intFromEnum(mtl.MTLBlendFactor.one_minus_source_alpha)); + attachment.setProperty("destinationAlphaBlendFactor", @intFromEnum(mtl.MTLBlendFactor.one_minus_source_alpha)); + } + + // Make our state + var err: ?*anyopaque = null; + const pipeline_state = device.msgSend( + objc.Object, + objc.sel("newRenderPipelineStateWithDescriptor:error:"), + .{ desc, &err }, + ); + try checkError(err); + + return pipeline_state; +} + +/// Initialize the image render pipeline for our shader library. +fn initImagePipeline(device: objc.Object, library: objc.Object) !objc.Object { + // Get our vertex and fragment functions + const func_vert = func_vert: { + const str = try macos.foundation.String.createWithBytes( + "image_vertex", + .utf8, + false, + ); + defer str.release(); + + const ptr = library.msgSend(?*anyopaque, objc.sel("newFunctionWithName:"), .{str}); + break :func_vert objc.Object.fromId(ptr.?); + }; + const func_frag = func_frag: { + const str = try macos.foundation.String.createWithBytes( + "image_fragment", + .utf8, + false, + ); + defer str.release(); + + const ptr = library.msgSend(?*anyopaque, objc.sel("newFunctionWithName:"), .{str}); + break :func_frag objc.Object.fromId(ptr.?); + }; + + // Create the vertex descriptor. The vertex descriptor describes the + // data layout of the vertex inputs. 
We use indexed (or "instanced") + // rendering, so this makes it so that each instance gets a single + // Image as input. + const vertex_desc = vertex_desc: { + const desc = init: { + const Class = objc.Class.getClass("MTLVertexDescriptor").?; + const id_alloc = Class.msgSend(objc.Object, objc.sel("alloc"), .{}); + const id_init = id_alloc.msgSend(objc.Object, objc.sel("init"), .{}); + break :init id_init; + }; + + // Our attributes are the fields of the input + const attrs = objc.Object.fromId(desc.getProperty(?*anyopaque, "attributes")); + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 1)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.float2)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Image, "grid_pos"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 2)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.float2)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Image, "cell_offset"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 3)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.float4)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Image, "source_rect"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + { + const attr = attrs.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 4)}, + ); + + attr.setProperty("format", @intFromEnum(mtl.MTLVertexFormat.float2)); + attr.setProperty("offset", @as(c_ulong, @offsetOf(Image, "dest_size"))); + attr.setProperty("bufferIndex", @as(c_ulong, 0)); + } + + // The layout describes how and when we fetch the next vertex input. 
+ const layouts = objc.Object.fromId(desc.getProperty(?*anyopaque, "layouts")); + { + const layout = layouts.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 0)}, + ); + + // Access each Image per instance, not per vertex. + layout.setProperty("stepFunction", @intFromEnum(mtl.MTLVertexStepFunction.per_instance)); + layout.setProperty("stride", @as(c_ulong, @sizeOf(Image))); + } + + break :vertex_desc desc; + }; + + // Create our descriptor + const desc = init: { + const Class = objc.Class.getClass("MTLRenderPipelineDescriptor").?; + const id_alloc = Class.msgSend(objc.Object, objc.sel("alloc"), .{}); + const id_init = id_alloc.msgSend(objc.Object, objc.sel("init"), .{}); + break :init id_init; + }; + + // Set our properties + desc.setProperty("vertexFunction", func_vert); + desc.setProperty("fragmentFunction", func_frag); + desc.setProperty("vertexDescriptor", vertex_desc); + + // Set our color attachment + const attachments = objc.Object.fromId(desc.getProperty(?*anyopaque, "colorAttachments")); + { + const attachment = attachments.msgSend( + objc.Object, + objc.sel("objectAtIndexedSubscript:"), + .{@as(c_ulong, 0)}, + ); + + // Value is MTLPixelFormatBGRA8Unorm + attachment.setProperty("pixelFormat", @as(c_ulong, 80)); + + // Blending. This is required so that our text we render on top + // of our drawable properly blends into the bg. 
+ attachment.setProperty("blendingEnabled", true); + attachment.setProperty("rgbBlendOperation", @intFromEnum(mtl.MTLBlendOperation.add)); + attachment.setProperty("alphaBlendOperation", @intFromEnum(mtl.MTLBlendOperation.add)); + attachment.setProperty("sourceRGBBlendFactor", @intFromEnum(mtl.MTLBlendFactor.one)); + attachment.setProperty("sourceAlphaBlendFactor", @intFromEnum(mtl.MTLBlendFactor.one)); + attachment.setProperty("destinationRGBBlendFactor", @intFromEnum(mtl.MTLBlendFactor.one_minus_source_alpha)); + attachment.setProperty("destinationAlphaBlendFactor", @intFromEnum(mtl.MTLBlendFactor.one_minus_source_alpha)); + } + + // Make our state + var err: ?*anyopaque = null; + const pipeline_state = device.msgSend( + objc.Object, + objc.sel("newRenderPipelineStateWithDescriptor:error:"), + .{ desc, &err }, + ); + try checkError(err); + + return pipeline_state; +} + +fn checkError(err_: ?*anyopaque) !void { + const nserr = objc.Object.fromId(err_ orelse return); + const str = @as( + *macos.foundation.String, + @ptrCast(nserr.getProperty(?*anyopaque, "localizedDescription").?), + ); + + log.err("metal error={s}", .{str.cstringPtr(.ascii).?}); + return error.MetalFailed; +} + +// Intel macOS 13 doesn't like it when any field in a vertex buffer is not +// aligned on the alignment of the struct. I don't understand it, I think +// this must be some macOS 13 Metal GPU driver bug because it doesn't matter +// on macOS 12 or Apple Silicon macOS 13. +// +// To be safe, we put this test in here. 
+test "Cell offsets" { + const testing = std.testing; + const alignment = @alignOf(Cell); + inline for (@typeInfo(Cell).Struct.fields) |field| { + const offset = @offsetOf(Cell, field.name); + try testing.expectEqual(0, @mod(offset, alignment)); + } +} diff --git a/src/renderer/shaders/cell.metal b/src/renderer/shaders/cell.metal index 4e76f264d..bfc68aaa4 100644 --- a/src/renderer/shaders/cell.metal +++ b/src/renderer/shaders/cell.metal @@ -49,6 +49,11 @@ struct VertexOut { float2 tex_coord; }; +//------------------------------------------------------------------- +// Terminal Grid Cell Shader +//------------------------------------------------------------------- +#pragma mark - Terminal Grid Cell Shader + vertex VertexOut uber_vertex( unsigned int vid [[ vertex_id ]], VertexIn input [[ stage_in ]], @@ -179,3 +184,83 @@ fragment float4 uber_fragment( return in.color; } } + +//------------------------------------------------------------------- +// Image Shader +//------------------------------------------------------------------- +#pragma mark - Image Shader + +struct ImageVertexIn { + // The grid coordinates (x, y) where x < columns and y < rows where + // the image will be rendered. It will be rendered from the top left. + float2 grid_pos [[ attribute(1) ]]; + + // Offset in pixels from the top-left of the cell to make the top-left + // corner of the image. + float2 cell_offset [[ attribute(2) ]]; + + // The source rectangle of the texture to sample from. + float4 source_rect [[ attribute(3) ]]; + + // The final width/height of the image in pixels. 
+ float2 dest_size [[ attribute(4) ]]; +}; + +struct ImageVertexOut { + float4 position [[ position ]]; + float2 tex_coord; +}; + +vertex ImageVertexOut image_vertex( + unsigned int vid [[ vertex_id ]], + ImageVertexIn input [[ stage_in ]], + texture2d image [[ texture(0) ]], + constant Uniforms &uniforms [[ buffer(1) ]] +) { + // The size of the image in pixels + float2 image_size = float2(image.get_width(), image.get_height()); + + // Turn the image position into a vertex point depending on the + // vertex ID. Since we use instanced drawing, we have 4 vertices + // for each corner of the cell. We can use vertex ID to determine + // which one we're looking at. Using this, we can use 1 or 0 to keep + // or discard the value for the vertex. + // + // 0 = top-right + // 1 = bot-right + // 2 = bot-left + // 3 = top-left + float2 position; + position.x = (vid == 0 || vid == 1) ? 1.0f : 0.0f; + position.y = (vid == 0 || vid == 3) ? 0.0f : 1.0f; + + // The texture coordinates start at our source x/y, then add the width/height + // as enabled by our instance id, then normalize to [0, 1] + float2 tex_coord = input.source_rect.xy; + tex_coord += input.source_rect.zw * position; + tex_coord /= image_size; + + ImageVertexOut out; + + // The position of our image starts at the top-left of the grid cell and + // adds the source rect width/height components. + float2 image_pos = (uniforms.cell_size * input.grid_pos) + input.cell_offset; + image_pos += input.dest_size * position; + + out.position = uniforms.projection_matrix * float4(image_pos.x, image_pos.y, 0.0f, 1.0f); + out.tex_coord = tex_coord; + return out; +} + +fragment float4 image_fragment( + ImageVertexOut in [[ stage_in ]], + texture2d image [[ texture(0) ]] +) { + constexpr sampler textureSampler(address::clamp_to_edge, filter::linear); + + // Ehhhhh our texture is in RGBA8Uint but our color attachment is + // BGRA8Unorm. So we need to convert it. We should really be converting + // our texture to BGRA8Unorm. 
+ uint4 rgba = image.sample(textureSampler, in.tex_coord); + return float4(rgba) / 255.0f; +} diff --git a/pkg/stb_image_resize/main.zig b/src/stb/main.zig similarity index 61% rename from pkg/stb_image_resize/main.zig rename to src/stb/main.zig index 3981e9782..b047104af 100644 --- a/pkg/stb_image_resize/main.zig +++ b/src/stb/main.zig @@ -1,7 +1,4 @@ pub usingnamespace @cImport({ + @cInclude("stb_image.h"); @cInclude("stb_image_resize.h"); }); - -test { - // Needed to not crash on test -} diff --git a/src/stb/stb.c b/src/stb/stb.c new file mode 100644 index 000000000..68c4c2258 --- /dev/null +++ b/src/stb/stb.c @@ -0,0 +1,13 @@ +// For STBI we only need PNG because the only use case we have right now +// is the Kitty Graphics protocol which only supports PNG as a format +// besides raw RGB/RGBA buffers. +#define STBI_ONLY_PNG + +// We don't want to support super large images. +#define STBI_MAX_DIMENSIONS 131072 + +#define STB_IMAGE_IMPLEMENTATION +#include + +#define STB_IMAGE_RESIZE_IMPLEMENTATION +#include diff --git a/src/stb/stb_image.h b/src/stb/stb_image.h new file mode 100644 index 000000000..5e807a0a6 --- /dev/null +++ b/src/stb/stb_image.h @@ -0,0 +1,7987 @@ +/* stb_image - v2.28 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. 
+ And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. 
+ +RECENT REVISION HISTORY: + + 2.28 (2023-01-29) many error fixes, security errors, just tons of stuff + 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes + 2.26 (2020-07-13) many minor fixes + 2.25 (2020-02-02) fix warnings + 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically + 2.23 (2019-08-11) fix clang static analysis warning + 2.22 (2019-03-04) gif fixes, fix warnings + 2.21 (2019-02-25) fix typo in comment + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. 
+ + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine Simon Breuss (16-bit PNM) + John-Mark Allen + Carmelo J Fdez-Aguera + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski + Phil Jordan Dave Moore Roy Eltham + Hayaki Saito Nathan Reed Won Chun + Luke Graham Johan Duparc Nick Verigakis the Horde3D community + Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Eugene Golushkov Laurent Gomila Cort Stratton github:snagar + Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex + Cass Everitt Ryamond Barbiero github:grim210 + Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw + Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus + Josh Tobin Neil Bickford Matthew Gregan github:poppolopoppo + Julian Raschke Gregory Mullen Christian Floisand github:darealshinji + Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 + Brad Weinberger Matvey Cherevko github:mosra + Luca Sas Alexander Veselov Zack Middleton [reserved] + Ryan C. 
Gordon [reserved] [reserved] + DO NOT ADD YOUR NAME HERE + + Jacko Dirks + + To add your name to the credits, pick a random blank space in the middle and fill it. + 80% of merge conflicts on stb PRs are due to people adding their name at the end + of the credits. +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data); +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. 
if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// To query the width, height and component count of an image without having to +// decode the full file, you can use the stbi_info family of functions: +// +// int x,y,n,ok; +// ok = stbi_info(filename, &x, &y, &n); +// // returns ok=1 and sets x, y, n if image is a supported format, +// // 0 otherwise. +// +// Note that stb_image pervasively uses ints in its public API for sizes, +// including sizes of memory buffers. This is now part of the API and thus +// hard to change without causing breakage. As a result, the various image +// loaders all have certain limits on image size; these differ somewhat +// by format but generally boil down to either just under 2GB or just under +// 1GB. When the decoded image would be larger than this, stb_image decoding +// will fail. +// +// Additionally, stb_image will reject image files that have any of their +// dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, +// which defaults to 2**24 = 16777216 pixels. 
Due to the above memory limit, +// the only way to have an image with such dimensions load correctly +// is for it to have a rather extreme aspect ratio. Either way, the +// assumption here is that such larger images are likely to be malformed +// or malicious. If you do need to load an image with individual dimensions +// larger than that, and it still fits in the overall size limit, you can +// #define STBI_MAX_DIMENSIONS on your own to be something larger. +// +// =========================================================================== +// +// UNICODE: +// +// If compiling for Windows and you wish to use Unicode filenames, compile +// with +// #define STBI_WINDOWS_UTF8 +// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert +// Windows wchar_t filenames to utf8. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy-to-use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// provide more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. 
Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image supports loading HDR images in general, and currently the Radiance +// .HDR file format specifically. You can still load any file through the existing +// interface; if you attempt to load an HDR file, it will be automatically remapped +// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). 
+// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// We optionally support converting iPhone-formatted PNGs (which store +// premultiplied BGRA) back to RGB, even though they're internally encoded +// differently. To enable this conversion, call +// stbi_convert_iphone_png_to_rgb(1). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. 
+// +// STBI_NO_JPEG +// STBI_NO_PNG +// STBI_NO_BMP +// STBI_NO_PSD +// STBI_NO_TGA +// STBI_NO_GIF +// STBI_NO_HDR +// STBI_NO_PIC +// STBI_NO_PNM (.ppm and .pgm) +// +// - You can request *only* certain decoders and suppress all other ones +// (this will be more forward-compatible, as addition of new decoders +// doesn't require you to disable them explicitly): +// +// STBI_ONLY_JPEG +// STBI_ONLY_PNG +// STBI_ONLY_BMP +// STBI_ONLY_PSD +// STBI_ONLY_TGA +// STBI_ONLY_GIF +// STBI_ONLY_HDR +// STBI_ONLY_PIC +// STBI_ONLY_PNM (.ppm and .pgm) +// +// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB +// +// - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater +// than that size (in either width or height) without further processing. +// This is to let programs in the wild set an upper bound to prevent +// denial-of-service attacks on untrusted data, as one could generate a +// valid image of gigantic dimensions and force stb_image to allocate a +// huge block of memory and spend disproportionate time decoding it. By +// default this is set to (1 << 24), which is 16777216, but that's still +// very big. + +#ifndef STBI_NO_STDIO +#include +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for desired_channels + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +#include +typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef STBIDEF +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// PRIMARY API - works on images of any type +// + +// +// load image by filename, open file, or memory buffer +// + +typedef struct +{ + int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. 
return number of bytes actually read + void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof) (void *user); // returns nonzero if we are at end of file/data +} stbi_io_callbacks; + +//////////////////////////////////// +// +// 8-bits-per-channel interface +// + +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +// for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +#endif + +#ifdef STBI_WINDOWS_UTF8 +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); +#endif + +//////////////////////////////////// +// +// 16-bits-per-channel interface +// + +STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + +//////////////////////////////////// +// +// float-per-channel interface +// +#ifndef STBI_NO_LINEAR + STBIDEF 
float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// on most compilers (and ALL modern mainstream compilers) this is threadsafe +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, 
int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// as above, but only applies to images loaded on the thread that calls the function +// this function is only available if your compiler supports thread-local variables; +// calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int 
stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) + #ifndef STBI_ONLY_JPEG + #define STBI_NO_JPEG + #endif + #ifndef STBI_ONLY_PNG + #define STBI_NO_PNG + #endif + #ifndef STBI_ONLY_BMP + #define STBI_NO_BMP + #endif + #ifndef STBI_ONLY_PSD + #define STBI_NO_PSD + #endif + #ifndef STBI_ONLY_TGA + #define STBI_NO_TGA + #endif + #ifndef STBI_ONLY_GIF + #define STBI_NO_GIF + #endif + #ifndef STBI_ONLY_HDR + #define STBI_NO_HDR + #endif + #ifndef STBI_ONLY_PIC + #define STBI_NO_PIC + #endif + #ifndef STBI_ONLY_PNM + #define STBI_NO_PNM + #endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include +#include // ptrdiff_t on osx +#include +#include +#include + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#include // ldexp, pow +#endif + +#ifndef STBI_NO_STDIO +#include +#endif + +#ifndef STBI_ASSERT +#include +#define STBI_ASSERT(x) assert(x) +#endif + +#ifdef __cplusplus +#define STBI_EXTERN extern "C" +#else +#define STBI_EXTERN extern +#endif + + +#ifndef _MSC_VER + #ifdef __cplusplus + #define stbi_inline inline + #else + #define stbi_inline + #endif +#else + #define stbi_inline __forceinline +#endif + +#ifndef STBI_NO_THREAD_LOCALS + #if defined(__cplusplus) && __cplusplus >= 201103L + #define STBI_THREAD_LOCAL thread_local + #elif defined(__GNUC__) && __GNUC__ < 5 + #define STBI_THREAD_LOCAL __thread + #elif defined(_MSC_VER) + #define STBI_THREAD_LOCAL __declspec(thread) 
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) + #define STBI_THREAD_LOCAL _Thread_local + #endif + + #ifndef STBI_THREAD_LOCAL + #if defined(__GNUC__) + #define STBI_THREAD_LOCAL __thread + #endif + #endif +#endif + +#if defined(_MSC_VER) || defined(__SYMBIAN32__) +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL + #define stbi_lrot(x,y) _lrotl(x,y) +#else + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (-(y) & 31))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." 
+#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. 
+#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info,1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax,1 + cpuid + mov res,edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#endif + +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + // If we're even attempting to compile this on GCC/Clang, that means + // -msse2 is on, which means the compiler is allowed to use SSE2 + // instructions at will, and so are we. 
+ return 1; +} +#endif + +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +#ifdef _MSC_VER +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name +#else +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +#ifndef STBI_MAX_DIMENSIONS +#define STBI_MAX_DIMENSIONS (1 << 24) +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + int callback_already_read; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) 
+{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + int ch; + fseek((FILE*) user, n, SEEK_CUR); + ch = fgetc((FILE*) user); /* have to read a byte to reset feof()'s flag */ + if (ch != EOF) { + ungetc(ch, (FILE *) user); /* push byte back onto stream if valid. */ + } +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user) || ferror((FILE *) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, 
int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__pnm_is16(stbi__context *s); +#endif + +static +#ifdef STBI_THREAD_LOCAL +STBI_THREAD_LOCAL +#endif +const char 
*stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +#ifndef STBI_NO_FAILURE_STRINGS +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} +#endif + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. 
+static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} +#endif + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} +#endif + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// returns 1 if the sum of two signed ints is valid (between -2^31 
and 2^31-1 inclusive), 0 on overflow. +static int stbi__addints_valid(int a, int b) +{ + if ((a >= 0) != (b >= 0)) return 1; // a and b have different signs, so no overflow + if (a < 0 && b < 0) return a >= INT_MIN - b; // same as a + b >= INT_MIN; INT_MIN - b cannot overflow since b < 0. + return a <= INT_MAX - b; +} + +// returns 1 if the product of two signed shorts is valid, 0 on overflow. +static int stbi__mul2shorts_valid(short a, short b) +{ + if (b == 0 || b == -1) return 1; // multiplication by 0 is always 0; check for -1 so SHRT_MIN/b doesn't overflow + if ((a >= 0) == (b >= 0)) return a <= SHRT_MAX/b; // product is positive, so similar to mul2sizes_valid + if (b < 0) return a <= SHRT_MIN / b; // same as a * b >= SHRT_MIN + return a >= SHRT_MIN / b; +} + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load_global = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global +#else +static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; + +STBIDEF 
void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; +} + +#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ + ? stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) +#endif // STBI_THREAD_LOCAL + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + // test the formats with a very explicit header first (at least a FOURCC + // or distinctive magic number first) + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #else + STBI_NOTUSED(bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + + // then the formats that can end up attempting to load with just 1 or 2 + // bytes matching expectations; these are prone to false positives, so + // try them later + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return 
stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? 
bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 8) { + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 16) { + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = 
_wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us 
*stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? 
req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original); + if (n == 0) { + // at end of 
file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) +// nothing +#else +static void stbi__skip(stbi__context *s, int n) +{ + if (n == 0) return; // already there! 
+ if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM) +// nothing +#else +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} +#endif + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + z += (stbi__uint32)stbi__get16le(s) << 16; + return z; +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && 
defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + 
STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp 
components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define 
stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // 
weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) { + for (j=0; j < count[i]; ++j) { + h->size[k++] = (stbi_uc) (i+1); + if(k >= 257) return stbi__err("bad size list","Corrupt JPEG"); + } + } + h->size[k] = 0; + + // compute 
actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + if(c < 0 || c >= 256) // symbol id out of bounds! 
 + return -1; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<<n)+1 +static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767}; + +// combined JPEG 'receive' and JPEG 'extend', since baseline +// always extends everything it receives. +stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) +{ + int sgn; + unsigned int k; + + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing + + sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & (sgn - 1)); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + if (j->code_bits < 1) return 0; // ran out of bits from stream, return 0s intead of continuing + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? 
+static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta","Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, dequant[0])) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = 
rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + diff = t ? stbi__extend_receive(j, t) : 0; + + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta", "Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, 1 << j->succ_low)) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short) (dc * (1 << j->succ_low)); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += 
(r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * (1 << shift)); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * (1 << shift)); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & 
bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 
4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". 
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. + __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + 
#define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. 
+ __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, 
vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + 
dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. 
+ dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? 
j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int 
i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = 
stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + if(n > 256) return stbi__err("bad DHT header","Corrupt JPEG"); // Loop over i < n would write past end of values! + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = 
{'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if 
(z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt 
JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios + // and I've never seen a non-corrupted JPEG file actually use them + for (i=0; i < s->img_n; ++i) { + if (h_max % z->img_comp[i].h != 0) return stbi__err("bad H","Corrupt JPEG"); + if (v_max % z->img_comp[i].v != 0) return stbi__err("bad V","Corrupt JPEG"); + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. 
SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +static int stbi__skip_jpeg_junk_at_end(stbi__jpeg *j) +{ + // some JPEGs have junk at end, skip over it but if we find what looks + // like a valid marker, resume there + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + while (x == 255) { // might be a marker + if (stbi__at_eof(j->s)) return STBI__MARKER_none; + x = stbi__get8(j->s); + if (x != 0x00 && x != 0xff) { + // not a stuffed zero or lead-in to another marker, looks + // like an actual marker, return it + return x; + } + // stuffed zero has x=0 now which ends the loop, meaning we go + // back to regular scan loop. + // repeated 0xff keeps trying to read the next byte of the marker. 
+ } + } + return STBI__MARKER_none; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + j->marker = stbi__skip_jpeg_junk_at_end(j); + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + m = stbi__get_marker(j); + if (STBI__RESTART(m)) + m = stbi__get_marker(j); + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + m = stbi__get_marker(j); + } else { + if (!stbi__process_marker(j, m)) return 1; + m = stbi__get_marker(j); + } + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, 
stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. 
+ for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. 
+ __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 
0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = 
_mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = 
vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int 
ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // nothing to do if no components requested; check this now to avoid + // accessing uninitialized coutput[0] later + if (decode_n <= 0) { stbi__cleanup_jpeg(z); return NULL; } + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL }; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && 
r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + 
out += n; + } + } else { // YCbCr + alpha? Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 
3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__errpuc("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + if (!j) return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) +#define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[STBI__ZNSYMS]; + stbi__uint16 value[STBI__ZNSYMS]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 
11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static int stbi__zeof(stbi__zbuf *z) +{ + return (z->zbuffer >= z->zbuffer_end); +} + +stbi_inline static stbi_uc 
stbi__zget8(stbi__zbuf *z) +{ + return stbi__zeof(z) ? 0 : *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + if (z->code_buffer >= (1U << z->num_bits)) { + z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ + return; + } + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s >= 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + if (b >= STBI__ZNSYMS) return -1; // some data was corrupt somewhere! + if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) { + if (stbi__zeof(a)) { + return -1; /* report error for unexpected end of data. 
*/ + } + stbi__fill_bits(a); + } + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + unsigned int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (unsigned int) (z->zout - z->zout_start); + limit = old_limit = (unsigned) (z->zout_end - z->zout_start); + if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory"); + while (cur + n > limit) { + if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory"); + limit *= 2; + } + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { 
+ a->zout = zout; + return 1; + } + if (z >= 286) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, length codes 286 and 287 must not appear in compressed data + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0 || z >= 30) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, distance codes 30 and 31 must not appear in compressed data + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. + stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n 
== 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) { + c = stbi__zreceive(a,3)+3; + } else if (c == 18) { + c = stbi__zreceive(a,7)+11; + } else { + return stbi__err("bad codelengths", "Corrupt PNG"); + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary 
not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , STBI__ZNSYMS)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if 
(!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) 
return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, 
int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. 
+ if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG"); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
+ case STBI__F_none: memcpy(cur, raw, nk); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; + } + #undef STBI__CASE + raw += nk; + } else { + STBI_ASSERT(img_n+1 == out_n); + #define STBI__CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ + for (k=0; k < filter_bytes; ++k) + switch (filter) { + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; + } + #undef STBI__CASE + + // the loop above sets the high byte of the pixels' alpha, but for + // 16 bit png files we also need the low byte set. we'll do that here. 
+ if (depth == 16) { + cur = a->out + stride*j; // start at the beginning of the row again + for (i=0; i < x; ++i,cur+=output_bytes) { + cur[filter_bytes+1] = 255; + } + } + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. 
+ stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + if (!final) return stbi__err("outofmem", "Out of memory"); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) 
{ + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load_global = 0; +static int stbi__de_iphone_flag_global = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply; +} + +STBIDEF void 
stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_global = flag_true_if_should_convert; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__unpremultiply_on_load stbi__unpremultiply_on_load_global +#define stbi__de_iphone_flag stbi__de_iphone_flag_global +#else +static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set; +static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set; + +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_set = 1; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_local = flag_true_if_should_convert; + stbi__de_iphone_flag_set = 1; +} + +#define stbi__unpremultiply_on_load (stbi__unpremultiply_on_load_set \ + ? stbi__unpremultiply_on_load_local \ + : stbi__unpremultiply_on_load_global) +#define stbi__de_iphone_flag (stbi__de_iphone_flag_set \ + ? 
stbi__de_iphone_flag_local \ + : stbi__de_iphone_flag_global) +#endif // STBI_THREAD_LOCAL + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]={0}; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); + s->img_y = stbi__get32be(s); + if (s->img_y > STBI_MAX_DIMENSIONS) return 
stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. 
+ s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + } + // even with SCAN_header, have to scan to see if we have a tRNS + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + // non-paletted with tRNS = constant alpha. if header-scanning, we can stop now. 
+ if (scan == STBI__SCAN_header) { ++s->img_n; return 1; } + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { + // header scan definitely stops at first IDAT + if (pal_img_n) + s->img_n = pal_img_n; + return 1; + } + if (c.length > (1u << 30)) return stbi__err("IDAT size limit", "IDAT section larger than 2^30 bytes"); + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, 
!is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if (has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal 
error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth <= 8) + ri->bits_per_channel = 8; + else if (p->depth == 16) + ri->bits_per_channel = 16; + else + return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + 
int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) { n += 16; z >>= 16; } + if (z >= 0x00100) { n += 8; z >>= 8; } + if (z >= 0x00010) { n += 4; z >>= 4; } + if (z >= 0x00004) { n += 2; z >>= 2; } + if (z >= 0x00002) { n += 1;/* >>= 1;*/ } + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. 
+static int stbi__shiftsigned(unsigned int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; + int extra_read; +} stbi__bmp_data; + +static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress) +{ + // BI_BITFIELDS specifies masks explicitly, don't override + if (compress == 3) + return 1; + + if (compress == 0) { + if (info->bpp == 16) { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } else if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + // otherwise, use defaults, which is all-0 + info->mr = info->mg = info->mb = info->ma = 0; + } + return 1; + } + return 0; // error +} + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + info->extra_read = 14; + + if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP"); + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not 
supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + if (compress >= 4) return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes + if (compress == 3 && info->bpp != 16 && info->bpp != 32) return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + stbi__bmp_set_mask_defaults(info, compress); + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->extra_read += 12; + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? 
+ return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + // V4/V5 header + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs + stbi__bmp_set_mask_defaults(info, compress); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - info.extra_read - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - info.extra_read - info.hsz) >> 2; + } + if (psize == 0) { + // accept some number of extra bytes after the header, but if the offset points either to before + // the header ends or implies a large amount of 
extra data, reject the file as malformed + int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original); + int header_limit = 1024; // max we actually read is below 256 bytes currently. + int extra_data_limit = 256*4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size. + if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit) { + return stbi__errpuc("bad header", "Corrupt BMP"); + } + // we established that bytes_read_so_far is positive and sensible. + // the first half of this test rejects offsets that are either too small positives, or + // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn + // ensures the number computed in the second half of the test can't overflow. + if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit) { + return stbi__errpuc("bad offset", "Corrupt BMP"); + } else { + stbi__skip(s, info.offset - bytes_read_so_far); + } + } + + if (info.bpp == 24 && ma == 0xff000000) + s->img_n = 3; + else + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? 
stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - info.extra_read - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i]; p1[i] = p2[i]; p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, 
tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB 
+static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
+ // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + STBI_NOTUSED(tga_x_origin); // @TODO + STBI_NOTUSED(tga_y_origin); // @TODO + + if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // do a tiny bit of precessing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? + if ( tga_indexed) + { + if (tga_palette_len == 0) { /* you have to have at least one entry! 
*/ + STBI_FREE(tga_data); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + + // any data to skip? (offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? 
stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + STBI_NOTUSED(tga_palette_start); + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). 
+ channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. 
+ //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See 
http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? 
+ + for(y=0; ytype) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;xchannel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; ichannel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;ichannel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;ichannel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + + if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); 
//skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + if (!result) return stbi__errpuc("outofmem", "Out of memory"); + memset(result, 0xff, x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + 
pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!g) return stbi__err("outofmem", "Out of memory"); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = 
&g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? 
+ if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *) stbi__malloc(4 * pcount); + g->background = (stbi_uc *) stbi__malloc(4 * pcount); + g->history = (stbi_uc *) stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return 
stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. + memset(g->out, 0x00, 4 * pcount); + memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) + memset(g->history, 0x00, pcount); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispose of the previous one? + dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. 
+ } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + // if the width of the specified rectangle is 0, that means + // we may not see *any* pixels or the image is malformed; + // to make sure this is caught, move the current y down to + // max_y (which is what out_gif_code checks). + if (w == 0) + g->cur_y = g->max_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (!o) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. + + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays) +{ + STBI_FREE(g->out); + STBI_FREE(g->history); + STBI_FREE(g->background); + + if (out) STBI_FREE(out); + if (delays && *delays) STBI_FREE(*delays); + return stbi__errpuc("outofmem", "Out of memory"); +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, 
int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + int out_size = 0; + int delays_size = 0; + + STBI_NOTUSED(out_size); + STBI_NOTUSED(delays_size); + + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride ); + if (!tmp) + return stbi__load_gif_main_outofmem(&g, out, delays); + else { + out = (stbi_uc*) tmp; + out_size = layers * stride; + } + + if (delays) { + int *new_delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); + if (!new_delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + *delays = new_delays; + delays_size = layers * sizeof(int); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (!out) + return stbi__load_gif_main_outofmem(&g, out, delays); + out_size = layers * stride; + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + if (!*delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + delays_size = layers * sizeof(int); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int 
*x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } else if (g.out) { + // if there was an error and we allocated an image buffer, free it! + STBI_FREE(g.out); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; 
+ // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= 
stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} 
+#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + if (p == NULL) { + stbi__rewind( s ); + return 0; + } + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 4 : 3; + } + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + STBI_NOTUSED(stbi__get32be(s)); + STBI_NOTUSED(stbi__get32be(s)); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if 
(!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); + if (ri->bits_per_channel == 0) + return 0; + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + *x 
= s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) { + STBI_FREE(out); + return stbi__errpuc("bad PNM", "PNM file truncated"); + } + + if (req_comp && req_comp != s->img_n) { + if (ri->bits_per_channel == 16) { + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, s->img_n, req_comp, s->img_x, s->img_y); + } else { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + } + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + if((value > 214748364) || (value == 214748364 && *c > '7')) + return stbi__err("integer parse overflow", "Parsing an integer in the PPM header overflowed a 32-bit int"); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = 
&dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + if(*x == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + if (*y == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + if (maxv > 65535) + return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images"); + else if (maxv > 255) + return 16; + else + return 8; +} + +static int stbi__pnm_is16(stbi__context *s) +{ + if (stbi__pnm_info(s, NULL, NULL, NULL) == 16) + return 1; + return 0; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! 
+ #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_is16(s)) return 1; + #endif + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + 
return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused 
functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 
'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from 
Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/pkg/stb_image_resize/stb_image_resize.h b/src/stb/stb_image_resize.h similarity index 100% rename from pkg/stb_image_resize/stb_image_resize.h rename to src/stb/stb_image_resize.h diff --git a/src/terminal/Parser.zig b/src/terminal/Parser.zig index 3e7a764f8..90aaefcd6 100644 --- a/src/terminal/Parser.zig +++ b/src/terminal/Parser.zig @@ -48,6 +48,7 @@ pub const TransitionAction = enum { csi_dispatch, put, osc_put, + apc_put, }; /// Action is the action that a caller of the parser is expected to @@ -74,6 +75,11 @@ pub const Action = union(enum) { dcs_put: u8, dcs_unhook: void, + /// APC data + apc_start: void, + apc_put: u8, + apc_end: void, + pub const CSI = struct { intermediates: []u8, params: []u16, @@ -247,6 +253,7 @@ pub fn next(self: *Parser, c: u8) [3]?Action { else null, .dcs_passthrough => Action{ .dcs_unhook = {} }, + .sos_pm_apc_string => Action{ .apc_end = {} }, else => null, }, @@ -269,6 +276,7 @@ pub fn next(self: *Parser, c: u8) [3]?Action { .final = c, }, }, + .sos_pm_apc_string => Action{ .apc_start = {} }, .utf8 => utf8: { // When entering the UTF8 state, we need to grab the // last intermediate as our first byte and reset @@ -426,9 +434,8 @@ fn doAction(self: *Parser, action: TransitionAction, c: u8) ?Action { .final = c, }, }, - .put => Action{ - .dcs_put = c, - }, 
+ .put => Action{ .dcs_put = c }, + .apc_put => Action{ .apc_put = c }, }; } diff --git a/src/terminal/Screen.zig b/src/terminal/Screen.zig index 8da118584..0ae944a96 100644 --- a/src/terminal/Screen.zig +++ b/src/terminal/Screen.zig @@ -865,6 +865,9 @@ selection: ?Selection = null, /// The kitty keyboard settings. kitty_keyboard: kitty.KeyFlagStack = .{}, +/// Kitty graphics protocol state. +kitty_images: kitty.graphics.ImageStorage = .{}, + /// Initialize a new screen. pub fn init( alloc: Allocator, @@ -889,6 +892,7 @@ pub fn init( } pub fn deinit(self: *Screen) void { + self.kitty_images.deinit(self.alloc); self.storage.deinit(self.alloc); self.deinitGraphemes(); } @@ -1538,6 +1542,11 @@ pub const Scroll = union(enum) { /// want to do that yet (i.e. are they writing to the end of the screen /// or not). pub fn scroll(self: *Screen, behavior: Scroll) !void { + // No matter what, scrolling marks our image state as dirty since + // it could move placements. If there are no placements or no images + // this is still a very cheap operation. + self.kitty_images.dirty = true; + switch (behavior) { // Setting viewport offset to zero makes row 0 be at self.top // which is the top! @@ -2104,6 +2113,9 @@ pub fn resize(self: *Screen, rows: usize, cols: usize) !void { // No resize necessary if (self.rows == rows) return; + // No matter what we mark our image state as dirty + self.kitty_images.dirty = true; + // If we have the same number of columns, text can't possibly // reflow in any way, so we do the quicker thing and do a resize // without reflow checks. @@ -2111,6 +2123,9 @@ pub fn resize(self: *Screen, rows: usize, cols: usize) !void { return; } + // No matter what we mark our image state as dirty + self.kitty_images.dirty = true; + // If our columns increased, we alloc space for the new column width // and go through each row and reflow if necessary. 
if (cols > self.cols) { diff --git a/src/terminal/Terminal.zig b/src/terminal/Terminal.zig index 7d37eec24..f474c98c8 100644 --- a/src/terminal/Terminal.zig +++ b/src/terminal/Terminal.zig @@ -15,6 +15,7 @@ const ansi = @import("ansi.zig"); const modes = @import("modes.zig"); const charsets = @import("charsets.zig"); const csi = @import("csi.zig"); +const kitty = @import("kitty.zig"); const sgr = @import("sgr.zig"); const Tabstops = @import("Tabstops.zig"); const trace = @import("tracy").trace; @@ -62,6 +63,10 @@ tabstops: Tabstops, rows: usize, cols: usize, +/// The size of the screen in pixels. This is used for pty events and images +width_px: u32 = 0, +height_px: u32 = 0, + /// The current scrolling region. scrolling_region: ScrollingRegion, @@ -188,7 +193,11 @@ pub const AlternateScreenOptions = struct { /// * has its own cursor state (included saved cursor) /// * does not support scrollback /// -pub fn alternateScreen(self: *Terminal, options: AlternateScreenOptions) void { +pub fn alternateScreen( + self: *Terminal, + alloc: Allocator, + options: AlternateScreenOptions, +) void { const tracy = trace(@src()); defer tracy.end(); @@ -215,12 +224,16 @@ pub fn alternateScreen(self: *Terminal, options: AlternateScreenOptions) void { self.screen.selection = null; if (options.clear_on_enter) { - self.eraseDisplay(.complete); + self.eraseDisplay(alloc, .complete); } } /// Switch back to the primary screen (reset alternate screen mode). -pub fn primaryScreen(self: *Terminal, options: AlternateScreenOptions) void { +pub fn primaryScreen( + self: *Terminal, + alloc: Allocator, + options: AlternateScreenOptions, +) void { const tracy = trace(@src()); defer tracy.end(); @@ -230,7 +243,7 @@ pub fn primaryScreen(self: *Terminal, options: AlternateScreenOptions) void { // TODO(mitchellh): what happens if we enter alternate screen multiple times? 
if (self.active_screen == .primary) return; - if (options.clear_on_exit) self.eraseDisplay(.complete); + if (options.clear_on_exit) self.eraseDisplay(alloc, .complete); // Switch the screens const old = self.screen; @@ -277,7 +290,7 @@ pub fn deccolm(self: *Terminal, alloc: Allocator, mode: DeccolmMode) !void { try self.resize(alloc, 0, self.rows); // TODO: do not clear screen flag mode - self.eraseDisplay(.complete); + self.eraseDisplay(alloc, .complete); self.setCursorPos(1, 1); // TODO: left/right margins @@ -296,6 +309,9 @@ pub fn resize(self: *Terminal, alloc: Allocator, cols_req: usize, rows: usize) ! else cols_req; + // If our cols/rows didn't change then we're done + if (self.cols == cols and self.rows == rows) return; + // Resize our tabstops // TODO: use resize, but it doesn't set new tabstops if (self.cols != cols) { @@ -986,6 +1002,7 @@ pub fn setCursorColAbsolute(self: *Terminal, col_req: usize) void { /// TODO: test pub fn eraseDisplay( self: *Terminal, + alloc: Allocator, mode: csi.EraseDisplay, ) void { const tracy = trace(@src()); @@ -1002,6 +1019,9 @@ pub fn eraseDisplay( // Unsets pending wrap state self.screen.cursor.pending_wrap = false; + + // Clear all Kitty graphics state for this screen + self.screen.kitty_images.delete(alloc, self, .{ .all = true }); }, .below => { @@ -1555,18 +1575,34 @@ pub fn getPwd(self: *const Terminal) ?[]const u8 { return self.pwd.items; } +/// Execute a kitty graphics command. The buf is used to populate with +/// the response that should be sent as an APC sequence. The response will +/// be a full, valid APC sequence. +/// +/// If an error occurs, the caller should response to the pty that a +/// an error occurred otherwise the behavior of the graphics protocol is +/// undefined. 
+pub fn kittyGraphics( + self: *Terminal, + alloc: Allocator, + cmd: *kitty.graphics.Command, +) ?kitty.graphics.Response { + return kitty.graphics.execute(alloc, self, cmd); +} + /// Full reset -pub fn fullReset(self: *Terminal) void { - self.primaryScreen(.{ .clear_on_exit = true, .cursor_save = true }); +pub fn fullReset(self: *Terminal, alloc: Allocator) void { + self.primaryScreen(alloc, .{ .clear_on_exit = true, .cursor_save = true }); self.charset = .{}; - self.eraseDisplay(.scrollback); - self.eraseDisplay(.complete); + self.eraseDisplay(alloc, .scrollback); + self.eraseDisplay(alloc, .complete); self.modes = .{}; self.flags = .{}; self.tabstops.reset(0); self.screen.cursor = .{}; self.screen.saved_cursor = .{}; self.screen.selection = null; + self.screen.kitty_keyboard = .{}; self.scrolling_region = .{ .top = 0, .bottom = self.rows - 1 }; self.previous_char = null; self.pwd.clearRetainingCapacity(); @@ -2561,7 +2597,7 @@ test "Terminal: cursorIsAtPrompt alternate screen" { try testing.expect(t.cursorIsAtPrompt()); // Secondary screen is never a prompt - t.alternateScreen(.{}); + t.alternateScreen(alloc, .{}); try testing.expect(!t.cursorIsAtPrompt()); t.markSemanticPrompt(.prompt); try testing.expect(!t.cursorIsAtPrompt()); diff --git a/src/terminal/apc.zig b/src/terminal/apc.zig new file mode 100644 index 000000000..6a6b8cc36 --- /dev/null +++ b/src/terminal/apc.zig @@ -0,0 +1,137 @@ +const std = @import("std"); +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; + +const kitty_gfx = @import("kitty/graphics.zig"); + +const log = std.log.scoped(.terminal_apc); + +/// APC command handler. This should be hooked into a terminal.Stream handler. +/// The start/feed/end functions are meant to be called from the terminal.Stream +/// apcStart, apcPut, and apcEnd functions, respectively. 
+pub const Handler = struct { + state: State = .{ .inactive = {} }, + + pub fn deinit(self: *Handler) void { + self.state.deinit(); + } + + pub fn start(self: *Handler) void { + self.state.deinit(); + self.state = .{ .identify = {} }; + } + + pub fn feed(self: *Handler, alloc: Allocator, byte: u8) void { + switch (self.state) { + .inactive => unreachable, + + // We're ignoring this APC command, likely because we don't + // recognize it so there is no need to store the data in memory. + .ignore => return, + + // We identify the APC command by the first byte. + .identify => { + switch (byte) { + // Kitty graphics protocol + 'G' => self.state = .{ .kitty = kitty_gfx.CommandParser.init(alloc) }, + + // Unknown + else => self.state = .{ .ignore = {} }, + } + }, + + .kitty => |*p| p.feed(byte) catch |err| { + log.warn("kitty graphics protocol error: {}", .{err}); + self.state = .{ .ignore = {} }; + }, + } + } + + pub fn end(self: *Handler) ?Command { + defer { + self.state.deinit(); + self.state = .{ .inactive = {} }; + } + + return switch (self.state) { + .inactive => unreachable, + .ignore, .identify => null, + .kitty => |*p| kitty: { + const command = p.complete() catch |err| { + log.warn("kitty graphics protocol error: {}", .{err}); + break :kitty null; + }; + + break :kitty .{ .kitty = command }; + }, + }; + } +}; + +pub const State = union(enum) { + /// We're not in the middle of an APC command yet. + inactive: void, + + /// We got an unrecognized APC sequence or the APC sequence we + /// recognized became invalid. We're just dropping bytes. + ignore: void, + + /// We're waiting to identify the APC sequence. This is done by + /// inspecting the first byte of the sequence. + identify: void, + + /// Kitty graphics protocol + kitty: kitty_gfx.CommandParser, + + pub fn deinit(self: *State) void { + switch (self.*) { + .inactive, .ignore, .identify => {}, + .kitty => |*v| v.deinit(), + } + } +}; + +/// Possible APC commands. 
+pub const Command = union(enum) { + kitty: kitty_gfx.Command, + + pub fn deinit(self: *Command, alloc: Allocator) void { + switch (self.*) { + .kitty => |*v| v.deinit(alloc), + } + } +}; + +test "unknown APC command" { + const testing = std.testing; + const alloc = testing.allocator; + + var h: Handler = .{}; + h.start(); + for ("Xabcdef1234") |c| h.feed(alloc, c); + try testing.expect(h.end() == null); +} + +test "garbage Kitty command" { + const testing = std.testing; + const alloc = testing.allocator; + + var h: Handler = .{}; + h.start(); + for ("Gabcdef1234") |c| h.feed(alloc, c); + try testing.expect(h.end() == null); +} + +test "valid Kitty command" { + const testing = std.testing; + const alloc = testing.allocator; + + var h: Handler = .{}; + h.start(); + const input = "Gf=24,s=10,v=20,hello=world"; + for (input) |c| h.feed(alloc, c); + + var cmd = h.end().?; + defer cmd.deinit(alloc); + try testing.expect(cmd == .kitty); +} diff --git a/src/terminal/kitty.zig b/src/terminal/kitty.zig index 35f92da02..497dd4aba 100644 --- a/src/terminal/kitty.zig +++ b/src/terminal/kitty.zig @@ -1,154 +1,8 @@ //! Types and functions related to Kitty protocols. -//! -//! Documentation for the Kitty keyboard protocol: -//! https://sw.kovidgoyal.net/kitty/keyboard-protocol/#progressive-enhancement -const std = @import("std"); +pub const graphics = @import("kitty/graphics.zig"); +pub usingnamespace @import("kitty/key.zig"); -/// Stack for the key flags. This implements the push/pop behavior -/// of the CSI > u and CSI < u sequences. We implement the stack as -/// fixed size to avoid heap allocation. -pub const KeyFlagStack = struct { - const len = 8; - - flags: [len]KeyFlags = .{.{}} ** len, - idx: u3 = 0, - - /// Return the current stack value - pub fn current(self: KeyFlagStack) KeyFlags { - return self.flags[self.idx]; - } - - /// Perform the "set" operation as described in the spec for - /// the CSI = u sequence. 
- pub fn set( - self: *KeyFlagStack, - mode: KeySetMode, - v: KeyFlags, - ) void { - switch (mode) { - .set => self.flags[self.idx] = v, - .@"or" => self.flags[self.idx] = @bitCast( - self.flags[self.idx].int() | v.int(), - ), - .not => self.flags[self.idx] = @bitCast( - self.flags[self.idx].int() & ~v.int(), - ), - } - } - - /// Push a new set of flags onto the stack. If the stack is full - /// then the oldest entry is evicted. - pub fn push(self: *KeyFlagStack, flags: KeyFlags) void { - // Overflow and wrap around if we're full, which evicts - // the oldest entry. - self.idx +%= 1; - self.flags[self.idx] = flags; - } - - /// Pop `n` entries from the stack. This will just wrap around - /// if `n` is greater than the amount in the stack. - pub fn pop(self: *KeyFlagStack, n: usize) void { - // If n is more than our length then we just reset the stack. - // This also avoids a DoS vector where a malicious client - // could send a huge number of pop commands to waste cpu. - if (n >= self.flags.len) { - self.idx = 0; - self.flags = .{.{}} ** len; - return; - } - - for (0..n) |_| { - self.flags[self.idx] = .{}; - self.idx -%= 1; - } - } - - // Make sure we the overflow works as expected - test { - const testing = std.testing; - var stack: KeyFlagStack = .{}; - stack.idx = stack.flags.len - 1; - stack.idx +%= 1; - try testing.expect(stack.idx == 0); - - stack.idx = 0; - stack.idx -%= 1; - try testing.expect(stack.idx == stack.flags.len - 1); - } -}; - -/// The possible flags for the Kitty keyboard protocol. -pub const KeyFlags = packed struct(u5) { - disambiguate: bool = false, - report_events: bool = false, - report_alternates: bool = false, - report_all: bool = false, - report_associated: bool = false, - - pub fn int(self: KeyFlags) u5 { - return @bitCast(self); - } - - // Its easy to get packed struct ordering wrong so this test checks. 
- test { - const testing = std.testing; - - try testing.expectEqual( - @as(u5, 0b1), - (KeyFlags{ .disambiguate = true }).int(), - ); - try testing.expectEqual( - @as(u5, 0b10), - (KeyFlags{ .report_events = true }).int(), - ); - } -}; - -/// The possible modes for setting the key flags. -pub const KeySetMode = enum { set, @"or", not }; - -test "KeyFlagStack: push pop" { - const testing = std.testing; - var stack: KeyFlagStack = .{}; - stack.push(.{ .disambiguate = true }); - try testing.expectEqual( - KeyFlags{ .disambiguate = true }, - stack.current(), - ); - - stack.pop(1); - try testing.expectEqual(KeyFlags{}, stack.current()); -} - -test "KeyFlagStack: pop big number" { - const testing = std.testing; - var stack: KeyFlagStack = .{}; - stack.pop(100); - try testing.expectEqual(KeyFlags{}, stack.current()); -} - -test "KeyFlagStack: set" { - const testing = std.testing; - var stack: KeyFlagStack = .{}; - stack.set(.set, .{ .disambiguate = true }); - try testing.expectEqual( - KeyFlags{ .disambiguate = true }, - stack.current(), - ); - - stack.set(.@"or", .{ .report_events = true }); - try testing.expectEqual( - KeyFlags{ - .disambiguate = true, - .report_events = true, - }, - stack.current(), - ); - - stack.set(.not, .{ .report_events = true }); - try testing.expectEqual( - KeyFlags{ .disambiguate = true }, - stack.current(), - ); +test { + @import("std").testing.refAllDecls(@This()); } diff --git a/src/terminal/kitty/graphics.zig b/src/terminal/kitty/graphics.zig new file mode 100644 index 000000000..cfc45adbc --- /dev/null +++ b/src/terminal/kitty/graphics.zig @@ -0,0 +1,22 @@ +//! Kitty graphics protocol support. +//! +//! Documentation: +//! https://sw.kovidgoyal.net/kitty/graphics-protocol +//! +//! Unimplemented features that are still todo: +//! - shared memory transmit +//! - virtual placement w/ unicode +//! - animation +//! +//! Performance: +//! The performance of this particular subsystem of Ghostty is not great. +//! 
We can avoid a lot more allocations, we can replace some C code (which +//! implicitly allocates) with native Zig, we can improve the data structures +//! to avoid repeated lookups, etc. I tried to avoid pessimization but my +//! aim to ship a v1 of this implementation came at some cost. I learned a lot +//! though and I think we can go back through and fix this up. + +pub usingnamespace @import("graphics_command.zig"); +pub usingnamespace @import("graphics_exec.zig"); +pub usingnamespace @import("graphics_image.zig"); +pub usingnamespace @import("graphics_storage.zig"); diff --git a/src/terminal/kitty/graphics_command.zig b/src/terminal/kitty/graphics_command.zig new file mode 100644 index 000000000..f44974465 --- /dev/null +++ b/src/terminal/kitty/graphics_command.zig @@ -0,0 +1,979 @@ +const std = @import("std"); +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; +const ArenaAllocator = std.heap.ArenaAllocator; + +/// The key-value pairs for the control information for a command. The +/// keys are always single characters and the values are either single +/// characters or 32-bit unsigned integers. +/// +/// For the value of this: if the value is a single printable ASCII character +/// it is the ASCII code. Otherwise, it is parsed as a 32-bit unsigned integer. +const KV = std.AutoHashMapUnmanaged(u8, u32); + +/// Command parser parses the Kitty graphics protocol escape sequence. +pub const CommandParser = struct { + /// The memory used by the parser is stored in an arena because it is + /// all freed at the end of the command. + arena: ArenaAllocator, + + /// This is the list of KV pairs that we're building up. + kv: KV = .{}, + + /// This is used as a buffer to store the key/value of a KV pair. + /// The value of a KV pair is at most a 32-bit integer which at most + /// is 10 characters (4294967295). 
+ kv_temp: [10]u8 = undefined, + kv_temp_len: u4 = 0, + kv_current: u8 = 0, // Current kv key + + /// This is the list of bytes that contains both KV data and final + /// data. You shouldn't access this directly. + data: std.ArrayList(u8), + + /// Internal state for parsing. + state: State = .control_key, + + const State = enum { + /// Parsing k/v pairs. The "ignore" variants are in that state + /// but ignore any data because we know they're invalid. + control_key, + control_key_ignore, + control_value, + control_value_ignore, + + /// We're parsing the data blob. + data, + }; + + /// Initialize the parser. The allocator given will be used for both + /// temporary data and long-lived values such as the final image blob. + pub fn init(alloc: Allocator) CommandParser { + var arena = ArenaAllocator.init(alloc); + errdefer arena.deinit(); + return .{ + .arena = arena, + .data = std.ArrayList(u8).init(alloc), + }; + } + + pub fn deinit(self: *CommandParser) void { + // We don't free the hash map because its in the arena + self.arena.deinit(); + self.data.deinit(); + } + + /// Feed a single byte to the parser. + /// + /// The first byte to start parsing should be the byte immediately following + /// the "G" in the APC sequence, i.e. "\x1b_G123" the first byte should + /// be "1". + pub fn feed(self: *CommandParser, c: u8) !void { + switch (self.state) { + .control_key => switch (c) { + // '=' means the key is complete and we're moving to the value. + '=' => if (self.kv_temp_len != 1) { + // All control keys are a single character right now so + // if we're not a single character just ignore follow-up + // data. 
+ self.state = .control_value_ignore; + self.kv_temp_len = 0; + } else { + self.kv_current = self.kv_temp[0]; + self.kv_temp_len = 0; + self.state = .control_value; + }, + + else => try self.accumulateValue(c, .control_key_ignore), + }, + + .control_key_ignore => switch (c) { + '=' => self.state = .control_value_ignore, + else => {}, + }, + + .control_value => switch (c) { + ',' => try self.finishValue(.control_key), // move to next key + ';' => try self.finishValue(.data), // move to data + else => try self.accumulateValue(c, .control_value_ignore), + }, + + .control_value_ignore => switch (c) { + ',' => self.state = .control_key_ignore, + ';' => self.state = .data, + else => {}, + }, + + .data => try self.data.append(c), + } + + // We always add to our data list because this is our stable + // array of bytes that we'll reference everywhere else. + } + + /// Complete the parsing. This must be called after all the + /// bytes have been fed to the parser. + /// + /// The allocator given will be used for the long-lived data + /// of the final command. + pub fn complete(self: *CommandParser) !Command { + switch (self.state) { + // We can't ever end in the control key state and be valid. + // This means the command looked something like "a=1,b" + .control_key, .control_key_ignore => return error.InvalidFormat, + + // Some commands (i.e. placements) end without extra data so + // we end in the value state. i.e. "a=1,b=2" + .control_value => try self.finishValue(.data), + .control_value_ignore => {}, + + // Most commands end in data, i.e. "a=1,b=2;1234" + .data => {}, + } + + // Determine our action, which is always a single character. 
+ const action: u8 = action: { + const value = self.kv.get('a') orelse break :action 't'; + const c = std.math.cast(u8, value) orelse return error.InvalidFormat; + break :action c; + }; + const control: Command.Control = switch (action) { + 'q' => .{ .query = try Transmission.parse(self.kv) }, + 't' => .{ .transmit = try Transmission.parse(self.kv) }, + 'T' => .{ .transmit_and_display = .{ + .transmission = try Transmission.parse(self.kv), + .display = try Display.parse(self.kv), + } }, + 'p' => .{ .display = try Display.parse(self.kv) }, + 'd' => .{ .delete = try Delete.parse(self.kv) }, + 'f' => .{ .transmit_animation_frame = try AnimationFrameLoading.parse(self.kv) }, + 'a' => .{ .control_animation = try AnimationControl.parse(self.kv) }, + 'c' => .{ .compose_animation = try AnimationFrameComposition.parse(self.kv) }, + else => return error.InvalidFormat, + }; + + // Determine our quiet value + const quiet: Command.Quiet = if (self.kv.get('q')) |v| quiet: { + break :quiet switch (v) { + 0 => .no, + 1 => .ok, + 2 => .failures, + else => return error.InvalidFormat, + }; + } else .no; + + return .{ + .control = control, + .quiet = quiet, + .data = if (self.data.items.len == 0) "" else data: { + break :data try self.data.toOwnedSlice(); + }, + }; + } + + fn accumulateValue(self: *CommandParser, c: u8, overflow_state: State) !void { + const idx = self.kv_temp_len; + self.kv_temp_len += 1; + if (self.kv_temp_len > self.kv_temp.len) { + self.state = overflow_state; + self.kv_temp_len = 0; + return; + } + self.kv_temp[idx] = c; + } + + fn finishValue(self: *CommandParser, next_state: State) !void { + const alloc = self.arena.allocator(); + + // We can move states right away, we don't use it. 
+ self.state = next_state; + + // Check for ASCII chars first + if (self.kv_temp_len == 1) { + const c = self.kv_temp[0]; + if (c < '0' or c > '9') { + try self.kv.put(alloc, self.kv_current, @intCast(c)); + self.kv_temp_len = 0; + return; + } + } + + // Only "z" is currently signed. This is a bit of a kloodge; if more + // fields become signed we can rethink this but for now we parse + // "z" as i32 then bitcast it to u32 then bitcast it back later. + if (self.kv_current == 'z') { + const v = try std.fmt.parseInt(i32, self.kv_temp[0..self.kv_temp_len], 10); + try self.kv.put(alloc, self.kv_current, @bitCast(v)); + } else { + const v = try std.fmt.parseInt(u32, self.kv_temp[0..self.kv_temp_len], 10); + try self.kv.put(alloc, self.kv_current, v); + } + + // Clear our temp buffer + self.kv_temp_len = 0; + } +}; + +/// Represents a possible response to a command. +pub const Response = struct { + id: u32 = 0, + image_number: u32 = 0, + placement_id: u32 = 0, + message: []const u8 = "OK", + + pub fn encode(self: Response, writer: anytype) !void { + // We only encode a result if we have either an id or an image number. + if (self.id == 0 and self.image_number == 0) return; + + try writer.writeAll("\x1b_G"); + if (self.id > 0) { + try writer.print("i={}", .{self.id}); + } + if (self.image_number > 0) { + if (self.id > 0) try writer.writeByte(','); + try writer.print("I={}", .{self.image_number}); + } + if (self.placement_id > 0) { + try writer.print(",p={}", .{self.placement_id}); + } + try writer.writeByte(';'); + try writer.writeAll(self.message); + try writer.writeAll("\x1b\\"); + } + + /// Returns true if this response is not an error. 
+ pub fn ok(self: Response) bool { + return std.mem.eql(u8, self.message, "OK"); + } +}; + +pub const Command = struct { + control: Control, + quiet: Quiet = .no, + data: []const u8 = "", + + pub const Action = enum { + query, // q + transmit, // t + transmit_and_display, // T + display, // p + delete, // d + transmit_animation_frame, // f + control_animation, // a + compose_animation, // c + }; + + pub const Quiet = enum { + no, // 0 + ok, // 1 + failures, // 2 + }; + + pub const Control = union(Action) { + query: Transmission, + transmit: Transmission, + transmit_and_display: struct { + transmission: Transmission, + display: Display, + }, + display: Display, + delete: Delete, + transmit_animation_frame: AnimationFrameLoading, + control_animation: AnimationControl, + compose_animation: AnimationFrameComposition, + }; + + /// Take ownership over the data in this command. If the returned value + /// has a length of zero, then the data was empty and need not be freed. + pub fn toOwnedData(self: *Command) []const u8 { + const result = self.data; + self.data = ""; + return result; + } + + /// Returns the transmission data if it has any. + pub fn transmission(self: Command) ?Transmission { + return switch (self.control) { + .query => |t| t, + .transmit => |t| t, + .transmit_and_display => |t| t.transmission, + else => null, + }; + } + + /// Returns the display data if it has any. 
+ pub fn display(self: Command) ?Display { + return switch (self.control) { + .display => |d| d, + .transmit_and_display => |t| t.display, + else => null, + }; + } + + pub fn deinit(self: Command, alloc: Allocator) void { + if (self.data.len > 0) alloc.free(self.data); + } +}; + +pub const Transmission = struct { + format: Format = .rgb, // f + medium: Medium = .direct, // t + width: u32 = 0, // s + height: u32 = 0, // v + size: u32 = 0, // S + offset: u32 = 0, // O + image_id: u32 = 0, // i + image_number: u32 = 0, // I + placement_id: u32 = 0, // p + compression: Compression = .none, // o + more_chunks: bool = false, // m + + pub const Format = enum { + rgb, // 24 + rgba, // 32 + png, // 100 + }; + + pub const Medium = enum { + direct, // d + file, // f + temporary_file, // t + shared_memory, // s + }; + + pub const Compression = enum { + none, + zlib_deflate, // z + }; + + fn parse(kv: KV) !Transmission { + var result: Transmission = .{}; + if (kv.get('f')) |v| { + result.format = switch (v) { + 24 => .rgb, + 32 => .rgba, + 100 => .png, + else => return error.InvalidFormat, + }; + } + + if (kv.get('t')) |v| { + const c = std.math.cast(u8, v) orelse return error.InvalidFormat; + result.medium = switch (c) { + 'd' => .direct, + 'f' => .file, + 't' => .temporary_file, + 's' => .shared_memory, + else => return error.InvalidFormat, + }; + } + + if (kv.get('s')) |v| { + result.width = v; + } + + if (kv.get('v')) |v| { + result.height = v; + } + + if (kv.get('S')) |v| { + result.size = v; + } + + if (kv.get('O')) |v| { + result.offset = v; + } + + if (kv.get('i')) |v| { + result.image_id = v; + } + + if (kv.get('I')) |v| { + result.image_number = v; + } + + if (kv.get('p')) |v| { + result.placement_id = v; + } + + if (kv.get('o')) |v| { + const c = std.math.cast(u8, v) orelse return error.InvalidFormat; + result.compression = switch (c) { + 'z' => .zlib_deflate, + else => return error.InvalidFormat, + }; + } + + if (kv.get('m')) |v| { + result.more_chunks = v > 0; + } 
+ + return result; + } +}; + +pub const Display = struct { + image_id: u32 = 0, // i + image_number: u32 = 0, // I + placement_id: u32 = 0, // p + x: u32 = 0, // x + y: u32 = 0, // y + width: u32 = 0, // w + height: u32 = 0, // h + x_offset: u32 = 0, // X + y_offset: u32 = 0, // Y + columns: u32 = 0, // c + rows: u32 = 0, // r + cursor_movement: CursorMovement = .after, // C + virtual_placement: bool = false, // U + z: i32 = 0, // z + + pub const CursorMovement = enum { + after, // 0 + none, // 1 + }; + + fn parse(kv: KV) !Display { + var result: Display = .{}; + + if (kv.get('i')) |v| { + result.image_id = v; + } + + if (kv.get('I')) |v| { + result.image_number = v; + } + + if (kv.get('p')) |v| { + result.placement_id = v; + } + + if (kv.get('x')) |v| { + result.x = v; + } + + if (kv.get('y')) |v| { + result.y = v; + } + + if (kv.get('w')) |v| { + result.width = v; + } + + if (kv.get('h')) |v| { + result.height = v; + } + + if (kv.get('X')) |v| { + result.x_offset = v; + } + + if (kv.get('Y')) |v| { + result.y_offset = v; + } + + if (kv.get('c')) |v| { + result.columns = v; + } + + if (kv.get('r')) |v| { + result.rows = v; + } + + if (kv.get('C')) |v| { + result.cursor_movement = switch (v) { + 0 => .after, + 1 => .none, + else => return error.InvalidFormat, + }; + } + + if (kv.get('U')) |v| { + result.virtual_placement = switch (v) { + 0 => false, + 1 => true, + else => return error.InvalidFormat, + }; + } + + if (kv.get('z')) |v| { + // We can bitcast here because of how we parse it earlier. 
+ result.z = @bitCast(v); + } + + return result; + } +}; + +pub const AnimationFrameLoading = struct { + x: u32 = 0, // x + y: u32 = 0, // y + create_frame: u32 = 0, // c + edit_frame: u32 = 0, // r + gap_ms: u32 = 0, // z + composition_mode: CompositionMode = .alpha_blend, // X + background: Background = .{}, // Y + + pub const Background = packed struct(u32) { + r: u8 = 0, + g: u8 = 0, + b: u8 = 0, + a: u8 = 0, + }; + + fn parse(kv: KV) !AnimationFrameLoading { + var result: AnimationFrameLoading = .{}; + + if (kv.get('x')) |v| { + result.x = v; + } + + if (kv.get('y')) |v| { + result.y = v; + } + + if (kv.get('c')) |v| { + result.create_frame = v; + } + + if (kv.get('r')) |v| { + result.edit_frame = v; + } + + if (kv.get('z')) |v| { + result.gap_ms = v; + } + + if (kv.get('X')) |v| { + result.composition_mode = switch (v) { + 0 => .alpha_blend, + 1 => .overwrite, + else => return error.InvalidFormat, + }; + } + + if (kv.get('Y')) |v| { + result.background = @bitCast(v); + } + + return result; + } +}; + +pub const AnimationFrameComposition = struct { + frame: u32 = 0, // c + edit_frame: u32 = 0, // r + x: u32 = 0, // x + y: u32 = 0, // y + width: u32 = 0, // w + height: u32 = 0, // h + left_edge: u32 = 0, // X + top_edge: u32 = 0, // Y + composition_mode: CompositionMode = .alpha_blend, // C + + fn parse(kv: KV) !AnimationFrameComposition { + var result: AnimationFrameComposition = .{}; + + if (kv.get('c')) |v| { + result.frame = v; + } + + if (kv.get('r')) |v| { + result.edit_frame = v; + } + + if (kv.get('x')) |v| { + result.x = v; + } + + if (kv.get('y')) |v| { + result.y = v; + } + + if (kv.get('w')) |v| { + result.width = v; + } + + if (kv.get('h')) |v| { + result.height = v; + } + + if (kv.get('X')) |v| { + result.left_edge = v; + } + + if (kv.get('Y')) |v| { + result.top_edge = v; + } + + if (kv.get('C')) |v| { + result.composition_mode = switch (v) { + 0 => .alpha_blend, + 1 => .overwrite, + else => return error.InvalidFormat, + }; + } + + return result; 
+ } +}; + +pub const AnimationControl = struct { + action: AnimationAction = .invalid, // s + frame: u32 = 0, // r + gap_ms: u32 = 0, // z + current_frame: u32 = 0, // c + loops: u32 = 0, // v + + pub const AnimationAction = enum { + invalid, // 0 + stop, // 1 + run_wait, // 2 + run, // 3 + }; + + fn parse(kv: KV) !AnimationControl { + var result: AnimationControl = .{}; + + if (kv.get('s')) |v| { + result.action = switch (v) { + 0 => .invalid, + 1 => .stop, + 2 => .run_wait, + 3 => .run, + else => return error.InvalidFormat, + }; + } + + if (kv.get('r')) |v| { + result.frame = v; + } + + if (kv.get('z')) |v| { + result.gap_ms = v; + } + + if (kv.get('c')) |v| { + result.current_frame = v; + } + + if (kv.get('v')) |v| { + result.loops = v; + } + + return result; + } +}; + +pub const Delete = union(enum) { + // a/A + all: bool, + + // i/I + id: struct { + delete: bool = false, // uppercase + image_id: u32 = 0, // i + placement_id: u32 = 0, // p + }, + + // n/N + newest: struct { + delete: bool = false, // uppercase + image_number: u32 = 0, // I + placement_id: u32 = 0, // p + }, + + // c/C, + intersect_cursor: bool, + + // f/F + animation_frames: bool, + + // p/P + intersect_cell: struct { + delete: bool = false, // uppercase + x: u32 = 0, // x + y: u32 = 0, // y + }, + + // q/Q + intersect_cell_z: struct { + delete: bool = false, // uppercase + x: u32 = 0, // x + y: u32 = 0, // y + z: i32 = 0, // z + }, + + // x/X + column: struct { + delete: bool = false, // uppercase + x: u32 = 0, // x + }, + + // y/Y + row: struct { + delete: bool = false, // uppercase + y: u32 = 0, // y + }, + + // z/Z + z: struct { + delete: bool = false, // uppercase + z: i32 = 0, // z + }, + + fn parse(kv: KV) !Delete { + const what: u8 = what: { + const value = kv.get('d') orelse break :what 'a'; + const c = std.math.cast(u8, value) orelse return error.InvalidFormat; + break :what c; + }; + + return switch (what) { + 'a', 'A' => .{ .all = what == 'A' }, + + 'i', 'I' => blk: { + var result: 
Delete = .{ .id = .{ .delete = what == 'I' } }; + if (kv.get('i')) |v| { + result.id.image_id = v; + } + if (kv.get('p')) |v| { + result.id.placement_id = v; + } + + break :blk result; + }, + + 'n', 'N' => blk: { + var result: Delete = .{ .newest = .{ .delete = what == 'N' } }; + if (kv.get('I')) |v| { + result.newest.image_number = v; + } + if (kv.get('p')) |v| { + result.newest.placement_id = v; + } + + break :blk result; + }, + + 'c', 'C' => .{ .intersect_cursor = what == 'C' }, + + 'f', 'F' => .{ .animation_frames = what == 'F' }, + + 'p', 'P' => blk: { + var result: Delete = .{ .intersect_cell = .{ .delete = what == 'P' } }; + if (kv.get('x')) |v| { + result.intersect_cell.x = v; + } + if (kv.get('y')) |v| { + result.intersect_cell.y = v; + } + + break :blk result; + }, + + 'q', 'Q' => blk: { + var result: Delete = .{ .intersect_cell_z = .{ .delete = what == 'Q' } }; + if (kv.get('x')) |v| { + result.intersect_cell_z.x = v; + } + if (kv.get('y')) |v| { + result.intersect_cell_z.y = v; + } + if (kv.get('z')) |v| { + // We can bitcast here because of how we parse it earlier. + result.intersect_cell_z.z = @bitCast(v); + } + + break :blk result; + }, + + 'x', 'X' => blk: { + var result: Delete = .{ .column = .{ .delete = what == 'X' } }; + if (kv.get('x')) |v| { + result.column.x = v; + } + + break :blk result; + }, + + 'y', 'Y' => blk: { + var result: Delete = .{ .row = .{ .delete = what == 'Y' } }; + if (kv.get('y')) |v| { + result.row.y = v; + } + + break :blk result; + }, + + 'z', 'Z' => blk: { + var result: Delete = .{ .z = .{ .delete = what == 'Z' } }; + if (kv.get('z')) |v| { + // We can bitcast here because of how we parse it earlier. 
+ result.z.z = @bitCast(v); + } + + break :blk result; + }, + + else => return error.InvalidFormat, + }; + } +}; + +pub const CompositionMode = enum { + alpha_blend, // 0 + overwrite, // 1 +}; + +test "transmission command" { + const testing = std.testing; + const alloc = testing.allocator; + var p = CommandParser.init(alloc); + defer p.deinit(); + + const input = "f=24,s=10,v=20"; + for (input) |c| try p.feed(c); + const command = try p.complete(); + defer command.deinit(alloc); + + try testing.expect(command.control == .transmit); + const v = command.control.transmit; + try testing.expectEqual(Transmission.Format.rgb, v.format); + try testing.expectEqual(@as(u32, 10), v.width); + try testing.expectEqual(@as(u32, 20), v.height); +} + +test "query command" { + const testing = std.testing; + const alloc = testing.allocator; + var p = CommandParser.init(alloc); + defer p.deinit(); + + const input = "i=31,s=1,v=1,a=q,t=d,f=24;AAAA"; + for (input) |c| try p.feed(c); + const command = try p.complete(); + defer command.deinit(alloc); + + try testing.expect(command.control == .query); + const v = command.control.query; + try testing.expectEqual(Transmission.Medium.direct, v.medium); + try testing.expectEqual(@as(u32, 1), v.width); + try testing.expectEqual(@as(u32, 1), v.height); + try testing.expectEqual(@as(u32, 31), v.image_id); + try testing.expectEqualStrings("AAAA", command.data); +} + +test "display command" { + const testing = std.testing; + const alloc = testing.allocator; + var p = CommandParser.init(alloc); + defer p.deinit(); + + const input = "a=p,U=1,i=31,c=80,r=120"; + for (input) |c| try p.feed(c); + const command = try p.complete(); + defer command.deinit(alloc); + + try testing.expect(command.control == .display); + const v = command.control.display; + try testing.expectEqual(@as(u32, 80), v.columns); + try testing.expectEqual(@as(u32, 120), v.rows); + try testing.expectEqual(@as(u32, 31), v.image_id); +} + +test "delete command" { + const testing = 
std.testing; + const alloc = testing.allocator; + var p = CommandParser.init(alloc); + defer p.deinit(); + + const input = "a=d,d=p,x=3,y=4"; + for (input) |c| try p.feed(c); + const command = try p.complete(); + defer command.deinit(alloc); + + try testing.expect(command.control == .delete); + const v = command.control.delete; + try testing.expect(v == .intersect_cell); + const dv = v.intersect_cell; + try testing.expect(!dv.delete); + try testing.expectEqual(@as(u32, 3), dv.x); + try testing.expectEqual(@as(u32, 4), dv.y); +} + +test "ignore unknown keys (long)" { + const testing = std.testing; + const alloc = testing.allocator; + var p = CommandParser.init(alloc); + defer p.deinit(); + + const input = "f=24,s=10,v=20,hello=world"; + for (input) |c| try p.feed(c); + const command = try p.complete(); + defer command.deinit(alloc); + + try testing.expect(command.control == .transmit); + const v = command.control.transmit; + try testing.expectEqual(Transmission.Format.rgb, v.format); + try testing.expectEqual(@as(u32, 10), v.width); + try testing.expectEqual(@as(u32, 20), v.height); +} + +test "ignore very long values" { + const testing = std.testing; + const alloc = testing.allocator; + var p = CommandParser.init(alloc); + defer p.deinit(); + + const input = "f=24,s=10,v=2000000000000000000000000000000000000000"; + for (input) |c| try p.feed(c); + const command = try p.complete(); + defer command.deinit(alloc); + + try testing.expect(command.control == .transmit); + const v = command.control.transmit; + try testing.expectEqual(Transmission.Format.rgb, v.format); + try testing.expectEqual(@as(u32, 10), v.width); + try testing.expectEqual(@as(u32, 0), v.height); +} + +test "response: encode nothing without ID or image number" { + const testing = std.testing; + var buf: [1024]u8 = undefined; + var fbs = std.io.fixedBufferStream(&buf); + + var r: Response = .{}; + try r.encode(fbs.writer()); + try testing.expectEqualStrings("", fbs.getWritten()); +} + +test "response: 
encode with only image id" { + const testing = std.testing; + var buf: [1024]u8 = undefined; + var fbs = std.io.fixedBufferStream(&buf); + + var r: Response = .{ .id = 4 }; + try r.encode(fbs.writer()); + try testing.expectEqualStrings("\x1b_Gi=4;OK\x1b\\", fbs.getWritten()); +} + +test "response: encode with only image number" { + const testing = std.testing; + var buf: [1024]u8 = undefined; + var fbs = std.io.fixedBufferStream(&buf); + + var r: Response = .{ .image_number = 4 }; + try r.encode(fbs.writer()); + try testing.expectEqualStrings("\x1b_GI=4;OK\x1b\\", fbs.getWritten()); +} + +test "response: encode with image ID and number" { + const testing = std.testing; + var buf: [1024]u8 = undefined; + var fbs = std.io.fixedBufferStream(&buf); + + var r: Response = .{ .id = 12, .image_number = 4 }; + try r.encode(fbs.writer()); + try testing.expectEqualStrings("\x1b_Gi=12,I=4;OK\x1b\\", fbs.getWritten()); +} diff --git a/src/terminal/kitty/graphics_exec.zig b/src/terminal/kitty/graphics_exec.zig new file mode 100644 index 000000000..0415722b6 --- /dev/null +++ b/src/terminal/kitty/graphics_exec.zig @@ -0,0 +1,345 @@ +const std = @import("std"); +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; + +const renderer = @import("../../renderer.zig"); +const point = @import("../point.zig"); +const Terminal = @import("../Terminal.zig"); +const command = @import("graphics_command.zig"); +const image = @import("graphics_image.zig"); +const Command = command.Command; +const Response = command.Response; +const LoadingImage = image.LoadingImage; +const Image = image.Image; +const ImageStorage = @import("graphics_storage.zig").ImageStorage; + +const log = std.log.scoped(.kitty_gfx); + +/// Execute a Kitty graphics command against the given terminal. This +/// will never fail, but the response may indicate an error and the +/// terminal state may not be updated to reflect the command. This will +/// never put the terminal in an unrecoverable state, however. 
+/// +/// The allocator must be the same allocator that was used to build +/// the command. +pub fn execute( + alloc: Allocator, + terminal: *Terminal, + cmd: *Command, +) ?Response { + // If storage is disabled then we disable the full protocol. This means + // we don't even respond to queries so the terminal completely acts as + // if this feature is not supported. + if (!terminal.screen.kitty_images.enabled()) { + log.debug("kitty graphics requested but disabled", .{}); + return null; + } + + // Only Metal supports rendering the images, right now. + if (comptime renderer.Renderer != renderer.Metal) { + log.warn("kitty graphics not supported on this renderer", .{}); + return null; + } + + log.debug("executing kitty graphics command: quiet={} control={}", .{ + cmd.quiet, + cmd.control, + }); + + const resp_: ?Response = switch (cmd.control) { + .query => query(alloc, cmd), + .transmit, .transmit_and_display => transmit(alloc, terminal, cmd), + .display => display(alloc, terminal, cmd), + .delete => delete(alloc, terminal, cmd), + + .transmit_animation_frame, + .control_animation, + .compose_animation, + => .{ .message = "ERROR: unimplemented action" }, + }; + + // Handle the quiet settings + if (resp_) |resp| { + if (!resp.ok()) { + log.warn("erroneous kitty graphics response: {s}", .{resp.message}); + } + + return switch (cmd.quiet) { + .no => resp, + .ok => if (resp.ok()) null else resp, + .failures => null, + }; + } + + return null; +} +/// Execute a "query" command. +/// +/// This command is used to attempt to load an image and respond with +/// success/error but does not persist any of the command to the terminal +/// state. +fn query(alloc: Allocator, cmd: *Command) Response { + const t = cmd.control.query; + + // Query requires image ID. We can't actually send a response without + // an image ID either but we return an error and this will be logged + // downstream. 
+ if (t.image_id == 0) { + return .{ .message = "EINVAL: image ID required" }; + } + + // Build a partial response to start + var result: Response = .{ + .id = t.image_id, + .image_number = t.image_number, + .placement_id = t.placement_id, + }; + + // Attempt to load the image. If we cannot, then set an appropriate error. + var loading = LoadingImage.init(alloc, cmd) catch |err| { + encodeError(&result, err); + return result; + }; + loading.deinit(alloc); + + return result; +} + +/// Transmit image data. +/// +/// This loads the image, validates it, and puts it into the terminal +/// screen storage. It does not display the image. +fn transmit( + alloc: Allocator, + terminal: *Terminal, + cmd: *Command, +) Response { + const t = cmd.transmission().?; + var result: Response = .{ + .id = t.image_id, + .image_number = t.image_number, + .placement_id = t.placement_id, + }; + if (t.image_id > 0 and t.image_number > 0) { + return .{ .message = "EINVAL: image ID and number are mutually exclusive" }; + } + + const load = loadAndAddImage(alloc, terminal, cmd) catch |err| { + encodeError(&result, err); + return result; + }; + errdefer load.image.deinit(alloc); + + // If we're also displaying, then do that now. This function does + // both transmit and transmit and display. The display might also be + // deferred if it is multi-chunk. + if (load.display) |d| { + assert(!load.more); + var d_copy = d; + d_copy.image_id = load.image.id; + return display(alloc, terminal, &.{ + .control = .{ .display = d_copy }, + .quiet = cmd.quiet, + }); + } + + // If there are more chunks expected we do not respond. + if (load.more) return .{}; + + // After the image is added, set the ID in case it changed + result.id = load.image.id; + + // If the original request had an image number, then we respond. + // Otherwise, we don't respond. + if (load.image.number == 0) return .{}; + + return result; +} + +/// Display a previously transmitted image. 
+fn display( + alloc: Allocator, + terminal: *Terminal, + cmd: *const Command, +) Response { + const d = cmd.display().?; + + // Display requires image ID or number. + if (d.image_id == 0 and d.image_number == 0) { + return .{ .message = "EINVAL: image ID or number required" }; + } + + // Build up our response + var result: Response = .{ + .id = d.image_id, + .image_number = d.image_number, + .placement_id = d.placement_id, + }; + + // Verify the requested image exists if we have an ID + const storage = &terminal.screen.kitty_images; + const img_: ?Image = if (d.image_id != 0) + storage.imageById(d.image_id) + else + storage.imageByNumber(d.image_number); + const img = img_ orelse { + result.message = "EINVAL: image not found"; + return result; + }; + + // Make sure our response has the image id in case we looked up by number + result.id = img.id; + + // Determine the screen point for the placement. + const placement_point = (point.Viewport{ + .x = terminal.screen.cursor.x, + .y = terminal.screen.cursor.y, + }).toScreen(&terminal.screen); + + // Add the placement + const p: ImageStorage.Placement = .{ + .point = placement_point, + .x_offset = d.x_offset, + .y_offset = d.y_offset, + .source_x = d.x, + .source_y = d.y, + .source_width = d.width, + .source_height = d.height, + .columns = d.columns, + .rows = d.rows, + .z = d.z, + }; + storage.addPlacement(alloc, img.id, d.placement_id, p) catch |err| { + encodeError(&result, err); + return result; + }; + + // Cursor needs to move after placement + switch (d.cursor_movement) { + .none => {}, + .after => { + const rect = p.rect(img, terminal); + + // We can do better by doing this with pure internal screen state + // but this handles scroll regions. 
+ const height = rect.bottom_right.y - rect.top_left.y; + for (0..height) |_| terminal.index() catch |err| { + log.warn("failed to move cursor: {}", .{err}); + break; + }; + + terminal.setCursorPos( + terminal.screen.cursor.y, + rect.bottom_right.x + 1, + ); + }, + } + + // Display does not result in a response on success + return .{}; +} + +/// Delete a previously transmitted image and/or its placements. +fn delete( + alloc: Allocator, + terminal: *Terminal, + cmd: *Command, +) Response { + const storage = &terminal.screen.kitty_images; + storage.delete(alloc, terminal, cmd.control.delete); + + // Delete never responds on success + return .{}; +} + +fn loadAndAddImage( + alloc: Allocator, + terminal: *Terminal, + cmd: *Command, +) !struct { + image: Image, + more: bool = false, + display: ?command.Display = null, +} { + const t = cmd.transmission().?; + const storage = &terminal.screen.kitty_images; + + // Determine our image. This also handles chunking and early exit. + var loading: LoadingImage = if (storage.loading) |loading| loading: { + // Note: we do NOT want to call "cmd.toOwnedData" here because + // we're _copying_ the data. We want the command data to be freed. + try loading.addData(alloc, cmd.data); + + // If we have more then we're done + if (t.more_chunks) return .{ .image = loading.image, .more = true }; + + // We have no more chunks. We're going to be completing the + // image so we want to destroy the pointer to the loading + // image and copy it out. + defer { + alloc.destroy(loading); + storage.loading = null; + } + + break :loading loading.*; + } else try LoadingImage.init(alloc, cmd); + + // We only want to deinit on error. If we're chunking, then we don't + // want to deinit at all. If we're not chunking, then we'll deinit + // after we've copied the image out.
+ errdefer loading.deinit(alloc); + + // If the image has no ID, we assign one + if (loading.image.id == 0) { + loading.image.id = storage.next_id; + storage.next_id +%= 1; + } + + // If this is chunked, this is the beginning of a new chunked transmission. + // (We checked for an in-progress chunk above.) + if (t.more_chunks) { + // We allocate the pointer on the heap because its rare and we + // don't want to always pay the memory cost to keep it around. + const loading_ptr = try alloc.create(LoadingImage); + errdefer alloc.destroy(loading_ptr); + loading_ptr.* = loading; + storage.loading = loading_ptr; + return .{ .image = loading.image, .more = true }; + } + + // Dump the image data before it is decompressed + // loading.debugDump() catch unreachable; + + // Validate and store our image + var img = try loading.complete(alloc); + errdefer img.deinit(alloc); + try storage.addImage(alloc, img); + + // Get our display settings + const display_ = loading.display; + + // Ensure we deinit the loading state because we're done. The image + // won't be deinit because of "complete" above. + loading.deinit(alloc); + + return .{ .image = img, .display = display_ }; +} + +const EncodeableError = Image.Error || Allocator.Error; + +/// Encode an error code into a message for a response. 
+fn encodeError(r: *Response, err: EncodeableError) void { + switch (err) { + error.OutOfMemory => r.message = "ENOMEM: out of memory", + error.InternalError => r.message = "EINVAL: internal error", + error.InvalidData => r.message = "EINVAL: invalid data", + error.DecompressionFailed => r.message = "EINVAL: decompression failed", + error.FilePathTooLong => r.message = "EINVAL: file path too long", + error.TemporaryFileNotInTempDir => r.message = "EINVAL: temporary file not in temp dir", + error.UnsupportedFormat => r.message = "EINVAL: unsupported format", + error.UnsupportedMedium => r.message = "EINVAL: unsupported medium", + error.UnsupportedDepth => r.message = "EINVAL: unsupported pixel depth", + error.DimensionsRequired => r.message = "EINVAL: dimensions required", + error.DimensionsTooLarge => r.message = "EINVAL: dimensions too large", + } +} diff --git a/src/terminal/kitty/graphics_image.zig b/src/terminal/kitty/graphics_image.zig new file mode 100644 index 000000000..d924bbdba --- /dev/null +++ b/src/terminal/kitty/graphics_image.zig @@ -0,0 +1,764 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; +const ArenaAllocator = std.heap.ArenaAllocator; + +const command = @import("graphics_command.zig"); +const point = @import("../point.zig"); +const internal_os = @import("../../os/main.zig"); +const stb = @import("../../stb/main.zig"); + +const log = std.log.scoped(.kitty_gfx); + +/// Maximum width or height of an image. Taken directly from Kitty. +const max_dimension = 10000; + +/// Maximum size in bytes, taken from Kitty. +const max_size = 400 * 1024 * 1024; // 400MB + +/// An image that is still being loaded. The image should be initialized +/// using init on the first chunk and then addData for each subsequent +/// chunk. Once all chunks have been added, complete should be called +/// to finalize the image. 
+pub const LoadingImage = struct { + /// The in-progress image. The first chunk must have all the metadata + /// so this comes from that initially. + image: Image, + + /// The data that is being built up. + data: std.ArrayListUnmanaged(u8) = .{}, + + /// This is non-null when a transmit and display command is given + /// so that we display the image after it is fully loaded. + display: ?command.Display = null, + + /// Initialize a chunked image from the first image transmission. + /// If this is a multi-chunk image, this should only be the FIRST + /// chunk. + pub fn init(alloc: Allocator, cmd: *command.Command) !LoadingImage { + // Build our initial image from the properties sent via the control. + // These can be overwritten by the data loading process. For example, + // PNG loading sets the width/height from the data. + const t = cmd.transmission().?; + var result: LoadingImage = .{ + .image = .{ + .id = t.image_id, + .number = t.image_number, + .width = t.width, + .height = t.height, + .compression = t.compression, + .format = t.format, + }, + + .display = cmd.display(), + }; + + // Special case for the direct medium, we just add it directly + // which will handle copying the data, base64 decoding, etc. + if (t.medium == .direct) { + try result.addData(alloc, cmd.data); + return result; + } + + // For every other medium, we'll need to at least base64 decode + // the data to make it useful so let's do that. Also, all the data + // has to be path data so we can put it in a stack-allocated buffer.
+ var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; + const Base64Decoder = std.base64.standard.Decoder; + const size = Base64Decoder.calcSizeForSlice(cmd.data) catch |err| { + log.warn("failed to calculate base64 size for file path: {}", .{err}); + return error.InvalidData; + }; + if (size > buf.len) return error.FilePathTooLong; + Base64Decoder.decode(&buf, cmd.data) catch |err| { + log.warn("failed to decode base64 data: {}", .{err}); + return error.InvalidData; + }; + var abs_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; + const path = std.os.realpath(buf[0..size], &abs_buf) catch |err| { + log.warn("failed to get absolute path: {}", .{err}); + return error.InvalidData; + }; + + // Depending on the medium, load the data from the path. + switch (t.medium) { + .direct => unreachable, // handled above + .file => try result.readFile(.file, alloc, t, path), + .temporary_file => try result.readFile(.temporary_file, alloc, t, path), + .shared_memory => try result.readSharedMemory(alloc, t, path), + } + + return result; + } + + /// Reads the data from a shared memory segment. + fn readSharedMemory( + self: *LoadingImage, + alloc: Allocator, + t: command.Transmission, + path: []const u8, + ) !void { + // We require libc for this for shm_open + if (comptime !builtin.link_libc) return error.UnsupportedMedium; + + // Todo: support shared memory + _ = self; + _ = alloc; + _ = t; + _ = path; + return error.UnsupportedMedium; + } + + /// Reads the data from a temporary file and returns it. This allocates + /// and does not free any of the data, so the caller must free it. + /// + /// This will also delete the temporary file if it is in a safe location. + fn readFile( + self: *LoadingImage, + comptime medium: command.Transmission.Medium, + alloc: Allocator, + t: command.Transmission, + path: []const u8, + ) !void { + switch (medium) { + .file, .temporary_file => {}, + else => @compileError("readFile only supports file and temporary_file"), + } + + // Verify file seems "safe". 
This is logic copied directly from Kitty, + // mostly. This is really rough but it will catch obvious bad actors. + if (std.mem.startsWith(u8, path, "/proc/") or + std.mem.startsWith(u8, path, "/sys/") or + (std.mem.startsWith(u8, path, "/dev/") and + !std.mem.startsWith(u8, path, "/dev/shm/"))) + { + return error.InvalidData; + } + + // Temporary file logic + if (medium == .temporary_file) { + if (!isPathInTempDir(path)) return error.TemporaryFileNotInTempDir; + } + defer if (medium == .temporary_file) { + std.os.unlink(path) catch |err| { + log.warn("failed to delete temporary file: {}", .{err}); + }; + }; + + var file = std.fs.cwd().openFile(path, .{}) catch |err| { + log.warn("failed to open temporary file: {}", .{err}); + return error.InvalidData; + }; + defer file.close(); + + // File must be a regular file + if (file.stat()) |stat| { + if (stat.kind != .file) { + log.warn("file is not a regular file kind={}", .{stat.kind}); + return error.InvalidData; + } + } else |err| { + log.warn("failed to stat file: {}", .{err}); + return error.InvalidData; + } + + if (t.offset > 0) { + file.seekTo(@intCast(t.offset)) catch |err| { + log.warn("failed to seek to offset {}: {}", .{ t.offset, err }); + return error.InvalidData; + }; + } + + var buf_reader = std.io.bufferedReader(file.reader()); + const reader = buf_reader.reader(); + + // Read the file + var managed = std.ArrayList(u8).init(alloc); + errdefer managed.deinit(); + const size: usize = if (t.size > 0) @min(t.size, max_size) else max_size; + reader.readAllArrayList(&managed, size) catch |err| { + log.warn("failed to read temporary file: {}", .{err}); + return error.InvalidData; + }; + + // Set our data + assert(self.data.items.len == 0); + self.data = .{ .items = managed.items, .capacity = managed.capacity }; + } + + /// Returns true if path appears to be in a temporary directory. + /// Copies logic from Kitty. 
+ fn isPathInTempDir(path: []const u8) bool { + if (std.mem.startsWith(u8, path, "/tmp")) return true; + if (std.mem.startsWith(u8, path, "/dev/shm")) return true; + if (internal_os.tmpDir()) |dir| { + if (std.mem.startsWith(u8, path, dir)) return true; + + // The temporary dir is sometimes a symlink. On macOS for + // example /tmp is /private/var/... + var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; + if (std.os.realpath(dir, &buf)) |real_dir| { + if (std.mem.startsWith(u8, path, real_dir)) return true; + } else |_| {} + } + + return false; + } + + pub fn deinit(self: *LoadingImage, alloc: Allocator) void { + self.image.deinit(alloc); + self.data.deinit(alloc); + } + + pub fn destroy(self: *LoadingImage, alloc: Allocator) void { + self.deinit(alloc); + alloc.destroy(self); + } + + /// Adds a chunk of base64-encoded data to the image. Use this if the + /// image is coming in chunks (the "m" parameter in the protocol). + pub fn addData(self: *LoadingImage, alloc: Allocator, data: []const u8) !void { + // If no data, skip + if (data.len == 0) return; + + // Grow our array list by size capacity if it needs it + const Base64Decoder = std.base64.standard.Decoder; + const size = Base64Decoder.calcSizeForSlice(data) catch |err| { + log.warn("failed to calculate size for base64 data: {}", .{err}); + return error.InvalidData; + }; + + // If our data would get too big, return an error + if (self.data.items.len + size > max_size) { + log.warn("image data too large max_size={}", .{max_size}); + return error.InvalidData; + } + + try self.data.ensureUnusedCapacity(alloc, size); + + // We decode directly into the arraylist + const start_i = self.data.items.len; + self.data.items.len = start_i + size; + const buf = self.data.items[start_i..]; + Base64Decoder.decode(buf, data) catch |err| switch (err) { + // We have to ignore invalid padding because lots of encoders + // add the wrong padding. 
Since we validate image data later + // (PNG decode or simple dimensions check), we can ignore this. + error.InvalidPadding => {}, + + else => { + log.warn("failed to decode base64 data: {}", .{err}); + return error.InvalidData; + }, + }; + } + + /// Complete the chunked image, returning a completed image. + pub fn complete(self: *LoadingImage, alloc: Allocator) !Image { + const img = &self.image; + + // Decompress the data if it is compressed. + try self.decompress(alloc); + + // Decode the png if we have to + if (img.format == .png) try self.decodePng(alloc); + + // Validate our dimensions. + if (img.width == 0 or img.height == 0) return error.DimensionsRequired; + if (img.width > max_dimension or img.height > max_dimension) return error.DimensionsTooLarge; + + // Data length must be what we expect + const bpp: u32 = switch (img.format) { + .rgb => 3, + .rgba => 4, + .png => unreachable, // png should be decoded by here + }; + const expected_len = img.width * img.height * bpp; + const actual_len = self.data.items.len; + if (actual_len != expected_len) { + std.log.warn( + "unexpected length image id={} width={} height={} bpp={} expected_len={} actual_len={}", + .{ img.id, img.width, img.height, bpp, expected_len, actual_len }, + ); + return error.InvalidData; + } + + // Set our time + self.image.transmit_time = std.time.Instant.now() catch |err| { + log.warn("failed to get time: {}", .{err}); + return error.InternalError; + }; + + // Everything looks good, copy the image data over. + var result = self.image; + result.data = try self.data.toOwnedSlice(alloc); + errdefer result.deinit(alloc); + self.image = .{}; + return result; + } + + /// Debug function to write the data to a file. This is useful for + /// capturing some test data for unit tests. 
+ pub fn debugDump(self: LoadingImage) !void { + if (comptime builtin.mode != .Debug) @compileError("debugDump in non-debug"); + + var buf: [1024]u8 = undefined; + const filename = try std.fmt.bufPrint( + &buf, + "image-{s}-{s}-{d}x{d}-{}.data", + .{ + @tagName(self.image.format), + @tagName(self.image.compression), + self.image.width, + self.image.height, + self.image.id, + }, + ); + const cwd = std.fs.cwd(); + const f = try cwd.createFile(filename, .{}); + defer f.close(); + + const writer = f.writer(); + try writer.writeAll(self.data.items); + } + + /// Decompress the data in-place. + fn decompress(self: *LoadingImage, alloc: Allocator) !void { + return switch (self.image.compression) { + .none => {}, + .zlib_deflate => self.decompressZlib(alloc), + }; + } + + fn decompressZlib(self: *LoadingImage, alloc: Allocator) !void { + // Open our zlib stream + var fbs = std.io.fixedBufferStream(self.data.items); + var stream = std.compress.zlib.decompressStream(alloc, fbs.reader()) catch |err| { + log.warn("zlib decompression failed: {}", .{err}); + return error.DecompressionFailed; + }; + defer stream.deinit(); + + // Write it to an array list + var list = std.ArrayList(u8).init(alloc); + errdefer list.deinit(); + stream.reader().readAllArrayList(&list, max_size) catch |err| { + log.warn("failed to read decompressed data: {}", .{err}); + return error.DecompressionFailed; + }; + + // Empty our current data list, take ownership over managed array list + self.data.deinit(alloc); + self.data = .{ .items = list.items, .capacity = list.capacity }; + + // Make sure we note that our image is no longer compressed + self.image.compression = .none; + } + + /// Decode the data as PNG. This will also update the image dimensions.
+ fn decodePng(self: *LoadingImage, alloc: Allocator) !void { + assert(self.image.format == .png); + + // Decode PNG + var width: c_int = 0; + var height: c_int = 0; + var bpp: c_int = 0; + const data = stb.stbi_load_from_memory( + self.data.items.ptr, + @intCast(self.data.items.len), + &width, + &height, + &bpp, + 0, + ) orelse return error.InvalidData; + defer stb.stbi_image_free(data); + const len: usize = @intCast(width * height * bpp); + if (len > max_size) { + log.warn("png image too large size={} max_size={}", .{ len, max_size }); + return error.InvalidData; + } + + // Validate our bpp + if (bpp != 3 and bpp != 4) return error.UnsupportedDepth; + + // Replace our data + self.data.deinit(alloc); + self.data = .{}; + try self.data.ensureUnusedCapacity(alloc, len); + try self.data.appendSlice(alloc, data[0..len]); + + // Store updated image dimensions + self.image.width = @intCast(width); + self.image.height = @intCast(height); + self.image.format = switch (bpp) { + 3 => .rgb, + 4 => .rgba, + else => unreachable, // validated above + }; + } +}; + +/// Image represents a single fully loaded image. +pub const Image = struct { + id: u32 = 0, + number: u32 = 0, + width: u32 = 0, + height: u32 = 0, + format: command.Transmission.Format = .rgb, + compression: command.Transmission.Compression = .none, + data: []const u8 = "", + transmit_time: std.time.Instant = undefined, + + pub const Error = error{ + InternalError, + InvalidData, + DecompressionFailed, + DimensionsRequired, + DimensionsTooLarge, + FilePathTooLong, + TemporaryFileNotInTempDir, + UnsupportedFormat, + UnsupportedMedium, + UnsupportedDepth, + }; + + pub fn deinit(self: *Image, alloc: Allocator) void { + if (self.data.len > 0) alloc.free(self.data); + } + + /// Mostly for logging + pub fn withoutData(self: *const Image) Image { + var copy = self.*; + copy.data = ""; + return copy; + } +}; + +/// The rect taken up by some image placement, in grid cells. 
This will +/// be rounded up to the nearest grid cell since we can't place images +/// in partial grid cells. +pub const Rect = struct { + top_left: point.ScreenPoint = .{}, + bottom_right: point.ScreenPoint = .{}, + + /// True if the rect contains a given screen point. + pub fn contains(self: Rect, p: point.ScreenPoint) bool { + return p.y >= self.top_left.y and + p.y <= self.bottom_right.y and + p.x >= self.top_left.x and + p.x <= self.bottom_right.x; + } +}; + +/// Easy base64 encoding function. +fn testB64(alloc: Allocator, data: []const u8) ![]const u8 { + const B64Encoder = std.base64.standard.Encoder; + var b64 = try alloc.alloc(u8, B64Encoder.calcSize(data.len)); + errdefer alloc.free(b64); + return B64Encoder.encode(b64, data); +} + +/// Easy base64 decoding function. +fn testB64Decode(alloc: Allocator, data: []const u8) ![]const u8 { + const B64Decoder = std.base64.standard.Decoder; + var result = try alloc.alloc(u8, try B64Decoder.calcSizeForSlice(data)); + errdefer alloc.free(result); + try B64Decoder.decode(result, data); + return result; +} + +// This specifically tests we ALLOW invalid RGB data because Kitty +// documents that this should work. 
+test "image load with invalid RGB data" { + const testing = std.testing; + const alloc = testing.allocator; + + // _Gi=31,s=1,v=1,a=q,t=d,f=24;AAAA\ + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .rgb, + .width = 1, + .height = 1, + .image_id = 31, + } }, + .data = try alloc.dupe(u8, "AAAA"), + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); +} + +test "image load with image too wide" { + const testing = std.testing; + const alloc = testing.allocator; + + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .rgb, + .width = max_dimension + 1, + .height = 1, + .image_id = 31, + } }, + .data = try alloc.dupe(u8, "AAAA"), + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); + try testing.expectError(error.DimensionsTooLarge, loading.complete(alloc)); +} + +test "image load with image too tall" { + const testing = std.testing; + const alloc = testing.allocator; + + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .rgb, + .height = max_dimension + 1, + .width = 1, + .image_id = 31, + } }, + .data = try alloc.dupe(u8, "AAAA"), + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); + try testing.expectError(error.DimensionsTooLarge, loading.complete(alloc)); +} + +test "image load: rgb, zlib compressed, direct" { + const testing = std.testing; + const alloc = testing.allocator; + + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .rgb, + .medium = .direct, + .compression = .zlib_deflate, + .height = 96, + .width = 128, + .image_id = 31, + } }, + .data = try alloc.dupe( + u8, + @embedFile("testdata/image-rgb-zlib_deflate-128x96-2147483647.data"), + ), + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); + var img = try 
loading.complete(alloc); + defer img.deinit(alloc); + + // should be decompressed + try testing.expect(img.compression == .none); +} + +test "image load: rgb, not compressed, direct" { + const testing = std.testing; + const alloc = testing.allocator; + + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .rgb, + .medium = .direct, + .compression = .none, + .width = 20, + .height = 15, + .image_id = 31, + } }, + .data = try alloc.dupe( + u8, + @embedFile("testdata/image-rgb-none-20x15-2147483647.data"), + ), + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); + var img = try loading.complete(alloc); + defer img.deinit(alloc); + + // should be decompressed + try testing.expect(img.compression == .none); +} + +test "image load: rgb, zlib compressed, direct, chunked" { + const testing = std.testing; + const alloc = testing.allocator; + + const data = @embedFile("testdata/image-rgb-zlib_deflate-128x96-2147483647.data"); + + // Setup our initial chunk + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .rgb, + .medium = .direct, + .compression = .zlib_deflate, + .height = 96, + .width = 128, + .image_id = 31, + .more_chunks = true, + } }, + .data = try alloc.dupe(u8, data[0..1024]), + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); + + // Read our remaining chunks + var fbs = std.io.fixedBufferStream(data[1024..]); + var buf: [1024]u8 = undefined; + while (fbs.reader().readAll(&buf)) |size| { + try loading.addData(alloc, buf[0..size]); + if (size < buf.len) break; + } else |err| return err; + + // Complete + var img = try loading.complete(alloc); + defer img.deinit(alloc); + try testing.expect(img.compression == .none); +} + +test "image load: rgb, zlib compressed, direct, chunked with zero initial chunk" { + const testing = std.testing; + const alloc = testing.allocator; + + const data = 
@embedFile("testdata/image-rgb-zlib_deflate-128x96-2147483647.data"); + + // Setup our initial chunk + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .rgb, + .medium = .direct, + .compression = .zlib_deflate, + .height = 96, + .width = 128, + .image_id = 31, + .more_chunks = true, + } }, + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); + + // Read our remaining chunks + var fbs = std.io.fixedBufferStream(data); + var buf: [1024]u8 = undefined; + while (fbs.reader().readAll(&buf)) |size| { + try loading.addData(alloc, buf[0..size]); + if (size < buf.len) break; + } else |err| return err; + + // Complete + var img = try loading.complete(alloc); + defer img.deinit(alloc); + try testing.expect(img.compression == .none); +} + +test "image load: rgb, not compressed, temporary file" { + const testing = std.testing; + const alloc = testing.allocator; + + var tmp_dir = try internal_os.TempDir.init(); + defer tmp_dir.deinit(); + const data = try testB64Decode( + alloc, + @embedFile("testdata/image-rgb-none-20x15-2147483647.data"), + ); + defer alloc.free(data); + try tmp_dir.dir.writeFile("image.data", data); + + var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; + const path = try tmp_dir.dir.realpath("image.data", &buf); + + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .rgb, + .medium = .temporary_file, + .compression = .none, + .width = 20, + .height = 15, + .image_id = 31, + } }, + .data = try testB64(alloc, path), + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); + var img = try loading.complete(alloc); + defer img.deinit(alloc); + try testing.expect(img.compression == .none); + + // Temporary file should be gone + try testing.expectError(error.FileNotFound, tmp_dir.dir.access(path, .{})); +} + +test "image load: rgb, not compressed, regular file" { + const testing = std.testing; + const 
alloc = testing.allocator; + + var tmp_dir = try internal_os.TempDir.init(); + defer tmp_dir.deinit(); + const data = try testB64Decode( + alloc, + @embedFile("testdata/image-rgb-none-20x15-2147483647.data"), + ); + defer alloc.free(data); + try tmp_dir.dir.writeFile("image.data", data); + + var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; + const path = try tmp_dir.dir.realpath("image.data", &buf); + + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .rgb, + .medium = .file, + .compression = .none, + .width = 20, + .height = 15, + .image_id = 31, + } }, + .data = try testB64(alloc, path), + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); + var img = try loading.complete(alloc); + defer img.deinit(alloc); + try testing.expect(img.compression == .none); + try tmp_dir.dir.access(path, .{}); +} + +test "image load: png, not compressed, regular file" { + const testing = std.testing; + const alloc = testing.allocator; + + var tmp_dir = try internal_os.TempDir.init(); + defer tmp_dir.deinit(); + const data = @embedFile("testdata/image-png-none-50x76-2147483647-raw.data"); + try tmp_dir.dir.writeFile("image.data", data); + + var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; + const path = try tmp_dir.dir.realpath("image.data", &buf); + + var cmd: command.Command = .{ + .control = .{ .transmit = .{ + .format = .png, + .medium = .file, + .compression = .none, + .width = 0, + .height = 0, + .image_id = 31, + } }, + .data = try testB64(alloc, path), + }; + defer cmd.deinit(alloc); + var loading = try LoadingImage.init(alloc, &cmd); + defer loading.deinit(alloc); + var img = try loading.complete(alloc); + defer img.deinit(alloc); + try testing.expect(img.compression == .none); + try testing.expect(img.format == .rgb); + try tmp_dir.dir.access(path, .{}); +} diff --git a/src/terminal/kitty/graphics_storage.zig b/src/terminal/kitty/graphics_storage.zig new file mode 100644 index 
000000000..00c0b0666 --- /dev/null +++ b/src/terminal/kitty/graphics_storage.zig @@ -0,0 +1,746 @@ +const std = @import("std"); +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; +const ArenaAllocator = std.heap.ArenaAllocator; + +const terminal = @import("../main.zig"); +const point = @import("../point.zig"); +const command = @import("graphics_command.zig"); +const Screen = @import("../Screen.zig"); +const LoadingImage = @import("graphics_image.zig").LoadingImage; +const Image = @import("graphics_image.zig").Image; +const Rect = @import("graphics_image.zig").Rect; +const Command = command.Command; +const ScreenPoint = point.ScreenPoint; + +const log = std.log.scoped(.kitty_gfx); + +/// An image storage is associated with a terminal screen (i.e. main +/// screen, alt screen) and contains all the transmitted images and +/// placements. +pub const ImageStorage = struct { + const ImageMap = std.AutoHashMapUnmanaged(u32, Image); + const PlacementMap = std.AutoHashMapUnmanaged(PlacementKey, Placement); + + /// Dirty is set to true if placements or images change. This is + /// purely informational for the renderer and doesn't affect the + /// correctness of the program. The renderer must set this to false + /// if it cares about this value. + dirty: bool = false, + + /// This is the next automatically assigned ID. We start mid-way + /// through the u32 range to avoid collisions with buggy programs. + next_id: u32 = 2147483647, + + /// The set of images that are currently known. + images: ImageMap = .{}, + + /// The set of placements for loaded images. + placements: PlacementMap = .{}, + + /// Non-null if there is an in-progress loading image. + loading: ?*LoadingImage = null, + + /// The total bytes of image data that have been loaded and the limit. + /// If the limit is reached, the oldest images will be evicted to make + /// space. Unused images take priority. 
+ total_bytes: usize = 0, + total_limit: usize = 320 * 1000 * 1000, // 320MB + + pub fn deinit(self: *ImageStorage, alloc: Allocator) void { + if (self.loading) |loading| loading.destroy(alloc); + + var it = self.images.iterator(); + while (it.next()) |kv| kv.value_ptr.deinit(alloc); + self.images.deinit(alloc); + + self.placements.deinit(alloc); + } + + /// Kitty image protocol is enabled if we have a non-zero limit. + pub fn enabled(self: *const ImageStorage) bool { + return self.total_limit != 0; + } + + /// Sets the limit in bytes for the total amount of image data that + /// can be loaded. If this limit is lower, this will do an eviction + /// if necessary. If the value is zero, then Kitty image protocol will + /// be disabled. + pub fn setLimit(self: *ImageStorage, alloc: Allocator, limit: usize) !void { + // Special case disabling by quickly deleting all + if (limit == 0) { + self.deinit(alloc); + self.* = .{}; + } + + // If we re lowering our limit, check if we need to evict. + if (limit < self.total_bytes) { + const req_bytes = self.total_bytes - limit; + log.info("evicting images to lower limit, evicting={}", .{req_bytes}); + if (!try self.evictImage(alloc, req_bytes)) { + log.warn("failed to evict enough images for required bytes", .{}); + } + } + + self.total_limit = limit; + } + + /// Add an already-loaded image to the storage. This will automatically + /// free any existing image with the same ID. + pub fn addImage(self: *ImageStorage, alloc: Allocator, img: Image) Allocator.Error!void { + // If the image itself is over the limit, then error immediately + if (img.data.len > self.total_limit) return error.OutOfMemory; + + // If this would put us over the limit, then evict. 
+ const total_bytes = self.total_bytes + img.data.len; + if (total_bytes > self.total_limit) { + const req_bytes = total_bytes - self.total_limit; + log.info("evicting images to make space for {} bytes", .{req_bytes}); + if (!try self.evictImage(alloc, req_bytes)) { + log.warn("failed to evict enough images for required bytes", .{}); + return error.OutOfMemory; + } + } + + // Do the gop op first so if it fails we don't get a partial state + const gop = try self.images.getOrPut(alloc, img.id); + + log.debug("addImage image={}", .{img: { + var copy = img; + copy.data = ""; + break :img copy; + }}); + + // Write our new image + if (gop.found_existing) { + self.total_bytes -= gop.value_ptr.data.len; + gop.value_ptr.deinit(alloc); + } + + gop.value_ptr.* = img; + self.total_bytes += img.data.len; + + self.dirty = true; + } + + /// Add a placement for a given image. The caller must verify in advance + /// the image exists to prevent memory corruption. + pub fn addPlacement( + self: *ImageStorage, + alloc: Allocator, + image_id: u32, + placement_id: u32, + p: Placement, + ) !void { + assert(self.images.get(image_id) != null); + log.debug("placement image_id={} placement_id={} placement={}\n", .{ + image_id, + placement_id, + p, + }); + + const key: PlacementKey = .{ .image_id = image_id, .placement_id = placement_id }; + const gop = try self.placements.getOrPut(alloc, key); + gop.value_ptr.* = p; + + self.dirty = true; + } + + /// Get an image by its ID. If the image doesn't exist, null is returned. + pub fn imageById(self: *const ImageStorage, image_id: u32) ?Image { + return self.images.get(image_id); + } + + /// Get an image by its number. If the image doesn't exist, return null. 
+ pub fn imageByNumber(self: *const ImageStorage, image_number: u32) ?Image { + var newest: ?Image = null; + + var it = self.images.iterator(); + while (it.next()) |kv| { + if (kv.value_ptr.number == image_number) { + if (newest == null or + kv.value_ptr.transmit_time.order(newest.?.transmit_time) == .gt) + { + newest = kv.value_ptr.*; + } + } + } + + return newest; + } + + /// Delete placements, images. + pub fn delete( + self: *ImageStorage, + alloc: Allocator, + t: *const terminal.Terminal, + cmd: command.Delete, + ) void { + switch (cmd) { + .all => |delete_images| if (delete_images) { + // We just reset our entire state. + self.deinit(alloc); + self.* = .{ .dirty = true }; + } else { + // Delete all our placements + self.placements.deinit(alloc); + self.placements = .{}; + self.dirty = true; + }, + + .id => |v| self.deleteById( + alloc, + v.image_id, + v.placement_id, + v.delete, + ), + + .newest => |v| newest: { + const img = self.imageByNumber(v.image_number) orelse break :newest; + self.deleteById(alloc, img.id, v.placement_id, v.delete); + }, + + .intersect_cursor => |delete_images| { + const target = (point.Viewport{ + .x = t.screen.cursor.x, + .y = t.screen.cursor.y, + }).toScreen(&t.screen); + self.deleteIntersecting(alloc, t, target, delete_images, {}, null); + }, + + .intersect_cell => |v| { + const target = (point.Viewport{ .x = v.x, .y = v.y }).toScreen(&t.screen); + self.deleteIntersecting(alloc, t, target, v.delete, {}, null); + }, + + .intersect_cell_z => |v| { + const target = (point.Viewport{ .x = v.x, .y = v.y }).toScreen(&t.screen); + self.deleteIntersecting(alloc, t, target, v.delete, v.z, struct { + fn filter(ctx: i32, p: Placement) bool { + return p.z == ctx; + } + }.filter); + }, + + .column => |v| { + var it = self.placements.iterator(); + while (it.next()) |entry| { + const img = self.imageById(entry.key_ptr.image_id) orelse continue; + const rect = entry.value_ptr.rect(img, t); + if (rect.top_left.x <= v.x and rect.bottom_right.x >= 
v.x) { + self.placements.removeByPtr(entry.key_ptr); + if (v.delete) self.deleteIfUnused(alloc, img.id); + } + } + }, + + .row => |v| { + // Get the screenpoint y + const y = (point.Viewport{ .x = 0, .y = v.y }).toScreen(&t.screen).y; + + var it = self.placements.iterator(); + while (it.next()) |entry| { + const img = self.imageById(entry.key_ptr.image_id) orelse continue; + const rect = entry.value_ptr.rect(img, t); + if (rect.top_left.y <= y and rect.bottom_right.y >= y) { + self.placements.removeByPtr(entry.key_ptr); + if (v.delete) self.deleteIfUnused(alloc, img.id); + } + } + }, + + .z => |v| { + var it = self.placements.iterator(); + while (it.next()) |entry| { + if (entry.value_ptr.z == v.z) { + const image_id = entry.key_ptr.image_id; + self.placements.removeByPtr(entry.key_ptr); + if (v.delete) self.deleteIfUnused(alloc, image_id); + } + } + }, + + // We don't support animation frames yet so they are successfully + // deleted! + .animation_frames => {}, + } + } + + fn deleteById( + self: *ImageStorage, + alloc: Allocator, + image_id: u32, + placement_id: u32, + delete_unused: bool, + ) void { + // If no placement, we delete all placements with the ID + if (placement_id == 0) { + var it = self.placements.iterator(); + while (it.next()) |entry| { + if (entry.key_ptr.image_id == image_id) { + self.placements.removeByPtr(entry.key_ptr); + } + } + } else { + _ = self.placements.remove(.{ + .image_id = image_id, + .placement_id = placement_id, + }); + } + + // If this is specified, then we also delete the image + // if it is no longer in use. + if (delete_unused) self.deleteIfUnused(alloc, image_id); + } + + /// Delete an image if it is unused. + fn deleteIfUnused(self: *ImageStorage, alloc: Allocator, image_id: u32) void { + var it = self.placements.iterator(); + while (it.next()) |kv| { + if (kv.key_ptr.image_id == image_id) { + return; + } + } + + // If we get here, we can delete the image. 
+ if (self.images.getEntry(image_id)) |entry| { + self.total_bytes -= entry.value_ptr.data.len; + entry.value_ptr.deinit(alloc); + self.images.removeByPtr(entry.key_ptr); + } + } + + /// Deletes all placements intersecting a screen point. + fn deleteIntersecting( + self: *ImageStorage, + alloc: Allocator, + t: *const terminal.Terminal, + p: point.ScreenPoint, + delete_unused: bool, + filter_ctx: anytype, + comptime filter: ?fn (@TypeOf(filter_ctx), Placement) bool, + ) void { + var it = self.placements.iterator(); + while (it.next()) |entry| { + const img = self.imageById(entry.key_ptr.image_id) orelse continue; + const rect = entry.value_ptr.rect(img, t); + if (rect.contains(p)) { + if (filter) |f| if (!f(filter_ctx, entry.value_ptr.*)) continue; + self.placements.removeByPtr(entry.key_ptr); + if (delete_unused) self.deleteIfUnused(alloc, img.id); + } + } + } + + /// Evict image to make space. This will evict the oldest image, + /// prioritizing unused images first, as recommended by the published + /// Kitty spec. + /// + /// This will evict as many images as necessary to make space for + /// req bytes. + fn evictImage(self: *ImageStorage, alloc: Allocator, req: usize) !bool { + assert(req <= self.total_limit); + + // Ironically we allocate to evict. We should probably redesign the + // data structures to avoid this but for now allocating a little + // bit is fine compared to the megabytes we're looking to save. + const Candidate = struct { + id: u32, + time: std.time.Instant, + used: bool, + }; + + var candidates = std.ArrayList(Candidate).init(alloc); + defer candidates.deinit(); + + var it = self.images.iterator(); + while (it.next()) |kv| { + const img = kv.value_ptr; + + // This is a huge waste. See comment above about redesigning + // our data structures to avoid this. Eviction should be very + // rare though and we never have that many images/placements + // so hopefully this will last a long time. 
+ const used = used: { + var p_it = self.placements.iterator(); + while (p_it.next()) |p_kv| { + if (p_kv.key_ptr.image_id == img.id) { + break :used true; + } + } + + break :used false; + }; + + try candidates.append(.{ + .id = img.id, + .time = img.transmit_time, + .used = used, + }); + } + + // Sort + std.mem.sortUnstable( + Candidate, + candidates.items, + {}, + struct { + fn lessThan( + ctx: void, + lhs: Candidate, + rhs: Candidate, + ) bool { + _ = ctx; + + // If they're usage matches, then its based on time. + if (lhs.used == rhs.used) return switch (lhs.time.order(rhs.time)) { + .lt => true, + .gt => false, + .eq => lhs.id < rhs.id, + }; + + // If not used, then its a better candidate + return !lhs.used; + } + }.lessThan, + ); + + // They're in order of best to evict. + var evicted: usize = 0; + for (candidates.items) |c| { + // Delete all the placements for this image and the image. + var p_it = self.placements.iterator(); + while (p_it.next()) |entry| { + if (entry.key_ptr.image_id == c.id) { + self.placements.removeByPtr(entry.key_ptr); + } + } + + if (self.images.getEntry(c.id)) |entry| { + log.info("evicting image id={} bytes={}", .{ c.id, entry.value_ptr.data.len }); + + evicted += entry.value_ptr.data.len; + self.total_bytes -= entry.value_ptr.data.len; + + entry.value_ptr.deinit(alloc); + self.images.removeByPtr(entry.key_ptr); + + if (evicted > req) return true; + } + } + + return false; + } + + /// Every placement is uniquely identified by the image ID and the + /// placement ID. If an image ID isn't specified it is assumed to be 0. + /// Likewise, if a placement ID isn't specified it is assumed to be 0. + pub const PlacementKey = struct { + image_id: u32, + placement_id: u32, + }; + + pub const Placement = struct { + /// The location of the image on the screen. + point: ScreenPoint, + + /// Offset of the x/y from the top-left of the cell. 
+ x_offset: u32 = 0, + y_offset: u32 = 0, + + /// Source rectangle for the image to pull from + source_x: u32 = 0, + source_y: u32 = 0, + source_width: u32 = 0, + source_height: u32 = 0, + + /// The columns/rows this image occupies. + columns: u32 = 0, + rows: u32 = 0, + + /// The z-index for this placement. + z: i32 = 0, + + /// Returns a selection of the entire rectangle this placement + /// occupies within the screen. + pub fn rect( + self: Placement, + image: Image, + t: *const terminal.Terminal, + ) Rect { + // If we have columns/rows specified we can simplify this whole thing. + if (self.columns > 0 and self.rows > 0) { + return .{ + .top_left = self.point, + .bottom_right = .{ + .x = @min(self.point.x + self.columns, t.cols - 1), + .y = self.point.y + self.rows, + }, + }; + } + + // Calculate our cell size. + const terminal_width_f64: f64 = @floatFromInt(t.width_px); + const terminal_height_f64: f64 = @floatFromInt(t.height_px); + const grid_columns_f64: f64 = @floatFromInt(t.cols); + const grid_rows_f64: f64 = @floatFromInt(t.rows); + const cell_width_f64 = terminal_width_f64 / grid_columns_f64; + const cell_height_f64 = terminal_height_f64 / grid_rows_f64; + + // Our image width + const width_px = if (self.source_width > 0) self.source_width else image.width; + const height_px = if (self.source_height > 0) self.source_height else image.height; + + // Calculate our image size in grid cells + const width_f64: f64 = @floatFromInt(width_px); + const height_f64: f64 = @floatFromInt(height_px); + const width_cells: u32 = @intFromFloat(@ceil(width_f64 / cell_width_f64)); + const height_cells: u32 = @intFromFloat(@ceil(height_f64 / cell_height_f64)); + + return .{ + .top_left = self.point, + .bottom_right = .{ + .x = @min(self.point.x + width_cells, t.cols - 1), + .y = self.point.y + height_cells, + }, + }; + } + }; +}; + +test "storage: delete all placements and images" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try 
terminal.Terminal.init(alloc, 3, 3); + defer t.deinit(alloc); + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1 }); + try s.addImage(alloc, .{ .id = 2 }); + try s.addImage(alloc, .{ .id = 3 }); + try s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 1, .y = 1 } }); + try s.addPlacement(alloc, 2, 1, .{ .point = .{ .x = 1, .y = 1 } }); + + s.delete(alloc, &t, .{ .all = true }); + try testing.expect(s.dirty); + try testing.expectEqual(@as(usize, 0), s.images.count()); + try testing.expectEqual(@as(usize, 0), s.placements.count()); +} + +test "storage: delete all placements" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try terminal.Terminal.init(alloc, 3, 3); + defer t.deinit(alloc); + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1 }); + try s.addImage(alloc, .{ .id = 2 }); + try s.addImage(alloc, .{ .id = 3 }); + try s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 1, .y = 1 } }); + try s.addPlacement(alloc, 2, 1, .{ .point = .{ .x = 1, .y = 1 } }); + + s.delete(alloc, &t, .{ .all = false }); + try testing.expect(s.dirty); + try testing.expectEqual(@as(usize, 0), s.placements.count()); + try testing.expectEqual(@as(usize, 3), s.images.count()); +} + +test "storage: delete all placements by image id" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try terminal.Terminal.init(alloc, 3, 3); + defer t.deinit(alloc); + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1 }); + try s.addImage(alloc, .{ .id = 2 }); + try s.addImage(alloc, .{ .id = 3 }); + try s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 1, .y = 1 } }); + try s.addPlacement(alloc, 2, 1, .{ .point = .{ .x = 1, .y = 1 } }); + + s.delete(alloc, &t, .{ .id = .{ .image_id = 2 } }); + try testing.expect(s.dirty); + try testing.expectEqual(@as(usize, 1), s.placements.count()); + try testing.expectEqual(@as(usize, 3), s.images.count()); +} 
+ +test "storage: delete all placements by image id and unused images" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try terminal.Terminal.init(alloc, 3, 3); + defer t.deinit(alloc); + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1 }); + try s.addImage(alloc, .{ .id = 2 }); + try s.addImage(alloc, .{ .id = 3 }); + try s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 1, .y = 1 } }); + try s.addPlacement(alloc, 2, 1, .{ .point = .{ .x = 1, .y = 1 } }); + + s.delete(alloc, &t, .{ .id = .{ .delete = true, .image_id = 2 } }); + try testing.expect(s.dirty); + try testing.expectEqual(@as(usize, 1), s.placements.count()); + try testing.expectEqual(@as(usize, 2), s.images.count()); +} + +test "storage: delete placement by specific id" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try terminal.Terminal.init(alloc, 3, 3); + defer t.deinit(alloc); + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1 }); + try s.addImage(alloc, .{ .id = 2 }); + try s.addImage(alloc, .{ .id = 3 }); + try s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 1, .y = 1 } }); + try s.addPlacement(alloc, 1, 2, .{ .point = .{ .x = 1, .y = 1 } }); + try s.addPlacement(alloc, 2, 1, .{ .point = .{ .x = 1, .y = 1 } }); + + s.delete(alloc, &t, .{ .id = .{ + .delete = true, + .image_id = 1, + .placement_id = 2, + } }); + try testing.expect(s.dirty); + try testing.expectEqual(@as(usize, 2), s.placements.count()); + try testing.expectEqual(@as(usize, 3), s.images.count()); +} + +test "storage: delete intersecting cursor" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try terminal.Terminal.init(alloc, 100, 100); + defer t.deinit(alloc); + t.width_px = 100; + t.height_px = 100; + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1, .width = 50, .height = 50 }); + try s.addImage(alloc, .{ .id = 2, .width = 25, 
.height = 25 }); + try s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 0, .y = 0 } }); + try s.addPlacement(alloc, 1, 2, .{ .point = .{ .x = 25, .y = 25 } }); + + t.screen.cursor.x = 12; + t.screen.cursor.y = 12; + + s.delete(alloc, &t, .{ .intersect_cursor = false }); + try testing.expect(s.dirty); + try testing.expectEqual(@as(usize, 1), s.placements.count()); + try testing.expectEqual(@as(usize, 2), s.images.count()); + + // verify the placement is what we expect + try testing.expect(s.placements.get(.{ .image_id = 1, .placement_id = 2 }) != null); +} + +test "storage: delete intersecting cursor plus unused" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try terminal.Terminal.init(alloc, 100, 100); + defer t.deinit(alloc); + t.width_px = 100; + t.height_px = 100; + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1, .width = 50, .height = 50 }); + try s.addImage(alloc, .{ .id = 2, .width = 25, .height = 25 }); + try s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 0, .y = 0 } }); + try s.addPlacement(alloc, 1, 2, .{ .point = .{ .x = 25, .y = 25 } }); + + t.screen.cursor.x = 12; + t.screen.cursor.y = 12; + + s.delete(alloc, &t, .{ .intersect_cursor = true }); + try testing.expect(s.dirty); + try testing.expectEqual(@as(usize, 1), s.placements.count()); + try testing.expectEqual(@as(usize, 2), s.images.count()); + + // verify the placement is what we expect + try testing.expect(s.placements.get(.{ .image_id = 1, .placement_id = 2 }) != null); +} + +test "storage: delete intersecting cursor hits multiple" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try terminal.Terminal.init(alloc, 100, 100); + defer t.deinit(alloc); + t.width_px = 100; + t.height_px = 100; + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1, .width = 50, .height = 50 }); + try s.addImage(alloc, .{ .id = 2, .width = 25, .height = 25 }); + try 
s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 0, .y = 0 } }); + try s.addPlacement(alloc, 1, 2, .{ .point = .{ .x = 25, .y = 25 } }); + + t.screen.cursor.x = 26; + t.screen.cursor.y = 26; + + s.delete(alloc, &t, .{ .intersect_cursor = true }); + try testing.expect(s.dirty); + try testing.expectEqual(@as(usize, 0), s.placements.count()); + try testing.expectEqual(@as(usize, 1), s.images.count()); +} + +test "storage: delete by column" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try terminal.Terminal.init(alloc, 100, 100); + defer t.deinit(alloc); + t.width_px = 100; + t.height_px = 100; + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1, .width = 50, .height = 50 }); + try s.addImage(alloc, .{ .id = 2, .width = 25, .height = 25 }); + try s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 0, .y = 0 } }); + try s.addPlacement(alloc, 1, 2, .{ .point = .{ .x = 25, .y = 25 } }); + + s.delete(alloc, &t, .{ .column = .{ + .delete = false, + .x = 60, + } }); + try testing.expect(s.dirty); + try testing.expectEqual(@as(usize, 1), s.placements.count()); + try testing.expectEqual(@as(usize, 2), s.images.count()); + + // verify the placement is what we expect + try testing.expect(s.placements.get(.{ .image_id = 1, .placement_id = 1 }) != null); +} + +test "storage: delete by row" { + const testing = std.testing; + const alloc = testing.allocator; + var t = try terminal.Terminal.init(alloc, 100, 100); + defer t.deinit(alloc); + t.width_px = 100; + t.height_px = 100; + + var s: ImageStorage = .{}; + defer s.deinit(alloc); + try s.addImage(alloc, .{ .id = 1, .width = 50, .height = 50 }); + try s.addImage(alloc, .{ .id = 2, .width = 25, .height = 25 }); + try s.addPlacement(alloc, 1, 1, .{ .point = .{ .x = 0, .y = 0 } }); + try s.addPlacement(alloc, 1, 2, .{ .point = .{ .x = 25, .y = 25 } }); + + s.delete(alloc, &t, .{ .row = .{ + .delete = false, + .y = 60, + } }); + try testing.expect(s.dirty); + try 
testing.expectEqual(@as(usize, 1), s.placements.count()); + try testing.expectEqual(@as(usize, 2), s.images.count()); + + // verify the placement is what we expect + try testing.expect(s.placements.get(.{ .image_id = 1, .placement_id = 1 }) != null); +} diff --git a/src/terminal/kitty/key.zig b/src/terminal/kitty/key.zig new file mode 100644 index 000000000..938bf65b5 --- /dev/null +++ b/src/terminal/kitty/key.zig @@ -0,0 +1,151 @@ +//! Kitty keyboard protocol support. + +const std = @import("std"); + +/// Stack for the key flags. This implements the push/pop behavior +/// of the CSI > u and CSI < u sequences. We implement the stack as +/// fixed size to avoid heap allocation. +pub const KeyFlagStack = struct { + const len = 8; + + flags: [len]KeyFlags = .{.{}} ** len, + idx: u3 = 0, + + /// Return the current stack value + pub fn current(self: KeyFlagStack) KeyFlags { + return self.flags[self.idx]; + } + + /// Perform the "set" operation as described in the spec for + /// the CSI = u sequence. + pub fn set( + self: *KeyFlagStack, + mode: KeySetMode, + v: KeyFlags, + ) void { + switch (mode) { + .set => self.flags[self.idx] = v, + .@"or" => self.flags[self.idx] = @bitCast( + self.flags[self.idx].int() | v.int(), + ), + .not => self.flags[self.idx] = @bitCast( + self.flags[self.idx].int() & ~v.int(), + ), + } + } + + /// Push a new set of flags onto the stack. If the stack is full + /// then the oldest entry is evicted. + pub fn push(self: *KeyFlagStack, flags: KeyFlags) void { + // Overflow and wrap around if we're full, which evicts + // the oldest entry. + self.idx +%= 1; + self.flags[self.idx] = flags; + } + + /// Pop `n` entries from the stack. This will just wrap around + /// if `n` is greater than the amount in the stack. + pub fn pop(self: *KeyFlagStack, n: usize) void { + // If n is more than our length then we just reset the stack. + // This also avoids a DoS vector where a malicious client + // could send a huge number of pop commands to waste cpu. 
+ if (n >= self.flags.len) { + self.idx = 0; + self.flags = .{.{}} ** len; + return; + } + + for (0..n) |_| { + self.flags[self.idx] = .{}; + self.idx -%= 1; + } + } + + // Make sure we the overflow works as expected + test { + const testing = std.testing; + var stack: KeyFlagStack = .{}; + stack.idx = stack.flags.len - 1; + stack.idx +%= 1; + try testing.expect(stack.idx == 0); + + stack.idx = 0; + stack.idx -%= 1; + try testing.expect(stack.idx == stack.flags.len - 1); + } +}; + +/// The possible flags for the Kitty keyboard protocol. +pub const KeyFlags = packed struct(u5) { + disambiguate: bool = false, + report_events: bool = false, + report_alternates: bool = false, + report_all: bool = false, + report_associated: bool = false, + + pub fn int(self: KeyFlags) u5 { + return @bitCast(self); + } + + // Its easy to get packed struct ordering wrong so this test checks. + test { + const testing = std.testing; + + try testing.expectEqual( + @as(u5, 0b1), + (KeyFlags{ .disambiguate = true }).int(), + ); + try testing.expectEqual( + @as(u5, 0b10), + (KeyFlags{ .report_events = true }).int(), + ); + } +}; + +/// The possible modes for setting the key flags. 
+pub const KeySetMode = enum { set, @"or", not }; + +test "KeyFlagStack: push pop" { + const testing = std.testing; + var stack: KeyFlagStack = .{}; + stack.push(.{ .disambiguate = true }); + try testing.expectEqual( + KeyFlags{ .disambiguate = true }, + stack.current(), + ); + + stack.pop(1); + try testing.expectEqual(KeyFlags{}, stack.current()); +} + +test "KeyFlagStack: pop big number" { + const testing = std.testing; + var stack: KeyFlagStack = .{}; + stack.pop(100); + try testing.expectEqual(KeyFlags{}, stack.current()); +} + +test "KeyFlagStack: set" { + const testing = std.testing; + var stack: KeyFlagStack = .{}; + stack.set(.set, .{ .disambiguate = true }); + try testing.expectEqual( + KeyFlags{ .disambiguate = true }, + stack.current(), + ); + + stack.set(.@"or", .{ .report_events = true }); + try testing.expectEqual( + KeyFlags{ + .disambiguate = true, + .report_events = true, + }, + stack.current(), + ); + + stack.set(.not, .{ .report_events = true }); + try testing.expectEqual( + KeyFlags{ .disambiguate = true }, + stack.current(), + ); +} diff --git a/src/terminal/kitty/testdata/image-png-none-50x76-2147483647-raw.data b/src/terminal/kitty/testdata/image-png-none-50x76-2147483647-raw.data new file mode 100644 index 000000000..032cb07c7 Binary files /dev/null and b/src/terminal/kitty/testdata/image-png-none-50x76-2147483647-raw.data differ diff --git a/src/terminal/kitty/testdata/image-rgb-none-20x15-2147483647.data b/src/terminal/kitty/testdata/image-rgb-none-20x15-2147483647.data new file mode 100644 index 000000000..f65d40ce8 --- /dev/null +++ b/src/terminal/kitty/testdata/image-rgb-none-20x15-2147483647.data @@ -0,0 +1 @@ 
+DRoeCxgcCxcjEh4qDBgkCxcjChYiCxcjCRclBRMhBxIXHysvTVNRbHJwcXB2Li0zCBYXEyEiCxkaDBobChcbCBUZDxsnBBAcEBwoChYiCxcjDBgkDhwqBxUjDBccm6aqy9HP1NrYzs3UsK+2IjAxCBYXCBYXBxUWFBoaDxUVICYqIyktERcZDxUXDxUVEhgYDhUTCxIQGh8XusC4zM7FvL61q6elmZWTTVtcDBobDRscCxkaKS8vaW9vxMnOur/EiY+RaW5wICYmW2FhfYOBQEZEnqSc4ebeqauilZaOsa2rm5eVcH5/GigpChgZCBYX0NHP3d7c3tzbx8XExsTEvry8wL241dLN0tDF0tDF29nM4d/StbKpzMrAUk5DZmJXeYSGKTU3ER0fDRkb1tfVysvJ0tDPsa+tr6ytop+gmZaRqaahuritw8G2urirqKaZiYZ9paKZZmJXamZbOkZIDhocBxMVBBASxMDBtrKzqqanoZ2ejYeLeHF2eXFvhn58npePta6ml5CKgXp0W1hPaWZdZWdSYmRPFiYADR0AFCQAEyMAt7O0lJCRf3t8eHR1Zl9kY1xhYVpYbGRieXJqeHFpdW1oc2tmcG1kX1xTbW9ajY96jp55kaF8kKB7kaF8sK6rcnFtX11cXFpZW1pWWFdTXVpTXltUaGJgY11bY11da2Vla25dam1ccHtTnqmBorVtp7pypLdvobRsh4aCaGdjWFZVXFpZYWBcZ2ZiaGVeZGFaY11bYlxaV1FRZ2FhdHdmbG9egItjo66GpLdvq752rL93rsF5kpKIZ2ddWFxTW19WbnZdipJ6cnhaaW9RaGhgV1ZPY2Jga2poanFQd35dk6Vpn7B0oLFvorNxm6xqmKlnv760enpwVlpRW19Wc3til5+Hl55/k5p7iIiAcnJqd3Z0bm1rcHdWh45tipxgladrkaJglKVjkaJgkqNh09DJiYZ/YmZdY2deeYZYjJlrj51ijpxhztHClJaIdHNvdHNvanNHi5RpmaxnjKBbmqhrmadqkJ5hi5lcxsO8jImCaGtiYmZdg5Bikp9xjJpfjpxh1djJqq2eamllZ2Zid4BVmKF2kqZhh5tWlaNmlaNmjpxfjJpdw729rqiodnZ0cHBuiplij55nj6FVjJ5SzdC9t7qncW1sXlpZh45iqbCEmKllmapmmqlqnq1unaxtoK9w \ No newline at end of file diff --git a/src/terminal/kitty/testdata/image-rgb-zlib_deflate-128x96-2147483647.data b/src/terminal/kitty/testdata/image-rgb-zlib_deflate-128x96-2147483647.data new file mode 100644 index 000000000..c1d6d87b3 --- /dev/null +++ b/src/terminal/kitty/testdata/image-rgb-zlib_deflate-128x96-2147483647.data @@ -0,0 +1 @@ 
+eJx8+3dYm3fW742eM4npVb1LgFu6M0km7UlmJr270EGo907v1RjbgMHGVNN7rxK9iKKGhEASvVcbY8dx2szz7H3OH+9168aMJ7P362tdvowshPistb7ru373LUcU0QlNcsZ4uGA9nTEeTmiSI4roiCI6IHEOSJwjCn8Sx48gPRwQJEcEwQGOd0XgYVhPFP40AusJQ5OgKKI72tMd7emM9nJGezlizjigT4PPd0CQ7OFEe7iHHYx0CkYEwxbh4YTydMGcdsF6umA9ndAEIFCewLcgCfYIvB0cZ48AfrQTmmCPwNrC0DYQjC0MawcjWINkD/ewh2Ec4Fjwf1+EoU7B0TYIzCk42g6OecEN/qIT1M4N+YIj5AVHiJ07wsYVZusCt3GGveDg/qIjxMEV4QxBO7gi3OA4VxjW0Q1p5wxzdEPaOEPtXOEvOLidcoLYuaBsnBA2LvA/ObjbuCPsoChXLN4NR3DFkZzQeHsExh6BccJgXXB4KAmP8CIhvbzgHh7ueKI7ngjzOIPwOgchnnZE4e0R2OcC+C5bOMIGBnfGeIDwT/iDAf7WTmiCM4bohCaA/O0RWJCkI4LgiCC4IQkI/Gmcx3ks6RyacAaO8XBHe7qhAKpOKE8H9Gl7lBcI/1kK/o2/DZzkiPRwRns9++nAD3JGewEpAAqAAAb4NsBfwQ6OA8LK3xZKtIUS7aBoexjGDo75A38bKOpFd4QDBG3vjvr/2Dg7wbAOUJQLEucMwzpBMaecoA7uKAdXhAsUg/M4Tzz9isfZ1zzPve557nW850twnCcETXSEoBwhKDsX1ClHuI0L3BGKcYBjbCFIRyTaDUdwxhAckFg7INFoBxTaGYuDEHEwDwLSywvh6QklWlNA8ISSTrvhPR1ReDs48CYBgEAZAymwQyBt4QhXnNd/wgf//Ye8HNckjGAPJzohgXBDEZHEs4QzrxHOvIbzehmG9XRFkkD+DgiSHZJkiyCC5MEu+E/+DgjS/wt/RxTeGUM8CWsNABmxhxNtoXgbCOGUO94WgrKDom1hQJyCo8HKt4WhHeAYFzTB1gXmCEG5InAucCwcR0KTTr984e1zr73phsA4QRAY0umX3njr6x8usQWinPzCnPzCjKyc69m5QlnEOx987PXSa6SzLyMJp92QeBtndwd3uD0E6gCFOaOxjki0ExLnYP1BdnCMAxLrjCG44QgQAglG8oR7eMFInhACyRVHcsESn2UKfDLaEYVzQuPBFNgjMCeE/2/8wQSd8LeHA0idUSQXtIc7mgTHn8Z6voTzehlNOgfFeLgiSaD+gPztkCQn1LEcWZPi+Z/8AQl6pj9A0z3TH0cUwNwV5wHGszYknvA/5Y5/0Q13yg1h4460gaKAgrfWP5gLWwjSxh1hxe5JPPvqy2++++XFK7KYeCqL97fPv4agcF4vvfbOBx8zeaIZy4JxfnFtZ296zjxjWVDpZ1q7eiNiEy/7BfkFU9/9+DPCmVewnmfdkFhnBNIdg3VCYVyxeJC/FT7OEYVzxhBAXXLHEyEEEoRAAnvECY13ROGAan/G3wmNd8YQwAetifN4Pk5SAAZIBuR/ogmOKKIrxtMV4+mC9nDDekHwZyD4M25YL2cUyRXjCcGfcceddkEfy5ob/jQQuLMuGGAW2MEIJ/BtER4g/+N5AbJF4MFwRBFdsCQ3vCcYLlgSyN8RRbSHE20guBfdMC+4ol90Rb7oijzlDowGMAt2UJQ9DO2GxLrA0R6nX3rtwjtUJruprbOzt18WGf319z8gMBivc+f8goKb2zsWllcOHtx/8PBw//7Bzt7u2ubWpEar0RuMJrNKNz0yPsEVST796psL77175rVXsWe8HOFQNzTOHYN3RuEdEVh7GM4ehnNEEJyQRBc00Q3r4YolumKJLmgCkCAU2hGNAcMeibJDIIFhYeVvrX+sNXfEk5p/fv6C8XwunmkC8YQ/qELOKJITkgi+B3fcaTjpPJRw1g3r5YY/7U44AyGehRDPuuP
P/YG/LcLDDgmUOjgsgO6wJhdUeHsE3pr9f/F/1gLA2wBexB37giv6Ty6oF1wQQArcUEAKoChbGNreKj7OMBSadPrTL74tKa9WT8+sbe4MjU/euluQV1gkDY+QhkcYTea1za1HP/109PjR1s727v7e5vaW0WSes8yb5hd0M8Y5y7xGbxidUucWFHEkkq8vX37t3XcQJAKC6OkER7mgCdZxgLFxR9tCMA5wvDOK4Ib1cMORnFF4e2sD2sIRDii0EwbrjMU5ojEOKLQDEnvSEWCf/qHg/1Xnz83N5+MYtfWZzw1WIqhLrjgvADjeyw3r4Y7zhOC9oKRz7oQzrjgv8PlW/vhTMLwtwsMe5QUW/7HmgK8GxwPDHRAfDxcs6UR/XLCkE/72CLwNBHNc/y7WcEPZQDB2ULQdFO0MQ7kiMGgc8YOP/lbf2LSwtPzw4cOVlZW1jXWVRq3TTw8MDU4b9Nu7O0ePj/YO9nZ2tze3NtbWVxeXFkwW88ysUaPTGowz04YZjW56Qq3pGxoeGh/Pzsv77LvvXv7zm25oHJzgAUI+5YZ40RVu4460h2GcUXg3HMkN6+GMIjzP3xmLccFhnbEYJwyQCyAR1sF9CoJ80R3xn7LzPH8Q7AneE+cJNsJ/8gcFxxVDcsUA78QN6wEhnnXDn3bGeIC6YQcj2MAJJ/yPlecEvvX1gZ46Hv0kEDsY/9IfBN4Whj3ljgVU6N/528MwLnBAfF678FZlbcP9w4cHDw7X1tb29/d39nZX1lYXlhbHxpXrmxtPf/n54aOHu/u7D48O1zfWNjbXzRaTfsYwa5rTTusMxhm1Vjc+OaXSTetn59QGQ1tPTzCT+e5f/4o/+xIUR3REYO2gKHD62EHRDnAsyN8VQ3JGAa1hD0PbIZDW+gdS4IrHWbMAiJIdMKQAe/YnV9jzsnNi+QC8x46F+MxsE44hw/EO8GOJBh+xRRyHPerZHEERHBE4ZzjOFUl4Pqfg823gQApsEUQ75LH/fDadQf44JyTQyy5oIpjHE/jHBhjwb4BMAYIPAVrABoI75YZygOPt3BH2EKQbAkU8cy4n5/aTJ0/XV1f2d3dWl5eWFuZ3dje3dzbm5+fX19ePHj387fdfHz0+2j/Y+/3XX7Y21rc3N8xzswaDYdH6x2AwLCwsTUxMTUxqJqe0Y5PqSc10cUWVT2DwG+9+gPU8644hOsLQL7hAbdwRgB1CYZxQKCcUygofSI0DHJB9JwwW5O+CQztjUdYvAedjA0WeggAB2pv/VH6Qv9Vm422h+JMU/N/42yFJIH/g1azvwRmOc7GO0X8N1mf1bwMngM8H968TCQLGwTP4IH8X9DH85/mDqxnI/5Q79kU3jB0UGIVuaIILEgdFY1976x2DwbixsbW3s72ztXl4/2BjbXVtfXlh0Tw/P7+8vPzT0yc///J0/2Dv4P7+3s725vqaadY4rdUYjUatVjs3N3dwcGCxLIyNjSvHVb19Q72DI31Do30jY/n3yn7wC7zw3ocYr/NOcAwAHwE4Imc01hWLdUajrfDRf+BvDaQ1AP2xij/qFASwbc9Gm8fz9uYEGoALgrOB4E5ScOzAn7mU5+2KE5oEzmVnFCDjTjC8M5zwB406yZc9ysMBbV1+McfqBL4CSP4knFEEME74g2G1c1iQP/D2ICgHONYVhYVgCafPn7/i57++urK7vfVgf2dve2Nna31pwbywOLe6trCyuryzu/3wwf393Z2VpcXd7a3V5aXFecuEcnR2xjBvNmvV6o21ta2Njbk588jIWH//8NDQ2OjY5NCwcmhsort3ICbl6n999uXp1/4MxXnYw9An/J0xSEcUHFgGrfyBxzHYE/6OaJQDCumAQjqiUVY7CkiQLQz1zNod2/vnU3DCH4R/PAKQ/+cUHO8LaI8T/o5QnBMM/58T3B7lAcZ/8nfGACMDlB0QvhMSD8bJAv48f2sKgPKwg6KBxkGgYXjSm++8c/NWzoLFfH9/b3nBvLxgXlma31h
bXlicW1icW1xaWN9YW1tZXpy3zJtNy4sL5rlZ06xxzjizYDGrp6ZWlpYW5+e3NjYmJ1UajU6l0s3OWvSGOd20cWBEKe8fKq2pvxxIfvmt99Ce58Ap4IBAOSLRDkiYPQJqD8NYAw3yd8biwMlrj0SAAWTh2fJlj8CA1uLkeAd0pP/iD8eB5y0gfEckwM0OeZIF6/MRQIBV6oIGUuCM9HSEkxxgRAcY8T8n+DP+RIfnTO+zowYSMMKwxOfhg68P/iyQ/ElYNxrcKXc0ODWgGALG44xfYIBaq1ldXlhbWdzZXFkwz6wsmxcXZs2WGZPZsGCZMxp0c0b9jF5rnpsxTGsMOvXW+srO5tqsQafTqMFE6HXa2ZmZaa3WYjLNm80zMzNarXZweKRbrqisa4qKT37/ky+J519zxwFiawtB2sPQ9nCkHQxh/8wAA2H1/8eag0SBAa4DgBe1/u9zq+Xxgc+/TUzwEOwZfCeU53HpPuMPfNczxf4/8j8Z3ycT5Nm8INg/2y/+bbOzLi/HmvMMviPiRPmxJwcpJ10ANCAMA/LHnz5fWFwE7FO7W0sL5vWV+XmTYWN9cWHeOGfSG2Y0Swvm2ZlpvU5tmNbMm2entap5k3Fnc81k1FuAdOgsprmVpcXlxYWVpSWjwaBRqdRTU2NjY0NDQz2K3raOzprG1tsFJb4U5hvvfUR65c8or5dsIchTbnA7GMIWCge3P5C/PRIFcnZAof/AH0yBIxoD7sgnR20nZuP5FBwr0nP8j+XaSt4N/VygPNxQx/ztoUDYwcD41xy3geNs4Dg7JMHO+rJ/4O+MAlZLJyTO6VlnHdc/HAuecz5/kGWPAEaAE5pk445wQuJcEEis1+mikuK9g/3drfX1lUWTUWeZ0y8vmRYXZk1mw+zctMVsWFyYnTVqFxdmV5bnzCbd8sKcZU6v107qtZPTWpXRoFucNx3e35s3m2ZnDFrN+MT4UG9fT1d3e3NrU1NLY3Vj8627BeKIhG+vBP35o8+xZ1+3cUefcgP85CkIEuwFRwTWEXiH1ngOPpiRZ/VvHQconDXwJ/yf9cK/OVKQP6g/J/zBsndDE9wxRHcM8f/I3x7uYT12+1cLgPxtEXhba2b/0ALPyGOt8S/+J+fM/36QiDueWXAMyB9JJPkFBsya5g52txYtc0a9em5GazbpQf0BJGhu2mzSLy3Ora5YLObpxYWZuRmtZU4/bzJMjA1OKEeGB/tGhvoH+uQa1eTwYH+voqOtpb6xqa66pqKyuqK6tqqwrOJuSWlE/FUaV/rRV5cQHi/ZAqsH9gU3OLCAQ5DA9g1WEVDYeFB8TrLwXEcA4wBkZT2++NcWZuUACIsLEgeGMwLrisC5owiuCAwEjYfjSPjT5z3PvU7wehlLOucGx7ki8G5IAvi3O9oTcD4QrBMM74LAO8GwLmii1RhgQe8KnjCcDHFnDPFktjohAYtrC8GAR9zgKAeXYsBTIfE27kgwHTZQlCMKbwtDO1qPYlzQBCgaiyKQLly4MDg4OG8yHh7sri2YTHqNUaea0U6ZjDqTUWfQTRn16tWl+VmDzqBTTypHplXjevWEQTXW2VTTWltVVVyQk3mztrIiJyeHTqdTgygp8cm52deL8nPzC24VFuVmZmdl3cpOTr8RGhX36tvvoohezgisPQRp646zg+DB4rfaUbwrFjClThg0aHusBY8GTyHAFdgWhgJGJJRw/As+K3WrkwQciBMc4wTHgPzdkHg4zpNw5qVzb/z5/b9/9sk333/53ZW/fPB3z3OvI7CeLnCcO4oIQZNcEcfOE0yBIxTjjiYBqUQT7aBYZxTJDo5zxng8f9rz/PS3cUcf2ycEAXT14EYMZsEJeXzgDNhs6/y1R2BfdEc4wDFuOBKa6EE4ffbDDz9sbW3dXFveWl9ZXzTr1RPTqvEZ7ZRlTr+5tmiZ02tVyknlyNT4aFtTw9jQwNTYUEdz/UB3a2PVvbqykrL8O3k5t3Iyb3I4nE8++QQNR73
xyutiATvzetq1jKTrN1Ku37yRln41MS0jMS3jvb99Sjr3CsbrPAzv+aIL2h5KAJ0/yN8Nh/sD/2MVsjofkD8ICiw2oMGtbQ6qujMC6wTHuKLw7iicKwJDPPPSex/9/ctvv7/k4xdMYfJFoWJZ5GWfwAtvvYfCkpA4LyTOC4ohQTEkCJbkgsSB78QVhQdfyhWFd4D/S1LAUztXDOmkpF3QRAc41g1FdEEAXQPBeNi7o5zhOAjGwwkGbHMOELQjFOMIxdi6IuzdUQ4QtBMMaw9H2sORTnAE0sPj/KuvfPDxR8VF+fu7W48O9/a211YXjLP6Kb1GOTnar9eOzxpUlhn9xMhQb3fX3dycgju3x0eGBxQ9eTnZBbezi+/mdrY0tDbU3Mq8FhsVymLSv/7yi5fOnCPhCL4+F8Uiblx8eGJSVFJKcmJyUsq1m1m383myUB9yyJk33oATgN8F3HztYcA67ILBuWAwzmjA+YMB8rdDIMGwhaFsoEiwVp81+3G4YT1ckDh3DBGG90R5nMWfPn/m1Qt/+/xrKosni4iKT04rr67v6RvOK7jHFUg//vsX51+5cPaVP3ucfc3zpTe8Xr6A9jwHw3tCCJ4uaAIES3LHEKE4DwiWBNbtM7NKckF7gLkAGg2JP+WGcETgXAH5IkEwHlCsJ5gIQL4QeHc0CdimoRgHCPqUM8zWFWHnhnSEYmwgMHs40h2DxZ4588kXn9+8lb2+urS/u/X44f7THw8P99Z31hdm9VOTo/3TGqXJqNFNTQz3KWQi4Yfv/oVGDv78738LFQuyb1zLuXkt79bNjub6tsbamsp7xQW3r6alpKYkBfsFvPXGm5cvfcNkhMhC+VHR0vjEhLiE+ITUa9ezbzMEwk+//Q7p4YHy9ER4nAOdP7D5PjuRcEQClX+yf4HXvI7Dyt8FjgUvD7nAAYWBYkgwLBGOI6GIXhiPM+dfv/Dxp59//f3FH6748ESSzJw7NTV1U1Pqra2dnZ09rXa6ubmVxmBdvOz95bff//3zL9/54L/e/Mt7b7z73tnX38CcPo328sKQPFEEYFgg8B4Yj3Mo4hkwHVCsJxiuSII9BOmGJjjBMc4ILBSLg+HwWM+zSIKnOwqHInrBsEQkwROB94Bhic4wlCME4QRFuiIwLnC0gzvcFYmAYNAYD+I7H76fkBRrXpjb213/8fH93389+sdvj44ONvY2F81zqtXlmdmZiQWLVtnfV5hz66N33yVhMHgs7vzZc9fS01KSE69fTcnJvFFXXdav6OxsaywryS/Iv1NYkFdfW30373ZScjSF6s/gUCOiZeHRMRExsVHxyek3b8WmpQbS6e/89eOPv/rytb+8D8ESwH58nr8TCuWMRgONgETaIeC2cNgJf1sYygmKBgcrBE1EEk57nH/9/IW3zl9468yrF17/y/tffvcDlcXhi6URMfHFZZXjKu3CwtKjRz8+fvzkwYOHev0MsKSPKZtb24pKK8qqam/dLYhNShVFRPpTae998sm5N98888prb7//4RvvfvDGux+8/s6HL134C6hpUKynG4oIwXjAcF4uSBwM7wlq1Ctvvf3uX//2/t8/e+nNt71efv2Ndz/48wcf/f3r7z7+4uuX//wOkuDpDLxnpJ0r1BWBcUVgnGBQ3GmvS34+5bXV23ubh4/u/+P3nx7c3/7916P/3//6+f7OysH2Msh/zjhpNIz3d3ZcjY999exZIhpNwOE/++TT7s722urKhpqq5vrapvqqzrbGns6Wof4e9dT4gmUOPJqQ97ZVVhenXE2QhYtEstCImFhpRAzQArdzUzNvRqemMETCcxfecoKj7GAIOxjCAYFyRmNB/XFGo10wGBcMkA6Qvw0MfsLfDYFC4AiE02c9z798/pXX3/3go0+//Prvn3/5wcd//eGKtyw0/FbO7cbmFpVGu7C0bLLM379//+joaH19fW9vb2lhwTA9PTszo9fpjEajWq3WaDRKpVLeq+gb6C+vrODyeXQ2Sxo
elnotg80XyCKjP/jr31964y13JNYNiYVhiW4IDBSNd0diXWAo0tmXWXxxSUVZ/8hQU3trTt6dG9lZqdfS0zKu1zQ0llfXJKSk0tmcv332+blXXoEgEHgPDxQOB0VAL7x1QdEnX1pZOtjf3N/b+PXpw/2d1f/555P/9d8/rS0aDvdW1pdm1pdmLCa1crSns6EyJVr2zoVX3n7j5QuvvxYTHdnc0jhjNGhUk/298gWTaUo51tHSvGSxGLRay+ysUqns7+/v7evq7GppbKnPL8yLTUyJiImXRsSFRSdmF+Tfq6mOTIr/6ItPPV55CYrHOiLRJy0AwndGo4+NEAplD4fbwWBggE9DEz3OvPLa+x//7fNvvrvk48cVimMTU2ITU8KjY27nF4wqxxeWlheXV9Y3t/YO7m9u7zx+/Hh/f39vb29ra2tjbc08N6fTaPQ63cbGhtFo1Ol0BoNBp5+eNuinDfqJqcnBsZEpnWZCrRkaU7b1KGKTUi/5BhJOn4dhiYCVReHwXufwXufefv8jCpNb09i6sLK0trVhXpw3L86r9bqRCaVKNz0zZ9LoDUNjyv7hkTsFheLQMG//gAByyF8//eztd98uLC5Y31zf3t1+8uODvd31R4c72xuL//t/nv7y9MHW6tzh3srqgn7RpDFMjw0PdjRXlWSmxvGYVP8rPzDo1Py7d2bnjKtrK8uLC6ZZ4/L8/OjgQHlJcU9He9HdvII7t/Pz8ysqKpqaa+/m52TnZN7Oy8nIvJWWcTMsOlEWGR+VlFTZ2FBUWeZHJX9+8YfTr7/qhiM4oTB2MIQ1BcfiAxohZzTaAYH4A/8z586//+FH8QlJd/Lyb+fd6RvoX1paUqlU09PTWq12a2vr8PDw4YMHj4+O7u/vHx0ePjy6/+jx4YPD/YP7u5vrK0sLZoNObdRrlxfMswbdrEE3NzM9rZky6rUz05q5meml5cW9/d0506zJPGcwzoyNK1vbO25mZotkoa+88cbb737A4gnKyqsNM6at7f3Dhz8ePT7av7+/u7+7trG2vLK0tLxomTevra8uLS8uLgG1sLA4393T2d3TWVJSlJ+fl5Icr54aNxp066tLTx7dP3qwu7u1fLC79uuT+z8d7T55uPXo/vrGsnFGN7a+NKMc7q65dyszLTIxVpKaGKaQt2vUyuWVhaNHD3a3t/Q6bXV5hXJ4JDczhxIY8vab73mRzoWEhAgEgrBwSXJKfFp6clp6clLatbSMm8lpNyNjkuNSUlKvX69ra+zo675bVvLhZ39HeJ52xeIdECh7ONIBgXBCoUDxccFgnodv5Q93RCJfv/DmxcveVTW1RoDR3MLS4u7u7tbW1urq6t7e3uPHj588efLTjz8e3r//4ODg6ZMnR48e/PjkaG9/e2d300rFYpmbmZuZ1mtVeq1qbXnBoFODoZoY06omjLMzlnmzcXbGZJ5bWllWazWzJrNxzjSh1jS1tQ+MKM0Ly9s7Bwf3jx49fvrLr//85fdfnjx9cvT4aGdv5/6Dg/WNtcWlhdW1lbX1VZN5bnVtZXtny2wx6Q3T4GuODPUbDbrd7Y2HD/b/8euTR4d7m2vzRw+2//vXRw92V3862v7nL4fba6bRwc69zXnN5EBjZV7p3Yy6qgJFV8NAf7dGrTTO6h//+PCnHx8bpnUFd/KC/QJOk7y8iJ4QVwTUDfnFF19QKBS+gB0RKYuNj0pKiY9PTktMTU+5mhmbcDUqISH52rVrOTfvlhVl5d9584N3YSRPVyzeEYm2BvJ58XdAIBwQiJNZDH7JF/LuFuTNmWbBixEPDu8/Pnr4+Ojh4f2DX39++vTx48eHh/d3d/e3t398+PDRgwePHx389ORwZ3t1Y31xZdG0aDEum42z02qjTgVsOhNj5mmNYWpyVqPWjSsnBge0qgmgI7QqvU49o582GvTWS9xmi8WybP1jNBpNJtPW1tbPP//85MmT3//xy6+/Pf31t6c/PX38z99/Odjb3tpcXl4yra5
YwHOD9bWFtWWLeXZ6a33p/t7mzvrC7sbi3ubST0d7j+5vPdzfePJw55+/HP3j5we//XTwv//x+MnDraP9ZaN2WDfZpxxsb6i4U1eWW5x/o64qf6CvzTSr2ttd39/b2N3e0Gmmgv0CiFg8HIJEwTFQd4y7K+q9v7zrc8WbSgvi8ZmhYaK4+EjAfCYlJqRcjU1MiUtJSEhLup5zI/NO1sUA31fefhNO8HDH4F1RWFcU1gWJckUd8weMKBLpgkVBiDgoCe+GxzihEY4oeLe8y2A0HD58cPjwwaPHR49/fPT46OGjh4ePHh4+ffLj4f7+g729g52d/e3tx4eHe1tbuztruztry0umhXnjvMkwbzKsWGbNBu3CrF49PmKYGp+eGNONKzVjo+MD/eMD/ZqpcYNOrdNMTWtVUxNKw7RuWq/TG6ZNJtPc3JzFYgGv95lMpu3t7aOjo5+ePv7xydEvv/703//z+++/Pj3Y297f21hanFtanJu3zMxbZpYW5xYtxlmDZml+dmsdwL63ufRwf+O3nw73t5Z/++nwt58e7G0u/vOXw//57ei3nw42V2Yf7CzM6kZG+1u7Wytbaws7Gkq62qrknbV63djqsnF/b+Ph4a5epx7ok4u4/I/e+wABRcHcEU4OUCza88P3P/C+fCUo2JfJooglvIhIaVRMdEJSYnxyWkRMfFRCTHJGatK1pBu5N0M4TK9XX4LhSW5o3Al/IJ7VvxMK5YpDwzwICC8SmAIXLGphcX53b+fnX57+8uvP//z9N5D53tbW/vb20f37e9sbB7tbRw/2d7fWN9eWN9eW1xdml+am52c0pumpWe3EnG7SpFPPqCbUo0P6SaVmYnSkXz4xNjw2NDDc36tTTy7OGbSTY7op5fhw/8TYsHpSaZjW6DRTk+NjUxPKGb12dmbaaNDpdWrL3Mzygvlgb/vw/t7hg51ffn60v7exsb64u7W6sbqwtmyaNagWLYZFi2F+Vj2jUy6atGuLMysW3eay8enR9s+Pdp4+2vntp4Off9ze3TT9///n8dH9lYd7C4tzExuLWq2yp7e9qr2+uK+zYbCneXxErp0a2lw3H97fONhbXV+dGx7obqgt47MZF7/96szps0gECoP1eunlP3/04Qc+Vy6TyVeYzEC+kCKRMcPCxfEJUYnJcdeup4XHhMYmRSemJ9y8fSMmKe7say9BccQT8s4IpDMC6YpCu6ExzgikCxLlhsfAPYnI0x4wD4I7AeuGx+zu7Tx6fPT7P3779bdffv356aOHh/vb26uLi+vLy/vb23vbG1vrK5tryxurS2vLC0vzpqW5aaNmfEat1I4PqccGNMpBrXJkanhgRNGtGx8d6u0e6Zf3y7uH+hTqSSXwfNPMxMjA6IBipF8+2CsfGegbGxkcGxkcHxsZ6FNMTYxNa1Uzeu20VmXQqWcNOovJuLm+srpi2dtdX1qcM5v0ixbj+sr82rJpWqNcXzHPGlTT6hHVeP/qgmFGpzSohlfnp3fXzQdbC48fbPzj5wePD9e21oz//duDo/srG0s6k35EM9Y9px3qairtaCgZ6G4yqIaN0+MTo4oHB+uPHm7/+Gh3waLr7mjMvJ4SHS67/P0358+95EHyxOHPnDt/4cP337v0w/eBgRcZjAA2N0gopkllgqjo0Ji4yLiE6Kj4iPiU2JTryddzMqSRoS9deNUdg3dGoJ3gCDBO+B//+7n6d8WhnTHIH58cHT48+PHR4dMnj57+eHSwu7W9sbq+sri5fBwbSwsri5btjdW97Y1Fy5xFPTE9MqAeUEwouib7ejRDfUbVhHpkcHRAIe9oGRroHexXqNQThhmd0WTQG3XjA739nW3yjhblUF9fd6e8o03e2a6eVCq6OuSd7aOD/eOjQ1PjwxNjg+rJUZ16fGpizGjQGWc085aZmWmVVqWc1ijNs9pZg0qvHd9aNZkMk+qJ/ill79qiQTM5oFZ2q5XdWyv6BzuWRfPkL0+2Hh+ubK/P/Phw9cn
RmlHXb54Z7mgsUg40TQ40dTcW9vd0TI0NrS2bpsYHf37y4MH+xuqCwaAda2koKynIjI2SfvTBW+fOnTl//qzH6XPv/9dfv/jkrz9886X35U/p1Mscrr9IHCIL40ZGiyJjpKnp8TFJYbHJ4UkZsdHJYd5B3vjTBBckyhEGd4DCHGFwJzgCqHk0Bqx/IDBINzzGnYB1xaGd0HBHFOzX357ef7C3vbm2vbl2sLu1tb6yvrK4vGBeW7CYDdO6qYnVefPB7tbe9sbO5trygtk0pZweGVD2dAy1N4/1dKgHey3TGvXIYFtjrVGn2tpY3VxfmdZrJibHRpRD8r7uzsa6zsa6tsbausrS5rqaptrq9ubG3u7OrrYWRVfHgKJnQNEzNtw3MqgYHeodGVQoR4cmlCOqqVGtZnxSOTQ23KeZGtFrx5UjvVrVqMWoshhVk2OKkYEOvWZkfKRHOdiqGusyqPuXzVNrS9qtNcPR/aWtNcP9Xcve1tzC3NicfrCruaSi6Hpv673uxsK+7vZBRZdOPabXjv94tHd/b33JrFON99dU3M3NSiEHXrn0/ReXLv1w+fLFSz5+voHB/t6Xfvjmy2+++oBC/oEvCJLKaGER/Jg4aVSsLCk1JiYpLC4lIvFajDRa8PHnH+O88E5whD0Eag+BOsLgYPG7Y7BuaIwrCu2MQDogYU5oBDh5HZBQByT0n7//sr+7tbZs2VhdWF80ry2YVi2zCzPT5mmNUT1p0Ki2VpaOHuwfHuzu7WzOm2e31tYWzWadSmWamelub9NrNKvz8+NDQ5rJifm5uY2VFb1GMzzYPzo8qJ6aGBsZUnR1DvYqFF0dFfeKi+7eqS4vrasu6+5oBjf9zrbG7o5meVerorttoLdrsK97bLB/pL93UNEzOTo8Ntjb097c29U2IO+UtzUMKzrmZ1QLRrVqVDGsaB1WtI71d4wPtQ33Nk6OdKxYVDur0waV4uH27O6KdsOs3LSMW7RyzXCjdqSp9HbCvdz4vtaijpaqno66ob6WSWXP3pb5/u6CVj0wMtRecDcjJopPJV+mki8zqAGUYJ/gIF8aNYjF9vP2+dzP7xsazUcqpoVKGfEJgvgEQUKyIDaBG5MsTbgafjHA+69ffXr+zfeQxLPgqZqNO8IehnZBYtzQODBcERgnKNIeAreHwO2sN6mC5w8PH+zvbK1vbyzPmwzLZqPZoF2bn1u1zFr0Woteu7m8+GAHKP6fnzza3d7YWFveXl83aLXzc3NTyjHt1KRmcqKppkbR0THc3zelHJscG+2X94DwZ2cMplnjksWyaDZPKkdqKsruFebXVJQ1N1TXVZfVVN5rqq9qa65rb6lvbqjubGtUdLf19rTLO9oUne39PV193Z193e3NdVWKztYBeWdXc21vR9OCUb1gVE8O98jbaofkLcOK1tH+5tH+ZtVY18GWaXtFpx7r2FnSLM2M6JXt2pGWvtYiRXNBS2VWVUFqRjy3ruRaTcXdfnlzb3f9cH/r4f7Solk1NaHo7226eT1OJqGzGH58brCAS6NT/MnBfnRaMJcXyGT5UihXgoMvioWU6EheVDQ7PkEQHceOiefEpsi4Esq5C69gvAgo0jkXBN56Yw8C/KSGMwJIgSsKC/J3hCDs3GF27jBb62VKkP/B7pY1A0uAmZ8zmA3aOa1q0aifN+hMOvXk6LDZML2zuXZ4sPtgf2d3a319eXnJYrHMzo6PDGunJnu7uxpra1oa6lsbG2orK5rqajtbW/oUPf298tFhYMhOjo0atNqRgb6Gmqr66srCvNs5WRl5uZklhXeqyosbaitam2rra8prq0rrqio6Wpr6e7qUQwNDvfKO5sb2poYBebdRpzLqVCa9Zlo1bpyeXF0waiYHlcM9Q31tY0NdU6OdmvGe4d7GuenhqZHWGbVicWZwzTymGW4e6a5sr7ldW5xxLzcxK0UiYV7Kux5ZWXq7s7Va0VXX19NgmZ2Y1gxOjHU31RVnpEcLeEFcdqCQHyI
U0qlUXyrVn8OhiMVsLpdKoVxhsQJEYmpoGCsiip+SFhkWI4lKCPMPCXz1rTcgOIw7Fm0HQ9hAYDYw+Cno8QmbdQVGHW8BSJQD1ArfDQo8DXL8nO2N1Qf7O+sr86tL5nnj9JJpZlYzZdFrF416o3pyqFeunRyfm5neWl852N3a3VpfXVy0zM7qNZrJsdHh/r6RgX55Z0fFvZLi/LslBfmlRYW3s7OyMm8UFea3Nje2tzYP9ir6erq721ury0vzcrLzcrJLi+9WlBaWleQX5N3Ky80EyFeXVZYVVZXdq60s72xpmhgZ6mxpaqmvbW2oGx8e1EyM6qaUU2NDBs2kyaDaWDZpJgfVEwPjIz0To/LJkY6J4faBnjpFR1V/V8WwombJODSnkWtHWoa7KqoL03Ouht5Kk0lZl8mXP44WBRbdvVFdntfRUjE+2jUy0DbY29zX05B/Oz3zRjyPE8DjBPE4QQIBjUr1pdMDeTyaWMwWCpkcTpBMxhJLaPEJkpg4MYsTGEj1ffPd19AkHMYDDyPgnJFwBwTqlDv0RQj0FBRmC0fYIZAn14ZA/o4wOFj/z/PfWF3a2VzbXV+eN05bZnQmvWbeoJvVTOnGR3Xjo5OjwyrlqNkwbdRpzLMGk1EPkh8fGZ4YHQFbQDk8NKCQ11dXVZeX5eXcSk1MuJqWkpV5I+9O7t2820V380oK8gvu5JaXFBXm3c5IS06Mi0xPTbienhwdIY0IFd2+daM4Py8vJ7umoqyxtrqloW5A0dPSUCfvbB/qU0yMDY8O9qsnlerJ0UnlkEmvWZ2fsxg1owM9Y0Pd/fKW0f7WrpaKYXlDd3PZQHdle0N+Z8OdycE61WC9UlFVfCv+RqIwShDEDvw64LuPOUHfJUWLC3OvVd+7PT05qBlXNFYXVJXdzsyIS0sJ57D8BDwyEAIajebHYATxeLSwMF5oKFckocnCWCIZIyJG8M3lrzCeaCiG4IbEgpeEHJFoK1I3W5j7i+5wGygS/GwFeArkBEe5IDHOCLQDFHFM3h1xyg1+CoI4BUEsL5jNs4Z547RRpzJoJlXKYc3Y8MRgn1Y5YvXz8p721qFeuV49ZZmbMeq1k2OjIwP9iq7O9uamrrbWmory5vq65vq6AYW8u72ttKgwNTHhWnpaSXFh2b3i6xnpqYkJmRnXQsUCJpXMooXQQ4Kowf7UYP9QCT81KTY6Qiris+KjI9KS4u8V5leV3aupKKssLWmsre5qa+lsbe5qa1EODw739/bJO0aHeseH+2en1csWw8RIn7yzob25qrO5fFDe2N1c1tFQ0liV096Q31hxs6Ykvb+tuKUyKztVmhbN4pG/Y/p/SfP5ghP0nZAVlBonu5OZomiv0070Vpfm5mYlX00OjwznsJm+gPjwQ/h8KpMZyOFQhEJmeDhfImFJZIyIKF5MgkQgoZ1+1csF4ewCR9u7wezckPbuKLDO7eCQ/6+z4ykIwhaGckBiHVE48Cwa5O8ERzlAEafcoUCPuMKAcAdSsGCenTUAZW/Sa2an1Xr1xKxmak6r0o2PAokYGRpU9DRUV26tLM2bjOCpWr+8u1/eXV9d2d3e2lRX01xfW1V2r6OlqaGmqrqyLP/u7dycrKLCu3U1lRVlJVnXM/JybiXFRVODA8IkQhGPTSMH8Dl0qYibBKzvkeFS0e3szNTEuPSUxKK7d8pLi0tLClua6ttbmxprq+urK0cG+uSd7X09XepJZUdrw1B/j3py1KhXD/Z1NjdU9nU2NFQWNNXktzeW1JVnNVXnNlfdys+OLbmTlJEoDOX586g/0P2/Dbr0Gc3vW6rvN2yyt5ARGB8hyMtKrS3Lu5udlpoYGhvJFwspPE4AlxvM45F5PAqPRxGJGFIpWywhS2UUWQQ7NJIjCOX+4PstDA+YfFsIxvq5G7wDwtMWggECCj/lDrWBYOzguOP785+7ng52io078kVX+PFndtyAFwFWzrmZ9UXz5OjgtGpcOzm2uWTZXLJY9Fr9pLK
vu7OhunJ8eHBydBg8PdBrVZPKkeH+3vrqSpB5TUVZwZ3cprqaprqa2uqKwoK8vDs5t3Ozi4vyqypKS4sKryYn5Wbd5DJpoWJBcnxMTKRMLGBHhIpiImXpKYnR4bKczBsgf2BA3MmpLL9XXlpcVQFM5MrSkpaGusFeubyzfbBXruhuU02MDPZ1D/X3jA7Je3ta2+rL+jobyotuVpfeqq/IbqzKaarMLrmTdCNFfDNVEsrzZwR+Rb7yhffXH4V4fxV06XOy9zdUv+8Z5CvJMZKkaHGUlB0qpstENAEvSMALYrMDeVb9kUhYAHwxE+QvDWdFxgqoHPL7f/sLDE+wh0Bt3NF2UOyfXDDWzwBi7GE48Eo0eEfH8X1Tz9/PgCI6w3G2EBTIH4RvD8OBG9b4cP/YYK9yqE/R2TpvnF5fNG8sWbZXF/t7ukoL81vqa1XK0fHhwdGBPvWkcmxoYGxooKejraWhrrG2uqS4oLgov7y0GCzdyvJ7xUX52Vk3cnOy8u7kFBfl37yRnn41mc2iRUbIYqLDo6PCkpPihAJOTHR4Wmri9Yy0mzfS01IThQJedFREWmpiYUHendu3yu4VVVeWFRXera+tVvR0tbU09vf2KLo6Zg06cF8eG+5TdLc1VhU215TUlN3Oy04pzU+vvpfZUHmr+t6Nguz460niCCGZS7kYdOWLEN+vgy5/RfX7nuz3g88PX/j88Fmw77fMEG8xhywT0aVCmpAfIhJQBAKKUEgVi+kiEY0vpAjFNLEshC8KFIfzpZFCKpd39vU33HGnbdzRL7ph/uSC+pML6pQ71h5KsAbJHko6vucNcSxK4K25zmisKwrrjAA8EqA/bpBT7lDwTiG9ekqvnurrblcph4d6u3u72kCnMaOdMupUQ73y8uLCkvy84T7FgLx7fHiwu721vrqyOD+vvrqyX94NcKmvKSkuKLtXdDsXqN68Ozk5tzKzMjNuXL96PSMt8+a1a+kp6VeTo6PCOGx6WKg4KjI0VCaKiQ4PD5NEhEvTUhOvpadcTUsS8LkMOjUpMfZaekp21o38u7cL8u9Y26G07F5xR1tza3ODvLNdNTE2MTaomhgZGVTIu1prSu9UleRWFGffSIuqKLpeVnCt+t6NyuKM4tyknGsRMVKajBvApXpT/b8NuvyV73efkv1+CPT+1u8SkAK/S1+E+H/PoHgDQfNhM/1ZrAAwGAw/JjuAyyezeb5CSbAwlCsM5fqQQ3CnzzghiafcUH/g7wgnOcI9nRBexzc2w5HP8wdSgEA7wYGk2ELh4BQG788ZGxrol3dPjA1OKof6utsH5J3DfT3TqnHlUN+gArDiVaUl9wrzm+pq6qoq2poa2poaqsrulRYVZF1Pr6+tam9tKizIa6irvpuXm3nzWlZmxrX0lLTUxFvZNzNvXktJjk9OiktKjI2KDI2LjeRyGBw2XcBnCwUciZjP57GEAk5iQkxyUkJKcmJYqFQk5CclxqYkx2dl3khOSsjNyb6VnVlcVFBcVFBdWVZclN/V2axRKwf7utua61qbqqsrispLcoru3rhXmJWRFn0nK/nW9fiC3NS87KT87OSbqRHhQhqP5iNkBXHp/gzyFf/LX9KCvWnB3kG+3/tc/NLvypchgT+EBF0MDvg+KOiHkJDLVNplGv0KjeFNpV9hsHy5/CAmP1gYygyLDZNFyz7//nsUiWgPh7/g6vond6cXIM4vQp1OwZzt4BAHJMwRhnSAAmvX8XoFQTpAUQ5QlCMM6QgDprATHOUIQztAUfbW/3KEoZ3gGKNeazLqFd1tY8N9yqG+AXlna0ONcqgPtNzKoYGivNvZN64V5+eVFRfWVJSVFhUU3MmtuFecl5NdVHi3vLQYVPvsrBs3rl9NTUmIj4uKiQ5PiI9OSY6PjgqLigyNCJeKRTyJmB8WKg4hB9BpZJGQK5MKRUIul8MIlYmSEuNjY6LCw2ShMolMKkxKjL2ekZ4QH5uVeSPnFrBNlBQXZmVmFBflV5Q
VFeTnVpQWtrfU11aVFORl5d++VnAnIy/navaNxJwbCbk3E+9kJWZdi74aL0mM5MWH8wQMPwEzUMINETADmSHelMDLjBBfatCVIN/vyQHfU0D4/t8FBf1AJl+iUC8xmD5sbgCHF8jlB/GFIVwxVRLBkUZJJZGSs6+/hiDgweu5zjikOwkLP41HnCHAPYkwDwIUR3THHN85DN7/+ey2ZJwbGgfFEaE4IgzvCQac4IUknUF5nN3f2dSpJ1ubapsbqsdHBzrbGhtqKzpaGwZ6u2amVXqtqrq8ND46ouBObl5O9u3szNvZmXdzbxXdvXOvML+o8O7dvNzSksJb2TevpiVlXEtNToqLCJeGh0lAnZdJhWIRTyTkSiUCLochlQiolCAWk8rlMED4NGowi0lNiI8ND5NFhIeGh8nYLEZkRFhiQlz61dSE+NjsrJvJSXG5OVk5tzLTryYXF9wuKbxz+1ZGUf6t6+nx5ffuXE+Pz82+mhgriw4XpKdEX78al5EWm5IQnp4YGSnhREpYPEaAkEsOFTNEvBAWzTc44CKT5s9mBNIpviFBlwP9vvfz+yY4+CJY/zT6FRbbjycg84UhfGGIUEwVhDHFkZyYdFloIv9b/4/O/hn18fevvvvFmf/67uwnV179KuDNL/0vfOr96l8vnv/o+3Mffnvm/W8I739D+OBb/IffET78jvBf3xPB+Ntlr0+8z3zme+4L/5e+Cnj568BXvgl8/dugNw4Pdg06dVV58UBvl3Kkv6G2AtxG62vKO9sa++XdZcWFSXHRpUUFuVk3E2Iir6UCZqbo7p3b2Zm3sm/ezs2+npF2LT0lNiYiKjI0OioMLPXwMAmPyxQKOCIhV8Bnc9h0GjWYz2NxOQwWk8pm0XhcJpNBYTGpLCY1PEzG53GkEpFYJODzOGGh0tiYqIT42Pi4mKtpKYkJMRnXUnNzspKT4rJuXM24mhQu42deT7mZkZgUH5aaFHk1JTo2UiTm0eKjJXFR4tTEiOhwQZSUGyZkxoRyk6LFSXHS6HBemIQp5lPoFF8WPYDDDGLRA2ghPsEBFwMCvgsJuRwcfJFG82GyfDncAC4/CCx+iYwhimDLYvihifwQnu8l8iec8AB+jD8/xp8bfUWU4B+aGiRLCRTG+3CjL3GiLnOjr3BjvuPH/SCI/7cQJlzkx30nTLgoTfEOTfONSPePSPePTA+MTA9cW17QqibamusGerssc/rRod6aqtLK8uKiwjtVFSWKro7q8tKb19IyM65m37iWGBt181paRlpyZsbVG+mpCfHR4HiNjgpLSIwRS/ihMpFUIhAJuRw2nRzsz+exQPiBAT7BQX48LpPDpoMtwGbRaNRgagiZyaAJBTw+jyPg8TksNpNBk0pEkRFhoTJJakpSfFxMbExEfFzUjfTUq8kJEaGChNiw1KTopPiI9NSYhFiZWEAX8WkxkaJwGTdSJoiQ8uMipRI+k8+i8JghfBY5VMyKjRTERgokIppYSGUzAhlUPyrVl0bzo1B8yOQrZPKl4OCLVKq3deb6sTj+oPgIJSGhEUxJHCsqTSRNDLx6R5xdzssq4+bW0G6WBWVVBtyqDsqtJefWkrOq/W5UeN+ovHyj8jL4+K3aoOyawMwq/5uVflnVAdk1gbeqA3Nrg+/Uh9xtpOY30Qqa6UUtzOJW1mCvXDUx1tvT3t3RPNTfs2gxDg3Kb+dmJifF5ty6kZeTfetmRnx0RHx0REZacmJs1LXUpPjoiMTYqJSE2LBQcXJSXFRkaFRkqEQqAELMZzIowUF+VEoQjRpMCQkE/w7w9yYH+3PYdEpIICUkkEoJAoMcFMigU5kMGo/LZtIZLAYzwM9XKOCxWQyxSBAqk8RER4bKRGGh4oSYyBvpqbFR0nAZPyE2TCJkhUk5YgFdyKPyOSFR4YLoCGGoiBMXKY2Q8sPEXB4zRCpghYpZIi41VMyIixJy2YFSMZ3HJgPFzwpiMAIoFB8KxYdK9SaTLzGZ/gyGH5V+hW7
Vf6D4Q2kR0Zzoq+KEG2EZ+bI7lfH3WiIrO+PKOkQVXZIquaiyR1glF1UrxBUKfmk351438143s0ouqumV1AxIq/sl5XJBWQ+/XC6o7BVVK46jSi6sVohq+yQNg2FNwxGD/YqxkUH11Hh7a9Pk+KhhWtNQVw0a8pTk+KtpSWmpiTHRkTHRkfFxMSnJibExUWKRIDJCFh4mCZUJZVJBqEwoEfNEIhafT2cyg0NCfMlkn5AQ3xCyf1CgDznYjxzsB/JnMignuQgO8gsK9PUP8A4M8mUwKUwWNYQSwGJTA/y9Q8gBXA6DQQ8R8NkR4VIelyngs8NCxXGxkZERsrBQsUTMB+2rUMDhcChsdohYzA4N5VuPC4SRYUIemyLg0vgcamSYUCJkRUeIk+IjBFyaiM/gcAP4ArJYzBYIGCwWmcUiczjA2sVk+bLYfgxWIIMVyOaSeQKqSEYNjWTF3KTmVkVVKIQVCuE9eUBZb1BFL7uil13ZK67qk1T1c2uHBLXD7Mp+ekUvu2aQXzcqrBsV1o4IKgc4Fb2syj52zSCvblhQM8gDv6we4Fb0sip6WTWDvPoR4djIoGpSOTo8oBwdam9tamtpBDemrMyMq2lJsTERiQkxCfGxYpEgOioiIjxUKhGxWQxwpIpFXC6HzuXQWUwKj0djs0NotAAy2Scg4JK/38XAAO/gIF+Qf2CAT1CgLyUkMCjQ18/3MihH/n5X/PyvBAT60OhkOiMkhBJAoweTg/0DA3zoNDKNGsxm0QDCbDqfxxKLeJERssgImVDAAV1TTHS4SMjl8WgsFpnLpQqFTB6PxuVSpSK2iM+gU/y5rBCJkAXyj4uWiQVMiZAlElOlMkZEhCgiQiSVcq3fRWazA2n0K3SGN50ZwGQHgfzFobSoOH70DcqtiohyuaBcLrgnDyhVBJYrWBW97HK5sEIhqh7gNY5JmieEQAr6ONUDvJphfu2IoHqIV97HKu2hlyuY1QPcP/AvVzDL5AwwC8ODfa3NDaPDA0MDve2tTdWVZbeyb4J+43pGWvrV5NiYiJTkRJlUbC37MFAuGPQQFpMq4LMYdDKdFsxiUlhMCp0WTAkJAMve3++yv9/loECf4CDf4CDfwAAfMPz9rvj6XPL3u/J8IsghASGUQPBvOo0MPkijBjPoIeCMBjeFsFBxRLiUy2FwOQwBnx0THR4qE/H5dPCgmMulCgQMJjOYwySL+AwWPYgW4ifg0iJCBWIBUybmRIYJuawQHo8iFNJDZcLwMLFEzONy6EyWL41+hcH0ZnP82NwgNjeIwwvmCUKEsqDQKFr0Td+sCv69Lm5pN6+4i1zSHVLazbOqirCsR1DZx6kfETWPh9UNi6v7wmv6I6r7QusGI2r6wyrkktIuYXmPuKY/rG4QeLy8R1whl1QqpGXdotIuYWmXsKSD31BXXVyU31hf09XRWl9blZuTlZIcHxUZmhAfHRMdfjUtKTJClpgQFxcbzWEzuRxWSHCQUMCj08h0GlkoYIP8+Twmm0WlUYOCg3yDAn2CAn0CA7wD/K8EBng/+xIg7+d72d/vykkEBviEkAOCg/wCg3wDAn0CAn1CKMC8CAoE8kWlBDEZFBo1mMmggHNcJhVKJQI+jyUScllMqkTMjwiXymQ8JjNYLGYHB3uzWGQej0an+IsFTB6bwmGSBVyaVMTmc6h8DtAXPDaFyw0B+YeFisQiLodNY7H9ONwABtObw/Xn8slcPlkgokpkTFkkNTyGcbOMW9QSWdUnrlAIS+XUMgWtrIdf2s2r7BVXKESVfZyaQX7DqLRhVFo3GHXCv34osrovFKRd0x9WOxBe1Ssr7RLe6xSA5CvkkqpeWYVc0ivv6pV3jQ4P9HS1V5SVZGfdiIuN5LDpPC4zKjI0JjrcqgDMsFCpVCIK9AeUnM2ikYP9QetupXqJEhIAFBIjhBzs5+d7CazwAH/vAH/vwAAgQPggf/Bxf1+fkOAgSkjgSV/4+F4KBnrHF5SgEHIAi0kFhYj
DpoM1z2bRBHy2gM+mUoLYLNqJBsqkAj/fSzRqkIDPotMD2ewQJjOYzQ4RiVhCIRPUeYGAAcq+SMSSiPmATxOxgMbhBfIFZDbHj8sL4PLJfCGFL6YIJFRZZEhMIvtOLfdeq7S8i1fWyS3v5lT3CqoV0iq5pLxHXKmQVvXzq/r5dUNhTWNRDYOplT1x1b3RdQNxjcOJ9YPxNf1R1X2RYFT1hN1rExW3CIpbBKXt4hpFRPNwfNNQ3NjI4LRWpZpUalQTg/2K6xlpEjEfbPboqDCpRCAUcBh0KjWEzKBTGXQqOdgflHEWk0qlBPl4X/T3u0wO9mMyQtgs6rMUXPb1uQQCB7vgRHZOKj84MIBCDg4O8vP1uQSmwNvnYgAgXFcAX2Qd0OB2QKeR2Swah01nMakMegifx2LQQ8jB/mwWjcmgMBkhAj5LKuFTQgLotGAO+3gMUSh+DEaQVMqVSDjggBCJWDIZTyrlAg+K+UBIOID+84O4vEAeP5DN8ePwgsVShjScJQ1nJaTycwsSK7siauTRVXJheRevUs6r6RNWK6SVPeKyblGFXFKuAGxPuVxYPxzeOJRW05tY0xdTNxDXMJRQPxhfOxBd1RtR1RtR3RdZ2R1a0iq828DJq2cXNPGq5eHNw/HNw/Hy7g69Tg1+JHxyfLS5sQ5co0CHExYqBgwkOZjFpAcHBnhfvgSKtp/vZUpIYHCQn7XCfcjB/lRKEDgU6DQyqO0+3hetWbgEapF1HADCAjocagiZGkI+4U8O9gfzEhToS6eRqZSg4CA/kDyTQWGzaGwWDRwHbBaNTiP7+11hMigB/t4MOpnNonLYNAadTAXGB5ACOi04OMiXw6bxeUwelyGRcKRSrkDAkEq50VGhMdFhoI8KCxMAI1hEEYooPAGZyfZjsshcHo0vpgok/w8T/+EcSXatieF/0S9+Ckm7knb1Hh85nJm2aKCA8pWZld5nljcoh0IVgPIo77338K4baN/jyBmS473lcMhHcsi3b/fFSgqFFIXkUltxIwPoCiBQ3znn+8537r2tt7nQUMJWmYnVuaE8I0pTvH5I/h3/+gFX22crc6I0xSpzuntmmlxuTi43R5f2/oWle2bqnpnaJ4bmkdA+MQzuW7tnpvoBV5riuRFSGKONQ7Z/YeqeGU6PD44O5seHew8uTo8O5tHI9s72pjQ3g/U6nltQvWJtVaWQQ4BOpZDfuf3q36UTBP4mlyrlKgQuyATQqST2kK8tX5eGbG11SZIDtWrRdmrUco1ajiIgrAchQCe1o1JHKq1FaK6tgRSU6wwH/14IUglgKLSyfEenVS7fuw2BapVSJhEgAuv0kEbqAaCFlGhgvXbBTgbKZhPMZpamYZORs1kNFrMo8DTDICQJEaSKojUsD1GMDsMBDAcoDhTNuN2N+bb44pgtT4XSFC/PiOYx3T3nO6fG1rF4DT5dmROLujjk+xeW+aOtg6eh2WNX/8LSPjFINNU45BeheeicPXYNH9g6p2L9gKnuUe2TRf/fPhFG4/7e/rQ/6Mz3Js1WLbEbDYWDNrvJajNSNIYTMIwAa2syhWJNrwcVirXllTt3794EQLVGu+hboMUHRCS6kFp6lXJVSn6FfFmrkSsV9zQamU6rkKRZq5FLUYP1OgQGJP5ftEDXUZN+jxQI6QuprBAYkIpC+ilYr5NKTyFf0UPa5Xu3QUB9zYdq6dAIBKrla/ekoDA0xrC4INIGI8sLFC8QZgvnsJtMxkXvRBCgNHAmaZBm9QSNYBRM8xArwu4tJpSy1eem3omzdcg19pn2MdU7Y/vnhtYRWz/gqnuMBOO1mTLOn6zPHjtnj+yjB+bxpWX60Da8b+qfG6YPbYfP3Re/CBw+d88e2ceXlv65YXjfNH1oG19anjx9eHxyUCzlqrWSf9NjMgt/c7IiQzM4SaEIuoBdo1GhKKxWK+WKleWVO1qdUqVe0wEqEFrooORqJfAlhr/+YkW+tgAfAOQQqAYBlU6ruM7MhTWWBqE
SzoBOpVHLpQj+HXxJiCXMr+tFJwUCgQE9pJUqRXpKzZJGLb975xXZyh3J9OkXf9iiQ2YZnONJk5m3WEWDkTWZWbvDYLcZrRaR5/HrEw6wKKIkDTIcTDIozeEUB/JG1OTSlzuh4bl78sDXPzN2joXeGds9ZXpnYvuYaxzy7RND79wo4X/tbfnWMd8/NwwujONLy+TKKj3njx37T9ePXniOX/MevfDsPXFOH9omV9bpQ9veE+frbzw/PTvKFzKxeNjjdVqsBpYjERQkSISkUIJE9LBOpVLodBoE0avVC9hlq0tqtVKhWNNq1VqtGtJrQUgjJbBKKVMpZRr12oIEALlCvqRRLgGaFRBUgKACAOQgqJBoQWovpT5fknI9pJGiIwErYStNMKTklyhIwl/6cQQGJLGG9TrZyt17S7du33pFpVwFgYWbgBGApBCGxaVz4w6n2WY3Wm2C22PdDLjMFo4TUKOZEgwoLyI0g5CU3mBmaA7lTTrOqHUEtOmadXrp2Xu0Mb2yj+5bemds85BoHzPdU65/Zpxc2gbnptYh1z5mmodUbR9uHmGtw8Vbo/uWwbmpd2qYXNrmj5zTK/v+4/WTFxsnLzaOnnmPn20cPvEePPacvtgcjnqValECPxD0GU08zeAwAqAYJCU/AC5ABgAtBAFK5aJRlytW1GqlUinX6TQajUpiIYn/IVAtpfqCh0GFQr6kVtzVqZcXJQAp9XoVgmhQBEARQKJ0QKdCEVCa0UkOQqIXPaSVBkdSCCSQ/3v8JS1AEVCpkK3Klm7feuXe0q1V2dLa6r07t1+9e+eGRquAEYBmFuRDMxgvUFabwblusdqEdZfZaGJYDsMIrR5RUgxAMYAgkgYjzYmEwcwwotriRCM5Pt9yDs4csyvv/hP3+IG1fUzV9tDWET24EBeQPnGP7ls6x0L/XBhciL1zun/BSJhPr+y9U0PzgO2fGadX9tlDx94j5+FTz/Fz39nrm/ff3D5/PXj01Hf01FcoZkPhoN1h5ngquLXhXLfyAo3hein/Ib12YUsBLQBoNRrV6uqKSqVQqRRarVp6ajQqrU4JI4DkcyFICcNqBFHhuBbDNCC4plMvQboVVK8kMS1JQpJRxXGAwPUoAiCwDkNBikRIYoH/tY+GpMSW8P+7IgM6ldT5SKYM1uukd9dW763KllZlS7duvqyQr0hRWKi/fFmjVSAoiC8c7iKXjGbBsW41WzheIFiOgBEdCClhRKNH5JwACwbUYqNpHhTNGGdbtW0A6SZemxh6J8L00rr/2Dm6MLUPuc7RgmHGl5bZI/v+0/W9J875Y8fhc/fxa969J87hfdP4vnn/sfPomXt6ae0eC/1Tw+zKfvjUs//Ivf/Iffpi8/IXoctfRM5f3z547Jvcd0ai29s7m1ab0WQW3B6H3WFmOVJSXoJE/o6/VqtWKuXLy0sKxZparZS+VSrlarVSo1Xor3s/EFh03TCsRlE1hmlwXIvjWj0oQ/RrBKqhCYCmYYZBJNWTBkc0hUrIS4um0OtC0EvS/Hf+kZZkAXBMLymI5DIWxkG29OorP7t965WV5TtS07WQeATAcD3LkaKBleRMNHJGs2Aw0hYrz7A4SS0Ih6T0ggEVjRgnwAvba0JFM+bZwTK19fmlf3LhHZ4bh+fG8X1z/1TsnYjTS9v0oU0i8MmVde+J8+iF5+xN//1fBk9e902urKML0/yh/eiZ++DJ+vi+ZXRhnj90HD/3HT7x7j9yHz/bOHstcPTUf/DYd/DYt/fQE09EglsbDqfFv+nZDHjtDjPDElLbQ1IoikELbtdptFq1Wq2UyZZlcplSo9QAGrlKLkmARqvQXvd7C4XVrUGQEsM0BKHjedjp5A0CInB6jgEZSsdSEM/ALItKgeB5nGXwa8z1OAZRJEJTqLRH8/ckl8oBBNQIDEiTHxzTSxWxfO+2lPx379xYvnd7VbakVMhu33pFcnDSTJXlSLNFtNpNZqthAb6JtztMBiPLciRJoQyLcjz
OCbDNwRpMGC/Cgllrdui9Ec3wOLj/2D44E3pnePNQ3z5kO0fc8Myx99A3v1qf3LeP7xuH5+LgQhw9MM4fuY5f+E9ebO892phcGqZXxr3Hlv0n1tmVZXZl2X9sP3q2vv/YOX9on13Zp5e2ybl5emE5fLR+/sIfjmxt72wGtzY2/G6X224wcqKBBSENpNcyLMFyJIbrQVAnLaVSLpPLVFqVDtKptCq1WrkwxoBKrZEjsG7RfutVMPy35DeZiFDIvbPl9PvMdist8jDPwCKHSg6U53GDgZIwl0qAoTEcg1iGkOQARUBp5oChkGQESAKhKUza2QEB9b2lW0qFbGX5zqpsSaqFu3du3Lr5slIh02mVBIngBEwzuNHEmyyitMxWg2hgzBaB4ymSQlkOEw2UaMSuJRg2mnHrOuLw4Mkq19nztQ/p2gyr7QGFkbI6xduH7OjcOb/yji9swzPL6MLQP+VbR3TzkOqdmiaXjvlD3/yh7/CZff7IPL0yTq+MEvjHz13Hz13TS+vkgWV4buqfGkanxtGpcXphOXnqjcV3wpHght8VCHo9XodoYDieQlBQyn+WpWEYkstXdTqNRDsymWxlZUUDaHSQTqmVgcjfjACs1wI6pR7SYAvK1RMkzJlXAhEqmhJ34qxvk1z3IBYrarPjdittMmAGfrF4BqYJQBpdSrwkjZpFA0PRKMPiooFBUADSa/42hSNQjsIRQAmqV7XKO8rVG7deeelaBG6t3rulXL0nW7p5b+kGhoIErqdIRDQwooFxOM0mMy+ItNkimEyMwUAxLEFSKMWqBOOC8FlRTxmURqfeGry5k9cWJ0RpShZGZHFM5earydFSoqMozfStY276yD55KA7us5OHYusYzw/Q5gHfOTb1Ti2tE6Z1wnSPLKNzZ/9UnF3Zpleu4xeBk9c9+08d40vT4EJsHoiNfaF7Zhg+sAzvu/vn6+lMPJ4IhcKBzYDH4TTbHSap8ydIZLEIDAQX/ScAaFUqxbUEr66tral1ahAGlVqZSreq1SnBa9cp2X8UASgapRnM6FCEk0Ku4tyJs9GE2bdJutyMx8s57ewiBCKxQB7TUrgOx/+/EJAEwnOU2SJIicpyBIqBILSwVxSJ0jjyd/zV8lvLd356+9Wfy1eWZEs31fIVQC1XyJaW790EdEqKRAhcb7GKVptBEGnJAvACZTTSPI9L1obhNbwB4I2IYEJZs8bo1G/nNLWZsTKncyOstsdXZmx+by03X82OdMUpVJkTg/um+RPT3lPz7LGxc0oWhlj7SBw/cA7ObdV9vLKHVSdcecTUZsT4vnn20D28sLePmd4Z3zykCiMo3YGzPbR1zA/umzsn9smVr9etlkqpbCYRi257fU7nuoXjKV6gKRojKRTDEADQSjq7tia7ll2lRqNR69QQAim1MqVWBoBqcNE36qQR3CL3SHjRcruV3m242nNEs2Qyb4kkheCOyeWlnU5+fV2wmTmagEgMYEg9gepIDOBoPUOCFKVnWdRopI1G2uEQKAokCHDRU8E6ikQ4GqMJWKtcVciWVpdu3Hr5J3duvKSQLWmVq5heBwNq1dqyfO0eBKolTeEFSsp8goSl/L8ucJLl9RQDsKKeNyI0Dwom1OoD/FE62QLyAzQ9UFX29J0jX3lsyw2g0gStTInqjKxMqeGFde+hb3bp6Z/YesfWyoTrndj2HgVmV/7Osbt74mkdGrI9NNsHOyf83hNf58RcGCOFMZIe6BIdVbqHFkZk51TononFMVWdcyfH0+Gg0agVc9ndSHTL43U4nBabfVEFBImAoG7RYWrVOp1Gan40Gg0AABpAA+gBlW5VA8pBSHM9t/9bCDAUJCnEYGSdfp0/jOUbpmrP4d/BW4OtejsWjtk3N21er8lpE00iZRRIo0ByNMIzqMihLAVJM2GOw4xG2m7nOe56REOACKyjKZQhEQTUqNaW15bvLN9++dbLP3n1pX/UKldpAmYpVK9TKWRLOq2CYwlpVxQn9BxPWqy
iINJSQZnM/CIEAoyTGopbJD9vROwurtj214c79T22MMSSPUV5DnWPNzpHvtIEre9TjX2mNqeKI7x9JLb2jYU+WRxQhT6Z7eKdI8v+4+D0cqN/5hte+DvHplRbH6mupTraxoFY3xcah0xxgmZHYGYIZPpYuoeWZ3jjkCmMyHQPvbo8Pjocj4bNdDoSjWz5vE6X22YwshxPUTQm9T+S21paunNNQYuXTqeBIEANrGlAuVan1P835FFk0djTDMLxuM2rDMbwSEmf73H1sTvfthycF5uDnWRmYztsc7lEh4Nz2niHlbMaKbNImATcLBJGAWcpSORQh5VbvGWiaQIQOZQmIIbUo3q1WnFPo5BpFDLZ0s3br/7sZz/5XyGdXGAJhkRgQK2Wr6AIYLWIJiNntYhmi2AwshgOoRjIcgRFo7yI8SLGGRCKA2ke4o2oxacMZdhC31gcmKozY27Ahuur8bayNLXVD1ydA8fgxN0+EKoTqtBHG3O2MhKSdTRRhbazylBZ1jpiD19szJ+4p1db80c75T1utwuH6vJwQ5HoIJkh2ToTcxM02QfzUyzWAqNNoDBDyvt4YYzvdsCPP/rVay+u7l8clEqpRDwUDHi9PqfDaTZbRIYlEBRcpDeglfJ/dXVFcf3S6TQwDKl0qwrNilojX/Sreq0kvjSFUjRsNDGugM6xod7YVYQKYLpuGJ+G9s8Ke6f5/iifzGxsbTk3Nixel8nrMrnsgsVAmgTcbmZsZsYkEmYD6bTxRgG3GCmGBK0mmqMRmoAQSKVYvfPf43/7xk8JVGcUaIZEGBLB9IsyMZt4s4k3GTmzRbDZjRxPGk2czW4UDQzLIyyPMIKeMyCsCAsmzB/DqgNfti2kGmyiicXqSKgmCxTv+rNgqIK39+3jc1/v2Fib0oU+uohCj0k1sFQDSVShZFtTnRPDB7bxlWPvcUjCP9lDpPMnoZou2cPqx1yyD2ZGcOWAjjaBSEOXn8LJvjbe0iW70P2Lg5Pj6d683+1UctndRDy0GfB4vA6JgigaQ1BQp1uEQJqCKhRr0rcwvOD/NdU9rU6JXZPtgnmuZZRiIKOZWt/Smr1r4TqwWVAE87pEkygPnPffKD15s1vt+rIlTzRpCYcsoR1zwGf2OHm3Y7F8LoPbwa/beIeFFVjEwGMcrTcbSIFFSExLoBqdelmruqdRLsllt2/f+KlKvsQzqNXEGgXSamJFDhcF2mzibVbD4mk3utw2iXaMJs5iFY0W3mDmKFa/WEaZyaWL18DmvjHZwbZLms0MFCrhkQboSt6zhG+EarrpeeTiReH0eXT2YLN3ZG/tmXM9Ot0mymO2NhPKczg90GRHYKqvzY/ZwoTPT/lIUx9pATt1zUZOk+qz5X0xN2Fqh8bijNup6HYqukRXF24oQzVNvA299uLqnbdf3L84ONgfFgvphRDHdzb8LrfHYTByNIOjGARBAIrCKpVCLl9VKuU4jkIQAEGAQrOi0KzoYR14PVKjKRRFtSQJiUZCMODrW9pc01aa88keFq7AyTbVPQzML+PHl4XhfrTRDRWqG4X8ZjbjS0S8iYg3HvYGN6xbflvAZ/FdF4XVRBt4TORQjtYbBRyDVSSmRSAFDMoRSKFTy+7c/BmgWRVYzGkT7Rbe5TBajIzVIjrsJofdZDELDqdZwt/hNFttBpvdaLIKVoeREzFOxAxOjWebTDbh3YY+WofCVSDTNSRb/EZuzRa95UmvJHvY9Dxy/7Xi2YvY3lVwcOJszIy7DTRRR+pzsTYTChMw1Ven+tqN3C1fZu0aWyzcgLITMt7VbxZ0ubGYHdOFGZfq45sFxWZemeoR5X0qP8V2qmp77OYvf/Hsi8/f//ST37x4frkIQTGZSkY3/W63y+Z0mEUDS1IozeCQXru6uiKXr8IwhGGIXg8u+iKtbIG/Hlz8I7qQSAwHaAaheUg0456wNl03VA+E0pyVmDA7oLIDqtjjsy26NrA2RvZ2f6PV81Uq/nJ5o1IOpVO+TGp
zN+6NRTyRkMvnNnvWjVIUDALMsyBNqnkW5BiAIlQ4qgS1yxSuc1g5l0P0rBtdjsWyWkSLWfC47WYTz3LEtdvCBZGkaFgQSbvTZjCJBK/lLYjJf8sdXfOlZfGmPtWhE00iPcDibWgzJ/dn13IDtjQWqiPXwaPk0ZNEc+7KtoVMi98qQL6kOtOjcwN2q6zermiiDb1rd9mVUGyX4FiLiLfJ0p4hP+UTLbY4saZ6VKyJpvu0PboUyIP9C3/9yFDe40IVnWHz559/9t5HH77z/nu/eO/dt6aTTrNRSKdiseh2MOB1OhYqIIVAo1WsrNyTyZYxDEFRGAC0CzsGKXR6pTQd1UOahfiSEEnpGUFvcTAbMTCYRFJ9NDcmo00oXAfCVSBYUG3E5YGkKlEkmmNHu7/RGfiHw0SvF+33Mr1uutVIlovhUiFSKcUyyZ1YeCPod7gcos1CmgyoyOvNRsxmIa+jsFgmkZBExO+1el0Wr8tyvb0o+rxOu23B/NfzT8RgpCkaJkjIYBJFo8AYIaODCGZ0npjck1yO1sH6njVcgbfKSm9mOZBX7JQ1uQFbnRkzTVOp78i2xUgRz3XFzaTWEZb5kuqdEhCrI4Gicqus9ucUhuDLzthassNK+KeHVGEmJDt8ssMHi9ry3Ng+dWaHbLRODB8E2meW3S4cLKhc8Xu/eOvx55/95pOPf/3eu29Nxu1iMVkpZRPxkH/DZbcZzRbRZBYk/FfX7qk1cmnf5G+mDFYhBCDhD0JqBAVIGiQoQDChDjcfTOpdO8pYA8gO8HhLF6mrI3WlN33b4P+P3t2byQbYPTI1J+ZKX2iPfO2RrzcJD2bR8X5itBcfTNKtXqxajaUzgXjUG9p2bvoMXhfnWacdVsxuwY0CZDFgIgtZzeSG17ThNW1eq7nbabDbRZtNcK1bnQ6zwcgSJExSekEkGRalGYTiAEaAGCNg9zGhAuiOylyJpe2SOtGCIzVgI3fLFv2nUEUXb8LFkbGx74jV1O74zY3sve2yPNYA7JEb9sitYEEZa0DlKbddUnuSy+sxmTO6Ei4TsdqiiHIDPt6ECyMm2WYCWcAZvds8tHRPHcUxmxvw/TNPacLGm/pQGQyVwddePPjtd59+/dVHH334zsX5fi4XLxbSkXBQoiCL1cDxFIpBWp1yeeWOSr0mbfahKLxQYVgF44tYQBCAoItFUADN6k020uHmI3nCF9WGq5pEC9rtgJkBUpxg8ZZmI3U719dXJ0xjzlf6QrZB5qqmcsve6AVag63eJDw52J3s5Sd7+b29RrebrVUSpUIkHnGFtmyJ6Pp2wORZZ60m1GVnbSbCYWMCfqvXbVgsl2ndLphMjNnMSi2oNGfGcAAnQAwHKBpmRT1ngBkj4A6KrsiKK7LiTa340rJgQZnqYtuV5VBNlumTqS5em9s6x+5gbsUVuxFtaCJ1tStxJ1RRx5v6aB3M9InKjN/MySM1IFbHYnWse7zRO/FXpqbmgT1ah6J1KNGk/GmtK34v3SPax7ZYQx9v4JWpKVzV7ZQ12T7TOrR99OEvf/vdp198/u7HH7399MlFv1dbNKKJ7a2gb8O3brMaCFyv0SoAUL0iu7siu3v3zg2tRqHXgwiih9BFCPR6EAR1KAYiKEAxEMsjFgfp9HCxMrmdhZMtINkC0h0o3YEyXX2yBfhzy8kuVJwQoaoyVtMmW1C8BsdrcKbJ1MaWxsxUGnDFlqUz9c2PcnvH+YPD2niS77VTpfxWKetPxZxbPtFuhD12dmeBvBgJubxuw7qD87nNLocoCITBQDnsJqOBpWgURnQECWE4oIfV13vrWorTkoZVhx+zb9/dympTPTBSV0brumQHDlc1xTGV6iLhqibXo6tTQ2kCZfua2oGufgikBorsWJ1qgzvFtWQLKI2J3aYs21Mma/JcRzO78Bw9ClTHQnNuqs+YWEUVLekCqbVA+m6yqUm2tPG6yp+SRSu6UEkeLit6J46jZ9sfffjL333/+XUJfPjeu2/
1e7VaNZvLxcOhgM/rlPBXKGUICqrUa0tLt6QBu1IpRxA9RkEQqlkEAgJgRIcTepZHBANudVIevyFWJkN5NNkCUm0w3YFiNfVOcS1e1wSLqzsVRbimskVe9u0u7TbBRB0JFXTJGtk9cPWPneUhv1tkU2W+VN948Lj93vuPn784uDjtHe039saFcS/dqcUTIYfLSi+Wk0vEfMFNm9POSn5NEAjL9Q677XraSdEoToAkpZfOlrAiyAgAZ1WZXKArshJIq9N9KN7SlKdM89DQOjJ2TszFMZXswJkO2Tmyt4+pcOVeuHajfgj07tOR5r1IRbmZuZcfoMUR3jrEhhdsbYLUp2hzYugdWJtzU7aNd48MqSZQHrKlARMuroYKsp2iLFKRb6ZXdwqqneJatKrqHtuPnm1/8fm7P/75t3/58fs//vM3H37wdqGwWyqlCoXddCoWDHhd61aBpyBQw9C4TqtcWb4jnRhUqddASEPQeoLW4ziK4yiKgSSF8CJmNFM2D+vbtuzk4K0MtJW9Fy3LM20o04YSdaiwaIHI3JBKtCB/VraZX9ntgNGGyp9bCtdUyS6UHWnSA1UwqwhmFbsF4fVfD37/z7/56JPHr7/Yf/p4cn7UnA6ylcx2Yttp5VAe09qtVCzi2Q46bBZKmm9Lp00sZsFmNUj7jDCiWfAPoWU4mGYpmqVM3mXb5poj/JI99LNgcTXRBioTW/fYN38UaB5Y8yNNsrNWHAjdo/XqVNxMrwXyryY7a6UpsNtezfe4ZIPYzioLPWp0jk8fUN097dFDZnRgSpY0zRmTa0GDE64y0ncPuNacjleWE9WVZEseKt0NFda2c7L1yE/XIz/drQO1KfvXv/zuX/76wx9+/9WHH/xiNGwWi8lmo1App3PZ3dDOpsdtt5gFhsaNBk46cIXAAI7pF6YMWJA/xSI4jjIMJc08eRGz2FiHj/eHbJtJbTANBtJ3I6W1XBcuj4h0G810sGQHzfSJSE27U1btVBTZIbrb0UUbqsIY36ko0gNVqq/0xO/tFNS1nvv9L45//OtH3//wzkfvP3rrjaOr896wk/RYKRMDkKCcghQelxANuwN+q8mAkZhW5FCzmTUaaafDbLMaDEZaEEkE1VI0jJM6hoMZjuZFzhFUbCb0Bv9/pNf/Z/P2z2JNbWlk2W2yrUNbts9sFW96kj8tjwy5LhvMKlzRu5HaUrKzlurKG4doc882PPNGS7raRGwfgP0TpNhdHhyCe2eOUhutDolCBy72gWRNXuhC6YZmJ3crVr4Xq6009rHKmIxVVK7oz7yJl3NdNFRQ/PMP3/7w26++/OKjzz59v1ErtpuVZqtSrRVy+XQ4suXbcDnXrSaTgeMYDEO0WjVBQteNBMawOIYhJImTOMUxPE3TFEVRHGi0khavzhsiykN7osqWp0JhxOy29eUpk+4u1lZJtl1eDVc1C76tr0Yaa/VDe6rHFCdWb0oTKGmjLdSfW9qprOVqtjd+PfrLn35Y/JHffPD+O0+fP5oN2rtWAWNQNQwso5AstmOJbJncDtHAoQQCcRRuMzMMCTrtrNmIGwyUIBC8QBMkgtEISsEUBwomLJxGg3Fwfeuu4Pyp3X8nlodb++JuHSgNzY25M1YxZNr23TYargKhuiLe0VbmZKKtTVSp+szRnNnHZ/7uvrc5Xc/3iFQDjhSB2sRQnfL1uZjtg+murjJlbNsvGzdu2raWnOHbibq+Onb2jwPji/XGXIwUtTs5VbigdoVvf/T+r77/9otPPn73D7//9vXXHk9G3U63XixlsrlUNBbaDHjdHofFYmJZGsdRBNHDiIaiYZrBWI4gCAzHUQzBMQSnr1+cATHZKNsGuJ0UknU+UiSTHSzdI9I9NNPHgnldMK/z55Y8qVvRuq4wIkM1WaSx1j51Jbu0PwtaQyum0K2tCuDPLXnTt+t931c/PP7Xv/7xX378wzef/eqzD958/en+dJA1cwiLaXB4zWrEc0nP1gZvM9EcCSGgliU
xm5nhaL1BQIwiajIxPI+zHIkTMErBCKnHaK3VyXamnnLHUhsaSl0uXSXjBSTbQdoHhta+qzQ0hwpcZewtjJhIDdztAbs9oDwjog1VKI9k20KshO5Wic6eJ9cSqxOuPhN2a0hpwGW7eHXKV2b44MIoTUELfXEnD+HW/8URupVqLACpTrhoSRsrA9GSzrlzM1bWPXl49u1XH//1z7//y59+OD3Ze/rkQbO1KIFMNpnYjYYjW5sBr8VqIKkF+Aup1WsYFud4SjSw/+3cCALrQYKEUQykeLXJjm5EUfumxpdQemKLnjlS00aqSKSKuOPqzQyU7eILGaqs7na0kebdWPtepKFxJW/bQgpnVO3LrmZHVDAHB7L6o/P2V9//6t/+9U9/+dP3v//2w28++9VbL4679UR0x0UgcpbUJCLeeMTpc7NOm2gUSOl0tHTIdmHNDJjZzLLX1YrhEEwCMAnQgsrppUanxlIfro6B0kDT3bOWukymjh1eBepTY7yi38nBjbmzc2Srz42pvnanuhKu6pIdNFJRcp7/zej76UZiuXXAlsdY98jSO7a6Ii/n+/pkCzp6vjk88Q9P/I/eToXy6ubcNTjZ9O9qcPM/epLLmzm5L3c3WJYtzFdJYU/8U7By9/fff/mf/+Wf/5//+7/++MffjQbtTz957+Bw2u01iqVcPBHZ2vavu2zSLjAMQzqdRg9rr4/zMQYjJ+FPkTiK6CkapWiUEbW8CdyIovmW05dQmjdvO2O33Lt34w08XIEjFTLdEYtDOtcj0n0wVF0LlF6JtpY2csuO+A1vEnRG1Ru5tfyE2W1y0Sr1wacvfvzPX/6X//THv/75dz/+/vPvv3rvN798kE9uJCLedRtjEpBsMpiMu7c2jW6niWdQ6b6MKJJOp1HkYaO40GJyYckRnNDDJIDSEMHKDVYoWlxNVBSphizfVcYKQKnLtGfm/oG9POSLfTbXFeszx+xqY3a1UZzC3syN7ZI60ydyfcQZeWUrIy8N6coEj9eUpSGd7xHmzX9wR18Jl1XpDryVBgtdw6O3U/5dWbRE5LvGzaTW4L1pCLy0kVmNtTUb+SV3ainWAmMdeaix8q8//vB//dtf/o9/+8u//vUP7/zi+WvPLh/cP241ysVCOhrZCgZ8606rdIeXpjClQgZBAEku8p/jKYpEry+noDgGMRzMi5jZoXd6iUiO8UWgYEazmVJtFQDD5o1cb0GqkRJYHHKJpmq3pW7sM8mW1ptaCeQVvsxtW+RnnvQ9S/gV09ZLyQ68vWvINzZ++MPX//X//Jf/9Nfvf/zTN3/+w6d/+v0nb7911Wlkgpu24KZtc8OSy+wUctvbQavNJmCYDsUg0cAaDJTZzIoizjB6hkFwHCAZGCVBPaHCWYAxqbcTtuNHW7WhEC/LmzMq18R2y2C6rm/NxFKXCWc1g1P7/Mo7vE/U9rTVPWir+Kp589VoGdptKUsT+OCppTbX12ZYuLRcm5jCedCfkrljd03+G76EPFUDGhO2f+SJlVB/atW+c9OXljGefzBtv+qI3d3t4f6C0l+RbTeVOw3YGr/3X/76h3/7l3/+7utPPnzvl19/8eGLpw/uXxwNes1qOZeIhzb9HoPIQqBGuj2NIqDkvGgGpxlcuk/K0BhFIpyAGkyk3Y1thgyxAlcbempTc6HPF4ZCbsBnO46dPCtNbrcK91IdbboDZLpgpAbstpGt0ooj9vPtiirS0G2XlOGqxrNNnj1u/fHPv/39P3/z/Xcf//bbj/74w8fff/PuR++9NhlUCrnwuoMLba93WrlyMez3GQwGCgDkao1cEBmrlV/wD4dgmEY67kXQej2mBVA5Sms9W/zooLR3f6Mztxw+Wp9eWEpdqj4S+oeWcE4ZSKxtxlfnV97z10OHzw3FsSI3VK3H//dAWlmdGraLS9m+LtdX7TZlnSNmdmUZnXn3H27dfyvaObKEi5AlcCtdBw+uPLWxJVWnImVdICNPtGBfWubLrBmCL6cGZLg
BxQegLfkqv/WqIyn74+++/N03n3z83ltnR+Oz49mTh2fTUbfXruWyu5Fw0O2y//2yCUPjsF6HE/Ci8ydhnNBTJMLQGMuiLIsazYRjnXd6iUDYmK6Za0Pv/Go7UkJ8KXllasi0XdeLDqTViToUreh2m5pEQx0srsaaWm/m5570S/E609x3BzJy3vOT7ST7+rvT3/7u06+//fCzT3713Tcff/fVu19++vYP337aqmXbjWJoyxsI2LudYjTmXXfxRhOHoIBWp2Q50mRmKRpGUTUEyTmWYBkcJXUArADxVYLXBpL4ydNiaSiOLzYuXk+cvYhVR+ZS39Das4XzoGNrmXW81Jo7Lt/MHD1bD5eWCwPQHPgPGzFNrEjEy0goB7gT/2Gn9HJxIJSGYvfIPb7YPHq2s/coUJsZAhl5rATXp+ZYCQ6m1IL3J7sNfWEopDp0qq+NNuXejCxU022W5Lb4za2KNlQHv/3ig4/effON5w8GnXJkZ6PbKs8n/V67VipmIuGg3WaS/q8Gq8XAsSSgU9EMrgNUKAbqYS1JwBxLcBzG87jBhDvWec8mG0uv76SpaJ6rjM2uyKotdCfdpWqTYPcwmuuy0TKc75Ob6dXCECmO0GhDU55RsZYsWLplD63tFLGtnHI7r+rvR5+9M/zqmw8++exXX37+7mef/Orrz3/1n3785rMP3z7a67cbxVa9kEwGa9VkNOb1+owmM4+ggA5QUTTGCwRJ6VWqJbX6nsBTPEfCuAaAFQit5C2IdVNZ6rsqY2NlbHzwZvLg0XZtbEk36HgZKfWFSAFlHS/lO/zFa8nhuSh4/50/dYN1/Y/xEsk7X60MDfWpOZj/Wbq7PLnvL48MkRISLi6c8uHT7e28qjBY/J72vr05twaSqu28snNkmVxuzh4Gd7vq3Bhyp5ZzY1rY+qkvL4u24HAD+vrTd9554/Jor1fOR30uU72cGvbq3VY5l92NRracDrNr3Sq5YIElaGLReeKEHscBggAJApQulSyWkTBbGV9Q2NgSwxmh3PW5I0oN9e8Y18+DebB3vJVqmRINbbyuyfe4eBWN1+DdBhrILheGWKhyi9/4/4cKAueSlUeWizcy1bHYPbTvnWQqLd9H77/92ce/+e1XH3zz+bvv/vq1157fn4+HJwd7zXoqnQx4vSaXSzSZeYpGdZAGIxGaQfSwWq5YUmtk0jlDPa6EMAVMy2xeknPfLgys2yVNvAl3jrx7jyK+BM67V23BlXSTr++xW3lZdUY3DxZCXB2LlQnuT972xyF/HGpMHdkWv1O8tZW/0Tn0tA/c7qjMn1QmGtrRfVswf6s0gbYLq5Up1dgX0h3Yu3sz0VDXZsbWgTWYvRutrKXqRGNmqY1tyRrt3FJ6wtqvP33nzWdnw265UohFd7yDTnk8aLbqhUopGw4FApser8ex7rTYbUajQJtEBif0KPa3fUbpLpuEv9XOeTesNhfuCy7AL3W8po27KuJ/2ipAkSoWzOLrEa0/vRTMrRT6fGkouqPLySbmTy+FywrLzr9nvf8/f5L0xNDG3JlucrkOlWpghZrrwZPGd19/8t3Xn3z92W/+/Psvv/ri/U8/fue9X799cXJUKcX8PlM87t/ZcRlNHEkhCK4naQzDAQBUaHVrq2t3CBLmeBImVBCmwDi5wQGbA7LSyJ5oIeWpUB5bL97MHj8tdQ5C7ogyUaWlEw65PpLp6jMtYnTmrs3IfF+/mdDHikx5YI4U0GxPWZ1B2bYQTIPpFuVLyHebusUHqdzLD3XlCRmva+J13XrkZn4AF0dovs9sphTJprow0Bd7fHvfXh1Zd7L6YBJpzzc+ee/Fs6u9k4Nep5EpFxKzcWs6au/P+qNBu1TM7Gz7N3zrm363f8PlcpiNAs2yKElCNA1LF96l/Dcaaaudc3vNnk1u3UcXu+5s02HcvGMK3E22qfWYbDuPx2rsblOXbAHhymplhtt3biSbcLrFxCrYZnolkJGFCrpYRW/auCu4b5bGYKa
rGux5f/uny6++/PCrLz/8/NNf/+GHL7/45Jd//dNX33z20dX5caUQSycC29tuj8f8387TogSJgJBarVnTAUqFckU6AqrHlTChwsU11qqRpvHepCLb54ZnodrU8/EP9/snoWgdLE3Y4kRdnKiDhVe8qX9Kt3X9U0O2v7we+/eB9EppSM4eWXMDoLZHFsdIpoO3Diz9w+BOBg+kV+JVbWmIt/b5bFs8eZYcnoQ9ETDTpDoHjqOngUhRU+rhJ0/8L36T2nvgak6xWHG5MzV3Z5ZP33/ttceHjx7sdZvZXrt0uNe/ON17/uT+1eXp/t6okE/lc8lsJrEoBJfNbhGli2wsi/I8TtMwRekFgTCbWZOFdnvNobh9J2Zrzba20wxi/oedIhqtoekuMzjbOn0tW5lSW3lZtg/GG0r7zo1IWRvM6Cpjc6ikTHfgWEWf7zO24Apm+km6o6xM9b2Z++pF9rffffbVlx9+/92nX3/5wdtvXT04m1ydH98/Oei3i5ndYCIRdLtNRhMniDSKQXpYB4ALeVKpV7U6BYwABLngH5TSGNahUNoequhyQzpcgUNl/WYanV3G3/v2ZCOJ7pQ1pQkbqd1OtO7FGvdClVvptq4wgJuHutJE3j+xDM9sgwsxPwQTTVVxjGzllMUhU+rZq8P1bAfpHpnaB0JjztYmjgdv5qsjz/mL0uDEPXuwef56qHdsrY+Z2YXz6JHv/HmwOcWKXd3s1HP4YPOTD1774DdP33hxfjBrTUb1s5Px46vTt9969uYbT99689mzp5fz6SCbSexs+/1+58LXXOMv3eXkOVK6+2kxC3YnZ3dyG9tcsR5MN8VMyxCpAbkhVRjjrWPx5EUq1RZswZVoiWgc4OHKPd77P0SrS9UpUR5j23lVrkfmB3iiAZi3brOel/rnhvYxlyxTqQr9m9+8+N3vPv3Ln3/71Rfvf/T+k7OT1qRXGHVy405l1q/HI36HlbOYBYGntDrlmnz53vLNBf7KVa1GodWqMQyBcCVMqp1BYnJaLI5V/XOiPKFLY6oyto7OA4NTT2VsLE/oTBfZbalTHW3niE+1tMMz2+S+c3CB1/d17ROif8GUZip/7p98qZvB/FKuR9RmQiRP5duW/r6r1BFyDWZw4O4eeFJ1pn9iKw6oaBFJ1anqWDx8vNWeC7sVbayoyreRwYH38DK8/8B/eBX48N1nn330xrvvPHn0YG82ab71xtXrz6++/uLDjz789cX54XTcm08HlVJ2e2vD73fa7aLdLkoG32ikeY5kmb+d97DaGZ/fEgiL9W4klMen98OFER3IK2JNbf/CcvZ6JpiDTRt3My2hf85leup4fXkj9dJOfqUwgAXvP+024P6ZNV7XeZMK3P4PuQEUKsvqQ9v+/dC33374hz988cP3ny8aoY9fXN4fVPOhUSd3MG5Xc/HAht1qog0iQ5GIVqdckd199cZPIb3mb5cBVQoQ1KmhFQhXGlxAubczvmQqM20gI+udWo6exmeXO+kWNTr3XbwV6p6Y6/vU6IG5OiWa+8zwzNaY8+NLKtG8U93Td07JwkSxkf1JqqPND/XTS095zG6nsUzDOD31z8435xeB7tyZrNGZJlceMaG8Kt2gM02mf+w8fy26f+mOl9Sh7FptRFf75sPLcLwINsaGLz5547uv3vngN08fXc7Gw9qLZ+evv/bwqy8//MVbz0+O57NJfz4d1KrZeHwrHt/a3vZKmzImI2c0sDxHciwhCouP71jnbA4mEKEr7c1gRtM7dqX74HrildLQ2T0KVuZksLjiS96K11XVKVoa6bdza6kmFG/czfRW47W1cGm5OiN3mxpn4u5OVRuqaFtH5uHx9vnz/LvvPv3zn7/413/53R9++PyH7966OKmOOvH9cfbhyX45FQttuX1us0FkMBRUyFfu3H71pZ//w+raknSVWKVSqNVKmeoWiClQYdkX4XItdHhsm5y5Kn2m1BfCebA+Xq+NnGfPE/WJdXAm9E646hgYn7O
jE2uuBWebQrLKZHtovL4Qr/aRWJ0Tgwvj/qPtaBm2b92wBl7p7blPHsVOHm315tZim6gP+dbUVOzQe/eD5a7YORSHZ5Zsi93JgrkeUZ3y2Q43PNvI9U3zh5HHV9OvPnvr3XcePXt8MJ+2RoPqL956+vFHv3rj9SdXl6fPn10d7I8b9Xw2G/P57IGASzpXYLWIVotoNLCiQIvCohBEI+p08a5NOLQrZNp0Y27tnArpPtg/2W7t+4PFlWhDFSqtbmbuplqqygQZndtzXbQ618Ubd+O1tXRbvZG6vdvUpAeEL7ta2xNLE7Y2cg2Otr744u0ff/zyxz9985c/f/vVZ0/Pjsone6Wzg8r+oD2ol3ZjgfC2x+kwswyukK/cvPHzn7/8jz9/+R9lK3dVylWNRqVWK1eUN3XIGsLfC2dtuRba3TfNLjzj0/XOgWO3inX3NwZHgd6huz6xXry5cfLC3dpDGjN9oYM2JmypZ66PHbU5503cjdXU/TNz91TonYmDU0++x1XHQn1qmJxsnj/dnZ95hweO0ZEjW0eaE2P/wF7uipsxZbgoL/TRzv5679BdHNKNPUNjbj94HK5OHaOLrf83AAD//xdIsOQ= \ No newline at end of file diff --git a/src/terminal/main.zig b/src/terminal/main.zig index a1b6f6c59..771fcab42 100644 --- a/src/terminal/main.zig +++ b/src/terminal/main.zig @@ -5,6 +5,7 @@ const stream = @import("stream.zig"); const ansi = @import("ansi.zig"); const csi = @import("csi.zig"); const sgr = @import("sgr.zig"); +pub const apc = @import("apc.zig"); pub const point = @import("point.zig"); pub const color = @import("color.zig"); pub const kitty = @import("kitty.zig"); diff --git a/src/terminal/parse_table.zig b/src/terminal/parse_table.zig index a8d2ec768..094128cc7 100644 --- a/src/terminal/parse_table.zig +++ b/src/terminal/parse_table.zig @@ -125,10 +125,10 @@ fn genTable() Table { const source = State.sos_pm_apc_string; // events - single(&result, 0x19, source, source, .ignore); - range(&result, 0, 0x17, source, source, .ignore); - range(&result, 0x1C, 0x1F, source, source, .ignore); - range(&result, 0x20, 0x7F, source, source, .ignore); + single(&result, 0x19, source, source, .apc_put); + range(&result, 0, 0x17, source, source, .apc_put); + range(&result, 0x1C, 0x1F, source, source, .apc_put); + range(&result, 0x20, 0x7F, source, source, .apc_put); } // escape diff --git a/src/terminal/stream.zig b/src/terminal/stream.zig index c57fb3122..4e3774e4d 100644 --- a/src/terminal/stream.zig +++ b/src/terminal/stream.zig @@ -64,8 +64,17 @@ pub fn 
Stream(comptime Handler: type) type { .esc_dispatch => |esc| try self.escDispatch(esc), .osc_dispatch => |cmd| try self.oscDispatch(cmd), .dcs_hook => |dcs| log.warn("unhandled DCS hook: {}", .{dcs}), - .dcs_put => |code| log.warn("unhandled DCS put: {}", .{code}), + .dcs_put => |code| log.warn("unhandled DCS put: {x}", .{code}), .dcs_unhook => log.warn("unhandled DCS unhook", .{}), + .apc_start => if (@hasDecl(T, "apcStart")) { + try self.handler.apcStart(); + } else log.warn("unimplemented APC start", .{}), + .apc_put => |code| if (@hasDecl(T, "apcPut")) { + try self.handler.apcPut(code); + } else log.warn("unimplemented APC put: {x}", .{code}), + .apc_end => if (@hasDecl(T, "apcEnd")) { + try self.handler.apcEnd(); + } else log.warn("unimplemented APC end", .{}), } } } diff --git a/src/termio/Exec.zig b/src/termio/Exec.zig index e3aece1db..35a74c837 100644 --- a/src/termio/Exec.zig +++ b/src/termio/Exec.zig @@ -47,10 +47,6 @@ subprocess: Subprocess, /// just stores internal state about a grid. terminal: terminal.Terminal, -/// The stream parser. This parses the stream of escape codes and so on -/// from the child process and calls callbacks in the stream handler. -terminal_stream: terminal.Stream(StreamHandler), - /// The shared render state renderer_state: *renderer.State, @@ -75,6 +71,7 @@ data: ?*EventData, /// pass around Config pointers which makes memory management a pain. 
pub const DerivedConfig = struct { palette: terminal.color.Palette, + image_storage_limit: usize, pub fn init( alloc_gpa: Allocator, @@ -84,6 +81,7 @@ pub const DerivedConfig = struct { return .{ .palette = config.palette.value, + .image_storage_limit = config.@"image-storage-limit", }; } @@ -108,13 +106,20 @@ pub fn init(alloc: Allocator, opts: termio.Options) !Exec { errdefer term.deinit(alloc); term.color_palette = opts.config.palette; + // Set the image size limits + try term.screen.kitty_images.setLimit(alloc, opts.config.image_storage_limit); + try term.secondary_screen.kitty_images.setLimit(alloc, opts.config.image_storage_limit); + var subprocess = try Subprocess.init(alloc, opts); errdefer subprocess.deinit(); + // Initial width/height based on subprocess + term.width_px = subprocess.screen_size.width; + term.height_px = subprocess.screen_size.height; + return Exec{ .alloc = alloc, .terminal = term, - .terminal_stream = undefined, .subprocess = subprocess, .renderer_state = opts.renderer_state, .renderer_wakeup = opts.renderer_wakeup, @@ -247,6 +252,16 @@ pub fn changeConfig(self: *Exec, config: *DerivedConfig) !void { // Update the palette. Note this will only apply to new colors drawn // since we decode all palette colors to RGB on usage. self.terminal.color_palette = config.palette; + + // Set the image size limits + try self.terminal.screen.kitty_images.setLimit( + self.alloc, + config.image_storage_limit, + ); + try self.terminal.secondary_screen.kitty_images.setLimit( + self.alloc, + config.image_storage_limit, + ); } /// Resize the terminal. @@ -256,7 +271,7 @@ pub fn resize( screen_size: renderer.ScreenSize, padding: renderer.Padding, ) !void { - // Update the size of our pty + // Update the size of our pty. 
const padded_size = screen_size.subPadding(padding); try self.subprocess.resize(grid_size, padded_size); @@ -269,7 +284,15 @@ pub fn resize( defer self.renderer_state.mutex.unlock(); // Update the size of our terminal state - try self.terminal.resize(self.alloc, grid_size.columns, grid_size.rows); + try self.terminal.resize( + self.alloc, + grid_size.columns, + grid_size.rows, + ); + + // Update our pixel sizes + self.terminal.width_px = padded_size.width; + self.terminal.height_px = padded_size.height; } } @@ -441,6 +464,9 @@ const EventData = struct { // Stop our process watcher self.process.deinit(); + + // Clear any StreamHandler state + self.terminal_stream.handler.deinit(); } /// This queues a render operation with the renderer thread. The render @@ -658,6 +684,9 @@ const Subprocess = struct { log.warn("shell could not be detected, no automatic shell integration will be injected", .{}); } + // Our screen size should be our padded size + const padded_size = opts.screen_size.subPadding(opts.padding); + return .{ .arena = arena, .env = env, @@ -665,7 +694,7 @@ const Subprocess = struct { .path = final_path, .args = args, .grid_size = opts.grid_size, - .screen_size = opts.screen_size, + .screen_size = padded_size, }; } @@ -1046,11 +1075,21 @@ const StreamHandler = struct { grid_size: *renderer.GridSize, terminal: *terminal.Terminal, + /// The APC command handler maintains the APC state. APC is like + /// CSI or OSC, but it is a private escape sequence that is used + /// to send commands to the terminal emulator. This is used by + /// the kitty graphics protocol. + apc: terminal.apc.Handler = .{}, + /// This is set to true when a message was written to the writer /// mailbox. This can be used by callers to determine if they need /// to wake up the writer. 
writer_messaged: bool = false, + pub fn deinit(self: *StreamHandler) void { + self.apc.deinit(); + } + inline fn queueRender(self: *StreamHandler) !void { try self.ev.queueRender(); } @@ -1060,6 +1099,35 @@ const StreamHandler = struct { self.writer_messaged = true; } + pub fn apcStart(self: *StreamHandler) !void { + self.apc.start(); + } + + pub fn apcPut(self: *StreamHandler, byte: u8) !void { + self.apc.feed(self.alloc, byte); + } + + pub fn apcEnd(self: *StreamHandler) !void { + var cmd = self.apc.end() orelse return; + defer cmd.deinit(self.alloc); + + // log.warn("APC command: {}", .{cmd}); + switch (cmd) { + .kitty => |*kitty_cmd| { + if (self.terminal.kittyGraphics(self.alloc, kitty_cmd)) |resp| { + var buf: [1024]u8 = undefined; + var buf_stream = std.io.fixedBufferStream(&buf); + try resp.encode(buf_stream.writer()); + const final = buf_stream.getWritten(); + if (final.len > 2) { + // log.warn("kitty graphics response: {s}", .{std.fmt.fmtSliceHexLower(final)}); + self.messageWriter(try termio.Message.writeReq(self.alloc, final)); + } + } + }, + } + } + pub fn print(self: *StreamHandler, ch: u21) !void { try self.terminal.print(ch); } @@ -1144,7 +1212,7 @@ const StreamHandler = struct { try self.queueRender(); } - self.terminal.eraseDisplay(mode); + self.terminal.eraseDisplay(self.alloc, mode); } pub fn eraseLine(self: *StreamHandler, mode: terminal.EraseLine) !void { @@ -1239,9 +1307,9 @@ const StreamHandler = struct { }; if (enabled) - self.terminal.alternateScreen(opts) + self.terminal.alternateScreen(self.alloc, opts) else - self.terminal.primaryScreen(opts); + self.terminal.primaryScreen(self.alloc, opts); // Schedule a render since we changed screens try self.queueRender(); @@ -1409,7 +1477,7 @@ const StreamHandler = struct { pub fn fullReset( self: *StreamHandler, ) !void { - self.terminal.fullReset(); + self.terminal.fullReset(self.alloc); } pub fn queryKittyKeyboard(self: *StreamHandler) !void { diff --git a/src/termio/Options.zig 
b/src/termio/Options.zig index cad5d5665..1fd9d034a 100644 --- a/src/termio/Options.zig +++ b/src/termio/Options.zig @@ -12,6 +12,9 @@ grid_size: renderer.GridSize, /// The size of the viewport in pixels. screen_size: renderer.ScreenSize, +/// The padding of the viewport. +padding: renderer.Padding, + /// The full app configuration. This is only available during initialization. /// The memory it points to is NOT stable after the init call so any values /// in here must be copied.