diff --git a/src/Air.zig b/src/Air.zig index 1b394ca1c161..353ac5f6ce69 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1249,8 +1249,9 @@ pub const Inst = struct { // bigger than expected. Note that in safety builds, Zig is allowed // to insert a secret field for safety checks. comptime { - if (!std.debug.runtime_safety) { - assert(@sizeOf(Data) == 8); + switch (builtin.mode) { + .Debug, .ReleaseSafe => {}, + .ReleaseFast, .ReleaseSmall => assert(@sizeOf(Data) == 8), } } }; diff --git a/src/Compilation.zig b/src/Compilation.zig index 7574fefc2008..f98d19577eb4 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3684,7 +3684,9 @@ const Header = extern struct { items_len: u32, extra_len: u32, limbs_len: u32, - string_bytes_len: u32, + large_strings_len: u32, + large_string_bytes_len: u32, + small_string_bytes_len: u32, tracked_insts_len: u32, files_len: u32, }, @@ -3732,7 +3734,9 @@ pub fn saveState(comp: *Compilation) !void { .items_len = @intCast(local.mutate.items.len), .extra_len = @intCast(local.mutate.extra.len), .limbs_len = @intCast(local.mutate.limbs.len), - .string_bytes_len = @intCast(local.mutate.strings.len), + .large_strings_len = @intCast(local.mutate.large_strings.len), + .large_string_bytes_len = @intCast(local.mutate.large_string_bytes.len), + .small_string_bytes_len = @intCast(local.mutate.small_string_bytes.len), .tracked_insts_len = @intCast(local.mutate.tracked_insts.len), .files_len = @intCast(local.mutate.files.len), }, @@ -3775,8 +3779,15 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs, @ptrCast(local.shared.items.view().items(.data)[0..pt_header.intern_pool.items_len])); addBuf(&bufs, @ptrCast(local.shared.items.view().items(.tag)[0..pt_header.intern_pool.items_len])); } - if (pt_header.intern_pool.string_bytes_len > 0) { - addBuf(&bufs, local.shared.strings.view().items(.@"0")[0..pt_header.intern_pool.string_bytes_len]); + if (pt_header.intern_pool.large_strings_len > 0) { + addBuf(&bufs, 
@ptrCast(local.shared.large_strings.view().items(.offset)[0..pt_header.intern_pool.large_strings_len])); + addBuf(&bufs, @ptrCast(local.shared.large_strings.view().items(.len)[0..pt_header.intern_pool.large_strings_len])); + } + if (pt_header.intern_pool.large_string_bytes_len > 0) { + addBuf(&bufs, local.shared.large_string_bytes.view().items(.@"0")[0..pt_header.intern_pool.large_string_bytes_len]); + } + if (pt_header.intern_pool.small_string_bytes_len > 0) { + addBuf(&bufs, local.shared.small_string_bytes.view().items(.@"0")[0..pt_header.intern_pool.small_string_bytes_len]); } if (pt_header.intern_pool.tracked_insts_len > 0) { addBuf(&bufs, @ptrCast(local.shared.tracked_insts.view().items(.@"0")[0..pt_header.intern_pool.tracked_insts_len])); diff --git a/src/InternPool.zig b/src/InternPool.zig index 5e2d8c4b5c1a..b4369f168aca 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1060,7 +1060,9 @@ const Local = struct { items: ListMutate, extra: ListMutate, limbs: ListMutate, - strings: ListMutate, + large_strings: ListMutate, + large_string_bytes: ListMutate, + small_string_bytes: ListMutate, tracked_insts: ListMutate, files: ListMutate, maps: ListMutate, @@ -1074,7 +1076,9 @@ const Local = struct { items: List(Item), extra: Extra, limbs: Limbs, - strings: Strings, + large_strings: LargeStrings, + large_string_bytes: StringBytes, + small_string_bytes: StringBytes, tracked_insts: TrackedInsts, files: List(File), maps: Maps, @@ -1098,7 +1102,8 @@ const Local = struct { @sizeOf(u64) => List(struct { u64 }), else => @compileError("unsupported host"), }; - const Strings = List(struct { u8 }); + const LargeStrings = List(struct { offset: u32, len: u32 }); + const StringBytes = List(struct { u8 }); const TrackedInsts = List(struct { TrackedInst.MaybeLost }); const Maps = List(struct { FieldMap }); const Navs = List(Nav.Repr); @@ -1428,17 +1433,30 @@ const Local = struct { }; } - /// In order to store references to strings in fewer bytes, we copy all - /// string 
bytes into here. String bytes can be null. It is up to whomever - is referencing the data here whether they want to store both index and length, - thus allowing null bytes, or store only index, and use null-termination. The - `strings` array is agnostic to either usage. - pub fn getMutableStrings(local: *Local, gpa: Allocator) Strings.Mutable { + /// For a given pair of `s: String, ip: *const InternPool` for which `s.unwrap(ip).size_class` + /// is `.large`, `s.unwrap(ip).index` refers to an index into this array. The corresponding + /// value is both an offset into `large_string_bytes` and a length. + pub fn getMutableLargeStrings(local: *Local, gpa: Allocator) LargeStrings.Mutable { return .{ .gpa = gpa, .arena = &local.mutate.arena, - .mutate = &local.mutate.strings, - .list = &local.shared.strings, + .mutate = &local.mutate.large_strings, + .list = &local.shared.large_strings, + }; + } + + /// Depending on `size_class`, will return the bytes for the respective category of string. 
+ pub fn getMutableStringBytes(local: *Local, gpa: Allocator, size_class: String.SizeClass) StringBytes.Mutable { + const mutate, const list = switch (size_class) { + .large => .{ &local.mutate.large_string_bytes, &local.shared.large_string_bytes }, + .small => .{ &local.mutate.small_string_bytes, &local.shared.small_string_bytes }, + }; + + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = mutate, + .list = list, }; } @@ -1759,6 +1777,8 @@ pub const String = enum(u32) { empty = 0, _, + pub const max_small_string_len = std.simd.suggestVectorLength(u8) orelse std.atomic.cache_line; + pub fn toSlice(string: String, len: u64, ip: *const InternPool) []const u8 { return string.toOverlongSlice(ip)[0..@intCast(len)]; } @@ -1773,27 +1793,64 @@ pub const String = enum(u32) { return @enumFromInt(@intFromEnum(string)); } + pub const SizeClass = enum(u1) { + small = 0, + large = 1, + + pub fn detect(len: u32, tid: Zcu.PerThread.Id, ip: *InternPool) SizeClass { + if (len > max_small_string_len) + return .large; + + const local = ip.getLocal(tid); + + return @enumFromInt(@intFromBool(local.mutate.small_string_bytes.len >= ip.getIndexMask(u31))); + } + }; + const Unwrapped = struct { tid: Zcu.PerThread.Id, - index: u32, + size_class: SizeClass, + index: u31, fn wrap(unwrapped: Unwrapped, ip: *const InternPool) String { assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); - assert(unwrapped.index <= ip.getIndexMask(u32)); - return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | unwrapped.index); + assert(unwrapped.index <= ip.getIndexMask(u31)); + return blk: { + const tid = @as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32; + const size_class = @as(u32, @intFromEnum(unwrapped.size_class)) << (ip.tid_shift_32 - 1); + break :blk @enumFromInt(tid | size_class | unwrapped.index); + }; } }; + fn unwrap(string: String, ip: *const InternPool) Unwrapped { + const bits: u32 = @intFromEnum(string) & ip.getIndexMask(u32); + const large = 
(bits & (@as(u32, 1) << (ip.tid_shift_32 - 1))) != 0; + return .{ .tid = @enumFromInt(@intFromEnum(string) >> ip.tid_shift_32 & ip.getTidMask()), - .index = @intFromEnum(string) & ip.getIndexMask(u32), + .size_class = @enumFromInt(@intFromBool(large)), + .index = @intCast(bits & ip.getIndexMask(u31)), }; } fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { - const unwrapped_string = string.unwrap(ip); - const strings = ip.getLocalShared(unwrapped_string.tid).strings.acquire(); - return strings.view().items(.@"0")[unwrapped_string.index..]; + const unwrapped = string.unwrap(ip); + const local = ip.getLocalShared(unwrapped.tid); + + switch (unwrapped.size_class) { + .large => { + const large_strings = local.large_strings.acquire(); + const string_bytes = local.large_string_bytes.acquire(); + const data = large_strings.view().get(unwrapped.index); + return string_bytes.view().items(.@"0")[data.offset..]; + }, + .small => { + @branchHint(.likely); + const string_bytes = local.small_string_bytes.acquire(); + return string_bytes.view().items(.@"0")[unwrapped.index..]; + }, + } } const debug_state = InternPool.debug_state; @@ -1848,12 +1905,45 @@ pub const NullTerminatedString = enum(u32) { } pub fn toSlice(string: NullTerminatedString, ip: *const InternPool) [:0]const u8 { - const overlong_slice = string.toString().toOverlongSlice(ip); - return overlong_slice[0..std.mem.indexOfScalar(u8, overlong_slice, 0).? :0]; + const unwrapped = string.toString().unwrap(ip); + const local = ip.getLocalShared(unwrapped.tid); + switch (unwrapped.size_class) { + .large => { + const data = local.large_strings.view().get(unwrapped.index); + return local.large_string_bytes.view().items(.@"0")[data.offset..][0..data.len :0]; + }, + .small => { + // Most calls to this function are on small strings. Let the optimizer know that. 
+ @branchHint(.likely); + const len = string.smallLength(ip); + return local.small_string_bytes.view().items(.@"0")[unwrapped.index..][0..len :0]; + }, + } } pub fn length(string: NullTerminatedString, ip: *const InternPool) u32 { - return @intCast(string.toSlice(ip).len); + const unwrapped = string.toString().unwrap(ip); + switch (unwrapped.size_class) { + .large => { + const local = ip.getLocalShared(unwrapped.tid); + return local.large_strings.view().items(.len)[unwrapped.index]; + }, + .small => { + @branchHint(.likely); + return string.smallLength(ip); + }, + } + } + + /// This is separate from `length` to encourage inlining of `length` and + /// potentially `smallLength` with it. + fn smallLength(string: NullTerminatedString, ip: *const InternPool) u32 { + const unwrapped = string.toString().unwrap(ip); + assert(unwrapped.size_class == .small); + const local = ip.getLocalShared(unwrapped.tid); + const overlong = local.small_string_bytes.view().items(.@"0")[unwrapped.index..]; + + return @intCast(std.mem.findScalar(u8, overlong, 0).?); } pub fn eqlSlice(string: NullTerminatedString, slice: []const u8, ip: *const InternPool) bool { @@ -1895,6 +1985,7 @@ pub const NullTerminatedString = enum(u32) { ip: *const InternPool, id: bool, }; + fn format(data: FormatData, writer: *std.Io.Writer) std.Io.Writer.Error!void { const slice = data.string.toSlice(data.ip); if (!data.id) { @@ -6794,7 +6885,9 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .items = .empty, .extra = .empty, .limbs = .empty, - .strings = .empty, + .large_strings = .empty, + .large_string_bytes = .empty, + .small_string_bytes = .empty, .tracked_insts = .empty, .files = .empty, .maps = .empty, @@ -6809,7 +6902,9 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .items = .empty, .extra = .empty, .limbs = .empty, - .strings = .empty, + .large_strings = .empty, + .large_string_bytes = .empty, + .small_string_bytes = .empty, .tracked_insts 
= .empty, .files = .empty, .maps = .empty, @@ -8523,7 +8618,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } if (child == .u8_type) bytes: { - const strings = ip.getLocal(tid).getMutableStrings(gpa); + const size_class: String.SizeClass = .detect(@intCast(len_including_sentinel), tid, ip); + const strings = ip.getLocal(tid).getMutableStringBytes(gpa, size_class); const start = strings.mutate.len; try strings.ensureUnusedCapacity(@intCast(len_including_sentinel + 1)); try extra.ensureUnusedCapacity(@typeInfo(Bytes).@"struct".fields.len); @@ -8553,6 +8649,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All gpa, tid, @intCast(len_including_sentinel), + size_class, .maybe_embedded_nulls, ); items.appendAssumeCapacity(.{ @@ -11762,11 +11859,12 @@ pub fn getOrPutString( slice: []const u8, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { - const strings = ip.getLocal(tid).getMutableStrings(gpa); - try strings.ensureUnusedCapacity(slice.len + 1); - strings.appendSliceAssumeCapacity(.{slice}); - strings.appendAssumeCapacity(.{0}); - return ip.getOrPutTrailingString(gpa, tid, @intCast(slice.len + 1), embedded_nulls); + const size_class: String.SizeClass = .detect(@intCast(slice.len + 1), tid, ip); + const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa, size_class); + try string_bytes.ensureUnusedCapacity(slice.len + 1); + string_bytes.appendSliceAssumeCapacity(.{slice}); + string_bytes.appendAssumeCapacity(.{0}); + return ip.getOrPutTrailingString(gpa, tid, @intCast(slice.len + 1), size_class, embedded_nulls); } pub fn getOrPutStringFmt( @@ -11780,10 +11878,11 @@ pub fn getOrPutStringFmt( // ensure that references to strings in args do not get invalidated const format_z = format ++ .{0}; const len: u32 = @intCast(std.fmt.count(format_z, args)); - const strings = ip.getLocal(tid).getMutableStrings(gpa); - const slice = try strings.addManyAsSlice(len); + 
const size_class: String.SizeClass = .detect(len, tid, ip); + const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa, size_class); + const slice = try string_bytes.addManyAsSlice(len); assert((std.fmt.bufPrint(slice[0], format_z, args) catch unreachable).len == len); - return ip.getOrPutTrailingString(gpa, tid, len, embedded_nulls); + return ip.getOrPutTrailingString(gpa, tid, len, size_class, embedded_nulls); } pub fn getOrPutStringOpt( @@ -11803,23 +11902,45 @@ pub fn getOrPutTrailingString( gpa: Allocator, tid: Zcu.PerThread.Id, len: u32, + size_class: String.SizeClass, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { - const strings = ip.getLocal(tid).getMutableStrings(gpa); - const start: u32 = @intCast(strings.mutate.len - len); - if (len > 0 and strings.view().items(.@"0")[strings.mutate.len - 1] == 0) { - strings.mutate.len -= 1; + const local = ip.getLocal(tid); + const large_strings: Local.LargeStrings.Mutable = switch (size_class) { + .large => local.getMutableLargeStrings(gpa), + .small => undefined, + }; + const string_bytes = local.getMutableStringBytes(gpa, size_class); + const start: u32 = @intCast(string_bytes.mutate.len - len); + const last_byte_is_null = len > 0 and string_bytes.view().items(.@"0")[string_bytes.mutate.len - 1] == 0; + + if (last_byte_is_null) { + string_bytes.mutate.len -= 1; } else { - try strings.ensureUnusedCapacity(1); + try string_bytes.ensureUnusedCapacity(1); } - const key: []const u8 = strings.view().items(.@"0")[start..]; + + if (size_class == .large) + try large_strings.ensureUnusedCapacity(1); + const value: embedded_nulls.StringType() = - @enumFromInt(@intFromEnum((String.Unwrapped{ .tid = tid, .index = start }).wrap(ip))); - const has_embedded_null = std.mem.indexOfScalar(u8, key, 0) != null; + @enumFromInt(@intFromEnum((String.Unwrapped{ + .tid = tid, + .size_class = size_class, + .index = switch (size_class) { + .large => @intCast(large_strings.mutate.len), + .small 
=> @intCast(start), + }, + }).wrap(ip))); + const key: []const u8 = string_bytes.view().items(.@"0")[start..]; + + const null_index = std.mem.indexOfScalar(u8, key, 0); switch (embedded_nulls) { - .no_embedded_nulls => assert(!has_embedded_null), - .maybe_embedded_nulls => if (has_embedded_null) { - strings.appendAssumeCapacity(.{0}); + .no_embedded_nulls => assert(null_index == null), + .maybe_embedded_nulls => if (null_index) |index| { + if (size_class == .large) + large_strings.appendAssumeCapacity(.{ .offset = start, .len = @intCast(index) }); + string_bytes.appendAssumeCapacity(.{0}); return value; }, } @@ -11837,7 +11958,7 @@ pub fn getOrPutTrailingString( const index = entry.acquire().unwrap() orelse break; if (entry.hash != hash) continue; if (!index.eqlSlice(key, ip)) continue; - strings.shrinkRetainingCapacity(start); + string_bytes.shrinkRetainingCapacity(start); return @enumFromInt(@intFromEnum(index)); } shard.mutate.string_map.mutex.lock(); @@ -11853,13 +11974,15 @@ pub fn getOrPutTrailingString( const index = entry.acquire().unwrap() orelse break; if (entry.hash != hash) continue; if (!index.eqlSlice(key, ip)) continue; - strings.shrinkRetainingCapacity(start); + string_bytes.shrinkRetainingCapacity(start); return @enumFromInt(@intFromEnum(index)); } defer shard.mutate.string_map.len += 1; const map_header = map.header().*; if (shard.mutate.string_map.len < map_header.capacity * 3 / 5) { - strings.appendAssumeCapacity(.{0}); + if (size_class == .large) + large_strings.appendAssumeCapacity(.{ .offset = start, .len = len - @intFromBool(last_byte_is_null) }); + string_bytes.appendAssumeCapacity(.{0}); const entry = &map.entries[map_index]; entry.hash = hash; entry.release(@enumFromInt(@intFromEnum(value))); @@ -11901,7 +12024,9 @@ pub fn getOrPutTrailingString( map_index &= new_map_mask; if (map.entries[map_index].value == .none) break; } - strings.appendAssumeCapacity(.{0}); + if (size_class == .large) + large_strings.appendAssumeCapacity(.{ .offset = 
start, .len = len - @intFromBool(last_byte_is_null) }); + string_bytes.appendAssumeCapacity(.{0}); map.entries[map_index] = .{ .value = @enumFromInt(@intFromEnum(value)), .hash = hash, diff --git a/src/Value.zig b/src/Value.zig index 381eacf45ab2..a2567a02191a 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -66,9 +66,10 @@ pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTermi .repeated_elem => |elem| { const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(zcu)); const len: u32 = @intCast(ty.arrayLen(zcu)); - const strings = ip.getLocal(pt.tid).getMutableStrings(zcu.gpa); - try strings.appendNTimes(.{byte}, len); - return ip.getOrPutTrailingString(zcu.gpa, pt.tid, len, .no_embedded_nulls); + const size_class: InternPool.String.SizeClass = .detect(len, pt.tid, ip); + const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(zcu.gpa, size_class); + try string_bytes.appendNTimes(.{byte}, len); + return ip.getOrPutTrailingString(zcu.gpa, pt.tid, len, size_class, .no_embedded_nulls); }, } } @@ -109,18 +110,19 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null const gpa = zcu.gpa; const ip = &zcu.intern_pool; const len: u32 = @intCast(len_u64); - const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); - try strings.ensureUnusedCapacity(len); + const size_class: InternPool.String.SizeClass = .detect(len, pt.tid, ip); + const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa, size_class); + try string_bytes.ensureUnusedCapacity(len); for (0..len) |i| { // I don't think elemValue has the possibility to affect ip.string_bytes. Let's // assert just to be sure. 
- const prev_len = strings.mutate.len; + const prev_len = string_bytes.mutate.len; const elem_val = try val.elemValue(pt, i); - assert(strings.mutate.len == prev_len); + assert(string_bytes.mutate.len == prev_len); const byte: u8 = @intCast(elem_val.toUnsignedInt(zcu)); - strings.appendAssumeCapacity(.{byte}); + string_bytes.appendAssumeCapacity(.{byte}); } - return ip.getOrPutTrailingString(gpa, pt.tid, len, .no_embedded_nulls); + return ip.getOrPutTrailingString(gpa, pt.tid, len, size_class, .no_embedded_nulls); } pub fn fromInterned(i: InternPool.Index) Value { diff --git a/src/Zcu.zig b/src/Zcu.zig index 642d743145ff..ccc72c9dd1de 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1115,11 +1115,13 @@ pub const File = struct { pub fn internFullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString { const gpa = pt.zcu.gpa; const ip = &pt.zcu.intern_pool; - const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); - var w: Writer = .fixed((try strings.addManyAsSlice(file.fullyQualifiedNameLen()))[0]); + const len: u32 = @intCast(file.fullyQualifiedNameLen()); + const size_class: InternPool.String.SizeClass = .detect(len, pt.tid, ip); + const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa, size_class); + var w: Writer = .fixed((try string_bytes.addManyAsSlice(len))[0]); file.renderFullyQualifiedName(&w) catch unreachable; assert(w.end == w.buffer.len); - return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(w.end), .no_embedded_nulls); + return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(w.end), size_class, .no_embedded_nulls); } pub const Index = InternPool.FileIndex; diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 62b8756c4948..8caf66114206 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2460,10 +2460,11 @@ fn updateEmbedFileInner( // The loaded bytes of the file, including a sentinel 0 byte. 
const ip_str: InternPool.String = str: { - const strings = ip.getLocal(tid).getMutableStrings(gpa); - const old_len = strings.mutate.len; - errdefer strings.shrinkRetainingCapacity(old_len); - const bytes = (try strings.addManyAsSlice(size_plus_one))[0]; + const size_class: InternPool.String.SizeClass = .detect(@intCast(size), tid, ip); + const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa, size_class); + const old_len = string_bytes.mutate.len; + errdefer string_bytes.shrinkRetainingCapacity(old_len); + const bytes = (try string_bytes.addManyAsSlice(size_plus_one))[0]; var fr = file.reader(&.{}); fr.size = stat.size; fr.interface.readSliceAll(bytes[0..size]) catch |err| switch (err) { @@ -2471,7 +2472,7 @@ fn updateEmbedFileInner( error.EndOfStream => return error.UnexpectedEof, }; bytes[size] = 0; - break :str try ip.getOrPutTrailingString(gpa, tid, @intCast(bytes.len), .maybe_embedded_nulls); + break :str try ip.getOrPutTrailingString(gpa, tid, @intCast(bytes.len), size_class, .maybe_embedded_nulls); }; if (ip_str_out) |p| p.* = ip_str; diff --git a/src/codegen/x86_64/Mir.zig b/src/codegen/x86_64/Mir.zig index caf41ffb392d..38a07cc8a49b 100644 --- a/src/codegen/x86_64/Mir.zig +++ b/src/codegen/x86_64/Mir.zig @@ -1726,11 +1726,13 @@ pub const Inst = struct { }; comptime { - if (!std.debug.runtime_safety) { - // Make sure we don't accidentally make instructions bigger than expected. - // Note that in safety builds, Zig is allowed to insert a secret field for safety checks. - assert(@sizeOf(Data) == 8); + // Make sure we don't accidentally make instructions bigger than expected. + // Note that in safety builds, Zig is allowed to insert a secret field for safety checks. + switch (builtin.mode) { + .Debug, .ReleaseSafe => {}, + .ReleaseFast, .ReleaseSmall => assert(@sizeOf(Data) == 8), } + const Mnemonic = @import("Encoding.zig").Mnemonic; if (@typeInfo(Mnemonic).@"enum".fields.len != 977 or @typeInfo(Fixes).@"enum".fields.len != 231 or