diff --git a/lib/compiler_rt/atomics.zig b/lib/compiler_rt/atomics.zig index 09fd1c2c906c..103241ccb124 100644 --- a/lib/compiler_rt/atomics.zig +++ b/lib/compiler_rt/atomics.zig @@ -12,43 +12,17 @@ pub const panic = common.panic; // Some architectures support atomic load/stores but no CAS, but we ignore this // detail to keep the export logic clean and because we need some kind of CAS to // implement the spinlocks. -const supports_atomic_ops = switch (arch) { - .msp430, .avr, .bpfel, .bpfeb => false, - .arm, .armeb, .thumb, .thumbeb => - // The ARM v6m ISA has no ldrex/strex and so it's impossible to do CAS - // operations (unless we're targeting Linux, the kernel provides a way to - // perform CAS operations). - // XXX: The Linux code path is not implemented yet. - !builtin.cpu.has(.arm, .has_v6m), - else => true, -}; - -// The size (in bytes) of the biggest object that the architecture can -// load/store atomically. -// Objects bigger than this threshold require the use of a lock. -const largest_atomic_size = switch (arch) { - // On SPARC systems that lacks CAS and/or swap instructions, the only - // available atomic operation is a test-and-set (`ldstub`), so we force - // every atomic memory access to go through the lock. - .sparc => if (builtin.cpu.has(.sparc, .hasleoncasa)) @sizeOf(usize) else 0, - - // XXX: On x86/x86_64 we could check the presence of cmpxchg8b/cmpxchg16b - // and set this parameter accordingly. - else => @sizeOf(usize), -}; - -// The size (in bytes) of the smallest atomic object that the architecture can -// perform fetch/exchange atomically. Note, this does not encompass load and store. -// Objects smaller than this threshold are implemented in terms of compare-exchange -// of a larger value. -const smallest_atomic_fetch_exch_size = switch (arch) { - // On AMDGCN, there are no instructions for atomic operations other than load and store - // (as of LLVM 15), and so these need to be implemented in terms of atomic CAS. - .amdgcn => @sizeOf(u32), - else => @sizeOf(u8), -}; - -const cache_line_size = 64; +const supports_atomic_ops = + std.atomic.Op.supported(.{ .cmpxchg = .strong }, usize) or + // We have a specialized SPARC spinlock implementation + builtin.cpu.arch.isSPARC(); + +// This is the size of the smallest value that the target can perform a compare-and-swap on. +// The function `wideUpdate` can be used to implement RMW operations on types smaller than this. +const wide_update_size = std.atomic.Op.supportedSizes( + .{ .cmpxchg = .weak }, + builtin.cpu.arch, +).findMin(builtin.cpu.features); const SpinlockTable = struct { // Allocate ~4096 bytes of memory for the spinlock table @@ -63,7 +37,7 @@ const SpinlockTable = struct { // Prevent false sharing by providing enough padding between two // consecutive spinlock elements - v: if (arch.isSPARC()) sparc_lock else other_lock align(cache_line_size) = .Unlocked, + v: if (arch.isSPARC()) sparc_lock else other_lock align(std.atomic.cache_line) = .Unlocked, fn acquire(self: *@This()) void { while (true) { @@ -165,12 +139,12 @@ fn __atomic_compare_exchange( // aligned. 
inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T { _ = model; - if (@sizeOf(T) > largest_atomic_size) { + if (comptime std.atomic.Op.supported(.load, T)) { + return @atomicLoad(T, src, .seq_cst); + } else { var sl = spinlocks.get(@intFromPtr(src)); defer sl.release(); return src.*; - } else { - return @atomicLoad(T, src, .seq_cst); } } @@ -196,12 +170,12 @@ fn __atomic_load_16(src: *u128, model: i32) callconv(.c) u128 { inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void { _ = model; - if (@sizeOf(T) > largest_atomic_size) { + if (comptime std.atomic.Op.supported(.store, T)) { + @atomicStore(T, dst, value, .seq_cst); + } else { var sl = spinlocks.get(@intFromPtr(dst)); defer sl.release(); dst.* = value; - } else { - @atomicStore(T, dst, value, .seq_cst); } } @@ -226,13 +200,14 @@ fn __atomic_store_16(dst: *u128, value: u128, model: i32) callconv(.c) void { } fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T { - const WideAtomic = std.meta.Int(.unsigned, smallest_atomic_fetch_exch_size * 8); + comptime std.debug.assert(@sizeOf(T) < wide_update_size); + const WideAtomic = std.meta.Int(.unsigned, wide_update_size * 8); const addr = @intFromPtr(ptr); - const wide_addr = addr & ~(@as(T, smallest_atomic_fetch_exch_size) - 1); - const wide_ptr: *align(smallest_atomic_fetch_exch_size) WideAtomic = @alignCast(@as(*WideAtomic, @ptrFromInt(wide_addr))); + const wide_addr = addr & ~(@as(T, wide_update_size) - 1); + const wide_ptr: *align(wide_update_size) WideAtomic = @alignCast(@as(*WideAtomic, @ptrFromInt(wide_addr))); - const inner_offset = addr & (@as(T, smallest_atomic_fetch_exch_size) - 1); + const inner_offset = addr & (@as(T, wide_update_size) - 1); const inner_shift = @as(std.math.Log2Int(T), @intCast(inner_offset * 8)); const mask = @as(WideAtomic, std.math.maxInt(T)) << inner_shift; @@ -252,13 +227,9 @@ fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T { inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T { _ = model; - if (@sizeOf(T) > largest_atomic_size) { - var sl = spinlocks.get(@intFromPtr(ptr)); - defer sl.release(); - const value = ptr.*; - ptr.* = val; - return value; - } else if (@sizeOf(T) < smallest_atomic_fetch_exch_size) { + if (comptime std.atomic.Op.supported(.{ .rmw = .Xchg }, T)) { + return @atomicRmw(T, ptr, .Xchg, val, .seq_cst); + } else if (@sizeOf(T) < wide_update_size) { // Machine does not support this type, but it does support a larger type. 
const Updater = struct { fn update(new: T, old: T) T { @@ -268,7 +239,11 @@ inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T { }; return wideUpdate(T, ptr, val, Updater.update); } else { - return @atomicRmw(T, ptr, .Xchg, val, .seq_cst); + var sl = spinlocks.get(@intFromPtr(ptr)); + defer sl.release(); + const value = ptr.*; + ptr.* = val; + return value; } } @@ -302,7 +277,13 @@ inline fn atomic_compare_exchange_N( ) i32 { _ = success; _ = failure; - if (@sizeOf(T) > largest_atomic_size) { + if (comptime std.atomic.Op.supported(.{ .cmpxchg = .strong }, T)) { + if (@cmpxchgStrong(T, ptr, expected.*, desired, .seq_cst, .seq_cst)) |old_value| { + expected.* = old_value; + return 0; + } + return 1; + } else { var sl = spinlocks.get(@intFromPtr(ptr)); defer sl.release(); const value = ptr.*; @@ -312,12 +293,6 @@ inline fn atomic_compare_exchange_N( } expected.* = value; return 0; - } else { - if (@cmpxchgStrong(T, ptr, expected.*, desired, .seq_cst, .seq_cst)) |old_value| { - expected.* = old_value; - return 0; - } - return 1; } } @@ -359,19 +334,19 @@ inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr } }; - if (@sizeOf(T) > largest_atomic_size) { + if (comptime std.atomic.Op.supported(.{ .rmw = op }, T)) { + return @atomicRmw(T, ptr, op, val, .seq_cst); + } else if (@sizeOf(T) < wide_update_size) { + // Machine does not support this type, but it does support a larger type. + return wideUpdate(T, ptr, val, Updater.update); + } else { var sl = spinlocks.get(@intFromPtr(ptr)); defer sl.release(); const value = ptr.*; ptr.* = Updater.update(val, value); return value; - } else if (@sizeOf(T) < smallest_atomic_fetch_exch_size) { - // Machine does not support this type, but it does support a larger type. - return wideUpdate(T, ptr, val, Updater.update); } - - return @atomicRmw(T, ptr, op, val, .seq_cst); } fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 { diff --git a/lib/std/Target.zig b/lib/std/Target.zig index c75e2b51fba0..91a6bf49866e 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -1245,6 +1245,11 @@ pub const Cpu = struct { set.ints = @as(@Vector(usize_count, usize), set.ints) & ~@as(@Vector(usize_count, usize), other_set.ints); } + /// Removes all features that are not in the specified set. 
+ pub fn intersectFeatureSet(set: *Set, other_set: Set) void { + set.ints = @as(@Vector(usize_count, usize), set.ints) & @as(@Vector(usize_count, usize), other_set.ints); + } + pub fn populateDependencies(set: *Set, all_features_list: []const Cpu.Feature) void { @setEvalBranchQuota(1000000); @@ -1276,6 +1281,56 @@ pub const Cpu = struct { const other_v: V = other_set.ints; return @reduce(.And, (set_v & other_v) == other_v); } + + pub fn intersectsWith(set: Set, other_set: Set) bool { + const V = @Vector(usize_count, usize); + const set_v: V = set.ints; + const other_v: V = other_set.ints; + return @reduce(.Or, (set_v & other_v) != @as(V, @splat(0))); + } + + /// Formatter to print the feature set as a comma-separated list, ending with a conjunction + pub fn fmtList(set: Set, family: Arch.Family, conjunction: []const u8) FormatList { + return .{ .set = set, .family = family, .conjunction = conjunction }; + } + + pub const FormatList = struct { + set: Set, + conjunction: []const u8, + family: Arch.Family, + + pub fn format(fmt: @This(), writer: *std.Io.Writer) !void { + const BitSet = std.bit_set.ArrayBitSet(usize, Set.needed_bit_count); + const bit_set: BitSet = .{ .masks = fmt.set.ints }; + var it = bit_set.iterator(.{}); + var next = it.next(); + if (next == null) { + return writer.writeAll(""); + } + var i: usize = 0; + while (next) |feat| : (i += 1) { + next = it.next(); + + if (i > 1 or (i > 0 and next != null)) { + try writer.writeAll(", "); + } else if (i > 0) { + try writer.writeAll(" "); + } + if (i > 0 and next == null) { + try writer.print("{s} ", .{fmt.conjunction}); + } + + const name = switch (fmt.family) { + inline else => |family| blk: { + const FeatureEnum = @field(Target, @tagName(family)).Feature; + if (@typeInfo(FeatureEnum).@"enum".fields.len == 0) unreachable; + + const feat_enum: FeatureEnum = @enumFromInt(feat); + break :blk @tagName(feat_enum); + }, + }; + try writer.writeAll(name); + } + } + }; }; pub fn FeatureSetFns(comptime F: type) type { diff --git a/lib/std/atomic.zig b/lib/std/atomic.zig index 194f6459753e..62209b32b61e 100644 --- a/lib/std/atomic.zig +++ b/lib/std/atomic.zig @@ -481,6 +481,461 @@ test "current CPU has a cache line size" { _ = cache_line; } +pub const Op = union(enum) { + load, + store, + rmw: std.builtin.AtomicRmwOp, + cmpxchg: enum { weak, strong }, + + /// Check if the operation is supported on the given type. + pub fn supported(op: Op, comptime T: type) bool { + return op.supportedOnCpu(T, builtin.cpu); + } + /// Check if the operation is supported on the given type, on a specified CPU.
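+ /// For example, `supportedOnCpu(.{ .rmw = .Add }, u32, cpu)` reports whether `cpu` natively supports a 32-bit atomic add.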
+ pub fn supportedOnCpu(op: Op, comptime T: type, cpu: std.Target.Cpu) bool { + const valid_types = op.supportedTypes(); + const is_valid_type = switch (@typeInfo(T)) { + .bool => valid_types.bool, + .int => valid_types.integer, + .float => valid_types.float, + .@"enum" => valid_types.@"enum", + .error_set => valid_types.error_set, + .@"struct" => |s| s.layout == .@"packed" and valid_types.packed_struct, + + .optional => |opt| switch (@typeInfo(opt.child)) { + .pointer => |ptr| switch (ptr.size) { + .slice, .c => false, + .one, .many => !ptr.is_allowzero and valid_types.pointer, + }, + else => false, + }, + .pointer => |ptr| switch (ptr.size) { + .slice => false, + .one, .many, .c => valid_types.pointer, + }, + + else => false, + }; + if (!is_valid_type) return false; + + if (!std.math.isPowerOfTwo(@sizeOf(T))) return false; + const condition = op.supportedSizes(cpu.arch).get(@sizeOf(T)) orelse { + return false; + }; + + return condition.check(cpu.features); + } + + /// Get the set of sizes supported by this operation on the specified architecture. + // TODO: Audit this. I've done my best for the architectures I'm familiar with, but there's probably a lot that can be improved + pub fn supportedSizes(op: Op, arch: std.Target.Cpu.Arch) Sizes { + switch (arch) { + .avr, + .msp430, + => return .upTo(2, .always), + + .arc, + .hexagon, + .m68k, + .mips, + .mipsel, + .nvptx, + .or1k, + .powerpc, + .powerpcle, + .riscv32, + .xcore, + .kalimba, + .lanai, + .csky, + .spirv32, + .loongarch32, + .xtensa, + .propeller, + => return .upTo(4, .always), + + .bpfel, + .bpfeb, + .mips64, + .mips64el, + .nvptx64, + .powerpc64, + .powerpc64le, + .riscv64, + .s390x, + .ve, + .spirv64, + .loongarch64, + => return .upTo(8, .always), + + .amdgcn => switch (op) { + .load, .store, .cmpxchg => { + var sizes: Sizes = .none; + sizes.put(4, .always); + sizes.put(8, .always); + return sizes; + }, + // On AMDGCN, there are no instructions for atomic operations other than load and store + // (as of LLVM 15), and so these need to be implemented in terms of atomic CAS. + .rmw => return .none, + }, + + .sparc => { + const cas: Sizes = .upTo(4, .init(.sparc, .{ .require = &.{.hasleoncasa} })); + switch (op) { + .cmpxchg => return cas, + .load, .store => return .upTo(4, .always), + .rmw => |rmw| switch (rmw) { + .Xchg => return .upTo(4, .always), + else => return cas, // Implemented in terms of CASA + }, + } + }, + + .sparc64 => { + const cas: Sizes = .upTo(8, .init(.sparc, .{ .require = &.{.hasleoncasa} })); + switch (op) { + .cmpxchg => return cas, + .load, .store => return .upTo(8, .always), + .rmw => |rmw| switch (rmw) { + .Xchg => return .upTo(8, .always), + else => return cas, // Implemented in terms of CASXA + }, + } + }, + + .arm, .armeb, .thumb, .thumbeb => if (op == .cmpxchg) { + // The ARM v6m ISA has no ldrex/strex and so it's impossible to do CAS + // operations (unless we're targeting Linux, where the kernel provides a way to + // perform CAS operations). + // XXX: The Linux code path is not implemented yet.
+ return .upTo(4, .init(.arm, .{ .prohibit = &.{.has_v6m} })); + } else { + return .upTo(4, .always); + }, + + .aarch64, + .aarch64_be, + => return .upTo(16, .always), + + .wasm32, + .wasm64, + => { + if (op == .rmw) switch (op.rmw) { + .Xchg, + .Add, + .Sub, + .And, + .Or, + .Xor, + => {}, + + .Nand, + .Max, + .Min, + => return .none, // Not supported on wasm + }; + + return .upTo(8, .init(.wasm, .{ .require = &.{.atomics} })); + }, + + .x86 => { + var sizes: Sizes = .upTo(4, .always); + if (op == .cmpxchg) { + sizes.put(8, .init(.x86, .{ .require = &.{.cx8} })); + } + return sizes; + }, + + .x86_64 => { + var sizes: Sizes = .upTo(8, .always); + if (op == .cmpxchg) { + sizes.put(16, .init(.x86, .{ .require = &.{.cx16} })); + } + return sizes; + }, + } + } + + pub const Sizes = struct { + /// Bitset of supported sizes. If size `2^n` is present, `supported & (1 << n)` will be non-zero. + /// For each set bit, the corresponding entry in `feature_conditions` will be populated. + supported: BitsetInt, + /// For each set bit in `supported`, the corresponding entry here stores a `FeatureCondition` that indicates + /// the requirements on CPU features in order to support that size. For unset bits, the element is `undefined`. + feature_conditions: [bit_set_len]FeatureCondition, + + const bit_set_len = std.math.log2_int(usize, max_supported_size) + 1; + const BitsetInt = @Type(.{ .int = .{ + .signedness = .unsigned, + .bits = bit_set_len, + } }); + + pub fn isEmpty(sizes: Sizes) bool { + return sizes.supported == 0; + } + pub fn get(sizes: Sizes, size: u64) ?FeatureCondition { + if (size == 0) return .always; // 0-bit types are always atomic, because they only hold a single value + if (!std.math.isPowerOfTwo(size)) return null; + if (sizes.supported & size == 0) return null; + return sizes.feature_conditions[std.math.log2_int(u64, size)]; + } + + pub fn findMax(sizes: Sizes, features: std.Target.Cpu.Feature.Set) usize { + var bits = sizes.supported; + while (bits != 0) { + const max = std.math.log2_int(BitsetInt, bits); + const mask = @as(BitsetInt, 1) << max; + bits &= ~mask; + if (sizes.feature_conditions[max].check(features)) { + return mask; + } + } + return 0; + } + + pub fn findMin(sizes: Sizes, features: std.Target.Cpu.Feature.Set) usize { + var bits = sizes.supported; + while (bits != 0) { + const min = @ctz(bits); + const mask = @as(BitsetInt, 1) << @intCast(min); + bits &= ~mask; + if (sizes.feature_conditions[min].check(features)) { + return mask; + } + } + return 0; + } + + /// Prints the set as a list of possible sizes. + /// e.g.
`1, 2, 4, or 8` + pub fn formatPossibilities(sizes: Sizes, writer: *std.Io.Writer) !void { + if (sizes.supported == 0) { + return writer.writeAll(""); + } + + var bits = sizes.supported; + var count: usize = 0; + while (bits != 0) : (count += 1) { + const mask = @as(BitsetInt, 1) << @intCast(@ctz(bits)); + bits &= ~mask; + + if (count > 1 or (count > 0 and bits != 0)) { + try writer.writeAll(", "); + } else if (count > 0) { + try writer.writeAll(" "); + } + if (count > 0 and bits == 0) { + try writer.writeAll("or "); + } + + try writer.print("{d}", .{mask}); + } + } + + const none: Sizes = .{ + .supported = 0, + .feature_conditions = undefined, + }; + fn upTo(max: BitsetInt, condition: FeatureCondition) Sizes { + std.debug.assert(std.math.isPowerOfTwo(max)); + var sizes: Sizes = .{ + .supported = (max << 1) -% 1, + .feature_conditions = @splat(condition), + }; + + // Safety + const max_idx = std.math.log2_int(BitsetInt, max); + @memset(sizes.feature_conditions[max_idx + 1 ..], undefined); + + return sizes; + } + fn put(sizes: *Sizes, size: BitsetInt, condition: FeatureCondition) void { + sizes.supported |= size; + sizes.feature_conditions[std.math.log2_int(u64, size)] = condition; + } + }; + + pub const FeatureCondition = struct { + required: Set, + prohibited: Set, + const Set = std.Target.Cpu.Feature.Set; + + pub const always: FeatureCondition = .{ .required = .empty, .prohibited = .empty }; + + pub fn check(self: FeatureCondition, features: Set) bool { + return features.isSuperSetOf(self.required) and !features.intersectsWith(self.prohibited); + } + + fn init(comptime family: std.Target.Cpu.Arch.Family, opts: struct { + const Feature = @field(std.Target, @tagName(family)).Feature; + require: []const Feature = &.{}, + prohibit: []const Feature = &.{}, + }) FeatureCondition { + const ns = @field(std.Target, @tagName(family)); + return .{ + .required = ns.featureSet(opts.require), + .prohibited = ns.featureSet(opts.prohibit), + }; + } + }; + + /// The maximum size supported by any architecture + const max_supported_size = 16; + + pub fn format(op: Op, writer: *std.Io.Writer) !void { + switch (op) { + .load => try writer.writeAll("@atomicLoad"), + .store => try writer.writeAll("@atomicStore"), + .rmw => |rmw| try writer.print("@atomicRmw(.{s})", .{@tagName(rmw)}), + .cmpxchg => |strength| switch (strength) { + .weak => try writer.writeAll("@cmpxchgWeak"), + .strong => try writer.writeAll("@cmpxchgStrong"), + }, + } + } + + /// Returns a description of the kinds of types supported by this operation. + pub fn supportedTypes(op: Op) Types { + return switch (op) { + .load, .store => .{}, + .rmw => |rmw| switch (rmw) { + .Xchg => .{}, + .Add, .Sub, .Min, .Max => .{ + .bool = false, + .@"enum" = false, + .error_set = false, + }, + .And, .Nand, .Or, .Xor => .{ + .float = false, + .bool = false, + .@"enum" = false, + .error_set = false, + }, + }, + .cmpxchg => .{ + // floats are not supported for cmpxchg because float equality differs from bitwise equality + .float = false, + }, + }; + } + pub const Types = packed struct { + bool: bool = true, + integer: bool = true, + float: bool = true, + @"enum": bool = true, + error_set: bool = true, + packed_struct: bool = true, + pointer: bool = true, + + pub fn format(types: Types, writer: *std.Io.Writer) !void { + const bits: @typeInfo(Types).@"struct".backing_integer.?
= @bitCast(types); + var count = @popCount(bits); + inline for (@typeInfo(Types).@"struct".fields) |field| { + if (@field(types, field.name)) { + var name = field.name[0..].*; + std.mem.replaceScalar(u8, &name, '_', ' '); + try writer.writeAll(&name); + + count -= 1; + switch (count) { + 0 => {}, + 1 => try writer.writeAll(", or "), + else => try writer.writeAll(", "), + } + } + } + } + }; + + test supportedOnCpu { + const x86 = std.Target.x86; + try std.testing.expect( + supportedOnCpu(.load, u64, x86.cpu.x86_64.toCpu(.x86_64)), + ); + try std.testing.expect( + !supportedOnCpu(.{ .cmpxchg = .weak }, u128, x86.cpu.x86_64.toCpu(.x86_64)), + ); + try std.testing.expect( + supportedOnCpu(.{ .cmpxchg = .weak }, u128, x86.cpu.x86_64_v2.toCpu(.x86_64)), + ); + + const aarch64 = std.Target.aarch64; + try std.testing.expect( + supportedOnCpu(.load, u64, aarch64.cpu.generic.toCpu(.aarch64)), + ); + } + + test supportedSizes { + const sizes = supportedSizes(.{ .cmpxchg = .strong }, .x86); + + try std.testing.expect(sizes.get(4) != null); + try std.testing.expect(sizes.get(4).?.check(.empty)); + + try std.testing.expect(sizes.get(8) != null); + try std.testing.expect(!sizes.get(8).?.check(.empty)); + try std.testing.expect(sizes.get(8).?.check(std.Target.x86.featureSet(&.{.cx8}))); + + try std.testing.expect(sizes.get(16) == null); + } + + test "wasm only supports atomics when the feature is enabled" { + const cpu = std.Target.wasm.cpu; + try std.testing.expect( + !supportedOnCpu(.store, u32, cpu.mvp.toCpu(.wasm32)), + ); + try std.testing.expect( + supportedOnCpu(.store, u32, cpu.bleeding_edge.toCpu(.wasm32)), + ); + } + + test "wasm32 supports up to 64-bit atomics" { + const bleeding = std.Target.wasm.cpu.bleeding_edge.toCpu(.wasm32); + try std.testing.expect( + supportedOnCpu(.store, u64, bleeding), + ); + try std.testing.expect( + !supportedOnCpu(.store, u128, bleeding), + ); + + const sizes = supportedSizes(.{ .rmw = .Add }, .wasm32); + try std.testing.expect(sizes.supported == 0b1111); + } + + test "wasm32 doesn't support min, max, or nand RMW ops" { + const bleeding = std.Target.wasm.cpu.bleeding_edge.toCpu(.wasm32); + try std.testing.expect( + !supportedOnCpu(.{ .rmw = .Min }, u32, bleeding), + ); + try std.testing.expect( + !supportedOnCpu(.{ .rmw = .Max }, u32, bleeding), + ); + try std.testing.expect( + !supportedOnCpu(.{ .rmw = .Nand }, u32, bleeding), + ); + } + + test "x86_64 supports 128-bit cmpxchg with cx16 flag" { + const x86 = std.Target.x86; + const v2 = x86.cpu.x86_64_v2.toCpu(.x86_64); + try std.testing.expect( + supportedOnCpu(.{ .cmpxchg = .strong }, u128, v2), + ); + + const sizes = supportedSizes(.{ .cmpxchg = .strong }, .x86_64); + try std.testing.expect(sizes.get(16) != null); + try std.testing.expect(sizes.get(16).?.check(x86.featureSet(&.{.cx16}))); + try std.testing.expect(!sizes.get(16).?.check(.empty)); + } +}; + +test Op { + try std.testing.expect( + // Query atomic operation support for a specific CPU + Op.supportedOnCpu(.load, u64, std.Target.aarch64.cpu.generic.toCpu(.aarch64)), + ); + + // Query atomic operation support for the target CPU + _ = Op.supported(.load, u64); +} + const std = @import("std.zig"); const builtin = @import("builtin"); const AtomicOrder = std.builtin.AtomicOrder; diff --git a/src/Compilation.zig b/src/Compilation.zig index 3cbddee64bea..e144b83216d8 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3851,10 +3851,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { try comp.link_diags.addMessagesToBundle(&bundle, 
comp.bin_file); - const compile_log_text: []const u8 = compile_log_text: { - const zcu = comp.zcu orelse break :compile_log_text ""; - if (zcu.skip_analysis_this_update) break :compile_log_text ""; - if (zcu.compile_logs.count() == 0) break :compile_log_text ""; + var compile_log_text: std.ArrayListUnmanaged(u8) = .empty; + defer compile_log_text.deinit(gpa); + compile_log_text: { + const zcu = comp.zcu orelse break :compile_log_text; + if (zcu.skip_analysis_this_update) break :compile_log_text; + if (zcu.compile_logs.count() == 0) break :compile_log_text; // If there are no other errors, we include a "found compile log statement" error. // Otherwise, we just show the compile log output, with no error. @@ -3877,7 +3879,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { }); } - if (messages.items.len == 0) break :compile_log_text ""; + if (messages.items.len == 0) break :compile_log_text; // Okay, there *are* referenced compile logs. Sort them into a consistent order. @@ -3899,16 +3901,13 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { std.mem.sort(Zcu.ErrorMsg, messages.items, @as(SortContext, .{ .err = &sort_err, .zcu = zcu }), SortContext.lessThan); if (sort_err) |e| return e; - var log_text: std.ArrayListUnmanaged(u8) = .empty; - defer log_text.deinit(gpa); - // Index 0 will be the root message; the rest will be notes. // Only the actual message, i.e. index 0, will retain its reference trace. - try appendCompileLogLines(&log_text, zcu, messages.items[0].reference_trace_root.unwrap().?); + try appendCompileLogLines(&compile_log_text, zcu, messages.items[0].reference_trace_root.unwrap().?); messages.items[0].notes = messages.items[1..]; messages.items[0].msg = "found compile log statement"; for (messages.items[1..]) |*note| { - try appendCompileLogLines(&log_text, zcu, note.reference_trace_root.unwrap().?); + try appendCompileLogLines(&compile_log_text, zcu, note.reference_trace_root.unwrap().?); note.reference_trace_root = .none; // notes don't have reference traces note.msg = "also here"; } @@ -3918,9 +3917,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (include_compile_log_sources) { try addModuleErrorMsg(zcu, &bundle, messages.items[0], false); } - - break :compile_log_text try log_text.toOwnedSlice(gpa); - }; + } // TODO: eventually, this should be behind `std.debug.runtime_safety`. But right now, this is a // very common way for incremental compilation bugs to manifest, so let's always check it. @@ -3947,7 +3944,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } }; - return bundle.toOwnedBundle(compile_log_text); + return bundle.toOwnedBundle(compile_log_text.items); } /// Writes all compile log lines belonging to `logging_unit` into `log_text` using `zcu.gpa`. 
diff --git a/src/Sema.zig b/src/Sema.zig index 788107f786ed..35d9ab090a9f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -23581,6 +23581,8 @@ fn checkNumericType( fn checkAtomicPtrOperand( sema: *Sema, block: *Block, + atomic_op: std.atomic.Op, + atomic_op_src: LazySrcLoc, elem_ty: Type, elem_ty_src: LazySrcLoc, ptr: Air.Inst.Ref, @@ -23589,35 +23591,90 @@ fn checkAtomicPtrOperand( ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; - var diag: Zcu.AtomicPtrAlignmentDiagnostics = .{}; - const alignment = zcu.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.FloatTooBig => return sema.fail( + + if (sema.isInvalidAtomicType(atomic_op, elem_ty)) |why| { + const msg = msg: { + const msg = try sema.errMsg( + elem_ty_src, + "expected {f} type; found '{f}'", + .{ atomic_op.supportedTypes(), elem_ty.fmt(pt) }, + ); + errdefer msg.destroy(sema.gpa); + + if (why.len != 0) { + try sema.errNote(elem_ty_src, msg, "{s}", .{why}); + } + + try sema.addDeclaredHereNote(msg, elem_ty); + + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(block, msg); + } + + const target = zcu.getTarget(); + const size_support = atomic_op.supportedSizes(target.cpu.arch); + if (size_support.isEmpty()) { + return sema.fail( block, - elem_ty_src, - "expected {d}-bit float type or smaller; found {d}-bit float type", - .{ diag.max_bits, diag.bits }, - ), - error.IntTooBig => return sema.fail( + atomic_op_src, + "{f} is not supported on {s}", + .{ atomic_op, @tagName(target.cpu.arch) }, + ); + } + + const elem_size = elem_ty.abiSize(zcu); + var feature_condition = size_support.get(elem_size) orelse { + const msg = msg: { + const msg = try sema.errMsg( + elem_ty_src, + "{s} does not support {f} on this type", + .{ @tagName(target.cpu.arch), atomic_op }, + ); + errdefer msg.destroy(sema.gpa); + try sema.errNote( + elem_ty_src, + msg, + "size of type is {d}, but {f} on {s} requires a value of size {f}", + .{ elem_size, atomic_op, @tagName(target.cpu.arch), std.fmt.alt(size_support, .formatPossibilities) }, + ); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(block, msg); + }; + + feature_condition.prohibited.intersectFeatureSet(target.cpu.features); + feature_condition.required.removeFeatureSet(target.cpu.features); + if (!feature_condition.prohibited.isEmpty()) { + return sema.fail( block, - elem_ty_src, - "expected {d}-bit integer type or smaller; found {d}-bit integer type", - .{ diag.max_bits, diag.bits }, - ), - error.BadType => return sema.fail( + atomic_op_src, + "{d}-byte {f} on {s} cannot be used with the following CPU features: {f}", + .{ + elem_size, + atomic_op, + @tagName(target.cpu.arch), + feature_condition.prohibited.fmtList(target.cpu.arch.family(), "or"), + }, + ); + } + if (!feature_condition.required.isEmpty()) { + return sema.fail( block, - elem_ty_src, - "expected bool, integer, float, enum, packed struct, or pointer type; found '{f}'", - .{elem_ty.fmt(pt)}, - ), - }; + atomic_op_src, + "{d}-byte {f} on {s} requires the following missing CPU features: {f}", + .{ + elem_size, + atomic_op, + @tagName(target.cpu.arch), + feature_condition.required.fmtList(target.cpu.arch.family(), "and"), + }, + ); + } var wanted_ptr_data: InternPool.Key.PtrType = .{ .child = elem_ty.toIntern(), - .flags = .{ - .alignment = alignment, - .is_const = ptr_const, - }, + .flags = .{ .is_const = ptr_const }, }; const ptr_ty = sema.typeOf(ptr); @@ -23640,6 +23697,52 @@ fn checkAtomicPtrOperand( return casted_ptr; } +/// If the type 
is invalid for this atomic operation, returns an error note explaining why. +fn isInvalidAtomicType(sema: *Sema, op: std.atomic.Op, elem_ty: Type) ?[]const u8 { + const zcu = sema.pt.zcu; + const valid_types = op.supportedTypes(); + if (elem_ty.isRuntimeFloat()) { + if (!valid_types.float) { + switch (op) { + .load, .store => unreachable, + .rmw => return "@atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min", + .cmpxchg => return "floats are not supported for cmpxchg because float equality differs from bitwise equality", + } + } + } else if (elem_ty.isPtrAtRuntime(zcu)) { + // TODO: pointers are currently supported for things like rmw add, but maybe this shouldn't be the case? + std.debug.assert(valid_types.pointer); + } else switch (elem_ty.zigTypeTag(zcu)) { + .bool => if (!valid_types.bool) { + switch (op) { + .load, .store, .cmpxchg => unreachable, + .rmw => return "@atomicRmw with bool only allowed with .Xchg", + } + }, + .int => std.debug.assert(valid_types.integer), + .@"enum" => if (!valid_types.@"enum") { + switch (op) { + .load, .store, .cmpxchg => unreachable, + .rmw => return "@atomicRmw with enum only allowed with .Xchg", + } + }, + .error_set => if (!valid_types.error_set) { + switch (op) { + .load, .store, .cmpxchg => unreachable, + .rmw => return "@atomicRmw with error set only allowed with .Xchg", + } + }, + .@"struct" => if (elem_ty.containerLayout(zcu) != .@"packed") { + return ""; // No notes + } else { + std.debug.assert(valid_types.packed_struct); + }, + else => return "", // No notes + } + + return null; // All ok! +} + fn checkPtrIsNotComptimeMutable( sema: *Sema, block: *Block, @@ -23919,16 +24022,15 @@ fn zirCmpxchg( // zig fmt: on const expected_value = try sema.resolveInst(extra.expected_value); const elem_ty = sema.typeOf(expected_value); - if (elem_ty.zigTypeTag(zcu) == .float) { - return sema.fail( - block, - elem_ty_src, - "expected bool, integer, enum, packed struct, or pointer type; found '{f}'", - .{elem_ty.fmt(pt)}, - ); - } const uncasted_ptr = try sema.resolveInst(extra.ptr); - const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); + const op: std.atomic.Op = .{ + .cmpxchg = switch (air_tag) { + .cmpxchg_weak => .weak, + .cmpxchg_strong => .strong, + else => unreachable, + }, + }; + const ptr = try sema.checkAtomicPtrOperand(block, op, src, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const new_value = try sema.coerce(block, elem_ty, try sema.resolveInst(extra.new_value), new_value_src); const success_order = try sema.resolveAtomicOrder(block, success_order_src, extra.success_order, .{ .simple = .atomic_order }); const failure_order = try sema.resolveAtomicOrder(block, failure_order_src, extra.failure_order, .{ .simple = .atomic_order }); @@ -24428,6 +24530,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicLoad, inst_data.payload_index).data; + const src = block.nodeOffset(inst_data.src_node); // zig fmt: off const elem_ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ptr_src = block.builtinCallArgSrc(inst_data.src_node, 1); @@ -24435,7 +24538,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
// zig fmt: on const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type); const uncasted_ptr = try sema.resolveInst(extra.ptr); - const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, true); + const ptr = try sema.checkAtomicPtrOperand(block, .load, src, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, true); const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, .{ .simple = .atomic_order }); switch (order) { @@ -24460,7 +24563,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! } } - try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), ptr_src); + try sema.requireRuntimeBlock(block, src, ptr_src); return block.addInst(.{ .tag = .atomic_load, .data = .{ .atomic_load = .{ @@ -24486,22 +24589,8 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(extra.operand); const elem_ty = sema.typeOf(operand); const uncasted_ptr = try sema.resolveInst(extra.ptr); - const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation); - - switch (elem_ty.zigTypeTag(zcu)) { - .@"enum" => if (op != .Xchg) { - return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{}); - }, - .bool => if (op != .Xchg) { - return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{}); - }, - .float => switch (op) { - .Xchg, .Add, .Sub, .Max, .Min => {}, - else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min", .{}), - }, - else => {}, - } + const ptr = try sema.checkAtomicPtrOperand(block, .{ .rmw = op }, src, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, .{ .simple = .atomic_order }); if (order == .unordered) { @@ -24568,7 +24657,7 @@ fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const operand = try sema.resolveInst(extra.operand); const elem_ty = sema.typeOf(operand); const uncasted_ptr = try sema.resolveInst(extra.ptr); - const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); + const ptr = try sema.checkAtomicPtrOperand(block, .store, src, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, .{ .simple = .atomic_order }); const air_tag: Air.Inst.Tag = switch (order) { diff --git a/src/Zcu.zig b/src/Zcu.zig index df35777231e5..3f9fb31170bf 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -3768,112 +3768,6 @@ pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool { return target_util.backendSupportsFeature(backend, feature); } -pub const AtomicPtrAlignmentError = error{ - FloatTooBig, - IntTooBig, - BadType, - OutOfMemory, -}; - -pub const AtomicPtrAlignmentDiagnostics = struct { - bits: u16 = undefined, - max_bits: u16 = undefined, -}; - -/// If ABI alignment of `ty` is OK for atomic operations, returns 0. -/// Otherwise returns the alignment required on a pointer for the target -/// to perform atomic operations. -// TODO this function does not take into account CPU features, which can affect -// this value. Audit this! 
-pub fn atomicPtrAlignment( - zcu: *Zcu, - ty: Type, - diags: *AtomicPtrAlignmentDiagnostics, -) AtomicPtrAlignmentError!Alignment { - const target = zcu.getTarget(); - const max_atomic_bits: u16 = switch (target.cpu.arch) { - .avr, - .msp430, - => 16, - - .arc, - .arm, - .armeb, - .hexagon, - .m68k, - .mips, - .mipsel, - .nvptx, - .or1k, - .powerpc, - .powerpcle, - .riscv32, - .sparc, - .thumb, - .thumbeb, - .x86, - .xcore, - .kalimba, - .lanai, - .wasm32, - .csky, - .spirv32, - .loongarch32, - .xtensa, - .propeller, - => 32, - - .amdgcn, - .bpfel, - .bpfeb, - .mips64, - .mips64el, - .nvptx64, - .powerpc64, - .powerpc64le, - .riscv64, - .sparc64, - .s390x, - .wasm64, - .ve, - .spirv64, - .loongarch64, - => 64, - - .aarch64, - .aarch64_be, - => 128, - - .x86_64 => if (target.cpu.has(.x86, .cx16)) 128 else 64, - }; - - if (ty.toIntern() == .bool_type) return .none; - if (ty.isRuntimeFloat()) { - const bit_count = ty.floatBits(target); - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.FloatTooBig; - } - return .none; - } - if (ty.isAbiInt(zcu)) { - const bit_count = ty.intInfo(zcu).bits; - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.IntTooBig; - } - return .none; - } - if (ty.isPtrAtRuntime(zcu)) return .none; - return error.BadType; -} - /// Returns null in the following cases: /// * Not a struct. pub fn typeToStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType { diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index cda8b5f03317..8cb36ebe4178 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -3,13 +3,8 @@ const builtin = @import("builtin"); const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; -const supports_128_bit_atomics = switch (builtin.cpu.arch) { - // TODO: Ideally this could be sync'd with the logic in Sema. - .aarch64 => true, - .aarch64_be => false, // Fails due to LLVM issues. 
- .x86_64 => builtin.cpu.has(.x86, .cx16), - else => false, -}; +const supports_128_bit_cmpxchg = std.atomic.Op.supported(.{ .cmpxchg = .weak }, u128); +const supports_128_bit_rmw = std.atomic.Op.supported(.{ .rmw = .Xchg }, u128); test "cmpxchg" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; @@ -108,7 +103,7 @@ test "cmpxchg with ignored result" { test "128-bit cmpxchg" { // TODO: this must appear first - if (!supports_128_bit_atomics) return error.SkipZigTest; + if (!supports_128_bit_cmpxchg) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO @@ -288,7 +283,7 @@ fn testAtomicRmwInt(comptime signedness: std.builtin.Signedness, comptime N: usi test "atomicrmw with 128-bit ints" { // TODO: this must appear first - if (!supports_128_bit_atomics) return error.SkipZigTest; + if (!supports_128_bit_rmw) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; diff --git a/test/cases/compile_errors/atomic_missing_features_wasm32.zig b/test/cases/compile_errors/atomic_missing_features_wasm32.zig new file mode 100644 index 000000000000..67b68cc9a1d9 --- /dev/null +++ b/test/cases/compile_errors/atomic_missing_features_wasm32.zig @@ -0,0 +1,9 @@ +export fn entry() void { + var x: u32 = 0; + _ = @atomicLoad(u32, &x, .monotonic); +} + +// error +// target=wasm32-freestanding:baseline +// +// :3:16: error: 4-byte @atomicLoad on wasm32 requires the following missing CPU features: atomics diff --git a/test/cases/compile_errors/atomic_missing_features_x86_64.zig b/test/cases/compile_errors/atomic_missing_features_x86_64.zig new file mode 100644 index 000000000000..2e32559403f0 --- /dev/null +++ b/test/cases/compile_errors/atomic_missing_features_x86_64.zig @@ -0,0 +1,9 @@ +export fn entry() void { + var x: u128 = 0; + @cmpxchgWeak(u128, &x, 1, 2, .monotonic, .monotonic); +} + +// error +// target=x86_64-native:baseline +// +// :3:16: error: 16-byte @cmpxchgWeak on x86_64 requires the following missing CPU features: cx16 diff --git a/test/cases/compile_errors/atomic_unsupported_op_wasm.zig b/test/cases/compile_errors/atomic_unsupported_op_wasm.zig new file mode 100644 index 000000000000..9190a25a16f4 --- /dev/null +++ b/test/cases/compile_errors/atomic_unsupported_op_wasm.zig @@ -0,0 +1,9 @@ +export fn entry() void { + var x: u32 = 0; + @atomicRmw(u32, &x, .Min, 1, .monotonic); +} + +// error +// target=wasm32-freestanding:bleeding_edge +// +// :3:16: error: @atomicRmw(.Min) is not supported on wasm32 diff --git a/test/cases/compile_errors/atomic_unsupported_sizes_aarch64.zig b/test/cases/compile_errors/atomic_unsupported_sizes_aarch64.zig new file mode 100644 index 000000000000..3cd123c71bdc --- /dev/null +++ b/test/cases/compile_errors/atomic_unsupported_sizes_aarch64.zig @@ -0,0 +1,14 @@ +export fn valid() void { + var x: u128 = 0; + _ = @atomicRmw(u128, &x, .Xchg, 1, .monotonic); +} +export fn invalid() void { + var x: u256 = 0; + _ = @atomicRmw(u256, &x, .Xchg, 1, .monotonic); +} + +// error +// target=aarch64-native +// +// :7:16: error: aarch64 does not support @atomicRmw(.Xchg) on this type +// :7:16: note: size of type is 32, but @atomicRmw(.Xchg) on aarch64 requires a value of size 1, 2, 4, 8, or 16 diff --git a/test/cases/compile_errors/atomic_unsupported_sizes_x86_64.zig b/test/cases/compile_errors/atomic_unsupported_sizes_x86_64.zig new file mode 100644 index 000000000000..09e7bf2a45f5 --- /dev/null +++ 
b/test/cases/compile_errors/atomic_unsupported_sizes_x86_64.zig @@ -0,0 +1,14 @@ +export fn rmw() void { + var x: u128 = 0; + _ = @atomicRmw(u128, &x, .Xchg, 1, .monotonic); +} +export fn cmpxchg() void { + var x: u256 = 0; + _ = @cmpxchgWeak(u256, &x, 0, 1, .monotonic, .monotonic); +} + +// error +// target=x86_64-native:x86_64_v2 +// +// :3:16: error: x86_64 does not support @atomicRmw(.Xchg) on this type +// :3:16: note: size of type is 16, but @atomicRmw(.Xchg) on x86_64 requires a value of size 1, 2, 4, or 8 +// :7:16: error: x86_64 does not support @cmpxchgWeak on this type +// :7:16: note: size of type is 32, but @cmpxchgWeak on x86_64 requires a value of size 1, 2, 4, 8, or 16 diff --git a/test/cases/compile_errors/atomicrmw_with_bool_op_not_.Xchg.zig b/test/cases/compile_errors/atomicrmw_with_bool_op_not_.Xchg.zig index 04e448b8651b..5b1b12898baf 100644 --- a/test/cases/compile_errors/atomicrmw_with_bool_op_not_.Xchg.zig +++ b/test/cases/compile_errors/atomicrmw_with_bool_op_not_.Xchg.zig @@ -4,7 +4,6 @@ export fn entry() void { } // error -// backend=stage2 -// target=native // -// :3:31: error: @atomicRmw with bool only allowed with .Xchg +// :3:20: error: expected integer, float, packed struct, or pointer type; found 'bool' +// :3:20: note: @atomicRmw with bool only allowed with .Xchg diff --git a/test/cases/compile_errors/atomicrmw_with_enum_op_not_.Xchg.zig b/test/cases/compile_errors/atomicrmw_with_enum_op_not_.Xchg.zig index 2725d07c2fe4..5953acb9fc17 100644 --- a/test/cases/compile_errors/atomicrmw_with_enum_op_not_.Xchg.zig +++ b/test/cases/compile_errors/atomicrmw_with_enum_op_not_.Xchg.zig @@ -1,16 +1,16 @@ +const E = enum(u8) { + a, + b, + c, + d, +}; export fn entry() void { - const E = enum(u8) { - a, - b, - c, - d, - }; var x: E = .a; _ = @atomicRmw(E, &x, .Add, .b, .seq_cst); } // error -// backend=stage2 -// target=native // -// :9:28: error: @atomicRmw with enum only allowed with .Xchg +// :9:20: error: expected integer, float, packed struct, or pointer type; found 'tmp.E' +// :9:20: note: @atomicRmw with enum only allowed with .Xchg +// :1:11: note: enum declared here diff --git a/test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_.Sub_.Max_or_.Min.zig b/test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_.Sub_.Max_or_.Min.zig index c877eea8d2cd..2f4aea34fe75 100644 --- a/test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_.Sub_.Max_or_.Min.zig +++ b/test/cases/compile_errors/atomicrmw_with_float_op_not_.Xchg_.Add_.Sub_.Max_or_.Min.zig @@ -4,7 +4,6 @@ export fn entry() void { } // error -// backend=stage2 -// target=native // -// :3:30: error: @atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min +// :3:20: error: expected integer, packed struct, or pointer type; found 'f32' +// :3:20: note: @atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min diff --git a/test/cases/compile_errors/atomics_with_invalid_type.zig b/test/cases/compile_errors/atomics_with_invalid_type.zig index 321cda365566..9001e4581351 100644 --- a/test/cases/compile_errors/atomics_with_invalid_type.zig +++ b/test/cases/compile_errors/atomics_with_invalid_type.zig @@ -1,18 +1,24 @@ -export fn float() void { +export fn floatCmpxchg() void { var x: f32 = 0; _ = @cmpxchgWeak(f32, &x, 1, 2, .seq_cst, .seq_cst); } const NormalStruct = struct { x: u32 }; -export fn normalStruct() void { - var x: NormalStruct = 0; +export fn normalStructCmpxchg() void { + var x: NormalStruct = .{ .x = 0 }; _ = @cmpxchgWeak(NormalStruct, &x, .{ .x = 1 }, .{ .x = 2 }, .seq_cst, .seq_cst); } +export fn normalStructLoad() void { + var x: NormalStruct = .{ .x = 0 }; + _ = @atomicLoad(NormalStruct, &x, 
.seq_cst); +} + // error -// backend=stage2 -// target=native // -// :3:22: error: expected bool, integer, enum, packed struct, or pointer type; found 'f32' -// :8:27: error: expected type 'tmp.NormalStruct', found 'comptime_int' +// :3:22: error: expected bool, integer, enum, error set, packed struct, or pointer type; found 'f32' +// :3:22: note: floats are not supported for cmpxchg because float equality differs from bitwise equality +// :9:22: error: expected bool, integer, enum, error set, packed struct, or pointer type; found 'tmp.NormalStruct' +// :6:22: note: struct declared here +// :14:21: error: expected bool, integer, float, enum, error set, packed struct, or pointer type; found 'tmp.NormalStruct' // :6:22: note: struct declared here diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 522fe6b38557..1a19cb8f561a 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -1071,7 +1071,14 @@ const TestManifest = struct { fn getDefaultParser(comptime T: type) ParseFn(T) { if (T == std.Target.Query) return struct { fn parse(str: []const u8) anyerror!T { - return std.Target.Query.parse(.{ .arch_os_abi = str }); + if (std.mem.indexOfScalar(u8, str, ':')) |idx| { + return std.Target.Query.parse(.{ + .arch_os_abi = str[0..idx], + .cpu_features = str[idx + 1 ..], + }); + } else { + return std.Target.Query.parse(.{ .arch_os_abi = str }); + } } }.parse;