Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement writeToMemory/readFromMemory for pointers #11734

Merged
merged 1 commit into from
Feb 19, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 30 additions & 21 deletions src/Sema.zig
Original file line number Diff line number Diff line change
Expand Up @@ -2527,7 +2527,7 @@ fn coerceResultPtr(
_ = try block.addBinOp(.store, new_ptr, null_inst);
return Air.Inst.Ref.void_value;
}
return sema.bitCast(block, ptr_ty, new_ptr, src);
return sema.bitCast(block, ptr_ty, new_ptr, src, null);
}

const trash_inst = trash_block.instructions.pop();
Expand All @@ -2543,7 +2543,7 @@ fn coerceResultPtr(
if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
new_ptr = try sema.addConstant(ptr_operand_ty, ptr_val);
} else {
new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src);
new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src, null);
}
},
.wrap_optional => {
Expand Down Expand Up @@ -9557,7 +9557,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
.Vector,
=> {},
}
return sema.bitCast(block, dest_ty, operand, operand_src);
return sema.bitCast(block, dest_ty, operand, inst_data.src(), operand_src);
}

fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
Expand Down Expand Up @@ -9775,7 +9775,7 @@ fn zirSwitchCapture(

switch (operand_ty.zigTypeTag()) {
.ErrorSet => if (block.switch_else_err_ty) |some| {
return sema.bitCast(block, some, operand, operand_src);
return sema.bitCast(block, some, operand, operand_src, null);
} else {
try block.addUnreachable(false);
return Air.Inst.Ref.unreachable_value;
Expand Down Expand Up @@ -9875,14 +9875,14 @@ fn zirSwitchCapture(
Module.ErrorSet.sortNames(&names);
const else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names);

return sema.bitCast(block, else_error_ty, operand, operand_src);
return sema.bitCast(block, else_error_ty, operand, operand_src, null);
} else {
const item_ref = try sema.resolveInst(items[0]);
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;

const item_ty = try Type.Tag.error_set_single.create(sema.arena, item_val.getError().?);
return sema.bitCast(block, item_ty, operand, operand_src);
return sema.bitCast(block, item_ty, operand, operand_src, null);
}
},
else => {
Expand Down Expand Up @@ -19839,7 +19839,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
} else is_aligned;
try sema.addSafetyCheck(block, ok, .incorrect_alignment);
}
return sema.bitCast(block, dest_ty, ptr, ptr_src);
return sema.bitCast(block, dest_ty, ptr, ptr_src, null);
}

fn zirBitCount(
Expand Down Expand Up @@ -24026,8 +24026,9 @@ fn unionFieldVal(
return sema.addConstant(field.ty, tag_and_val.val);
} else {
const old_ty = union_ty.unionFieldType(tag_and_val.tag, sema.mod);
const new_val = try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0);
return sema.addConstant(field.ty, new_val);
if (try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0)) |new_val| {
return sema.addConstant(field.ty, new_val);
}
}
},
}
Expand Down Expand Up @@ -26378,8 +26379,12 @@ fn storePtrVal(
const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer);
operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]);
reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) {
error.ReinterpretDeclRef => unreachable,
};
operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) {
error.ReinterpretDeclRef => unreachable,
};

const arena = mut_kit.beginArena(sema.mod);
defer mut_kit.finishArena(sema.mod);
Expand Down Expand Up @@ -27262,6 +27267,7 @@ fn bitCast(
dest_ty_unresolved: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
operand_src: ?LazySrcLoc,
) CompileError!Air.Inst.Ref {
const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved);
try sema.resolveTypeLayout(dest_ty);
Expand All @@ -27283,10 +27289,11 @@ fn bitCast(
}

if (try sema.resolveMaybeUndefVal(inst)) |val| {
const result_val = try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0);
return sema.addConstant(dest_ty, result_val);
if (try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0)) |result_val| {
return sema.addConstant(dest_ty, result_val);
}
}
try sema.requireRuntimeBlock(block, inst_src, null);
try sema.requireRuntimeBlock(block, inst_src, operand_src);
return block.addBitCast(dest_ty, inst);
}

Expand All @@ -27298,7 +27305,7 @@ fn bitCastVal(
old_ty: Type,
new_ty: Type,
buffer_offset: usize,
) !Value {
) !?Value {
const target = sema.mod.getTarget();
if (old_ty.eql(new_ty, sema.mod)) return val;

Expand All @@ -27307,8 +27314,10 @@ fn bitCastVal(
const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
val.writeToMemory(old_ty, sema.mod, buffer);
return Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena);
val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) {
error.ReinterpretDeclRef => return null,
};
return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena);
}

fn coerceArrayPtrToSlice(
Expand Down Expand Up @@ -27415,7 +27424,7 @@ fn coerceCompatiblePtrs(
} else is_non_zero;
try sema.addSafetyCheck(block, ok, .cast_to_null);
}
return sema.bitCast(block, dest_ty, inst, inst_src);
return sema.bitCast(block, dest_ty, inst, inst_src, null);
}

fn coerceEnumToUnion(
Expand Down Expand Up @@ -28155,7 +28164,7 @@ fn analyzeRef(
try sema.storePtr(block, src, alloc, operand);

// TODO: Replace with sema.coerce when that supports adding pointer constness.
return sema.bitCast(block, ptr_type, alloc, src);
return sema.bitCast(block, ptr_type, alloc, src, null);
}

fn analyzeLoad(
Expand Down Expand Up @@ -32168,11 +32177,11 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value

// Try the smaller bit-cast first, since that's more efficient than using the larger `parent`
if (deref.pointee) |tv| if (load_sz <= try sema.typeAbiSize(tv.ty))
return DerefResult{ .val = try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0) };
return DerefResult{ .val = (try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0)) orelse return .runtime_load };

// If that fails, try to bit-cast from the largest parent value with a well-defined layout
if (deref.parent) |parent| if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent.tv.ty))
return DerefResult{ .val = try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset) };
return DerefResult{ .val = (try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset)) orelse return .runtime_load };

if (deref.ty_without_well_defined_layout) |bad_ty| {
// We got no parent for bit-casting, or the parent we got was too small. Either way, the problem
Expand Down
4 changes: 2 additions & 2 deletions src/arch/wasm/CodeGen.zig
Original file line number Diff line number Diff line change
Expand Up @@ -2896,7 +2896,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.layout == .Packed);
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0);
val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
var payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = std.mem.readIntLittle(u64, &buf),
Expand All @@ -2907,7 +2907,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
.Vector => {
assert(determineSimdStoreStrategy(ty, target) == .direct);
var buf: [16]u8 = undefined;
val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf);
val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable;
return func.storeSimdImmd(buf);
},
else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}),
Expand Down
2 changes: 1 addition & 1 deletion src/codegen.zig
Original file line number Diff line number Diff line change
Expand Up @@ -527,7 +527,7 @@ pub fn generateSymbol(
.fail => |em| return Result{ .fail = em },
}
} else {
field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits);
field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
}
bits += @intCast(u16, field_ty.bitSize(target));
}
Expand Down
49 changes: 39 additions & 10 deletions src/value.zig
Original file line number Diff line number Diff line change
Expand Up @@ -1249,11 +1249,22 @@ pub const Value = extern union {
};
}

/// Reports whether `val` is (or wraps) a reference to a Decl, a mutable
/// comptime Decl, a variable, or a comptime field pointer. Wrapper pointer
/// values (`field_ptr`, `elem_ptr`, `eu_payload_ptr`, `opt_payload_ptr`)
/// are unwrapped iteratively until a base value is reached. Callers use
/// this to detect pointer values whose bytes cannot be reinterpreted
/// (see the `error.ReinterpretDeclRef` paths in `writeToMemory`).
fn isDeclRef(val: Value) bool {
    var cur = val;
    while (true) {
        switch (cur.tag()) {
            // Base cases that directly name a Decl or comptime storage.
            .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return true,
            // Wrapper pointers: step to the container they point into.
            .field_ptr => cur = cur.castTag(.field_ptr).?.data.container_ptr,
            .elem_ptr => cur = cur.castTag(.elem_ptr).?.data.array_ptr,
            .eu_payload_ptr, .opt_payload_ptr => cur = cur.cast(Value.Payload.PayloadPtr).?.data.container_ptr,
            // Anything else is a plain value, not a Decl reference.
            else => return false,
        }
    }
}

/// Write a Value's contents to `buffer`.
///
/// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past
/// the end of the value in memory.
pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) void {
pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ReinterpretDeclRef}!void {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef()) {
Expand Down Expand Up @@ -1309,15 +1320,15 @@ pub const Value = extern union {
var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) {
const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf);
elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
buf_off += elem_size;
}
},
.Vector => {
// We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems.
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
.Struct => switch (ty.containerLayout()) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
Expand All @@ -1326,12 +1337,12 @@ pub const Value = extern union {
const field_vals = val.castTag(.aggregate).?.data;
for (fields) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target));
writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
}
},
.Packed => {
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
},
.ErrorSet => {
Expand All @@ -1345,9 +1356,14 @@ pub const Value = extern union {
.Extern => @panic("TODO implement writeToMemory for extern unions"),
.Packed => {
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
},
.Pointer => {
assert(!ty.isSlice()); // No well defined layout.
if (val.isDeclRef()) return error.ReinterpretDeclRef;
return val.writeToMemory(Type.usize, mod, buffer);
},
else => @panic("TODO implement writeToMemory for more types"),
}
}
Expand All @@ -1356,7 +1372,7 @@ pub const Value = extern union {
///
/// Both the start and the end of the provided buffer must be tight, since
/// big-endian packed memory layouts start at the end of the buffer.
pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) void {
pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) error{ReinterpretDeclRef}!void {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef()) {
Expand Down Expand Up @@ -1420,7 +1436,7 @@ pub const Value = extern union {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i;
const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf);
elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
bits += elem_bit_size;
}
},
Expand All @@ -1433,7 +1449,7 @@ pub const Value = extern union {
const field_vals = val.castTag(.aggregate).?.data;
for (fields) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(target));
field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
bits += field_bits;
}
},
Expand All @@ -1446,9 +1462,14 @@ pub const Value = extern union {
const field_type = ty.unionFields().values()[field_index.?].ty;
const field_val = val.fieldValue(field_type, field_index.?);

field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
},
},
.Pointer => {
assert(!ty.isSlice()); // No well defined layout.
if (val.isDeclRef()) return error.ReinterpretDeclRef;
return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
},
else => @panic("TODO implement writeToPackedMemory for more types"),
}
}
Expand Down Expand Up @@ -1553,6 +1574,10 @@ pub const Value = extern union {
};
return Value.initPayload(&payload.base);
},
.Pointer => {
assert(!ty.isSlice()); // No well defined layout.
return readFromMemory(Type.usize, mod, buffer, arena);
},
else => @panic("TODO implement readFromMemory for more types"),
}
}
Expand Down Expand Up @@ -1640,6 +1665,10 @@ pub const Value = extern union {
return Tag.aggregate.create(arena, field_vals);
},
},
.Pointer => {
assert(!ty.isSlice()); // No well defined layout.
return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
},
else => @panic("TODO implement readFromPackedMemory for more types"),
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,4 @@ export fn entry(byte: u8) void {
// backend=stage2
// target=native
//
// :2:29: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits
// :2:16: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,4 @@ export fn entry() void {
// backend=stage2
// target=native
//
// :2:29: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits
// :2:16: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits