add casting docs, __extenddftf2, and __extendsftf2
andrewrk committed Jun 21, 2018
1 parent 47dd104 commit 5f38d6e
Showing 8 changed files with 384 additions and 18 deletions.
1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -558,6 +558,7 @@ set(ZIG_STD_FILES
"special/compiler_rt/aullrem.zig"
"special/compiler_rt/comparetf2.zig"
"special/compiler_rt/divti3.zig"
"special/compiler_rt/extendXfYf2.zig"
"special/compiler_rt/fixuint.zig"
"special/compiler_rt/fixunsdfdi.zig"
"special/compiler_rt/fixunsdfsi.zig"
171 changes: 156 additions & 15 deletions doc/langref.html.in
@@ -3573,14 +3573,161 @@ const optional_value: ?i32 = null;
{#header_close#}
{#header_close#}
{#header_open|Casting#}
<p>TODO: explain implicit vs explicit casting</p>
<p>TODO: resolve peer types builtin</p>
<p>TODO: truncate builtin</p>
<p>TODO: bitcast builtin</p>
<p>TODO: int to ptr builtin</p>
<p>TODO: ptr to int builtin</p>
<p>TODO: ptrcast builtin</p>
<p>TODO: explain number literals vs concrete types</p>
<p>
A <strong>type cast</strong> converts a value of one type to another.
Zig has {#link|Implicit Casts#} for conversions that are known to be completely safe and unambiguous,
and {#link|Explicit Casts#} for conversions that one would not want to happen by accident.
There is also a third kind of type conversion called {#link|Peer Type Resolution#} for
the case when a result type must be decided given multiple operand types.
</p>
{#header_open|Implicit Casts#}
<p>
An implicit cast occurs when one type is expected, but a different type is provided:
</p>
{#code_begin|test#}
test "implicit cast - variable declaration" {
var a: u8 = 1;
var b: u16 = a;
}

test "implicit cast - function call" {
var a: u8 = 1;
foo(a);
}

fn foo(b: u16) void {}

test "implicit cast - invoke a type as a function" {
var a: u8 = 1;
var b = u16(a);
}
{#code_end#}
{#header_open|Implicit Cast: Stricter Qualification#}
<p>
Values which have the same representation at runtime can be cast to increase the strictness
of the qualifiers, no matter how nested the qualifiers are:
</p>
<ul>
<li><code>const</code> - non-const to const is allowed</li>
<li><code>volatile</code> - non-volatile to volatile is allowed</li>
<li><code>align</code> - bigger to smaller alignment is allowed </li>
<li>{#link|error sets|Error Set Type#} to supersets is allowed</li>
</ul>
<p>
These casts are no-ops at runtime since the value representation does not change.
</p>
{#code_begin|test#}
test "implicit cast - const qualification" {
var a: i32 = 1;
var b: *i32 = &a;
foo(b);
}

fn foo(a: *const i32) void {}
{#code_end#}
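<p>
The error set rule in the list above can be sketched the same way. This is a minimal
illustration; the error set names are made up for the example:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

const SmallErrorSet = error{OutOfMemory};
const LargeErrorSet = error{OutOfMemory, FileNotFound};

test "implicit cast - error set to superset" {
    const err: SmallErrorSet = error.OutOfMemory;
    // the superset can hold every error of the subset, so this cast is a no-op
    const superset: LargeErrorSet = err;
    assert(superset == error.OutOfMemory);
}
{#code_end#}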
<p>
In addition, pointers implicitly cast to const optional pointers:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;

test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
const window_name = [1][*]const u8{c"window name"};
const x: [*]const ?[*]const u8 = &window_name;
assert(mem.eql(u8, std.cstr.toSliceConst(x[0].?), "window name"));
}
{#code_end#}
{#header_close#}
{#header_open|Implicit Cast: Integer and Float Widening#}
<p>
{#link|Integers#} implicitly cast to integer types which can represent every value of the old type, and likewise
{#link|Floats#} implicitly cast to float types which can represent every value of the old type.
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;

test "integer widening" {
var a: u8 = 250;
var b: u16 = a;
var c: u32 = b;
var d: u64 = c;
var e: u64 = d;
var f: u128 = e;
assert(f == a);
}

test "implicit unsigned integer to signed integer" {
var a: u8 = 250;
var b: i16 = a;
assert(b == 250);
}

test "float widening" {
var a: f32 = 12.34;
var b: f64 = a;
var c: f128 = b;
assert(c == a);
}
{#code_end#}
{#header_close#}
{#header_open|Implicit Cast: Arrays#}
<p>TODO: [N]T to []const T</p>
<p>TODO: *const [N]T to []const T</p>
<p>TODO: [N]T to *const []const T</p>
<p>TODO: [N]T to ?[]const T</p>
<p>TODO: *[N]T to []T</p>
<p>TODO: *[N]T to [*]T</p>
<p>TODO: *T to *[1]T</p>
<p>TODO: [N]T to E![]const T</p>
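<p>
As a minimal sketch of the first case listed above, an array implicitly casts to a const slice:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

test "implicit cast - [N]T to []const T" {
    // a fixed-size array implicitly casts to a const slice of the same element type
    const array = []u8{ 1, 2, 3 };
    const slice: []const u8 = array;
    assert(slice.len == 3);
    assert(slice[1] == 2);
}
{#code_end#}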
{#header_close#}
{#header_open|Implicit Cast: Optionals#}
<p>TODO: T to ?T</p>
<p>TODO: T to E!?T</p>
<p>TODO: null to ?T</p>
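<p>
A minimal sketch of two of the cases listed above:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

test "implicit cast - T to ?T" {
    const x: i32 = 1234;
    // a plain value implicitly casts to an optional holding that value
    const y: ?i32 = x;
    assert(y.? == 1234);
}

test "implicit cast - null to ?T" {
    // null implicitly casts to any optional type
    const y: ?i32 = null;
    assert(y == null);
}
{#code_end#}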
{#header_close#}
{#header_open|Implicit Cast: T to E!T#}
<p>TODO</p>
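<p>
A minimal sketch of this cast:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

test "implicit cast - T to E!T" {
    // a payload value implicitly casts to an error union of that payload type
    const x: error!i32 = 1234;
    const y = x catch 0;
    assert(y == 1234);
}
{#code_end#}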
{#header_close#}
{#header_open|Implicit Cast: E to E!T#}
<p>TODO</p>
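<p>
A minimal sketch of this cast; the error name is made up for the example:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

test "implicit cast - E to E!T" {
    // an error value implicitly casts to an error union carrying that error
    const x: error!i32 = error.Failure;
    const y = x catch 42;
    assert(y == 42);
}
{#code_end#}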
{#header_close#}
{#header_open|Implicit Cast: comptime_int to *const integer#}
<p>TODO</p>
{#header_close#}
{#header_open|Implicit Cast: comptime_float to *const float#}
<p>TODO</p>
{#header_close#}
{#header_open|Implicit Cast: compile-time known numbers#}
<p>TODO</p>
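<p>
A minimal sketch of this rule, assuming the value is compile-time known and fits in the
destination type:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

test "implicit cast - compile-time known number fits" {
    const x: u64 = 255;
    // x is compile-time known and fits in a u8, so the cast is accepted
    const y: u8 = x;
    assert(y == 255);
}
{#code_end#}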
{#header_close#}
{#header_open|Implicit Cast: union to enum#}
<p>TODO</p>
{#header_close#}
{#header_open|Implicit Cast: enum to union#}
<p>TODO</p>
{#header_close#}
{#header_open|Implicit Cast: T to *T when @sizeOf(T) == 0#}
<p>TODO</p>
{#header_close#}
{#header_open|Implicit Cast: undefined#}
<p>TODO</p>
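<p>
A minimal sketch:
</p>
{#code_begin|test#}
test "implicit cast - undefined" {
    // undefined implicitly casts to any type
    var x: i32 = undefined;
    x = 1234;
}
{#code_end#}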
{#header_close#}
{#header_open|Implicit Cast: T to *const T#}
<p>TODO</p>
{#header_close#}
{#header_close#}

{#header_open|Explicit Casts#}
<p>TODO</p>
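<p>
As a minimal sketch of one explicit cast, <code>@truncate</code> discards the most
significant bits of an integer:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

test "explicit cast - @truncate" {
    var a: u16 = 0xabcd;
    // only the least significant bits that fit the target type are kept
    var b: u8 = @truncate(u8, a);
    assert(b == 0xcd);
}
{#code_end#}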
{#header_close#}

{#header_open|Peer Type Resolution#}
<p>TODO</p>
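<p>
A minimal sketch of peer type resolution between integer operands of different widths:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

test "peer type resolution - integer widening" {
    var a: i8 = 12;
    var b: i16 = 34;
    // the sum's type resolves to i16, the peer that can represent both operands
    const c = a + b;
    assert(c == 46);
    assert(@typeOf(c) == i16);
}
{#code_end#}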
{#header_close#}
{#header_close#}

{#header_open|void#}
@@ -5522,12 +5669,6 @@ pub const FloatMode = enum {
</p>
{#see_also|Compile Variables#}
{#header_close#}
{#header_open|@setGlobalSection#}
<pre><code class="zig">@setGlobalSection(global_variable_name, comptime section_name: []const u8) bool</code></pre>
<p>
Puts the global variable in the specified section.
</p>
{#header_close#}
{#header_open|@shlExact#}
<pre><code class="zig">@shlExact(value: T, shift_amt: Log2T) T</code></pre>
<p>
@@ -6928,7 +7069,7 @@ hljs.registerLanguage("zig", function(t) {
a = t.IR + "\\s*\\(",
c = {
keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async orelse",
built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum",
built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum",
literal: "true false null undefined"
},
n = [e, t.CLCM, t.CBCM, s, r];
6 changes: 3 additions & 3 deletions src/ir.cpp
@@ -10092,7 +10092,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}

// cast from &const [N]T to []const T
// cast from *const [N]T to []const T
if (is_slice(wanted_type) &&
actual_type->id == TypeTableEntryIdPointer &&
actual_type->data.pointer.is_const &&
@@ -10111,7 +10111,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}

// cast from [N]T to &const []const T
// cast from [N]T to *const []const T
if (wanted_type->id == TypeTableEntryIdPointer &&
wanted_type->data.pointer.is_const &&
is_slice(wanted_type->data.pointer.child_type) &&
@@ -10136,7 +10136,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}

// cast from [N]T to ?[]const N
// cast from [N]T to ?[]const T
if (wanted_type->id == TypeTableEntryIdOptional &&
is_slice(wanted_type->data.maybe.child_type) &&
actual_type->id == TypeTableEntryIdArray)
87 changes: 87 additions & 0 deletions std/special/compiler_rt/extendXfYf2.zig
@@ -0,0 +1,87 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;

pub extern fn __extenddftf2(a: f64) f128 {
return extendXfYf2(f128, f64, a);
}

pub extern fn __extendsftf2(a: f32) f128 {
return extendXfYf2(f128, f32, a);
}

const CHAR_BIT = 8;

pub fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
const dstSigBits = std.math.floatMantissaBits(dst_t);
const SrcShift = std.math.Log2Int(src_rep_t);
const DstShift = std.math.Log2Int(dst_rep_t);

// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
const srcBits: i32 = @sizeOf(src_t) * CHAR_BIT;
const srcExpBits: i32 = srcBits - srcSigBits - 1;
const srcInfExp: i32 = (1 << srcExpBits) - 1;
const srcExpBias: i32 = srcInfExp >> 1;

const srcMinNormal: src_rep_t = src_rep_t(1) << srcSigBits;
const srcInfinity: src_rep_t = src_rep_t(@bitCast(u32, srcInfExp)) << srcSigBits;
const srcSignMask: src_rep_t = src_rep_t(1) << @intCast(SrcShift, srcSigBits +% srcExpBits);
const srcAbsMask: src_rep_t = srcSignMask -% 1;
const srcQNaN: src_rep_t = src_rep_t(1) << @intCast(SrcShift, srcSigBits -% 1);
const srcNaNCode: src_rep_t = srcQNaN -% 1;

const dstBits: i32 = @sizeOf(dst_t) * CHAR_BIT;
const dstExpBits: i32 = dstBits - dstSigBits - 1;
const dstInfExp: i32 = (1 << dstExpBits) - 1;
const dstExpBias: i32 = dstInfExp >> 1;

const dstMinNormal: dst_rep_t = dst_rep_t(1) << dstSigBits;

// Break a into a sign and representation of the absolute value
const aRep: src_rep_t = @bitCast(src_rep_t, a);
const aAbs: src_rep_t = aRep & srcAbsMask;
const sign: src_rep_t = aRep & srcSignMask;
var absResult: dst_rep_t = undefined;

// If @sizeOf(src_rep_t) < @sizeOf(int), the subtraction result is promoted
// to (signed) int. To avoid that, explicitly cast to src_rep_t.
if (src_rep_t(aAbs -% srcMinNormal) < srcInfinity -% srcMinNormal) {
// a is a normal number.
// Extend to the destination type by shifting the significand and
// exponent into the proper position and rebiasing the exponent.
absResult = dst_rep_t(aAbs) << (dstSigBits -% srcSigBits);
absResult += dst_rep_t(@bitCast(u32, dstExpBias -% srcExpBias)) << dstSigBits;
} else if (aAbs >= srcInfinity) {
// a is NaN or infinity.
// Conjure the result by beginning with infinity, then setting the qNaN
// bit (if needed) and right-aligning the rest of the trailing NaN
// payload field.
absResult = dst_rep_t(@bitCast(u32, dstInfExp)) << dstSigBits;
absResult |= dst_rep_t(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
absResult |= dst_rep_t(aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
} else if (aAbs != 0) {
// a is denormal.
// renormalize the significand and clear the leading bit, then insert
// the correct adjusted exponent in the destination type.
const scale: i32 = @clz(aAbs) - @clz(srcMinNormal);
absResult = dst_rep_t(aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale);
absResult ^= dstMinNormal;
const resultExponent: i32 = dstExpBias - srcExpBias - scale + 1;
absResult |= dst_rep_t(@bitCast(u32, resultExponent)) << @intCast(DstShift, dstSigBits);
} else {
// a is zero.
absResult = 0;
}

// Apply the signbit to (dst_t)abs(a).
const result: dst_rep_t align(@alignOf(dst_t)) = absResult | dst_rep_t(sign) << @intCast(DstShift, dstBits - srcBits);
return @bitCast(dst_t, result);
}

test "import extendXfYf2" {
_ = @import("extendXfYf2_test.zig");
}
