From 87900d34cc16936a795949391840c0a2a0e6fde8 Mon Sep 17 00:00:00 2001 From: Luke Boswell Date: Sun, 14 Jul 2024 17:53:16 +1000 Subject: [PATCH] update-glue-6894 --- .gitignore | 2 +- README.md | 7 +- build.zig | 24 +- build_roc.zig | 4 +- examples/snake.roc | 5 +- glue.roc | 11 + glue.sh | 12 + platform/glue/main.zig | 3 - platform/host.zig | 8 +- platform/roc/dec.zig | 1604 ++++++++++++++++++++++++++++++ platform/{glue => roc}/list.zig | 603 +++++------ platform/roc/num.zig | 670 +++++++++++++ platform/roc/panic.zig | 15 + platform/roc/result.zig | 18 + platform/{glue => roc}/str.zig | 62 +- platform/{glue => roc}/utils.zig | 81 +- 16 files changed, 2746 insertions(+), 383 deletions(-) create mode 100644 glue.roc create mode 100644 glue.sh delete mode 100644 platform/glue/main.zig create mode 100644 platform/roc/dec.zig rename platform/{glue => roc}/list.zig (68%) create mode 100644 platform/roc/num.zig create mode 100644 platform/roc/panic.zig create mode 100644 platform/roc/result.zig rename platform/{glue => roc}/str.zig (97%) rename platform/{glue => roc}/utils.zig (86%) diff --git a/.gitignore b/.gitignore index 0039dc1..b5caa43 100644 --- a/.gitignore +++ b/.gitignore @@ -26,4 +26,4 @@ examples/*.wasm generated-docs/ -format.sh \ No newline at end of file +format.sh diff --git a/README.md b/README.md index fc78575..4d02d7b 100644 --- a/README.md +++ b/README.md @@ -6,9 +6,12 @@ The intent for this platform is to have some fun, learn more about Roc and platf ### Setup -Clone this repository. +1. Clone this repository. -Make sure you have [roc](https://www.roc-lang.org/install) newer than 2023-1-8, [zig](https://ziglang.org/download/) version 0.11.0, and [w4](https://wasm4.org) in your `PATH` environment variable. +2. 
Make sure you have the following in your `PATH` environment variable +- [roc](https://www.roc-lang.org/install), +- [zig](https://ziglang.org/download/) version **0.11.0** +- [w4](https://wasm4.org) ### Run diff --git a/build.zig b/build.zig index 55b494d..a43af14 100644 --- a/build.zig +++ b/build.zig @@ -16,9 +16,9 @@ pub fn build(b: *std.Build) !void { const build_roc = b.addExecutable(.{ .name = "build_roc", - .root_source_file = b.path("build_roc.zig"), + .root_source_file = .{ .path = "build_roc.zig" }, // Empty means native. - .target = b.graph.host, + .target = .{}, .optimize = .Debug, }); const run_build_roc = b.addRunArtifact(build_roc); @@ -27,10 +27,10 @@ pub fn build(b: *std.Build) !void { run_build_roc.has_side_effects = true; if (roc_src) |val| { - run_build_roc.addFileArg(b.path(val)); + run_build_roc.addFileArg(.{ .path = val }); } else { const default_path = "examples/snake.roc"; - run_build_roc.addFileArg(b.path(default_path)); + run_build_roc.addFileArg(.{ .path = default_path }); } switch (optimize) { @@ -44,32 +44,28 @@ pub fn build(b: *std.Build) !void { } // TODO: change to addExecutable with entry disabled when we update to zig 0.12.0. 
- const lib = b.addExecutable(.{ + const lib = b.addSharedLibrary(.{ .name = "cart", - .root_source_file = b.path("platform/host.zig"), - .target = b.resolveTargetQuery(.{ - .cpu_arch = .wasm32, - .os_tag = .freestanding, - }), + .root_source_file = .{ .path = "platform/host.zig" }, + .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding }, .optimize = optimize, }); const options = b.addOptions(); options.addOption(usize, "mem_size", mem_size); options.addOption(bool, "zero_on_alloc", zero_on_alloc); options.addOption(bool, "trace_allocs", trace_allocs); - lib.root_module.addOptions("config", options); + lib.addOptions("config", options); - lib.entry = .disabled; lib.import_memory = true; lib.initial_memory = 65536; lib.max_memory = 65536; lib.stack_size = 14752; // Export WASM-4 symbols - lib.root_module.export_symbol_names = &[_][]const u8{ "start", "update" }; + lib.export_symbol_names = &[_][]const u8{ "start", "update" }; lib.step.dependOn(&run_build_roc.step); - lib.addObjectFile(b.path("zig-cache/app.o")); + lib.addObjectFile(.{ .path = "zig-cache/app.o" }); b.installArtifact(lib); diff --git a/build_roc.zig b/build_roc.zig index fa9bdd6..99ad3f7 100644 --- a/build_roc.zig +++ b/build_roc.zig @@ -28,7 +28,7 @@ pub fn main() !void { // Run `roc check` const roc_check_args = [_][]const u8{ "roc", "check", app_name }; - const roc_check = try std.process.Child.run(.{ + const roc_check = try std.ChildProcess.exec(.{ .allocator = allocator, .argv = &roc_check_args, }); @@ -53,7 +53,7 @@ pub fn main() !void { try roc_build_args.append(optimize_flag); } try roc_build_args.append(app_name); - const roc_build = try std.process.Child.run(.{ + const roc_build = try std.ChildProcess.exec(.{ .allocator = allocator, .argv = roc_build_args.items, }); diff --git a/examples/snake.roc b/examples/snake.roc index b62f001..67b11af 100644 --- a/examples/snake.roc +++ b/examples/snake.roc @@ -118,7 +118,7 @@ drawGame = \model -> tertiary: green, quaternary: blue, } - + 
Sprite.blit! model.fruitSprite { x: model.fruit.x * 8, y: model.fruit.y * 8 } # Draw snake body @@ -214,7 +214,7 @@ moveSnake = \prev -> walkBody = \last, remaining, newBody -> when remaining is [] -> newBody - [curr, .. as rest] -> + [curr, ..] -> walkBody curr (List.dropFirst remaining 1) (List.append newBody last) body = walkBody prev.head prev.body [] @@ -243,4 +243,3 @@ getRandomFruit = \{ head, body } -> Step {} |> Task.ok else Done fruit |> Task.ok - diff --git a/glue.roc b/glue.roc new file mode 100644 index 0000000..64c9d22 --- /dev/null +++ b/glue.roc @@ -0,0 +1,11 @@ +app [makeGlue] { + pf: platform "https://github.com/lukewilliamboswell/roc/releases/download/glue-0.2/5bWcGHHGninJr_RP0-Mg5lPEGYGpPeYwYmVCwG_cZG4.tar.br", + glue: "https://github.com/lukewilliamboswell/roc-glue-code-gen/releases/download/0.4.0/E6x8uVSMI0YQ9CgpMww6Cj2g3BlN1lV8H04UEZh_-QA.tar.br", +} + +import pf.Types exposing [Types] +import pf.File exposing [File] +import glue.Zig + +makeGlue : List Types -> Result (List File) Str +makeGlue = \_ -> Ok Zig.builtins diff --git a/glue.sh b/glue.sh new file mode 100644 index 0000000..981eb12 --- /dev/null +++ b/glue.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +## This script is used to re-generate the glue type for Roc. For now this is limited to the std library. 
+ +# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/ +set -euxo pipefail + +# remove previous generated code +rm -rf platform/roc/ + +# regenerate builtins +roc glue glue.roc platform/ platform/main-glue.roc diff --git a/platform/glue/main.zig b/platform/glue/main.zig deleted file mode 100644 index d95eb65..0000000 --- a/platform/glue/main.zig +++ /dev/null @@ -1,3 +0,0 @@ -// ⚠️ GENERATED CODE ⚠️ -// -// This package is generated by the `roc glue` CLI command \ No newline at end of file diff --git a/platform/host.zig b/platform/host.zig index 650c044..d0a9fc0 100644 --- a/platform/host.zig +++ b/platform/host.zig @@ -5,13 +5,13 @@ const config = @import("config"); const w4 = @import("vendored/wasm4.zig"); -const str = @import("glue/str.zig"); +const str = @import("roc/str.zig"); const RocStr = str.RocStr; -const list = @import("glue/list.zig"); +const list = @import("roc/list.zig"); const RocList = list.RocList; -const utils = @import("glue/utils.zig"); +const utils = @import("roc/utils.zig"); const ALIGN = @alignOf(u128); const Range = std.bit_set.Range; @@ -303,7 +303,7 @@ export fn roc_fx_diskw(bytes: *RocList) callconv(.C) bool { export fn roc_fx_diskr() callconv(.C) RocList { // This is just gonna always get as many bytes as possible. 
- var out = RocList.allocateExact(@alignOf(u8), MAX_DISK_SIZE, @sizeOf(u8)); + var out = RocList.allocateExact(@alignOf(u8), MAX_DISK_SIZE, @sizeOf(u8), false); const data: [*]u8 = out.elements(u8).?; const get = w4.diskr(data, MAX_DISK_SIZE); diff --git a/platform/roc/dec.zig b/platform/roc/dec.zig new file mode 100644 index 0000000..5473bbd --- /dev/null +++ b/platform/roc/dec.zig @@ -0,0 +1,1604 @@ +const std = @import("std"); +const str = @import("str.zig"); +const num_ = @import("num.zig"); +const utils = @import("utils.zig"); + +const math = std.math; +const RocStr = str.RocStr; +const WithOverflow = utils.WithOverflow; +const roc_panic = @import("panic.zig").panic_help; +const U256 = num_.U256; +const mul_u128 = num_.mul_u128; + +pub const RocDec = extern struct { + num: i128, + + pub const decimal_places: u5 = 18; + pub const whole_number_places: u5 = 21; + const max_digits: u6 = 39; + const max_str_length: u6 = max_digits + 2; // + 2 here to account for the sign & decimal dot + + pub const min: RocDec = .{ .num = math.minInt(i128) }; + pub const max: RocDec = .{ .num = math.maxInt(i128) }; + + pub const one_point_zero_i128: i128 = math.pow(i128, 10, RocDec.decimal_places); + pub const one_point_zero: RocDec = .{ .num = one_point_zero_i128 }; + + pub const two_point_zero: RocDec = RocDec.add(RocDec.one_point_zero, RocDec.one_point_zero); + pub const zero_point_five: RocDec = RocDec.div(RocDec.one_point_zero, RocDec.two_point_zero); + + pub fn fromU64(num: u64) RocDec { + return .{ .num = num * one_point_zero_i128 }; + } + + pub fn fromF64(num: f64) ?RocDec { + var result: f64 = num * comptime @as(f64, @floatFromInt(one_point_zero_i128)); + + if (result > comptime @as(f64, @floatFromInt(math.maxInt(i128)))) { + return null; + } + + if (result < comptime @as(f64, @floatFromInt(math.minInt(i128)))) { + return null; + } + + var ret: RocDec = .{ .num = @as(i128, @intFromFloat(result)) }; + return ret; + } + + pub fn toF64(dec: RocDec) f64 { + return @as(f64, 
@floatFromInt(dec.num)) / comptime @as(f64, @floatFromInt(one_point_zero_i128)); + } + + // TODO: If Str.toDec eventually supports more error types, return errors here. + // For now, just return null which will give the default error. + pub fn fromStr(roc_str: RocStr) ?RocDec { + if (roc_str.isEmpty()) { + return null; + } + + const length = roc_str.len(); + + const roc_str_slice = roc_str.asSlice(); + + var is_negative: bool = roc_str_slice[0] == '-'; + var initial_index: usize = if (is_negative) 1 else 0; + + var point_index: ?usize = null; + var index: usize = initial_index; + while (index < length) { + var byte: u8 = roc_str_slice[index]; + if (byte == '.' and point_index == null) { + point_index = index; + index += 1; + continue; + } + + if (!isDigit(byte)) { + return null; + } + index += 1; + } + + var before_str_length = length; + var after_val_i128: ?i128 = null; + if (point_index) |pi| { + before_str_length = pi; + + var after_str_len = (length - 1) - pi; + if (after_str_len > decimal_places) { + // TODO: runtime exception for too many decimal places! + return null; + } + var diff_decimal_places = decimal_places - after_str_len; + + var after_str = roc_str_slice[pi + 1 .. length]; + var after_u64 = std.fmt.parseUnsigned(u64, after_str, 10) catch null; + after_val_i128 = if (after_u64) |f| @as(i128, @intCast(f)) * math.pow(i128, 10, diff_decimal_places) else null; + } + + var before_str = roc_str_slice[initial_index..before_str_length]; + var before_val_not_adjusted = std.fmt.parseUnsigned(i128, before_str, 10) catch null; + + var before_val_i128: ?i128 = null; + if (before_val_not_adjusted) |before| { + const answer = @mulWithOverflow(before, one_point_zero_i128); + const result = answer[0]; + const overflowed = answer[1]; + if (overflowed == 1) { + // TODO: runtime exception for overflow! 
+ return null; + } + before_val_i128 = result; + } + + const dec: RocDec = blk: { + if (before_val_i128) |before| { + if (after_val_i128) |after| { + var answer = @addWithOverflow(before, after); + const result = answer[0]; + const overflowed = answer[1]; + if (overflowed == 1) { + // TODO: runtime exception for overflow! + return null; + } + break :blk .{ .num = result }; + } else { + break :blk .{ .num = before }; + } + } else if (after_val_i128) |after| { + break :blk .{ .num = after }; + } else { + return null; + } + }; + + if (is_negative) { + return dec.negate(); + } else { + return dec; + } + } + + inline fn isDigit(c: u8) bool { + return (c -% 48) <= 9; + } + + pub fn toStr(self: RocDec) RocStr { + // Special case + if (self.num == 0) { + return RocStr.init("0.0", 3); + } + + const num = self.num; + const is_negative = num < 0; + + // Format the backing i128 into an array of digit (ascii) characters (u8s) + var digit_bytes_storage: [max_digits + 1]u8 = undefined; + var num_digits = std.fmt.formatIntBuf(digit_bytes_storage[0..], num, 10, .lower, .{}); + var digit_bytes: [*]u8 = digit_bytes_storage[0..]; + + // space where we assemble all the characters that make up the final string + var str_bytes: [max_str_length]u8 = undefined; + var position: usize = 0; + + // if negative, the first character is a negating minus + if (is_negative) { + str_bytes[position] = '-'; + position += 1; + + // but also, we have one fewer digit than we have characters + num_digits -= 1; + + // and we drop the minus to make later arithmetic correct + digit_bytes += 1; + } + + // Get the slice for before the decimal point + var before_digits_offset: usize = 0; + if (num_digits > decimal_places) { + // we have more digits than fit after the decimal point, + // so we must have digits before the decimal point + before_digits_offset = num_digits - decimal_places; + + for (digit_bytes[0..before_digits_offset]) |c| { + str_bytes[position] = c; + position += 1; + } + } else { + // otherwise 
there are no actual digits before the decimal point + // but we format it with a '0' + str_bytes[position] = '0'; + position += 1; + } + + // we've done everything before the decimal point, so now we can put the decimal point in + str_bytes[position] = '.'; + position += 1; + + const trailing_zeros: u6 = count_trailing_zeros_base10(num); + if (trailing_zeros >= decimal_places) { + // add just a single zero if all decimal digits are zero + str_bytes[position] = '0'; + position += 1; + } else { + // Figure out if we need to prepend any zeros to the after decimal point + // For example, for the number 0.000123 we need to prepend 3 zeros after the decimal point + const after_zeros_num = if (num_digits < decimal_places) decimal_places - num_digits else 0; + + var i: usize = 0; + while (i < after_zeros_num) : (i += 1) { + str_bytes[position] = '0'; + position += 1; + } + + // otherwise append the decimal digits except the trailing zeros + for (digit_bytes[before_digits_offset .. num_digits - trailing_zeros]) |c| { + str_bytes[position] = c; + position += 1; + } + } + + return RocStr.init(&str_bytes, position); + } + + pub fn toI128(self: RocDec) i128 { + return self.num; + } + + pub fn fromI128(num: i128) RocDec { + return .{ .num = num }; + } + + pub fn eq(self: RocDec, other: RocDec) bool { + return self.num == other.num; + } + + pub fn neq(self: RocDec, other: RocDec) bool { + return self.num != other.num; + } + + pub fn negate(self: RocDec) ?RocDec { + var negated = math.negate(self.num) catch null; + return if (negated) |n| .{ .num = n } else null; + } + + pub fn abs(self: RocDec) !RocDec { + const absolute = try math.absInt(self.num); + return RocDec{ .num = absolute }; + } + + pub fn addWithOverflow(self: RocDec, other: RocDec) WithOverflow(RocDec) { + const answer = @addWithOverflow(self.num, other.num); + + return .{ .value = RocDec{ .num = answer[0] }, .has_overflowed = answer[1] == 1 }; + } + + pub fn add(self: RocDec, other: RocDec) RocDec { + const answer = 
RocDec.addWithOverflow(self, other); + + if (answer.has_overflowed) { + roc_panic("Decimal addition overflowed!", 0); + } else { + return answer.value; + } + } + + pub fn addSaturated(self: RocDec, other: RocDec) RocDec { + const answer = RocDec.addWithOverflow(self, other); + if (answer.has_overflowed) { + // We can unambiguously tell which way it wrapped, because we have 129 bits including the overflow bit + if (answer.value.num < 0) { + return RocDec.max; + } else { + return RocDec.min; + } + } else { + return answer.value; + } + } + + pub fn subWithOverflow(self: RocDec, other: RocDec) WithOverflow(RocDec) { + const answer = @subWithOverflow(self.num, other.num); + + return .{ .value = RocDec{ .num = answer[0] }, .has_overflowed = answer[1] == 1 }; + } + + pub fn sub(self: RocDec, other: RocDec) RocDec { + const answer = RocDec.subWithOverflow(self, other); + + if (answer.has_overflowed) { + roc_panic("Decimal subtraction overflowed!", 0); + } else { + return answer.value; + } + } + + pub fn subSaturated(self: RocDec, other: RocDec) RocDec { + const answer = RocDec.subWithOverflow(self, other); + if (answer.has_overflowed) { + if (answer.value.num < 0) { + return RocDec.max; + } else { + return RocDec.min; + } + } else { + return answer.value; + } + } + + pub fn mulWithOverflow(self: RocDec, other: RocDec) WithOverflow(RocDec) { + const self_i128 = self.num; + const other_i128 = other.num; + // const answer = 0; //self_i256 * other_i256; + + const is_answer_negative = (self_i128 < 0) != (other_i128 < 0); + + const self_u128 = @as(u128, @intCast(math.absInt(self_i128) catch { + if (other_i128 == 0) { + return .{ .value = RocDec{ .num = 0 }, .has_overflowed = false }; + } else if (other_i128 == RocDec.one_point_zero.num) { + return .{ .value = self, .has_overflowed = false }; + } else if (is_answer_negative) { + return .{ .value = RocDec.min, .has_overflowed = true }; + } else { + return .{ .value = RocDec.max, .has_overflowed = true }; + } + })); + + const 
other_u128 = @as(u128, @intCast(math.absInt(other_i128) catch { + if (self_i128 == 0) { + return .{ .value = RocDec{ .num = 0 }, .has_overflowed = false }; + } else if (self_i128 == RocDec.one_point_zero.num) { + return .{ .value = other, .has_overflowed = false }; + } else if (is_answer_negative) { + return .{ .value = RocDec.min, .has_overflowed = true }; + } else { + return .{ .value = RocDec.max, .has_overflowed = true }; + } + })); + + const unsigned_answer: i128 = mul_and_decimalize(self_u128, other_u128); + + if (is_answer_negative) { + return .{ .value = RocDec{ .num = -unsigned_answer }, .has_overflowed = false }; + } else { + return .{ .value = RocDec{ .num = unsigned_answer }, .has_overflowed = false }; + } + } + + fn trunc(self: RocDec) RocDec { + return RocDec.sub(self, self.fract()); + } + + fn fract(self: RocDec) RocDec { + const sign = std.math.sign(self.num); + const digits = @mod(sign * self.num, RocDec.one_point_zero.num); + + return RocDec{ .num = sign * digits }; + } + + // Returns the nearest integer to self. If a value is half-way between two integers, round away from 0.0. 
+ fn round(arg1: RocDec) RocDec { + // this rounds towards zero + const tmp = arg1.trunc(); + + const sign = std.math.sign(arg1.num); + const abs_fract = sign * arg1.fract().num; + + if (abs_fract >= RocDec.zero_point_five.num) { + return RocDec.add(tmp, RocDec{ .num = sign * RocDec.one_point_zero.num }); + } else { + return tmp; + } + } + + // Returns the largest integer less than or equal to itself + fn floor(arg1: RocDec) RocDec { + const tmp = arg1.trunc(); + + if (arg1.num < 0 and arg1.fract().num != 0) { + return RocDec.sub(tmp, RocDec.one_point_zero); + } else { + return tmp; + } + } + + // Returns the smallest integer greater than or equal to itself + fn ceiling(arg1: RocDec) RocDec { + const tmp = arg1.trunc(); + + if (arg1.num > 0 and arg1.fract().num != 0) { + return RocDec.add(tmp, RocDec.one_point_zero); + } else { + return tmp; + } + } + + fn powInt(base: RocDec, exponent: i128) RocDec { + if (exponent == 0) { + return RocDec.one_point_zero; + } else if (exponent > 0) { + if (@mod(exponent, 2) == 0) { + const half_power = RocDec.powInt(base, exponent >> 1); // `>> 1` == `/ 2` + return RocDec.mul(half_power, half_power); + } else { + return RocDec.mul(base, RocDec.powInt(base, exponent - 1)); + } + } else { + return RocDec.div(RocDec.one_point_zero, RocDec.powInt(base, -exponent)); + } + } + + fn pow(base: RocDec, exponent: RocDec) RocDec { + if (exponent.trunc().num == exponent.num) { + return base.powInt(@divTrunc(exponent.num, RocDec.one_point_zero_i128)); + } else { + return fromF64(std.math.pow(f64, base.toF64(), exponent.toF64())).?; + } + } + + pub fn mul(self: RocDec, other: RocDec) RocDec { + const answer = RocDec.mulWithOverflow(self, other); + + if (answer.has_overflowed) { + roc_panic("Decimal multiplication overflowed!", 0); + } else { + return answer.value; + } + } + + pub fn mulSaturated(self: RocDec, other: RocDec) RocDec { + const answer = RocDec.mulWithOverflow(self, other); + return answer.value; + } + + pub fn div(self: RocDec, 
other: RocDec) RocDec { + const numerator_i128 = self.num; + const denominator_i128 = other.num; + + // (0 / n) is always 0 + if (numerator_i128 == 0) { + return RocDec{ .num = 0 }; + } + + // (n / 0) is an error + if (denominator_i128 == 0) { + roc_panic("Decimal division by 0!", 0); + } + + // If they're both negative, or if neither is negative, the final answer + // is positive or zero. If one is negative and the denominator isn't, the + // final answer is negative (or zero, in which case final sign won't matter). + // + // It's important that we do this in terms of negatives, because doing + // it in terms of positives can cause bugs when one is zero. + const is_answer_negative = (numerator_i128 < 0) != (denominator_i128 < 0); + + // Break the two i128s into two { hi: u64, lo: u64 } tuples, discarding + // the sign for now. + // + // We'll multiply all 4 combinations of these (hi1 x lo1, hi2 x lo2, + // hi1 x lo2, hi2 x lo1) and add them as appropriate, then apply the + // appropriate sign at the very end. + // + // We do checked_abs because if we had -i128::MAX before, this will overflow. + + const numerator_abs_i128 = math.absInt(numerator_i128) catch { + // Currently, if you try to do multiplication on i64::MIN, panic + // unless you're specifically multiplying by 0 or 1. + // + // Maybe we could support more cases in the future + if (denominator_i128 == one_point_zero_i128) { + return self; + } else { + roc_panic("Decimal division overflow in numerator!", 0); + } + }; + const numerator_u128 = @as(u128, @intCast(numerator_abs_i128)); + + const denominator_abs_i128 = math.absInt(denominator_i128) catch { + // Currently, if you try to do multiplication on i64::MIN, panic + // unless you're specifically multiplying by 0 or 1. 
+ // + // Maybe we could support more cases in the future + if (numerator_i128 == one_point_zero_i128) { + return other; + } else { + roc_panic("Decimal division overflow in denominator!", 0); + } + }; + const denominator_u128 = @as(u128, @intCast(denominator_abs_i128)); + + const numerator_u256: U256 = mul_u128(numerator_u128, math.pow(u128, 10, decimal_places)); + const answer = div_u256_by_u128(numerator_u256, denominator_u128); + + var unsigned_answer: i128 = undefined; + if (answer.hi == 0 and answer.lo <= math.maxInt(i128)) { + unsigned_answer = @as(i128, @intCast(answer.lo)); + } else { + roc_panic("Decimal division overflow!", 0); + } + + return RocDec{ .num = if (is_answer_negative) -unsigned_answer else unsigned_answer }; + } + + fn mod2pi(self: RocDec) RocDec { + // This is made to be used before calling trig functions that work on the range 0 to 2*pi. + // It should be reasonable fast (much faster than calling @mod) and much more accurate as well. + // b is 2*pi as a dec. which is 6.2831853071795864769252867665590057684 + // as dec is times 10^18 so 6283185307179586476.9252867665590057684 + const b0: u64 = 6283185307179586476; + // Fraction that reprensents 64 bits of precision past what dec normally supports. + // 0.9252867665590057684 as binary to 64 places. + const b1: u64 = 0b1110110011011111100101111111000111001010111000100101011111110111; + + // This is dec/(b0+1), but as a multiplication. + // So dec * (1/(b0+1)). This is way faster. + const dec = self.num; + const tmp = @as(i128, @intCast(num_.mul_u128(math.absCast(dec), 249757942369376157886101012127821356963).hi >> (190 - 128))); + const q0 = if (dec < 0) -tmp else tmp; + + const upper = q0 * b0; + const answer = @mulWithOverflow(q0, b1); + const lower = answer[0]; + const overflowed = answer[1]; + // TODO: maybe write this out branchlessly. + // Currently is is probably cmovs, but could be just math? 
+ const q0_sign: i128 = + if (q0 > 0) 1 else -1; + const overflowed_val: i128 = if (overflowed == 1) q0_sign << 64 else 0; + const full = upper + @as(i128, @intCast(lower >> 64)) + overflowed_val; + + var out = dec - full; + if (out < 0) { + out += b0; + } + + return RocDec{ .num = out }; + } + + pub fn log(self: RocDec) RocDec { + return fromF64(@log(self.toF64())).?; + } + + // I belive the output of the trig functions is always in range of Dec. + // If not, we probably should just make it saturate the Dec. + // I don't think this should crash or return errors. + pub fn sin(self: RocDec) RocDec { + return fromF64(math.sin(self.mod2pi().toF64())).?; + } + + pub fn cos(self: RocDec) RocDec { + return fromF64(math.cos(self.mod2pi().toF64())).?; + } + + pub fn tan(self: RocDec) RocDec { + return fromF64(math.tan(self.mod2pi().toF64())).?; + } + + pub fn asin(self: RocDec) RocDec { + return fromF64(math.asin(self.toF64())).?; + } + + pub fn acos(self: RocDec) RocDec { + return fromF64(math.acos(self.toF64())).?; + } + + pub fn atan(self: RocDec) RocDec { + return fromF64(math.atan(self.toF64())).?; + } +}; + +// A number has `k` trailling zeros if `10^k` divides into it cleanly +inline fn count_trailing_zeros_base10(input: i128) u6 { + if (input == 0) { + // this should not happen in practice + return 0; + } + + var count: u6 = 0; + var k: i128 = 1; + + while (true) { + if (@mod(input, std.math.pow(i128, 10, k)) == 0) { + count += 1; + k += 1; + } else { + break; + } + } + + return count; +} + +fn mul_and_decimalize(a: u128, b: u128) i128 { + const answer_u256 = mul_u128(a, b); + + var lhs_hi = answer_u256.hi; + var lhs_lo = answer_u256.lo; + + // Divide - or just add 1, multiply by floor(2^315/10^18), then right shift 315 times. + // floor(2^315/10^18) is 66749594872528440074844428317798503581334516323645399060845050244444366430645 + + // Add 1. + // This can't overflow because the initial numbers are only 127bit due to removing the sign bit. 
+ var answer = @addWithOverflow(lhs_lo, 1); + lhs_lo = answer[0]; + var overflowed = answer[1]; + lhs_hi = blk: { + if (overflowed == 1) { + break :blk lhs_hi + 1; + } else { + break :blk lhs_hi + 0; + } + }; + + // This needs to do multiplication in a way that expands, + // since we throw away 315 bits we care only about the higher end, not lower. + // So like need to do high low mult with 2 U256's and then bitshift. + // I bet this has a lot of room for multiplication optimization. + const rhs_hi: u128 = 0x9392ee8e921d5d073aff322e62439fcf; + const rhs_lo: u128 = 0x32d7f344649470f90cac0c573bf9e1b5; + + const ea = mul_u128(lhs_lo, rhs_lo); + const gf = mul_u128(lhs_hi, rhs_lo); + const jh = mul_u128(lhs_lo, rhs_hi); + const lk = mul_u128(lhs_hi, rhs_hi); + + const e = ea.hi; + // const _a = ea.lo; + + const g = gf.hi; + const f = gf.lo; + + const j = jh.hi; + const h = jh.lo; + + const l = lk.hi; + const k = lk.lo; + + // b = e + f + h + answer = @addWithOverflow(e, f); + const e_plus_f = answer[0]; + overflowed = answer[1]; + var b_carry1: u128 = undefined; + if (overflowed == 1) { + b_carry1 = 1; + } else { + b_carry1 = 0; + } + + answer = @addWithOverflow(e_plus_f, h); + overflowed = answer[1]; + var b_carry2: u128 = undefined; + if (overflowed == 1) { + b_carry2 = 1; + } else { + b_carry2 = 0; + } + + // c = carry + g + j + k // it doesn't say +k but I think it should be? 
+ answer = @addWithOverflow(g, j); + const g_plus_j = answer[0]; + overflowed = answer[1]; + var c_carry1: u128 = undefined; + if (overflowed == 1) { + c_carry1 = 1; + } else { + c_carry1 = 0; + } + + answer = @addWithOverflow(g_plus_j, k); + const g_plus_j_plus_k = answer[0]; + overflowed = answer[1]; + var c_carry2: u128 = undefined; + if (overflowed == 1) { + c_carry2 = 1; + } else { + c_carry2 = 0; + } + + answer = @addWithOverflow(g_plus_j_plus_k, b_carry1); + const c_without_bcarry2 = answer[0]; + overflowed = answer[1]; + var c_carry3: u128 = undefined; + if (overflowed == 1) { + c_carry3 = 1; + } else { + c_carry3 = 0; + } + + answer = @addWithOverflow(c_without_bcarry2, b_carry2); + const c = answer[0]; + overflowed = answer[1]; + var c_carry4: u128 = undefined; + if (overflowed == 1) { + c_carry4 = 1; + } else { + c_carry4 = 0; + } + + // d = carry + l + answer = @addWithOverflow(l, c_carry1); + overflowed = answer[1]; + answer = @addWithOverflow(answer[0], c_carry2); + overflowed = overflowed | answer[1]; + answer = @addWithOverflow(answer[0], c_carry3); + overflowed = overflowed | answer[1]; + answer = @addWithOverflow(answer[0], c_carry4); + overflowed = overflowed | answer[1]; + const d = answer[0]; + + if (overflowed == 1) { + roc_panic("Decimal multiplication overflow!", 0); + } + + // Final 512bit value is d, c, b, a + // need to left shift 321 times + // 315 - 256 is 59. So left shift d, c 59 times. + return @as(i128, @intCast(c >> 59 | (d << (128 - 59)))); +} + +// Multiply two 128-bit ints and divide the result by 10^DECIMAL_PLACES +// +// Adapted from https://github.com/nlordell/ethnum-rs/blob/c9ed57e131bffde7bcc8274f376e5becf62ef9ac/src/intrinsics/native/divmod.rs +// Copyright (c) 2020 Nicholas Rodrigues Lordello +// Licensed under the Apache License version 2.0 +// +// When translating this to Zig, we often have to use math.shr/shl instead of >>/<< +// This is because casting to the right types for Zig can be kind of tricky. 
+// See https://github.com/ziglang/zig/issues/7605 +fn div_u256_by_u128(numer: U256, denom: u128) U256 { + const N_UDWORD_BITS: u8 = 128; + const N_UTWORD_BITS: u9 = 256; + + var q: U256 = undefined; + var r: U256 = undefined; + var sr: u8 = undefined; + + // special case + if (numer.hi == 0) { + // 0 X + // --- + // 0 X + return .{ + .hi = 0, + .lo = numer.lo / denom, + }; + } + + // numer.hi != 0 + if (denom == 0) { + // K X + // --- + // 0 0 + return .{ + .hi = 0, + .lo = numer.hi / denom, + }; + } else { + // K X + // --- + // 0 K + // NOTE: Modified from `if (d.low() & (d.low() - 1)) == 0`. + if (math.isPowerOfTwo(denom)) { + // if d is a power of 2 + if (denom == 1) { + return numer; + } + + sr = @ctz(denom); + + return .{ + .hi = math.shr(u128, numer.hi, sr), + .lo = math.shl(u128, numer.hi, N_UDWORD_BITS - sr) | math.shr(u128, numer.lo, sr), + }; + } + + // K X + // --- + // 0 K + var denom_leading_zeros = @clz(denom); + var numer_hi_leading_zeros = @clz(numer.hi); + sr = 1 + N_UDWORD_BITS + denom_leading_zeros - numer_hi_leading_zeros; + // 2 <= sr <= N_UTWORD_BITS - 1 + // q.all = n.all << (N_UTWORD_BITS - sr); + // r.all = n.all >> sr; + // #[allow(clippy::comparison_chain)] + if (sr == N_UDWORD_BITS) { + q = .{ + .hi = numer.lo, + .lo = 0, + }; + r = .{ + .hi = 0, + .lo = numer.hi, + }; + } else if (sr < N_UDWORD_BITS) { + // 2 <= sr <= N_UDWORD_BITS - 1 + q = .{ + .hi = math.shl(u128, numer.lo, N_UDWORD_BITS - sr), + .lo = 0, + }; + r = .{ + .hi = math.shr(u128, numer.hi, sr), + .lo = math.shl(u128, numer.hi, N_UDWORD_BITS - sr) | math.shr(u128, numer.lo, sr), + }; + } else { + // N_UDWORD_BITS + 1 <= sr <= N_UTWORD_BITS - 1 + q = .{ + .hi = math.shl(u128, numer.hi, N_UTWORD_BITS - sr) | math.shr(u128, numer.lo, sr - N_UDWORD_BITS), + .lo = math.shl(u128, numer.lo, N_UTWORD_BITS - sr), + }; + r = .{ + .hi = 0, + .lo = math.shr(u128, numer.hi, sr - N_UDWORD_BITS), + }; + } + } + + // Not a special case + // q and r are initialized with: + // q.all = 
n.all << (N_UTWORD_BITS - sr); + // r.all = n.all >> sr; + // 1 <= sr <= N_UTWORD_BITS - 1 + var carry: u128 = 0; + + while (sr > 0) { + // r:q = ((r:q) << 1) | carry + r.hi = (r.hi << 1) | (r.lo >> (N_UDWORD_BITS - 1)); + r.lo = (r.lo << 1) | (q.hi >> (N_UDWORD_BITS - 1)); + q.hi = (q.hi << 1) | (q.lo >> (N_UDWORD_BITS - 1)); + q.lo = (q.lo << 1) | carry; + + // carry = 0; + // if (r.all >= d.all) + // { + // r.all -= d.all; + // carry = 1; + // } + // NOTE: Modified from `(d - r - 1) >> (N_UTWORD_BITS - 1)` to be an + // **arithmetic** shift. + + var answer = @subWithOverflow(denom, r.lo); + var lo = answer[0]; + var lo_overflowed = answer[1]; + var hi = 0 -% @as(u128, @intCast(@as(u1, @bitCast(lo_overflowed)))) -% r.hi; + + answer = @subWithOverflow(lo, 1); + lo = answer[0]; + lo_overflowed = answer[1]; + hi = hi -% @as(u128, @intCast(@as(u1, @bitCast(lo_overflowed)))); + + // NOTE: this U256 was originally created by: + // + // ((hi as i128) >> 127).as_u256() + // + // As an implementation of `as_u256`, we wrap a negative value around to the maximum value of U256. 
+ + var s_u128 = math.shr(u128, hi, 127); + var s_hi: u128 = undefined; + var s_lo: u128 = undefined; + if (s_u128 == 1) { + s_hi = math.maxInt(u128); + s_lo = math.maxInt(u128); + } else { + s_hi = 0; + s_lo = 0; + } + var s = .{ + .hi = s_hi, + .lo = s_lo, + }; + + carry = s.lo & 1; + + // var (lo, carry) = r.lo.overflowing_sub(denom & s.lo); + answer = @subWithOverflow(r.lo, (denom & s.lo)); + lo = answer[0]; + lo_overflowed = answer[1]; + hi = r.hi -% @as(u128, @intCast(@as(u1, @bitCast(lo_overflowed)))); + + r = .{ .hi = hi, .lo = lo }; + + sr -= 1; + } + + var hi = (q.hi << 1) | (q.lo >> (127)); + var lo = (q.lo << 1) | carry; + + return .{ .hi = hi, .lo = lo }; +} + +const testing = std.testing; +const expectEqual = testing.expectEqual; +const expectError = testing.expectError; +const expectEqualSlices = testing.expectEqualSlices; +const expect = testing.expect; + +test "fromU64" { + var dec = RocDec.fromU64(25); + + try expectEqual(RocDec{ .num = 25000000000000000000 }, dec); +} + +test "fromF64" { + var dec = RocDec.fromF64(25.5); + try expectEqual(RocDec{ .num = 25500000000000000000 }, dec.?); +} + +test "fromF64 overflow" { + var dec = RocDec.fromF64(1e308); + try expectEqual(dec, null); +} + +test "fromStr: empty" { + var roc_str = RocStr.init("", 0); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(dec, null); +} + +test "fromStr: 0" { + var roc_str = RocStr.init("0", 1); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec{ .num = 0 }, dec.?); +} + +test "fromStr: 1" { + var roc_str = RocStr.init("1", 1); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec.one_point_zero, dec.?); +} + +test "fromStr: 123.45" { + var roc_str = RocStr.init("123.45", 6); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec{ .num = 123450000000000000000 }, dec.?); +} + +test "fromStr: .45" { + var roc_str = RocStr.init(".45", 3); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec{ .num = 450000000000000000 }, dec.?); 
+} + +test "fromStr: 0.45" { + var roc_str = RocStr.init("0.45", 4); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec{ .num = 450000000000000000 }, dec.?); +} + +test "fromStr: 123" { + var roc_str = RocStr.init("123", 3); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec{ .num = 123000000000000000000 }, dec.?); +} + +test "fromStr: -.45" { + var roc_str = RocStr.init("-.45", 4); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec{ .num = -450000000000000000 }, dec.?); +} + +test "fromStr: -0.45" { + var roc_str = RocStr.init("-0.45", 5); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec{ .num = -450000000000000000 }, dec.?); +} + +test "fromStr: -123" { + var roc_str = RocStr.init("-123", 4); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec{ .num = -123000000000000000000 }, dec.?); +} + +test "fromStr: -123.45" { + var roc_str = RocStr.init("-123.45", 7); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(RocDec{ .num = -123450000000000000000 }, dec.?); +} + +test "fromStr: abc" { + var roc_str = RocStr.init("abc", 3); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(dec, null); +} + +test "fromStr: 123.abc" { + var roc_str = RocStr.init("123.abc", 7); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(dec, null); +} + +test "fromStr: abc.123" { + var roc_str = RocStr.init("abc.123", 7); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(dec, null); +} + +test "fromStr: .123.1" { + var roc_str = RocStr.init(".123.1", 6); + var dec = RocDec.fromStr(roc_str); + + try expectEqual(dec, null); +} + +test "toStr: 100.00" { + var dec: RocDec = .{ .num = 100000000000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "100.0"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: 123.45" { + var dec: RocDec = .{ .num = 123450000000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = 
"123.45"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: -123.45" { + var dec: RocDec = .{ .num = -123450000000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "-123.45"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: 123.0" { + var dec: RocDec = .{ .num = 123000000000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "123.0"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: -123.0" { + var dec: RocDec = .{ .num = -123000000000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "-123.0"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: 0.45" { + var dec: RocDec = .{ .num = 450000000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "0.45"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: -0.45" { + var dec: RocDec = .{ .num = -450000000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "-0.45"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: 0.00045" { + var dec: RocDec = .{ .num = 450000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "0.00045"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: -0.00045" { + var dec: RocDec = .{ .num = -450000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "-0.00045"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: -111.123456" { + var dec: RocDec = .{ .num = -111123456000000000000 }; + var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "-111.123456"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: 123.1111111" { + var dec: RocDec = .{ .num = 123111111100000000000 }; + 
var res_roc_str = dec.toStr(); + + const res_slice: []const u8 = "123.1111111"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: 123.1111111111111 (big str)" { + var dec: RocDec = .{ .num = 123111111111111000000 }; + var res_roc_str = dec.toStr(); + errdefer res_roc_str.decref(); + defer res_roc_str.decref(); + + const res_slice: []const u8 = "123.111111111111"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: 123.111111111111444444 (max number of decimal places)" { + var dec: RocDec = .{ .num = 123111111111111444444 }; + var res_roc_str = dec.toStr(); + errdefer res_roc_str.decref(); + defer res_roc_str.decref(); + + const res_slice: []const u8 = "123.111111111111444444"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: 12345678912345678912.111111111111111111 (max number of digits)" { + var dec: RocDec = .{ .num = 12345678912345678912111111111111111111 }; + var res_roc_str = dec.toStr(); + errdefer res_roc_str.decref(); + defer res_roc_str.decref(); + + const res_slice: []const u8 = "12345678912345678912.111111111111111111"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: std.math.maxInt" { + var dec: RocDec = .{ .num = std.math.maxInt(i128) }; + var res_roc_str = dec.toStr(); + errdefer res_roc_str.decref(); + defer res_roc_str.decref(); + + const res_slice: []const u8 = "170141183460469231731.687303715884105727"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: std.math.minInt" { + var dec: RocDec = .{ .num = std.math.minInt(i128) }; + var res_roc_str = dec.toStr(); + errdefer res_roc_str.decref(); + defer res_roc_str.decref(); + + const res_slice: []const u8 = "-170141183460469231731.687303715884105728"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "toStr: 0" { + var dec: RocDec = .{ .num = 0 }; + var res_roc_str = dec.toStr(); + + const 
res_slice: []const u8 = "0.0"[0..]; + try expectEqualSlices(u8, res_slice, res_roc_str.asSlice()); +} + +test "add: 0" { + var dec: RocDec = .{ .num = 0 }; + + try expectEqual(RocDec{ .num = 0 }, dec.add(.{ .num = 0 })); +} + +test "add: 1" { + var dec: RocDec = .{ .num = 0 }; + + try expectEqual(RocDec{ .num = 1 }, dec.add(.{ .num = 1 })); +} + +test "sub: 0" { + var dec: RocDec = .{ .num = 1 }; + + try expectEqual(RocDec{ .num = 1 }, dec.sub(.{ .num = 0 })); +} + +test "sub: 1" { + var dec: RocDec = .{ .num = 1 }; + + try expectEqual(RocDec{ .num = 0 }, dec.sub(.{ .num = 1 })); +} + +test "mul: by 0" { + var dec: RocDec = .{ .num = 0 }; + + try expectEqual(RocDec{ .num = 0 }, dec.mul(.{ .num = 0 })); +} + +test "mul: by 1" { + var dec: RocDec = RocDec.fromU64(15); + + try expectEqual(RocDec.fromU64(15), dec.mul(RocDec.fromU64(1))); +} + +test "mul: by 2" { + var dec: RocDec = RocDec.fromU64(15); + + try expectEqual(RocDec.fromU64(30), dec.mul(RocDec.fromU64(2))); +} + +test "div: 0 / 2" { + var dec: RocDec = RocDec.fromU64(0); + + try expectEqual(RocDec.fromU64(0), dec.div(RocDec.fromU64(2))); +} + +test "div: 2 / 2" { + var dec: RocDec = RocDec.fromU64(2); + + try expectEqual(RocDec.fromU64(1), dec.div(RocDec.fromU64(2))); +} + +test "div: 20 / 2" { + var dec: RocDec = RocDec.fromU64(20); + + try expectEqual(RocDec.fromU64(10), dec.div(RocDec.fromU64(2))); +} + +test "div: 8 / 5" { + var dec: RocDec = RocDec.fromU64(8); + var res: RocDec = RocDec.fromStr(RocStr.init("1.6", 3)).?; + try expectEqual(res, dec.div(RocDec.fromU64(5))); +} + +test "div: 10 / 3" { + var numer: RocDec = RocDec.fromU64(10); + var denom: RocDec = RocDec.fromU64(3); + + var roc_str = RocStr.init("3.333333333333333333", 20); + errdefer roc_str.decref(); + defer roc_str.decref(); + + var res: RocDec = RocDec.fromStr(roc_str).?; + + try expectEqual(res, numer.div(denom)); +} + +test "div: 341 / 341" { + var number1: RocDec = RocDec.fromU64(341); + var number2: RocDec = RocDec.fromU64(341); + 
try expectEqual(RocDec.fromU64(1), number1.div(number2)); +} + +test "div: 342 / 343" { + var number1: RocDec = RocDec.fromU64(342); + var number2: RocDec = RocDec.fromU64(343); + var roc_str = RocStr.init("0.997084548104956268", 20); + try expectEqual(RocDec.fromStr(roc_str), number1.div(number2)); +} + +test "div: 680 / 340" { + var number1: RocDec = RocDec.fromU64(680); + var number2: RocDec = RocDec.fromU64(340); + try expectEqual(RocDec.fromU64(2), number1.div(number2)); +} + +test "div: 500 / 1000" { + var number1: RocDec = RocDec.fromU64(500); + var number2: RocDec = RocDec.fromU64(1000); + var roc_str = RocStr.init("0.5", 3); + try expectEqual(RocDec.fromStr(roc_str), number1.div(number2)); +} + +test "log: 1" { + try expectEqual(RocDec.fromU64(0), RocDec.log(RocDec.fromU64(1))); +} + +test "fract: 0" { + var roc_str = RocStr.init("0", 1); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = 0 }, dec.fract()); +} + +test "fract: 1" { + var roc_str = RocStr.init("1", 1); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = 0 }, dec.fract()); +} + +test "fract: 123.45" { + var roc_str = RocStr.init("123.45", 6); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = 450000000000000000 }, dec.fract()); +} + +test "fract: -123.45" { + var roc_str = RocStr.init("-123.45", 7); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = -450000000000000000 }, dec.fract()); +} + +test "fract: .45" { + var roc_str = RocStr.init(".45", 3); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = 450000000000000000 }, dec.fract()); +} + +test "fract: -0.00045" { + const dec: RocDec = .{ .num = -450000000000000 }; + const res = dec.fract(); + + try expectEqual(dec.num, res.num); +} + +test "trunc: 0" { + var roc_str = RocStr.init("0", 1); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = 0 }, dec.trunc()); +} + +test "trunc: 1" { + var roc_str = 
RocStr.init("1", 1); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec.one_point_zero, dec.trunc()); +} + +test "trunc: 123.45" { + var roc_str = RocStr.init("123.45", 6); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = 123000000000000000000 }, dec.trunc()); +} + +test "trunc: -123.45" { + var roc_str = RocStr.init("-123.45", 7); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = -123000000000000000000 }, dec.trunc()); +} + +test "trunc: .45" { + var roc_str = RocStr.init(".45", 3); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = 0 }, dec.trunc()); +} + +test "trunc: -0.00045" { + const dec: RocDec = .{ .num = -450000000000000 }; + const res = dec.trunc(); + + try expectEqual(RocDec{ .num = 0 }, res); +} + +test "round: 123.45" { + var roc_str = RocStr.init("123.45", 6); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = 123000000000000000000 }, dec.round()); +} + +test "round: -123.45" { + var roc_str = RocStr.init("-123.45", 7); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = -123000000000000000000 }, dec.round()); +} + +test "round: 0.5" { + var roc_str = RocStr.init("0.5", 3); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec.one_point_zero, dec.round()); +} + +test "round: -0.5" { + var roc_str = RocStr.init("-0.5", 4); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec{ .num = -1000000000000000000 }, dec.round()); +} + +test "powInt: 3.1 ^ 0" { + var roc_str = RocStr.init("3.1", 3); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(RocDec.one_point_zero, dec.powInt(0)); +} + +test "powInt: 3.1 ^ 1" { + var roc_str = RocStr.init("3.1", 3); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(dec, dec.powInt(1)); +} + +test "powInt: 2 ^ 2" { + var roc_str = RocStr.init("4", 1); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(dec, RocDec.two_point_zero.powInt(2)); +} + 
+test "powInt: 0.5 ^ 2" { + var roc_str = RocStr.init("0.25", 4); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(dec, RocDec.zero_point_five.powInt(2)); +} + +test "pow: 0.5 ^ 2.0" { + var roc_str = RocStr.init("0.25", 4); + var dec = RocDec.fromStr(roc_str).?; + + try expectEqual(dec, RocDec.zero_point_five.pow(RocDec.two_point_zero)); +} + +// exports + +pub fn fromStr(arg: RocStr) callconv(.C) num_.NumParseResult(i128) { + if (@call(.always_inline, RocDec.fromStr, .{arg})) |dec| { + return .{ .errorcode = 0, .value = dec.num }; + } else { + return .{ .errorcode = 1, .value = 0 }; + } +} + +pub fn toStr(arg: RocDec) callconv(.C) RocStr { + return @call(.always_inline, RocDec.toStr, .{arg}); +} + +pub fn fromF64C(arg: f64) callconv(.C) i128 { + if (@call(.always_inline, RocDec.fromF64, .{arg})) |dec| { + return dec.num; + } else { + roc_panic("Decimal conversion from f64 failed!", 0); + } +} + +pub fn fromF32C(arg_f32: f32) callconv(.C) i128 { + const arg_f64 = arg_f32; + if (@call(.always_inline, RocDec.fromF64, .{arg_f64})) |dec| { + return dec.num; + } else { + roc_panic("Decimal conversion from f32!", 0); + } +} + +pub fn toF64(arg: RocDec) callconv(.C) f64 { + return @call(.always_inline, RocDec.toF64, .{arg}); +} + +pub fn exportFromInt(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T) callconv(.C) i128 { + const this = @as(i128, @intCast(self)); + + const answer = @mulWithOverflow(this, RocDec.one_point_zero_i128); + if (answer[1] == 1) { + roc_panic("Decimal conversion from Integer failed!", 0); + } else { + return answer[0]; + } + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn fromU64C(arg: u64) callconv(.C) i128 { + return @call(.always_inline, RocDec.fromU64, .{arg}).toI128(); +} + +pub fn toI128(arg: RocDec) callconv(.C) i128 { + return @call(.always_inline, RocDec.toI128, .{arg}); +} + +pub fn fromI128(arg: i128) callconv(.C) RocDec { + return 
@call(.always_inline, RocDec.fromI128, .{arg}); +} + +pub fn eqC(arg1: RocDec, arg2: RocDec) callconv(.C) bool { + return @call(.always_inline, RocDec.eq, .{ arg1, arg2 }); +} + +pub fn neqC(arg1: RocDec, arg2: RocDec) callconv(.C) bool { + return @call(.always_inline, RocDec.neq, .{ arg1, arg2 }); +} + +pub fn negateC(arg: RocDec) callconv(.C) i128 { + return if (@call(.always_inline, RocDec.negate, .{arg})) |dec| dec.num else { + roc_panic("Decimal negation overflow!", 0); + }; +} + +pub fn absC(arg: RocDec) callconv(.C) i128 { + const result = @call(.always_inline, RocDec.abs, .{arg}) catch { + roc_panic("Decimal absolute value overflow!", 0); + }; + return result.num; +} + +pub fn addC(arg1: RocDec, arg2: RocDec) callconv(.C) WithOverflow(RocDec) { + return @call(.always_inline, RocDec.addWithOverflow, .{ arg1, arg2 }); +} + +pub fn subC(arg1: RocDec, arg2: RocDec) callconv(.C) WithOverflow(RocDec) { + return @call(.always_inline, RocDec.subWithOverflow, .{ arg1, arg2 }); +} + +pub fn mulC(arg1: RocDec, arg2: RocDec) callconv(.C) WithOverflow(RocDec) { + return @call(.always_inline, RocDec.mulWithOverflow, .{ arg1, arg2 }); +} + +pub fn divC(arg1: RocDec, arg2: RocDec) callconv(.C) i128 { + return @call(.always_inline, RocDec.div, .{ arg1, arg2 }).num; +} + +pub fn logC(arg: RocDec) callconv(.C) i128 { + return @call(.always_inline, RocDec.log, .{arg}).num; +} + +pub fn powC(arg1: RocDec, arg2: RocDec) callconv(.C) i128 { + return @call(.always_inline, RocDec.pow, .{ arg1, arg2 }).num; +} + +pub fn sinC(arg: RocDec) callconv(.C) i128 { + return @call(.always_inline, RocDec.sin, .{arg}).num; +} + +pub fn cosC(arg: RocDec) callconv(.C) i128 { + return @call(.always_inline, RocDec.cos, .{arg}).num; +} + +pub fn tanC(arg: RocDec) callconv(.C) i128 { + return @call(.always_inline, RocDec.tan, .{arg}).num; +} + +pub fn asinC(arg: RocDec) callconv(.C) i128 { + return @call(.always_inline, RocDec.asin, .{arg}).num; +} + +pub fn acosC(arg: RocDec) callconv(.C) i128 { + 
return @call(.always_inline, RocDec.acos, .{arg}).num; +} + +pub fn atanC(arg: RocDec) callconv(.C) i128 { + return @call(.always_inline, RocDec.atan, .{arg}).num; +} + +pub fn addOrPanicC(arg1: RocDec, arg2: RocDec) callconv(.C) RocDec { + return @call(.always_inline, RocDec.add, .{ arg1, arg2 }); +} + +pub fn addSaturatedC(arg1: RocDec, arg2: RocDec) callconv(.C) RocDec { + return @call(.always_inline, RocDec.addSaturated, .{ arg1, arg2 }); +} + +pub fn subOrPanicC(arg1: RocDec, arg2: RocDec) callconv(.C) RocDec { + return @call(.always_inline, RocDec.sub, .{ arg1, arg2 }); +} + +pub fn subSaturatedC(arg1: RocDec, arg2: RocDec) callconv(.C) RocDec { + return @call(.always_inline, RocDec.subSaturated, .{ arg1, arg2 }); +} + +pub fn mulOrPanicC(arg1: RocDec, arg2: RocDec) callconv(.C) RocDec { + return @call(.always_inline, RocDec.mul, .{ arg1, arg2 }); +} + +pub fn mulSaturatedC(arg1: RocDec, arg2: RocDec) callconv(.C) RocDec { + return @call(.always_inline, RocDec.mulSaturated, .{ arg1, arg2 }); +} + +pub fn exportRound(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: RocDec) callconv(.C) T { + return @as(T, @intCast(@divFloor(input.round().num, RocDec.one_point_zero_i128))); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportFloor(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: RocDec) callconv(.C) T { + return @as(T, @intCast(@divFloor(input.floor().num, RocDec.one_point_zero_i128))); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportCeiling(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: RocDec) callconv(.C) T { + return @as(T, @intCast(@divFloor(input.ceiling().num, RocDec.one_point_zero_i128))); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} diff --git 
a/platform/glue/list.zig b/platform/roc/list.zig similarity index 68% rename from platform/glue/list.zig rename to platform/roc/list.zig index 4784a4b..d2975dd 100644 --- a/platform/glue/list.zig +++ b/platform/roc/list.zig @@ -1,5 +1,6 @@ const std = @import("std"); const utils = @import("utils.zig"); +const str = @import("str.zig"); const UpdateMode = utils.UpdateMode; const mem = std.mem; const math = std.math; @@ -83,12 +84,12 @@ pub const RocList = extern struct { return true; } - pub fn fromSlice(comptime T: type, slice: []const T) RocList { + pub fn fromSlice(comptime T: type, slice: []const T, elements_refcounted: bool) RocList { if (slice.len == 0) { return RocList.empty(); } - const list = allocate(@alignOf(T), slice.len, @sizeOf(T)); + const list = allocate(@alignOf(T), slice.len, @sizeOf(T), elements_refcounted); if (slice.len > 0) { const dest = list.bytes orelse unreachable; @@ -106,7 +107,7 @@ pub const RocList = extern struct { // The pointer is to just after the refcount. // For big lists, it just returns their bytes pointer. // For seamless slices, it returns the pointer stored in capacity_or_alloc_ptr. - pub fn getAllocationPtr(self: RocList) ?[*]u8 { + pub fn getAllocationDataPtr(self: RocList) ?[*]u8 { const list_alloc_ptr = @intFromPtr(self.bytes); const slice_alloc_ptr = self.capacity_or_alloc_ptr << 1; const slice_mask = self.seamlessSliceMask(); @@ -114,9 +115,60 @@ pub const RocList = extern struct { return @as(?[*]u8, @ptrFromInt(alloc_ptr)); } - pub fn decref(self: RocList, alignment: u32) void { + // This function is only valid if the list has refcounted elements. + fn getAllocationElementCount(self: RocList) usize { + if (self.isSeamlessSlice()) { + // Seamless slices always refer to an underlying allocation. + const alloc_ptr = self.getAllocationDataPtr() orelse unreachable; + // - 1 is refcount. + // - 2 is size on heap. 
+ const ptr = @as([*]usize, @ptrCast(@alignCast(alloc_ptr))) - 2; + return ptr[0]; + } else { + return self.length; + } + } + + // This needs to be called when creating seamless slices from unique list. + // It will put the allocation size on the heap to enable the seamless slice to free the underlying allocation. + fn setAllocationElementCount(self: RocList, elements_refcounted: bool) void { + if (elements_refcounted) { + // - 1 is refcount. + // - 2 is size on heap. + const ptr = @as([*]usize, @alignCast(@ptrCast(self.getAllocationDataPtr()))) - 2; + ptr[0] = self.length; + } + } + + pub fn incref(self: RocList, amount: isize, elements_refcounted: bool) void { + // If the list is unique and not a seamless slice, the length needs to be store on the heap if the elements are refcounted. + if (elements_refcounted and self.isUnique() and !self.isSeamlessSlice()) { + if (self.getAllocationDataPtr()) |source| { + // - 1 is refcount. + // - 2 is size on heap. + const ptr = @as([*]usize, @alignCast(@ptrCast(source))) - 2; + ptr[0] = self.length; + } + } + utils.increfDataPtrC(self.getAllocationDataPtr(), amount); + } + + pub fn decref(self: RocList, alignment: u32, element_width: usize, elements_refcounted: bool, dec: Dec) void { + // If unique, decref will free the list. Before that happens, all elements must be decremented. + if (elements_refcounted and self.isUnique()) { + if (self.getAllocationDataPtr()) |source| { + const count = self.getAllocationElementCount(); + + var i: usize = 0; + while (i < count) : (i += 1) { + const element = source + i * element_width; + dec(element); + } + } + } + // We use the raw capacity to ensure we always decrement the refcount of seamless slices. 
- utils.decref(self.getAllocationPtr(), self.capacity_or_alloc_ptr, alignment); + utils.decref(self.getAllocationDataPtr(), self.capacity_or_alloc_ptr, alignment, elements_refcounted); } pub fn elements(self: RocList, comptime T: type) ?[*]T { @@ -141,15 +193,22 @@ pub const RocList = extern struct { return self.refcountMachine() - utils.REFCOUNT_ONE + 1; } - pub fn makeUniqueExtra(self: RocList, alignment: u32, element_width: usize, update_mode: UpdateMode) RocList { + pub fn makeUniqueExtra(self: RocList, alignment: u32, element_width: usize, elements_refcounted: bool, dec: Dec, update_mode: UpdateMode) RocList { if (update_mode == .InPlace) { return self; } else { - return self.makeUnique(alignment, element_width); + return self.makeUnique(alignment, element_width, elements_refcounted, dec); } } - pub fn makeUnique(self: RocList, alignment: u32, element_width: usize) RocList { + pub fn makeUnique( + self: RocList, + alignment: u32, + element_width: usize, + elements_refcounted: bool, + inc: Inc, + dec: Dec, + ) RocList { if (self.isUnique()) { return self; } @@ -157,12 +216,12 @@ pub const RocList = extern struct { if (self.isEmpty()) { // Empty is not necessarily unique on it's own. // The list could have capacity and be shared. - self.decref(alignment); + self.decref(alignment, element_width, elements_refcounted, dec); return RocList.empty(); } // unfortunately, we have to clone - const new_list = RocList.allocate(alignment, self.length, element_width); + const new_list = RocList.allocate(alignment, self.length, element_width, elements_refcounted); var old_bytes: [*]u8 = @as([*]u8, @ptrCast(self.bytes)); var new_bytes: [*]u8 = @as([*]u8, @ptrCast(new_list.bytes)); @@ -170,8 +229,15 @@ pub const RocList = extern struct { const number_of_bytes = self.len() * element_width; @memcpy(new_bytes[0..number_of_bytes], old_bytes[0..number_of_bytes]); - // NOTE we fuse an increment of all keys/values with a decrement of the input list. 
- self.decref(alignment); + // Increment refcount of all elements now in a new list. + if (elements_refcounted) { + var i: usize = 0; + while (i < self.len()) : (i += 1) { + inc(new_bytes + i * element_width); + } + } + + self.decref(alignment, element_width, elements_refcounted, dec); return new_list; } @@ -180,6 +246,7 @@ pub const RocList = extern struct { alignment: u32, length: usize, element_width: usize, + elements_refcounted: bool, ) RocList { if (length == 0) { return empty(); @@ -188,7 +255,7 @@ pub const RocList = extern struct { const capacity = utils.calculateCapacity(0, length, element_width); const data_bytes = capacity * element_width; return RocList{ - .bytes = utils.allocateWithRefcount(data_bytes, alignment), + .bytes = utils.allocateWithRefcount(data_bytes, alignment, elements_refcounted), .length = length, .capacity_or_alloc_ptr = capacity, }; @@ -198,6 +265,7 @@ pub const RocList = extern struct { alignment: u32, length: usize, element_width: usize, + elements_refcounted: bool, ) RocList { if (length == 0) { return empty(); @@ -205,7 +273,7 @@ pub const RocList = extern struct { const data_bytes = length * element_width; return RocList{ - .bytes = utils.allocateWithRefcount(data_bytes, alignment), + .bytes = utils.allocateWithRefcount(data_bytes, alignment, elements_refcounted), .length = length, .capacity_or_alloc_ptr = length, }; @@ -216,6 +284,8 @@ pub const RocList = extern struct { alignment: u32, new_length: usize, element_width: usize, + elements_refcounted: bool, + inc: Inc, ) RocList { if (self.bytes) |source_ptr| { if (self.isUnique() and !self.isSeamlessSlice()) { @@ -224,13 +294,13 @@ pub const RocList = extern struct { return RocList{ .bytes = self.bytes, .length = new_length, .capacity_or_alloc_ptr = capacity }; } else { const new_capacity = utils.calculateCapacity(capacity, new_length, element_width); - const new_source = utils.unsafeReallocate(source_ptr, alignment, capacity, new_capacity, element_width); + const new_source = 
utils.unsafeReallocate(source_ptr, alignment, capacity, new_capacity, element_width, elements_refcounted); return RocList{ .bytes = new_source, .length = new_length, .capacity_or_alloc_ptr = new_capacity }; } } - return self.reallocateFresh(alignment, new_length, element_width); + return self.reallocateFresh(alignment, new_length, element_width, elements_refcounted, inc); } - return RocList.allocate(alignment, new_length, element_width); + return RocList.allocate(alignment, new_length, element_width, elements_refcounted); } /// reallocate by explicitly making a new allocation and copying elements over @@ -239,244 +309,52 @@ pub const RocList = extern struct { alignment: u32, new_length: usize, element_width: usize, + elements_refcounted: bool, + inc: Inc, ) RocList { const old_length = self.length; - const result = RocList.allocate(alignment, new_length, element_width); + const result = RocList.allocate(alignment, new_length, element_width, elements_refcounted); - // transfer the memory if (self.bytes) |source_ptr| { + // transfer the memory const dest_ptr = result.bytes orelse unreachable; @memcpy(dest_ptr[0..(old_length * element_width)], source_ptr[0..(old_length * element_width)]); @memset(dest_ptr[(old_length * element_width)..(new_length * element_width)], 0); - } - - self.decref(alignment); - - return result; - } -}; - -const Caller0 = *const fn (?[*]u8, ?[*]u8) callconv(.C) void; -const Caller1 = *const fn (?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void; -const Caller2 = *const fn (?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void; -const Caller3 = *const fn (?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void; -const Caller4 = *const fn (?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void; - -pub fn listMap( - list: RocList, - caller: Caller1, - data: Opaque, - inc_n_data: IncN, - data_is_owned: bool, - alignment: u32, - old_element_width: usize, - new_element_width: usize, -) callconv(.C) RocList { - if (list.bytes) |source_ptr| { - const 
size = list.len(); - var i: usize = 0; - const output = RocList.allocate(alignment, size, new_element_width); - const target_ptr = output.bytes orelse unreachable; - - if (data_is_owned) { - inc_n_data(data, size); - } - - while (i < size) : (i += 1) { - caller(data, source_ptr + (i * old_element_width), target_ptr + (i * new_element_width)); - } - - return output; - } else { - return RocList.empty(); - } -} - -fn decrementTail(list: RocList, start_index: usize, element_width: usize, dec: Dec) void { - if (list.bytes) |source| { - var i = start_index; - while (i < list.len()) : (i += 1) { - const element = source + i * element_width; - dec(element); - } - } -} - -pub fn listMap2( - list1: RocList, - list2: RocList, - caller: Caller2, - data: Opaque, - inc_n_data: IncN, - data_is_owned: bool, - alignment: u32, - a_width: usize, - b_width: usize, - c_width: usize, - dec_a: Dec, - dec_b: Dec, -) callconv(.C) RocList { - const output_length = @min(list1.len(), list2.len()); - - // if the lists don't have equal length, we must consume the remaining elements - // In this case we consume by (recursively) decrementing the elements - decrementTail(list1, output_length, a_width, dec_a); - decrementTail(list2, output_length, b_width, dec_b); - - if (data_is_owned) { - inc_n_data(data, output_length); - } - - if (list1.bytes) |source_a| { - if (list2.bytes) |source_b| { - const output = RocList.allocate(alignment, output_length, c_width); - const target_ptr = output.bytes orelse unreachable; - - var i: usize = 0; - while (i < output_length) : (i += 1) { - const element_a = source_a + i * a_width; - const element_b = source_b + i * b_width; - const target = target_ptr + i * c_width; - caller(data, element_a, element_b, target); - } - - return output; - } else { - return RocList.empty(); - } - } else { - return RocList.empty(); - } -} - -pub fn listMap3( - list1: RocList, - list2: RocList, - list3: RocList, - caller: Caller3, - data: Opaque, - inc_n_data: IncN, - data_is_owned: 
bool, - alignment: u32, - a_width: usize, - b_width: usize, - c_width: usize, - d_width: usize, - dec_a: Dec, - dec_b: Dec, - dec_c: Dec, -) callconv(.C) RocList { - const smaller_length = @min(list1.len(), list2.len()); - const output_length = @min(smaller_length, list3.len()); - - decrementTail(list1, output_length, a_width, dec_a); - decrementTail(list2, output_length, b_width, dec_b); - decrementTail(list3, output_length, c_width, dec_c); - - if (data_is_owned) { - inc_n_data(data, output_length); - } - - if (list1.bytes) |source_a| { - if (list2.bytes) |source_b| { - if (list3.bytes) |source_c| { - const output = RocList.allocate(alignment, output_length, d_width); - const target_ptr = output.bytes orelse unreachable; + // Increment refcount of all elements now in a new list. + if (elements_refcounted) { var i: usize = 0; - while (i < output_length) : (i += 1) { - const element_a = source_a + i * a_width; - const element_b = source_b + i * b_width; - const element_c = source_c + i * c_width; - const target = target_ptr + i * d_width; - - caller(data, element_a, element_b, element_c, target); + while (i < old_length) : (i += 1) { + inc(dest_ptr + i * element_width); } - - return output; - } else { - return RocList.empty(); } - } else { - return RocList.empty(); } - } else { - return RocList.empty(); - } -} - -pub fn listMap4( - list1: RocList, - list2: RocList, - list3: RocList, - list4: RocList, - caller: Caller4, - data: Opaque, - inc_n_data: IncN, - data_is_owned: bool, - alignment: u32, - a_width: usize, - b_width: usize, - c_width: usize, - d_width: usize, - e_width: usize, - dec_a: Dec, - dec_b: Dec, - dec_c: Dec, - dec_d: Dec, -) callconv(.C) RocList { - const output_length = @min(@min(list1.len(), list2.len()), @min(list3.len(), list4.len())); - decrementTail(list1, output_length, a_width, dec_a); - decrementTail(list2, output_length, b_width, dec_b); - decrementTail(list3, output_length, c_width, dec_c); - decrementTail(list4, output_length, d_width, 
dec_d); + // Calls utils.decref directly to avoid decrementing the refcount of elements. + utils.decref(self.getAllocationDataPtr(), self.capacity_or_alloc_ptr, alignment, elements_refcounted); - if (data_is_owned) { - inc_n_data(data, output_length); + return result; } +}; - if (list1.bytes) |source_a| { - if (list2.bytes) |source_b| { - if (list3.bytes) |source_c| { - if (list4.bytes) |source_d| { - const output = RocList.allocate(alignment, output_length, e_width); - const target_ptr = output.bytes orelse unreachable; - - var i: usize = 0; - while (i < output_length) : (i += 1) { - const element_a = source_a + i * a_width; - const element_b = source_b + i * b_width; - const element_c = source_c + i * c_width; - const element_d = source_d + i * d_width; - const target = target_ptr + i * e_width; - - caller(data, element_a, element_b, element_c, element_d, target); - } +pub fn listIncref(list: RocList, amount: isize, elements_refcounted: bool) callconv(.C) void { + list.incref(amount, elements_refcounted); +} - return output; - } else { - return RocList.empty(); - } - } else { - return RocList.empty(); - } - } else { - return RocList.empty(); - } - } else { - return RocList.empty(); - } +pub fn listDecref(list: RocList, alignment: u32, element_width: usize, elements_refcounted: bool, dec: Dec) callconv(.C) void { + list.decref(alignment, element_width, elements_refcounted, dec); } pub fn listWithCapacity( capacity: u64, alignment: u32, element_width: usize, + elements_refcounted: bool, + inc: Inc, ) callconv(.C) RocList { - return listReserve(RocList.empty(), alignment, capacity, element_width, .InPlace); + return listReserve(RocList.empty(), alignment, capacity, element_width, elements_refcounted, inc, .InPlace); } pub fn listReserve( @@ -484,6 +362,8 @@ pub fn listReserve( alignment: u32, spare: u64, element_width: usize, + elements_refcounted: bool, + inc: Inc, update_mode: UpdateMode, ) callconv(.C) RocList { const original_len = list.len(); @@ -496,7 +376,7 
@@ pub fn listReserve( // Make sure on 32-bit targets we don't accidentally wrap when we cast our U64 desired capacity to U32. const reserve_size: u64 = @min(desired_cap, @as(u64, @intCast(std.math.maxInt(usize)))); - var output = list.reallocate(alignment, @as(usize, @intCast(reserve_size)), element_width); + var output = list.reallocate(alignment, @as(usize, @intCast(reserve_size)), element_width, elements_refcounted, inc); output.length = original_len; return output; } @@ -506,6 +386,9 @@ pub fn listReleaseExcessCapacity( list: RocList, alignment: u32, element_width: usize, + elements_refcounted: bool, + inc: Inc, + dec: Dec, update_mode: UpdateMode, ) callconv(.C) RocList { const old_length = list.len(); @@ -513,16 +396,27 @@ pub fn listReleaseExcessCapacity( if ((update_mode == .InPlace or list.isUnique()) and list.capacity_or_alloc_ptr == old_length) { return list; } else if (old_length == 0) { - list.decref(alignment); + list.decref(alignment, element_width, elements_refcounted, dec); return RocList.empty(); } else { - const output = RocList.allocateExact(alignment, old_length, element_width); + // TODO: This can be made more efficient, but has to work around the `decref`. + // If the list is unique, we can avoid incrementing and decrementing the live items. + // We can just decrement the dead elements and free the old list. + // This pattern also holds true in other locations like listConcat and listDropAt. 
+ const output = RocList.allocateExact(alignment, old_length, element_width, elements_refcounted); if (list.bytes) |source_ptr| { const dest_ptr = output.bytes orelse unreachable; @memcpy(dest_ptr[0..(old_length * element_width)], source_ptr[0..(old_length * element_width)]); + if (elements_refcounted) { + var i: usize = 0; + while (i < old_length) : (i += 1) { + const element = source_ptr + i * element_width; + inc(element); + } + } } - list.decref(alignment); + list.decref(alignment, element_width, elements_refcounted, dec); return output; } } @@ -546,15 +440,30 @@ pub fn listAppendUnsafe( return output; } -fn listAppend(list: RocList, alignment: u32, element: Opaque, element_width: usize, update_mode: UpdateMode) callconv(.C) RocList { - const with_capacity = listReserve(list, alignment, 1, element_width, update_mode); +fn listAppend( + list: RocList, + alignment: u32, + element: Opaque, + element_width: usize, + elements_refcounted: bool, + inc: Inc, + update_mode: UpdateMode, +) callconv(.C) RocList { + const with_capacity = listReserve(list, alignment, 1, element_width, elements_refcounted, inc, update_mode); return listAppendUnsafe(with_capacity, element, element_width); } -pub fn listPrepend(list: RocList, alignment: u32, element: Opaque, element_width: usize) callconv(.C) RocList { +pub fn listPrepend( + list: RocList, + alignment: u32, + element: Opaque, + element_width: usize, + elements_refcounted: bool, + inc: Inc, +) callconv(.C) RocList { const old_length = list.len(); // TODO: properly wire in update mode. 
- var with_capacity = listReserve(list, alignment, 1, element_width, .Immutable); + var with_capacity = listReserve(list, alignment, 1, element_width, elements_refcounted, inc, .Immutable); with_capacity.length += 1; // can't use one memcpy here because source and target overlap @@ -585,8 +494,15 @@ pub fn listSwap( element_width: usize, index_1: u64, index_2: u64, + elements_refcounted: bool, + inc: Inc, + dec: Dec, update_mode: UpdateMode, ) callconv(.C) RocList { + // Early exit to avoid swapping the same element. + if (index_1 == index_2) + return list; + const size = @as(u64, @intCast(list.len())); if (index_1 == index_2 or index_1 >= size or index_2 >= size) { // Either one index was out of bounds, or both indices were the same; just return @@ -597,7 +513,7 @@ pub fn listSwap( if (update_mode == .InPlace) { break :blk list; } else { - break :blk list.makeUnique(alignment, element_width); + break :blk list.makeUnique(alignment, element_width, elements_refcounted, inc, dec); } }; @@ -615,26 +531,30 @@ pub fn listSublist( list: RocList, alignment: u32, element_width: usize, + elements_refcounted: bool, start_u64: u64, len_u64: u64, dec: Dec, ) callconv(.C) RocList { const size = list.len(); - if (size == 0 or start_u64 >= @as(u64, @intCast(size))) { - // Decrement the reference counts of all elements. - if (list.bytes) |source_ptr| { - var i: usize = 0; - while (i < size) : (i += 1) { - const element = source_ptr + i * element_width; - dec(element); - } - } + if (size == 0 or len_u64 == 0 or start_u64 >= @as(u64, @intCast(size))) { if (list.isUnique()) { + // Decrement the reference counts of all elements. 
+ if (list.bytes) |source_ptr| { + if (elements_refcounted) { + var i: usize = 0; + while (i < size) : (i += 1) { + const element = source_ptr + i * element_width; + dec(element); + } + } + } + var output = list; output.length = 0; return output; } - list.decref(alignment); + list.decref(alignment, element_width, elements_refcounted, dec); return RocList.empty(); } @@ -642,7 +562,6 @@ pub fn listSublist( // This cast is lossless because we would have early-returned already // if `start_u64` were greater than `size`, and `size` fits in usize. const start: usize = @intCast(start_u64); - const drop_start_len = start; // (size - start) can't overflow because we would have early-returned already // if `start` were greater than `size`. @@ -653,32 +572,25 @@ pub fn listSublist( // than something that fit in usize. const keep_len = @as(usize, @intCast(@min(len_u64, @as(u64, @intCast(size_minus_start))))); - // This can't overflow because if len > size_minus_start, - // then keep_len == size_minus_start and this will be 0. - // Alternatively, if len <= size_minus_start, then keep_len will - // be equal to len, meaning keep_len <= size_minus_start too, - // which in turn means this won't overflow. - const drop_end_len = size_minus_start - keep_len; - - // Decrement the reference counts of elements before `start`. - var i: usize = 0; - while (i < drop_start_len) : (i += 1) { - const element = source_ptr + i * element_width; - dec(element); - } - - // Decrement the reference counts of elements after `start + keep_len`. - i = 0; - while (i < drop_end_len) : (i += 1) { - const element = source_ptr + (start + keep_len + i) * element_width; - dec(element); - } - if (start == 0 and list.isUnique()) { + // The list is unique, we actually have to decrement refcounts to elements we aren't keeping around. + // Decrement the reference counts of elements after `start + keep_len`. 
+ if (elements_refcounted) { + const drop_end_len = size_minus_start - keep_len; + var i: usize = 0; + while (i < drop_end_len) : (i += 1) { + const element = source_ptr + (start + keep_len + i) * element_width; + dec(element); + } + } + var output = list; output.length = keep_len; return output; } else { + if (list.isUnique()) { + list.setAllocationElementCount(elements_refcounted); + } const list_alloc_ptr = (@intFromPtr(source_ptr) >> 1) | SEAMLESS_SLICE_BIT; const slice_alloc_ptr = list.capacity_or_alloc_ptr; const slice_mask = list.seamlessSliceMask(); @@ -698,7 +610,9 @@ pub fn listDropAt( list: RocList, alignment: u32, element_width: usize, + elements_refcounted: bool, drop_index_u64: u64, + inc: Inc, dec: Dec, ) callconv(.C) RocList { const size = list.len(); @@ -707,11 +621,11 @@ pub fn listDropAt( // For simplicity, do this by calling listSublist. // In the future, we can test if it is faster to manually inline the important parts here. if (drop_index_u64 == 0) { - return listSublist(list, alignment, element_width, 1, size -| 1, dec); + return listSublist(list, alignment, element_width, elements_refcounted, 1, size -| 1, dec); } else if (drop_index_u64 == size_u64 - 1) { // It's fine if (size - 1) wraps on size == 0 here, // because if size is 0 then it's always fine for this branch to be taken; no // matter what drop_index was, we're size == 0, so empty list will always be returned. - return listSublist(list, alignment, element_width, 0, size -| 1, dec); + return listSublist(list, alignment, element_width, elements_refcounted, 0, size -| 1, dec); } if (list.bytes) |source_ptr| { @@ -723,15 +637,17 @@ pub fn listDropAt( // were >= than `size`, and we know `size` fits in usize. 
const drop_index: usize = @intCast(drop_index_u64); - const element = source_ptr + drop_index * element_width; - dec(element); + if (elements_refcounted) { + const element = source_ptr + drop_index * element_width; + dec(element); + } // NOTE // we need to return an empty list explicitly, // because we rely on the pointer field being null if the list is empty // which also requires duplicating the utils.decref call to spend the RC token if (size < 2) { - list.decref(alignment); + list.decref(alignment, element_width, elements_refcounted, dec); return RocList.empty(); } @@ -750,7 +666,7 @@ pub fn listDropAt( return new_list; } - const output = RocList.allocate(alignment, size - 1, element_width); + const output = RocList.allocate(alignment, size - 1, element_width, elements_refcounted); const target_ptr = output.bytes orelse unreachable; const head_size = drop_index * element_width; @@ -761,7 +677,15 @@ pub fn listDropAt( const tail_size = (size - drop_index - 1) * element_width; @memcpy(tail_target[0..tail_size], tail_source[0..tail_size]); - list.decref(alignment); + if (elements_refcounted) { + var i: usize = 0; + while (i < output.len()) : (i += 1) { + const cloned_elem = target_ptr + i * element_width; + inc(cloned_elem); + } + } + + list.decref(alignment, element_width, elements_refcounted, dec); return output; } else { @@ -811,8 +735,11 @@ pub fn listSortWith( data_is_owned: bool, alignment: u32, element_width: usize, + elements_refcounted: bool, + inc: Inc, + dec: Dec, ) callconv(.C) RocList { - var list = input.makeUnique(alignment, element_width); + var list = input.makeUnique(alignment, element_width, elements_refcounted, inc, dec); if (data_is_owned) { inc_n_data(data, list.len()); @@ -868,37 +795,54 @@ fn swapElements(source_ptr: [*]u8, element_width: usize, index_1: usize, index_2 return swap(element_width, element_at_i, element_at_j); } -pub fn listConcat(list_a: RocList, list_b: RocList, alignment: u32, element_width: usize) callconv(.C) RocList { 
+pub fn listConcat( + list_a: RocList, + list_b: RocList, + alignment: u32, + element_width: usize, + elements_refcounted: bool, + inc: Inc, + dec: Dec, +) callconv(.C) RocList { // NOTE we always use list_a! because it is owned, we must consume it, and it may have unused capacity if (list_b.isEmpty()) { if (list_a.getCapacity() == 0) { // a could be a seamless slice, so we still need to decref. - list_a.decref(alignment); + list_a.decref(alignment, element_width, elements_refcounted, dec); return list_b; } else { // we must consume this list. Even though it has no elements, it could still have capacity - list_b.decref(alignment); + list_b.decref(alignment, element_width, elements_refcounted, dec); return list_a; } } else if (list_a.isUnique()) { const total_length: usize = list_a.len() + list_b.len(); - const resized_list_a = list_a.reallocate(alignment, total_length, element_width); + const resized_list_a = list_a.reallocate(alignment, total_length, element_width, elements_refcounted, inc); // These must exist, otherwise, the lists would have been empty. const source_a = resized_list_a.bytes orelse unreachable; const source_b = list_b.bytes orelse unreachable; @memcpy(source_a[(list_a.len() * element_width)..(total_length * element_width)], source_b[0..(list_b.len() * element_width)]); + // Increment refcount of all cloned elements. + if (elements_refcounted) { + var i: usize = 0; + while (i < list_b.len()) : (i += 1) { + const cloned_elem = source_b + i * element_width; + inc(cloned_elem); + } + } + // decrement list b. 
- list_b.decref(alignment); + list_b.decref(alignment, element_width, elements_refcounted, dec); return resized_list_a; } else if (list_b.isUnique()) { const total_length: usize = list_a.len() + list_b.len(); - const resized_list_b = list_b.reallocate(alignment, total_length, element_width); + const resized_list_b = list_b.reallocate(alignment, total_length, element_width, elements_refcounted, inc); // These must exist, otherwise, the lists would have been empty. const source_a = list_a.bytes orelse unreachable; @@ -912,14 +856,23 @@ pub fn listConcat(list_a: RocList, list_b: RocList, alignment: u32, element_widt mem.copyBackwards(u8, source_b[byte_count_a .. byte_count_a + byte_count_b], source_b[0..byte_count_b]); @memcpy(source_b[0..byte_count_a], source_a[0..byte_count_a]); + // Increment refcount of all cloned elements. + if (elements_refcounted) { + var i: usize = 0; + while (i < list_a.len()) : (i += 1) { + const cloned_elem = source_a + i * element_width; + inc(cloned_elem); + } + } + // decrement list a. - list_a.decref(alignment); + list_a.decref(alignment, element_width, elements_refcounted, dec); return resized_list_b; } const total_length: usize = list_a.len() + list_b.len(); - const output = RocList.allocate(alignment, total_length, element_width); + const output = RocList.allocate(alignment, total_length, element_width, elements_refcounted); // These must exist, otherwise, the lists would have been empty. const target = output.bytes orelse unreachable; @@ -929,9 +882,23 @@ pub fn listConcat(list_a: RocList, list_b: RocList, alignment: u32, element_widt @memcpy(target[0..(list_a.len() * element_width)], source_a[0..(list_a.len() * element_width)]); @memcpy(target[(list_a.len() * element_width)..(total_length * element_width)], source_b[0..(list_b.len() * element_width)]); + // Increment refcount of all cloned elements. 
+ if (elements_refcounted) { + var i: usize = 0; + while (i < list_a.len()) : (i += 1) { + const cloned_elem = source_a + i * element_width; + inc(cloned_elem); + } + i = 0; + while (i < list_b.len()) : (i += 1) { + const cloned_elem = source_b + i * element_width; + inc(cloned_elem); + } + } + // decrement list a and b. - list_a.decref(alignment); - list_b.decref(alignment); + list_a.decref(alignment, element_width, elements_refcounted, dec); + list_b.decref(alignment, element_width, elements_refcounted, dec); return output; } @@ -959,6 +926,9 @@ pub fn listReplace( index: u64, element: Opaque, element_width: usize, + elements_refcounted: bool, + inc: Inc, + dec: Dec, out_element: ?[*]u8, ) callconv(.C) RocList { // INVARIANT: bounds checking happens on the roc side @@ -968,7 +938,8 @@ pub fn listReplace( // so we don't do a bounds check here. Hence, the list is also non-empty, // because inserting into an empty list is always out of bounds, // and it's always safe to cast index to usize. 
- return listReplaceInPlaceHelp(list.makeUnique(alignment, element_width), @as(usize, @intCast(index)), element, element_width, out_element); + // because inserting into an empty list is always out of bounds + return listReplaceInPlaceHelp(list.makeUnique(alignment, element_width, elements_refcounted, inc, dec), @as(usize, @intCast(index)), element, element_width, out_element); } inline fn listReplaceInPlaceHelp( @@ -1000,8 +971,11 @@ pub fn listClone( list: RocList, alignment: u32, element_width: usize, + elements_refcounted: bool, + inc: Inc, + dec: Dec, ) callconv(.C) RocList { - return list.makeUnique(alignment, element_width); + return list.makeUnique(alignment, element_width, elements_refcounted, inc, dec); } pub fn listCapacity( @@ -1013,23 +987,56 @@ pub fn listCapacity( pub fn listAllocationPtr( list: RocList, ) callconv(.C) ?[*]u8 { - return list.getAllocationPtr(); + return list.getAllocationDataPtr(); } +fn rcNone(_: ?[*]u8) callconv(.C) void {} + test "listConcat: non-unique with unique overlapping" { - var nonUnique = RocList.fromSlice(u8, ([_]u8{1})[0..]); + var nonUnique = RocList.fromSlice(u8, ([_]u8{1})[0..], false); const bytes: [*]u8 = @as([*]u8, @ptrCast(nonUnique.bytes)); const ptr_width = @sizeOf(usize); const refcount_ptr = @as([*]isize, @ptrCast(@as([*]align(ptr_width) u8, @alignCast(bytes)) - ptr_width)); utils.increfRcPtrC(&refcount_ptr[0], 1); - defer nonUnique.decref(@sizeOf(u8)); // listConcat will dec the other refcount + defer nonUnique.decref(@alignOf(u8), @sizeOf(u8), false, rcNone); // listConcat will dec the other refcount - var unique = RocList.fromSlice(u8, ([_]u8{ 2, 3, 4 })[0..]); - defer unique.decref(@sizeOf(u8)); + var unique = RocList.fromSlice(u8, ([_]u8{ 2, 3, 4 })[0..], false); + defer unique.decref(@alignOf(u8), @sizeOf(u8), false, rcNone); - var concatted = listConcat(nonUnique, unique, 1, 1); - var wanted = RocList.fromSlice(u8, ([_]u8{ 1, 2, 3, 4 })[0..]); - defer wanted.decref(@sizeOf(u8)); + var concatted = 
listConcat(nonUnique, unique, 1, 1, false, rcNone, rcNone); + var wanted = RocList.fromSlice(u8, ([_]u8{ 1, 2, 3, 4 })[0..], false); + defer wanted.decref(@alignOf(u8), @sizeOf(u8), false, rcNone); try expect(concatted.eql(wanted)); } + +pub fn listConcatUtf8( + list: RocList, + string: str.RocStr, +) callconv(.C) RocList { + if (string.len() == 0) { + return list; + } else { + const combined_length = list.len() + string.len(); + + // List U8 has alignment 1 and element_width 1 + const result = list.reallocate(1, combined_length, 1, false, &rcNone); + // We just allocated combined_length, which is > 0 because string.len() > 0 + var bytes = result.bytes orelse unreachable; + @memcpy(bytes[list.len()..combined_length], string.asU8ptr()[0..string.len()]); + + return result; + } +} + +test "listConcatUtf8" { + const list = RocList.fromSlice(u8, &[_]u8{ 1, 2, 3, 4 }, false); + defer list.decref(1, 1, false, &rcNone); + const string_bytes = "🐦"; + const string = str.RocStr.init(string_bytes, string_bytes.len); + defer string.decref(); + const ret = listConcatUtf8(list, string); + const expected = RocList.fromSlice(u8, &[_]u8{ 1, 2, 3, 4, 240, 159, 144, 166 }, false); + defer expected.decref(1, 1, false, &rcNone); + try expect(ret.eql(expected)); +} diff --git a/platform/roc/num.zig b/platform/roc/num.zig new file mode 100644 index 0000000..f980b64 --- /dev/null +++ b/platform/roc/num.zig @@ -0,0 +1,670 @@ +const std = @import("std"); +const math = std.math; +const RocList = @import("list.zig").RocList; +const RocStr = @import("str.zig").RocStr; +const WithOverflow = @import("utils.zig").WithOverflow; +const Ordering = @import("utils.zig").Ordering; +const roc_panic = @import("panic.zig").panic_help; + +pub fn NumParseResult(comptime T: type) type { + // on the roc side we sort by alignment; putting the errorcode last + // always works out (no number with smaller alignment than 1) + return extern struct { + value: T, + errorcode: u8, // 0 indicates success + }; +} + +pub 
const F32Parts = extern struct { + fraction: u32, + exponent: u8, + sign: bool, +}; + +pub const F64Parts = extern struct { + fraction: u64, + exponent: u16, + sign: bool, +}; + +pub const U256 = struct { + hi: u128, + lo: u128, +}; + +pub fn mul_u128(a: u128, b: u128) U256 { + var hi: u128 = undefined; + var lo: u128 = undefined; + + const bits_in_dword_2: u32 = 64; + const lower_mask: u128 = math.maxInt(u128) >> bits_in_dword_2; + + lo = (a & lower_mask) * (b & lower_mask); + + var t = lo >> bits_in_dword_2; + + lo &= lower_mask; + + t += (a >> bits_in_dword_2) * (b & lower_mask); + + lo += (t & lower_mask) << bits_in_dword_2; + + hi = t >> bits_in_dword_2; + + t = lo >> bits_in_dword_2; + + lo &= lower_mask; + + t += (b >> bits_in_dword_2) * (a & lower_mask); + + lo += (t & lower_mask) << bits_in_dword_2; + + hi += t >> bits_in_dword_2; + + hi += (a >> bits_in_dword_2) * (b >> bits_in_dword_2); + + return .{ .hi = hi, .lo = lo }; +} + +pub fn exportParseInt(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(buf: RocStr) callconv(.C) NumParseResult(T) { + // a radix of 0 will make zig determine the radix from the prefix: + // * A prefix of "0b" implies radix=2, + // * A prefix of "0o" implies radix=8, + // * A prefix of "0x" implies radix=16, + // * Otherwise radix=10 is assumed. 
+ const radix = 0; + if (std.fmt.parseInt(T, buf.asSlice(), radix)) |success| { + return .{ .errorcode = 0, .value = success }; + } else |_| { + return .{ .errorcode = 1, .value = 0 }; + } + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportParseFloat(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(buf: RocStr) callconv(.C) NumParseResult(T) { + if (std.fmt.parseFloat(T, buf.asSlice())) |success| { + return .{ .errorcode = 0, .value = success }; + } else |_| { + return .{ .errorcode = 1, .value = 0 }; + } + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportNumToFloatCast(comptime T: type, comptime F: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(x: T) callconv(.C) F { + return @floatFromInt(x); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportPow(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(base: T, exp: T) callconv(.C) T { + return std.math.pow(T, base, exp); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportIsNan(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) bool { + return std.math.isNan(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportIsInfinite(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) bool { + return std.math.isInf(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportIsFinite(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) bool { + return std.math.isFinite(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), 
.linkage = .Strong }); +} + +pub fn exportAsin(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) T { + return std.math.asin(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportAcos(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) T { + return std.math.acos(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportAtan(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) T { + return std.math.atan(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportSin(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) T { + return math.sin(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportCos(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) T { + return math.cos(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportTan(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) T { + return math.tan(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportLog(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) T { + return @log(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportFAbs(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) T { + return @fabs(input); + } + }.func; + @export(f, .{ .name = name ++ 
@typeName(T), .linkage = .Strong }); +} + +pub fn exportSqrt(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: T) callconv(.C) T { + return math.sqrt(input); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportRound(comptime F: type, comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: F) callconv(.C) T { + return @as(T, @intFromFloat((math.round(input)))); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportFloor(comptime F: type, comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: F) callconv(.C) T { + return @as(T, @intFromFloat((math.floor(input)))); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportCeiling(comptime F: type, comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: F) callconv(.C) T { + return @as(T, @intFromFloat((math.ceil(input)))); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportDivCeil(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(a: T, b: T) callconv(.C) T { + return math.divCeil(T, a, b) catch { + roc_panic("Integer division by 0!", 0); + }; + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn ToIntCheckedResult(comptime T: type) type { + // On the Roc side we sort by alignment; putting the errorcode last + // always works out (no number with smaller alignment than 1). 
+ return extern struct { + value: T, + out_of_bounds: bool, + }; +} + +pub fn exportToIntCheckingMax(comptime From: type, comptime To: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: From) callconv(.C) ToIntCheckedResult(To) { + if (input > std.math.maxInt(To)) { + return .{ .out_of_bounds = true, .value = 0 }; + } + return .{ .out_of_bounds = false, .value = @as(To, @intCast(input)) }; + } + }.func; + @export(f, .{ .name = name ++ @typeName(From), .linkage = .Strong }); +} + +pub fn exportToIntCheckingMaxAndMin(comptime From: type, comptime To: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(input: From) callconv(.C) ToIntCheckedResult(To) { + if (input > std.math.maxInt(To) or input < std.math.minInt(To)) { + return .{ .out_of_bounds = true, .value = 0 }; + } + return .{ .out_of_bounds = false, .value = @as(To, @intCast(input)) }; + } + }.func; + @export(f, .{ .name = name ++ @typeName(From), .linkage = .Strong }); +} + +fn isMultipleOf(comptime T: type, lhs: T, rhs: T) bool { + if (rhs == 0 or rhs == -1) { + // lhs is a multiple of rhs iff + // + // - rhs == -1 + // - both rhs and lhs are 0 + // + // the -1 case is important for overflow reasons `isize::MIN % -1` crashes in rust + return (rhs == -1) or (lhs == 0); + } else { + const rem = @mod(lhs, rhs); + return rem == 0; + } +} + +pub fn exportIsMultipleOf(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) bool { + return @call(.always_inline, isMultipleOf, .{ T, self, other }); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +fn addWithOverflow(comptime T: type, self: T, other: T) WithOverflow(T) { + switch (@typeInfo(T)) { + .Int => { + const answer = @addWithOverflow(self, other); + return .{ .value = answer[0], .has_overflowed = answer[1] == 1 }; + }, + else => { + const answer = self + other; + const overflowed = 
!std.math.isFinite(answer); + return .{ .value = answer, .has_overflowed = overflowed }; + }, + } +} + +pub fn exportAddWithOverflow(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) WithOverflow(T) { + return @call(.always_inline, addWithOverflow, .{ T, self, other }); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportAddSaturatedInt(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) T { + const result = addWithOverflow(T, self, other); + if (result.has_overflowed) { + // We can unambiguously tell which way it wrapped, because we have N+1 bits including the overflow bit + if (result.value >= 0 and @typeInfo(T).Int.signedness == .signed) { + return std.math.minInt(T); + } else { + return std.math.maxInt(T); + } + } else { + return result.value; + } + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportAddOrPanic(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) T { + const result = addWithOverflow(T, self, other); + if (result.has_overflowed) { + roc_panic("Integer addition overflowed!", 0); + } else { + return result.value; + } + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +fn subWithOverflow(comptime T: type, self: T, other: T) WithOverflow(T) { + switch (@typeInfo(T)) { + .Int => { + const answer = @subWithOverflow(self, other); + return .{ .value = answer[0], .has_overflowed = answer[1] == 1 }; + }, + else => { + const answer = self - other; + const overflowed = !std.math.isFinite(answer); + return .{ .value = answer, .has_overflowed = overflowed }; + }, + } +} + +pub fn exportSubWithOverflow(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) 
callconv(.C) WithOverflow(T) { + return @call(.always_inline, subWithOverflow, .{ T, self, other }); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportSubSaturatedInt(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) T { + const result = subWithOverflow(T, self, other); + if (result.has_overflowed) { + if (@typeInfo(T).Int.signedness == .unsigned) { + return 0; + } else if (self < 0) { + return std.math.minInt(T); + } else { + return std.math.maxInt(T); + } + } else { + return result.value; + } + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportSubOrPanic(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) T { + const result = subWithOverflow(T, self, other); + if (result.has_overflowed) { + roc_panic("Integer subtraction overflowed!", 0); + } else { + return result.value; + } + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +fn mulWithOverflow(comptime T: type, comptime W: type, self: T, other: T) WithOverflow(T) { + switch (@typeInfo(T)) { + .Int => { + if (T == i128) { + const is_answer_negative = (self < 0) != (other < 0); + const max = std.math.maxInt(i128); + const min = std.math.minInt(i128); + + const self_u128 = @as(u128, @intCast(math.absInt(self) catch { + if (other == 0) { + return .{ .value = 0, .has_overflowed = false }; + } else if (other == 1) { + return .{ .value = self, .has_overflowed = false }; + } else if (is_answer_negative) { + return .{ .value = min, .has_overflowed = true }; + } else { + return .{ .value = max, .has_overflowed = true }; + } + })); + + const other_u128 = @as(u128, @intCast(math.absInt(other) catch { + if (self == 0) { + return .{ .value = 0, .has_overflowed = false }; + } else if (self == 1) { + return .{ .value = other, .has_overflowed = false 
}; + } else if (is_answer_negative) { + return .{ .value = min, .has_overflowed = true }; + } else { + return .{ .value = max, .has_overflowed = true }; + } + })); + + const answer256: U256 = mul_u128(self_u128, other_u128); + + if (is_answer_negative) { + if (answer256.hi != 0 or answer256.lo > (1 << 127)) { + return .{ .value = min, .has_overflowed = true }; + } else if (answer256.lo == (1 << 127)) { + return .{ .value = min, .has_overflowed = false }; + } else { + return .{ .value = -@as(i128, @intCast(answer256.lo)), .has_overflowed = false }; + } + } else { + if (answer256.hi != 0 or answer256.lo > @as(u128, @intCast(max))) { + return .{ .value = max, .has_overflowed = true }; + } else { + return .{ .value = @as(i128, @intCast(answer256.lo)), .has_overflowed = false }; + } + } + } else { + const self_wide: W = self; + const other_wide: W = other; + const answer: W = self_wide * other_wide; + + const max: W = std.math.maxInt(T); + const min: W = std.math.minInt(T); + + if (answer > max) { + return .{ .value = max, .has_overflowed = true }; + } else if (answer < min) { + return .{ .value = min, .has_overflowed = true }; + } else { + return .{ .value = @as(T, @intCast(answer)), .has_overflowed = false }; + } + } + }, + else => { + const answer = self * other; + const overflowed = !std.math.isFinite(answer); + return .{ .value = answer, .has_overflowed = overflowed }; + }, + } +} + +pub fn exportMulWithOverflow(comptime T: type, comptime W: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) WithOverflow(T) { + return @call(.always_inline, mulWithOverflow, .{ T, W, self, other }); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportMulSaturatedInt(comptime T: type, comptime W: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) T { + const result = @call(.always_inline, mulWithOverflow, .{ T, W, self, 
other }); + return result.value; + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportMulWrappedInt(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) T { + return self *% other; + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn shiftRightZeroFillI128(self: i128, other: u8) callconv(.C) i128 { + if (other & 0b1000_0000 > 0) { + return 0; + } else { + return self >> @as(u7, @intCast(other)); + } +} + +pub fn shiftRightZeroFillU128(self: u128, other: u8) callconv(.C) u128 { + if (other & 0b1000_0000 > 0) { + return 0; + } else { + return self >> @as(u7, @intCast(other)); + } +} + +pub fn compareI128(self: i128, other: i128) callconv(.C) Ordering { + if (self == other) { + return Ordering.EQ; + } else if (self < other) { + return Ordering.LT; + } else { + return Ordering.GT; + } +} + +pub fn compareU128(self: u128, other: u128) callconv(.C) Ordering { + if (self == other) { + return Ordering.EQ; + } else if (self < other) { + return Ordering.LT; + } else { + return Ordering.GT; + } +} + +pub fn lessThanI128(self: i128, other: i128) callconv(.C) bool { + return self < other; +} + +pub fn lessThanOrEqualI128(self: i128, other: i128) callconv(.C) bool { + return self <= other; +} + +pub fn greaterThanI128(self: i128, other: i128) callconv(.C) bool { + return self > other; +} + +pub fn greaterThanOrEqualI128(self: i128, other: i128) callconv(.C) bool { + return self >= other; +} + +pub fn lessThanU128(self: u128, other: u128) callconv(.C) bool { + return self < other; +} + +pub fn lessThanOrEqualU128(self: u128, other: u128) callconv(.C) bool { + return self <= other; +} + +pub fn greaterThanU128(self: u128, other: u128) callconv(.C) bool { + return self > other; +} + +pub fn greaterThanOrEqualU128(self: u128, other: u128) callconv(.C) bool { + return self >= other; +} + +pub fn exportMulOrPanic(comptime T: 
type, comptime W: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T, other: T) callconv(.C) T { + const result = @call(.always_inline, mulWithOverflow, .{ T, W, self, other }); + if (result.has_overflowed) { + roc_panic("Integer multiplication overflowed!", 0); + } else { + return result.value; + } + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportCountLeadingZeroBits(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T) callconv(.C) u8 { + return @as(u8, @clz(self)); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportCountTrailingZeroBits(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T) callconv(.C) u8 { + return @as(u8, @ctz(self)); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn exportCountOneBits(comptime T: type, comptime name: []const u8) void { + comptime var f = struct { + fn func(self: T) callconv(.C) u8 { + return @as(u8, @popCount(self)); + } + }.func; + @export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong }); +} + +pub fn f32ToParts(self: f32) callconv(.C) F32Parts { + const u32Value = @as(u32, @bitCast(self)); + return F32Parts{ + .fraction = u32Value & 0x7fffff, + .exponent = @truncate(u32Value >> 23 & 0xff), + .sign = u32Value >> 31 & 1 == 1, + }; +} + +pub fn f64ToParts(self: f64) callconv(.C) F64Parts { + const u64Value = @as(u64, @bitCast(self)); + return F64Parts{ + .fraction = u64Value & 0xfffffffffffff, + .exponent = @truncate(u64Value >> 52 & 0x7ff), + .sign = u64Value >> 63 & 1 == 1, + }; +} + +pub fn f32FromParts(parts: F32Parts) callconv(.C) f32 { + return @as(f32, @bitCast(parts.fraction & 0x7fffff | (@as(u32, parts.exponent) << 23) | (@as(u32, @intFromBool(parts.sign)) << 31))); +} + +pub fn f64FromParts(parts: F64Parts) callconv(.C) f64 { + return 
@as(f64, @bitCast(parts.fraction & 0xfffffffffffff | (@as(u64, parts.exponent & 0x7ff) << 52) | (@as(u64, @intFromBool(parts.sign)) << 63))); +} diff --git a/platform/roc/panic.zig b/platform/roc/panic.zig new file mode 100644 index 0000000..13224a6 --- /dev/null +++ b/platform/roc/panic.zig @@ -0,0 +1,15 @@ +const std = @import("std"); +const RocStr = @import("str.zig").RocStr; + +// Signals to the host that the program has panicked +extern fn roc_panic(msg: *const RocStr, tag_id: u32) callconv(.C) noreturn; + +pub fn panic_help(msg: []const u8, tag_id: u32) noreturn { + var str = RocStr.init(msg.ptr, msg.len); + roc_panic(&str, tag_id); +} + +// must export this explicitly because right now it is not used from zig code +pub fn panic(msg: *const RocStr, alignment: u32) callconv(.C) noreturn { + return roc_panic(msg, alignment); +} diff --git a/platform/roc/result.zig b/platform/roc/result.zig new file mode 100644 index 0000000..e3192f2 --- /dev/null +++ b/platform/roc/result.zig @@ -0,0 +1,18 @@ +pub fn RocResult(comptime T: type, comptime E: type) type { + return extern struct { + payload: RocResultPayload(T, E), + tag: RocResultTag, + }; +} + +pub fn RocResultPayload(comptime T: type, comptime E: type) type { + return extern union { + ok: T, + err: E, + }; +} + +const RocResultTag = enum(u8) { + RocErr = 0, + RocOk = 1, +}; diff --git a/platform/glue/str.zig b/platform/roc/str.zig similarity index 97% rename from platform/glue/str.zig rename to platform/roc/str.zig index 6b93560..1fbac1b 100644 --- a/platform/glue/str.zig +++ b/platform/roc/str.zig @@ -96,7 +96,7 @@ pub const RocStr = extern struct { } fn allocateBig(length: usize, capacity: usize) RocStr { - const first_element = utils.allocateWithRefcount(capacity, @sizeOf(usize)); + const first_element = utils.allocateWithRefcount(capacity, @sizeOf(usize), false); return RocStr{ .bytes = first_element, @@ -172,7 +172,7 @@ pub const RocStr = extern struct { pub fn decref(self: RocStr) void { if 
(!self.isSmallStr()) { - utils.decref(self.getAllocationPtr(), self.capacity_or_alloc_ptr, RocStr.alignment); + utils.decref(self.getAllocationPtr(), self.capacity_or_alloc_ptr, RocStr.alignment, false); } } @@ -212,7 +212,7 @@ pub const RocStr = extern struct { // just return the bytes return str; } else { - const new_str = RocStr.allocateBig(str.length, str.length); + var new_str = RocStr.allocateBig(str.length, str.length); var old_bytes: [*]u8 = @as([*]u8, @ptrCast(str.bytes)); var new_bytes: [*]u8 = @as([*]u8, @ptrCast(new_str.bytes)); @@ -247,6 +247,7 @@ pub const RocStr = extern struct { old_capacity, new_capacity, element_width, + false, ); return RocStr{ .bytes = new_source, .length = new_length, .capacity_or_alloc_ptr = new_capacity }; @@ -273,7 +274,7 @@ pub const RocStr = extern struct { const source_ptr = self.asU8ptr(); const dest_ptr = result.asU8ptrMut(); - std.mem.copyForwards(u8, dest_ptr[0..old_length], source_ptr[0..old_length]); + std.mem.copy(u8, dest_ptr[0..old_length], source_ptr[0..old_length]); @memset(dest_ptr[old_length..new_length], 0); self.decref(); @@ -289,7 +290,7 @@ pub const RocStr = extern struct { const source_ptr = self.asU8ptr(); - std.mem.copyForwards(u8, dest_ptr[0..old_length], source_ptr[0..old_length]); + std.mem.copy(u8, dest_ptr[0..old_length], source_ptr[0..old_length]); @memset(dest_ptr[old_length..new_length], 0); self.decref(); @@ -553,7 +554,7 @@ pub fn strNumberOfBytes(string: RocStr) callconv(.C) usize { // Str.fromInt pub fn exportFromInt(comptime T: type, comptime name: []const u8) void { - const f = comptime struct { + comptime var f = struct { fn func(int: T) callconv(.C) RocStr { return @call(.always_inline, strFromIntHelp, .{ T, int }); } @@ -567,9 +568,9 @@ fn strFromIntHelp(comptime T: type, int: T) RocStr { const size = comptime blk: { // the string representation of the minimum i128 value uses at most 40 characters var buf: [40]u8 = undefined; - const resultMin = std.fmt.bufPrint(&buf, "{}", 
.{std.math.minInt(T)}) catch unreachable; - const resultMax = std.fmt.bufPrint(&buf, "{}", .{std.math.maxInt(T)}) catch unreachable; - const result = if (resultMin.len > resultMax.len) resultMin.len else resultMax.len; + var resultMin = std.fmt.bufPrint(&buf, "{}", .{std.math.minInt(T)}) catch unreachable; + var resultMax = std.fmt.bufPrint(&buf, "{}", .{std.math.maxInt(T)}) catch unreachable; + var result = if (resultMin.len > resultMax.len) resultMin.len else resultMax.len; break :blk result; }; @@ -581,7 +582,7 @@ fn strFromIntHelp(comptime T: type, int: T) RocStr { // Str.fromFloat pub fn exportFromFloat(comptime T: type, comptime name: []const u8) void { - const f = comptime struct { + comptime var f = struct { fn func(float: T) callconv(.C) RocStr { return @call(.always_inline, strFromFloatHelp, .{ T, float }); } @@ -600,7 +601,7 @@ fn strFromFloatHelp(comptime T: type, float: T) RocStr { // Str.split pub fn strSplit(string: RocStr, delimiter: RocStr) callconv(.C) RocList { const segment_count = countSegments(string, delimiter); - const list = RocList.allocate(@alignOf(RocStr), segment_count, @sizeOf(RocStr)); + const list = RocList.allocate(@alignOf(RocStr), segment_count, @sizeOf(RocStr), true); if (list.bytes) |bytes| { const strings = @as([*]RocStr, @ptrCast(@alignCast(bytes))); @@ -661,7 +662,7 @@ test "strSplitHelp: empty delimiter" { strSplitHelp(array_ptr, str, delimiter); - const expected = [1]RocStr{ + var expected = [1]RocStr{ str, }; @@ -695,7 +696,7 @@ test "strSplitHelp: no delimiter" { strSplitHelp(array_ptr, str, delimiter); - const expected = [1]RocStr{ + var expected = [1]RocStr{ str, }; @@ -734,7 +735,7 @@ test "strSplitHelp: empty start" { const one = RocStr.init("a", 1); - const expected = [2]RocStr{ + var expected = [2]RocStr{ RocStr.empty(), one, }; @@ -776,7 +777,7 @@ test "strSplitHelp: empty end" { const one = RocStr.init("1", 1); const two = RocStr.init("2", 1); - const expected = [3]RocStr{ + var expected = [3]RocStr{ one, two, 
RocStr.empty(), }; @@ -812,7 +813,7 @@ test "strSplitHelp: string equals delimiter" { strSplitHelp(array_ptr, str_delimiter, str_delimiter); - const expected = [2]RocStr{ RocStr.empty(), RocStr.empty() }; + var expected = [2]RocStr{ RocStr.empty(), RocStr.empty() }; defer { for (array) |rocStr| { @@ -850,7 +851,7 @@ test "strSplitHelp: delimiter on sides" { const ghi_arr = "ghi"; const ghi = RocStr.init(ghi_arr, ghi_arr.len); - const expected = [3]RocStr{ + var expected = [3]RocStr{ RocStr.empty(), ghi, RocStr.empty(), }; @@ -891,7 +892,7 @@ test "strSplitHelp: three pieces" { const b = RocStr.init("b", 1); const c = RocStr.init("c", 1); - const expected_array = [array_len]RocStr{ + var expected_array = [array_len]RocStr{ a, b, c, }; @@ -927,7 +928,7 @@ test "strSplitHelp: overlapping delimiter 1" { strSplitHelp(array_ptr, str, delimiter); - const expected = [2]RocStr{ + var expected = [2]RocStr{ RocStr.empty(), RocStr.init("a", 1), }; @@ -952,7 +953,7 @@ test "strSplitHelp: overlapping delimiter 2" { strSplitHelp(array_ptr, str, delimiter); - const expected = [3]RocStr{ + var expected = [3]RocStr{ RocStr.empty(), RocStr.empty(), RocStr.empty(), @@ -1363,7 +1364,7 @@ fn strJoinWith(list: RocListStr, separator: RocStr) RocStr { total_size += separator.len() * (len - 1); var result = RocStr.allocate(total_size); - const result_ptr = result.asU8ptrMut(); + var result_ptr = result.asU8ptrMut(); var offset: usize = 0; for (slice[0 .. 
len - 1]) |substr| { @@ -1427,7 +1428,7 @@ inline fn strToBytes(arg: RocStr) RocList { if (length == 0) { return RocList.empty(); } else if (arg.isSmallStr()) { - const ptr = utils.allocateWithRefcount(length, RocStr.alignment); + const ptr = utils.allocateWithRefcount(length, RocStr.alignment, false); @memcpy(ptr[0..length], arg.asU8ptr()[0..length]); @@ -1457,7 +1458,7 @@ pub fn fromUtf8( update_mode: UpdateMode, ) FromUtf8Result { if (list.len() == 0) { - list.decref(1); // Alignment 1 for List U8 + list.decref(@alignOf(u8), @sizeOf(u8), false, rcNone); return FromUtf8Result{ .is_ok = true, .string = RocStr.empty(), @@ -1479,7 +1480,7 @@ pub fn fromUtf8( } else { const temp = errorToProblem(bytes); - list.decref(1); // Alignment 1 for List U8 + list.decref(@alignOf(u8), @sizeOf(u8), false, rcNone); return FromUtf8Result{ .is_ok = false, @@ -1603,7 +1604,7 @@ fn expectOk(result: FromUtf8Result) !void { } fn sliceHelp(bytes: [*]const u8, length: usize) RocList { - var list = RocList.allocate(RocStr.alignment, length, @sizeOf(u8)); + var list = RocList.allocate(RocStr.alignment, length, @sizeOf(u8), false); var list_bytes = list.bytes orelse unreachable; @memcpy(list_bytes[0..length], bytes[0..length]); list.length = length; @@ -1942,7 +1943,7 @@ pub fn strTrimEnd(input_string: RocStr) callconv(.C) RocStr { fn countLeadingWhitespaceBytes(string: RocStr) usize { var byte_count: usize = 0; - const bytes = string.asU8ptr()[0..string.len()]; + var bytes = string.asU8ptr()[0..string.len()]; var iter = unicode.Utf8View.initUnchecked(bytes).iterator(); while (iter.nextCodepoint()) |codepoint| { if (isWhitespace(codepoint)) { @@ -1958,7 +1959,7 @@ fn countLeadingWhitespaceBytes(string: RocStr) usize { fn countTrailingWhitespaceBytes(string: RocStr) usize { var byte_count: usize = 0; - const bytes = string.asU8ptr()[0..string.len()]; + var bytes = string.asU8ptr()[0..string.len()]; var iter = ReverseUtf8View.initUnchecked(bytes).iterator(); while (iter.nextCodepoint()) 
|codepoint| { if (isWhitespace(codepoint)) { @@ -1971,6 +1972,13 @@ fn countTrailingWhitespaceBytes(string: RocStr) usize { return byte_count; } +fn rcNone(_: ?[*]u8) callconv(.C) void {} + +fn decStr(ptr: ?[*]u8) callconv(.C) void { + const str_ptr = @as(*RocStr, @ptrCast(@alignCast(ptr orelse unreachable))); + str_ptr.decref(); +} + /// A backwards version of Utf8View from std.unicode const ReverseUtf8View = struct { bytes: []const u8, diff --git a/platform/glue/utils.zig b/platform/roc/utils.zig similarity index 86% rename from platform/glue/utils.zig rename to platform/roc/utils.zig index 988098c..78d18b9 100644 --- a/platform/glue/utils.zig +++ b/platform/roc/utils.zig @@ -219,31 +219,34 @@ pub fn increfRcPtrC(ptr_to_refcount: *isize, amount: isize) callconv(.C) void { pub fn decrefRcPtrC( bytes_or_null: ?[*]isize, alignment: u32, + elements_refcounted: bool, ) callconv(.C) void { // IMPORTANT: bytes_or_null is this case is expected to be a pointer to the refcount // (NOT the start of the data, or the start of the allocation) // this is of course unsafe, but we trust what we get from the llvm side - const bytes = @as([*]isize, @ptrCast(bytes_or_null)); + var bytes = @as([*]isize, @ptrCast(bytes_or_null)); - return @call(.always_inline, decref_ptr_to_refcount, .{ bytes, alignment }); + return @call(.always_inline, decref_ptr_to_refcount, .{ bytes, alignment, elements_refcounted }); } pub fn decrefCheckNullC( bytes_or_null: ?[*]u8, alignment: u32, + elements_refcounted: bool, ) callconv(.C) void { if (bytes_or_null) |bytes| { const isizes: [*]isize = @as([*]isize, @ptrCast(@alignCast(bytes))); - return @call(.always_inline, decref_ptr_to_refcount, .{ isizes - 1, alignment }); + return @call(.always_inline, decref_ptr_to_refcount, .{ isizes - 1, alignment, elements_refcounted }); } } pub fn decrefDataPtrC( bytes_or_null: ?[*]u8, alignment: u32, + elements_refcounted: bool, ) callconv(.C) void { - const bytes = bytes_or_null orelse return; + var bytes = 
bytes_or_null orelse return; const data_ptr = @intFromPtr(bytes); const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11; @@ -252,14 +255,14 @@ pub fn decrefDataPtrC( const isizes: [*]isize = @as([*]isize, @ptrFromInt(unmasked_ptr)); const rc_ptr = isizes - 1; - return decrefRcPtrC(rc_ptr, alignment); + return decrefRcPtrC(rc_ptr, alignment, elements_refcounted); } pub fn increfDataPtrC( bytes_or_null: ?[*]u8, inc_amount: isize, ) callconv(.C) void { - const bytes = bytes_or_null orelse return; + var bytes = bytes_or_null orelse return; const ptr = @intFromPtr(bytes); const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11; @@ -273,8 +276,9 @@ pub fn increfDataPtrC( pub fn freeDataPtrC( bytes_or_null: ?[*]u8, alignment: u32, + elements_refcounted: bool, ) callconv(.C) void { - const bytes = bytes_or_null orelse return; + var bytes = bytes_or_null orelse return; const ptr = @intFromPtr(bytes); const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11; @@ -283,39 +287,44 @@ pub fn freeDataPtrC( const isizes: [*]isize = @as([*]isize, @ptrFromInt(masked_ptr)); // we always store the refcount right before the data - return freeRcPtrC(isizes - 1, alignment); + return freeRcPtrC(isizes - 1, alignment, elements_refcounted); } pub fn freeRcPtrC( bytes_or_null: ?[*]isize, alignment: u32, + elements_refcounted: bool, ) callconv(.C) void { - const bytes = bytes_or_null orelse return; - return free_ptr_to_refcount(bytes, alignment); + var bytes = bytes_or_null orelse return; + return free_ptr_to_refcount(bytes, alignment, elements_refcounted); } pub fn decref( bytes_or_null: ?[*]u8, data_bytes: usize, alignment: u32, + elements_refcounted: bool, ) void { if (data_bytes == 0) { return; } - const bytes = bytes_or_null orelse return; + var bytes = bytes_or_null orelse return; const isizes: [*]isize = @as([*]isize, @ptrCast(@alignCast(bytes))); - decref_ptr_to_refcount(isizes - 1, alignment); + decref_ptr_to_refcount(isizes - 1, alignment, 
elements_refcounted); } inline fn free_ptr_to_refcount( refcount_ptr: [*]isize, alignment: u32, + elements_refcounted: bool, ) void { if (RC_TYPE == Refcount.none) return; - const extra_bytes = @max(alignment, @sizeOf(usize)); + const ptr_width = @sizeOf(usize); + const required_space: usize = if (elements_refcounted) (2 * ptr_width) else ptr_width; + const extra_bytes = @max(required_space, alignment); const allocation_ptr = @as([*]u8, @ptrCast(refcount_ptr)) - (extra_bytes - @sizeOf(usize)); // NOTE: we don't even check whether the refcount is "infinity" here! @@ -328,7 +337,8 @@ inline fn free_ptr_to_refcount( inline fn decref_ptr_to_refcount( refcount_ptr: [*]isize, - alignment: u32, + element_alignment: u32, + elements_refcounted: bool, ) void { if (RC_TYPE == Refcount.none) return; @@ -336,6 +346,10 @@ inline fn decref_ptr_to_refcount( std.debug.print("| decrement {*}: ", .{refcount_ptr}); } + // Due to RC alignment must take into account pointer size. + const ptr_width = @sizeOf(usize); + const alignment = @max(ptr_width, element_alignment); + // Ensure that the refcount is not whole program lifetime. 
const refcount: isize = refcount_ptr[0]; if (refcount != REFCOUNT_MAX_ISIZE) { @@ -353,13 +367,13 @@ inline fn decref_ptr_to_refcount( } if (refcount == REFCOUNT_ONE_ISIZE) { - free_ptr_to_refcount(refcount_ptr, alignment); + free_ptr_to_refcount(refcount_ptr, alignment, elements_refcounted); } }, Refcount.atomic => { - const last = @atomicRmw(isize, &refcount_ptr[0], std.builtin.AtomicRmwOp.Sub, 1, Monotonic); + var last = @atomicRmw(isize, &refcount_ptr[0], std.builtin.AtomicRmwOp.Sub, 1, Monotonic); if (last == REFCOUNT_ONE_ISIZE) { - free_ptr_to_refcount(refcount_ptr, alignment); + free_ptr_to_refcount(refcount_ptr, alignment, elements_refcounted); } }, Refcount.none => unreachable, @@ -370,7 +384,7 @@ inline fn decref_ptr_to_refcount( pub fn isUnique( bytes_or_null: ?[*]u8, ) callconv(.C) bool { - const bytes = bytes_or_null orelse return true; + var bytes = bytes_or_null orelse return true; const ptr = @intFromPtr(bytes); const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11; @@ -438,25 +452,31 @@ pub inline fn calculateCapacity( pub fn allocateWithRefcountC( data_bytes: usize, element_alignment: u32, + elements_refcounted: bool, ) callconv(.C) [*]u8 { - return allocateWithRefcount(data_bytes, element_alignment); + return allocateWithRefcount(data_bytes, element_alignment, elements_refcounted); } pub fn allocateWithRefcount( data_bytes: usize, element_alignment: u32, + elements_refcounted: bool, ) [*]u8 { + // If the element type is refcounted, we need to also allocate space to store the element count on the heap. + // This is used so that a seamless slice can de-allocate the underlying list type. 
const ptr_width = @sizeOf(usize); const alignment = @max(ptr_width, element_alignment); - const length = alignment + data_bytes; + const required_space: usize = if (elements_refcounted) (2 * ptr_width) else ptr_width; + const extra_bytes = @max(required_space, element_alignment); + const length = extra_bytes + data_bytes; - const new_bytes: [*]u8 = alloc(length, alignment) orelse unreachable; + var new_bytes: [*]u8 = alloc(length, alignment) orelse unreachable; if (DEBUG_ALLOC and builtin.target.cpu.arch != .wasm32) { std.debug.print("+ allocated {*} ({} bytes with alignment {})\n", .{ new_bytes, data_bytes, alignment }); } - const data_ptr = new_bytes + alignment; + const data_ptr = new_bytes + extra_bytes; const refcount_ptr = @as([*]usize, @ptrCast(@as([*]align(ptr_width) u8, @alignCast(data_ptr)) - ptr_width)); refcount_ptr[0] = if (RC_TYPE == Refcount.none) REFCOUNT_MAX_ISIZE else REFCOUNT_ONE; @@ -474,11 +494,14 @@ pub fn unsafeReallocate( old_length: usize, new_length: usize, element_width: usize, + elements_refcounted: bool, ) [*]u8 { - const align_width: usize = @max(alignment, @sizeOf(usize)); + const ptr_width: usize = @sizeOf(usize); + const required_space: usize = if (elements_refcounted) (2 * ptr_width) else ptr_width; + const extra_bytes = @max(required_space, alignment); - const old_width = align_width + old_length * element_width; - const new_width = align_width + new_length * element_width; + const old_width = extra_bytes + old_length * element_width; + const new_width = extra_bytes + new_length * element_width; if (old_width >= new_width) { return source_ptr; @@ -486,10 +509,10 @@ pub fn unsafeReallocate( // TODO handle out of memory // NOTE realloc will dealloc the original allocation - const old_allocation = source_ptr - align_width; + const old_allocation = source_ptr - extra_bytes; const new_allocation = realloc(old_allocation, new_width, old_width, alignment); - const new_source = @as([*]u8, @ptrCast(new_allocation)) + align_width; + const 
new_source = @as([*]u8, @ptrCast(new_allocation)) + extra_bytes; return new_source; } @@ -506,14 +529,14 @@ pub const UpdateMode = enum(u8) { test "increfC, refcounted data" { var mock_rc: isize = REFCOUNT_ONE_ISIZE + 17; - const ptr_to_refcount: *isize = &mock_rc; + var ptr_to_refcount: *isize = &mock_rc; increfRcPtrC(ptr_to_refcount, 2); try std.testing.expectEqual(mock_rc, REFCOUNT_ONE_ISIZE + 19); } test "increfC, static data" { var mock_rc: isize = REFCOUNT_MAX_ISIZE; - const ptr_to_refcount: *isize = &mock_rc; + var ptr_to_refcount: *isize = &mock_rc; increfRcPtrC(ptr_to_refcount, 2); try std.testing.expectEqual(mock_rc, REFCOUNT_MAX_ISIZE); }