From 4516869e37459b91569a0f7d879d535ed929b41f Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Thu, 16 Aug 2018 19:59:47 +0200 Subject: [PATCH 01/32] Add a codegen sub-project to convert the LIR to machine code using Cranelift. --- Cargo.toml | 7 +- bin/Cargo.toml | 6 +- codegen/Cargo.toml | 17 ++ codegen/src/error.rs | 39 +++ codegen/src/exec_alloc.rs | 70 ++++++ codegen/src/lib.rs | 101 ++++++++ codegen/src/lower.rs | 147 +++++++++++ default.nix | 6 +- lir/Cargo.toml | 16 ++ lir/src/lib.rs | 495 ++++++++++++++++++++++++++++++++++++++ 10 files changed, 897 insertions(+), 7 deletions(-) create mode 100644 codegen/Cargo.toml create mode 100644 codegen/src/error.rs create mode 100644 codegen/src/exec_alloc.rs create mode 100644 codegen/src/lib.rs create mode 100644 codegen/src/lower.rs create mode 100644 lir/Cargo.toml create mode 100644 lir/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 35c6273..414996c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,12 @@ [package] -name = "holyjit_pkg" +name = "holyjit" version = "0.0.0" authors = [ "Nicolas B. Pierron " ] [workspace] members = [ + "lir", + "codegen", "lib", "plugin", "bin" @@ -12,8 +14,7 @@ members = [ [dependencies] holyjit_lib = { path = "./lib" } -holyjit_plugin = { path = "./plugin" } -holyjit = { path = "./bin" } +holyjit_bin = { path = "./bin" } # Before running any tests or examples, make sure to set the RUSTC_WRAPPER as # follow: diff --git a/bin/Cargo.toml b/bin/Cargo.toml index d26e474..acf9348 100644 --- a/bin/Cargo.toml +++ b/bin/Cargo.toml @@ -1,7 +1,11 @@ [package] -name = "holyjit" +name = "holyjit_bin" version = "0.0.0" authors = ["Nicolas B. 
Pierron "] [dependencies] holyjit_plugin = { path = "../plugin" } + +[[bin]] +name = "holyjit" +path = "src/main.rs" \ No newline at end of file diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml new file mode 100644 index 0000000..afb0f7a --- /dev/null +++ b/codegen/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "holyjit_codegen" +version = "0.0.0" +authors = [ "Nicolas B. Pierron " ] + +[dependencies] +mmap = "*" +region = "0.3" +cranelift-codegen = "0.18" +cranelift-native = "0.18" +cranelift-frontend = "0.18" +holyjit_lir = { path = "../lir" } + +[lib] +name = "holyjit_codegen" +path = "src/lib.rs" + diff --git a/codegen/src/error.rs b/codegen/src/error.rs new file mode 100644 index 0000000..3e08f40 --- /dev/null +++ b/codegen/src/error.rs @@ -0,0 +1,39 @@ +use codegen::CodegenError; +use mmap::MapError; +use region; + +/// Simple wrapper type to wrap any lowerring error. +pub type LowerResult = Result; + +/// Lowering error can be any error involve either during code generation, or +/// during the allocation of code to memory. +#[derive(Debug)] +pub enum LowerError { + CodeGen(CodegenError), + Map(MapError), + Protect, +} + +// TODO: impl Error for LowerError +// TODO: impl Display for LowerError + +impl From for LowerError { + /// Implictly convert Cranelift codegen errors into LowerError. + fn from(err: CodegenError) -> LowerError { + LowerError::CodeGen(err) + } +} + +impl From for LowerError { + /// Implictly convert mmap errors into LowerError. + fn from(err: MapError) -> LowerError { + LowerError::Map(err) + } +} + +impl From for LowerError { + /// Implictly convert region errors into LowerError. 
+ fn from(_err: region::Error) -> LowerError { + LowerError::Protect + } +} diff --git a/codegen/src/exec_alloc.rs b/codegen/src/exec_alloc.rs new file mode 100644 index 0000000..3751038 --- /dev/null +++ b/codegen/src/exec_alloc.rs @@ -0,0 +1,70 @@ +/// This module is used as part of the lowering to allocate memory pages, copy +/// the code into them, patch the code based on relocation informations, and map +/// these pages as executable. +use mmap::*; +use region; +use region::Protection; +use error::LowerResult; +use codegen::binemit::{RelocSink, CodeOffset, Reloc, Addend}; +use codegen::ir; + +/// This structure hold the code while it is being written down in memory, and +/// mutated based on relocation informations provided by Cranelift. +pub struct WrittableCode { + page: MemoryMap, +} + +/// This structure hold the code in a executable-only state (readable if +/// needed), and with no way to convert it back to a writtable format. The +/// implicit Drop trait implemented for it will discard the executable code as +/// soon as there is no more references to it. +pub struct ExecutableCode { + page: MemoryMap, +} + +impl WrittableCode { + /// Allocate a new set of pages in which we can copy the result of a + /// compilation into. + pub fn with_capacity(size: usize) -> LowerResult { + Ok(WrittableCode { + page: MemoryMap::new(size, &[ MapOption::MapWritable ])?, + }) + } + + /// Get a mutable pointer to write the code into. + pub fn as_ptr(&mut self) -> *mut u8 { + // Note: Based on mmap.data(), we only require a &self argument. + self.page.data() + } + + /// Map the pages as executable, and replace the write permission by an + /// executable permission. Return the executable code. 
+ pub fn make_executable(self, _reloc: NullRelocSink, _trap: NullTrapSink) -> LowerResult { + let WrittableCode { page } = self; + unsafe { region::protect(page.data(), page.len(), Protection::ReadExecute)?; } + Ok(ExecutableCode { page }) + } +} + +impl ExecutableCode { + pub fn as_ptr(&self) -> *const u8 { + self.page.data() + } +} + +pub use codegen::binemit::NullTrapSink; +pub struct NullRelocSink {} + +impl RelocSink for NullRelocSink { + fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) { + unimplemented!(); + } + + fn reloc_external(&mut self, _offset: CodeOffset, _reloc: Reloc, _name: &ir::ExternalName, _addend: Addend) { + unimplemented!(); + } + + fn reloc_jt(&mut self, _offset: CodeOffset, _reloc: Reloc, _jt: ir::JumpTable) { + unimplemented!(); + } +} diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs new file mode 100644 index 0000000..a872d35 --- /dev/null +++ b/codegen/src/lib.rs @@ -0,0 +1,101 @@ +extern crate mmap; +extern crate region; +extern crate cranelift_codegen as codegen; +extern crate cranelift_frontend as frontend; +extern crate cranelift_native as native; +extern crate holyjit_lir as lir; + +mod lower; +mod exec_alloc; +pub mod error; + +use codegen::settings::Configurable; +use exec_alloc::{WrittableCode, ExecutableCode}; + +/// This is a code generator context, which is used to lower a LIR Unit into +/// machine code. +pub struct Context { + ctx: codegen::Context, + isa: Box, +} + +/// Result of a compiled lir::Unit. +pub struct JitCode { + code: ExecutableCode, +} + +impl Context { + /// Create a lowering (code generator and executable page allocation) + /// context for the architecture on which this code is running. + pub fn new() -> Self { + // Extract configuration builders tuned for the architecture on which + // this code is running. + let (mut settings_bld, isa_bld) = native::builders().unwrap(); + // Optimize for compilation time. 
+ settings_bld.set("opt_level", "fastest").unwrap(); + // Check the emitted Cranelift IR. + settings_bld.set("enable_verifier", "1").unwrap(); + // Use Rust call convention. + settings_bld.set("call_conv", "system_v").unwrap(); + // Generate position independent code. + settings_bld.set("is_pic", "1").unwrap(); + // No need to generate a single return per function. + settings_bld.set("return_at_end", "0").unwrap(); + // Do not attempt to avoid trap on divisions. (TODO: double check that + // this is what rust expects) + settings_bld.set("avoid_div_traps", "0").unwrap(); + let flags = codegen::settings::Flags::new(settings_bld); + let isa = isa_bld.finish(flags); + + Self { + ctx: codegen::Context::new(), + isa: isa, + } + } + + /// Given an HolyJIT LIR Unit, convert it to a Cranelift function in order + /// to generate the corresponding bytes, then allocate memory pages and map + /// them as executable. + pub fn compile(&mut self, unit: &lir::Unit) -> error::LowerResult { + let &mut Context { ref mut ctx, ref isa, .. 
} = self; + ctx.func = lower::convert(unit)?; + let mut reloc_sink = exec_alloc::NullRelocSink {}; + let mut trap_sink = exec_alloc::NullTrapSink {}; + let code_size = ctx.compile(isa.as_ref())?; + let mut code = WrittableCode::with_capacity(code_size as usize)?; + unsafe { + ctx.emit_to_memory(isa.as_ref(), code.as_ptr(), &mut reloc_sink, &mut trap_sink); + } + let code = code.make_executable(reloc_sink, trap_sink)?; + Ok(JitCode { code }) + } +} + +impl JitCode { + pub fn as_ptr(&self) -> *const u8 { + self.code.as_ptr() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::mem; + + #[test] + fn check_create_context() { + let _ctx = Context::new(); + assert!(true); + } + + #[test] + fn check_add1_unit() { + let mut ctx = Context::new(); + let simple_unit = lir::Unit::new(lir::UnitId::Function(0)); + let code = ctx.compile(&simple_unit).unwrap(); + let add1 : fn(i32) -> i32 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add1(12), 13); + } +} diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs new file mode 100644 index 0000000..6d6ac72 --- /dev/null +++ b/codegen/src/lower.rs @@ -0,0 +1,147 @@ +use frontend::{FunctionBuilderContext, FunctionBuilder, Variable}; + +use codegen::entity::EntityRef; +use codegen::ir::{ExternalName, Function, Signature, AbiParam, InstBuilder}; +use codegen::ir::types::*; +use codegen::settings::{self, CallConv}; +use codegen::verifier::verify_function; + +use lir; +use error; + +/// Convert a LIR Unit into a Cranelift IR (Function). 
+pub fn convert(_unit: &lir::Unit) -> error::LowerResult { + let mut sig = Signature::new(CallConv::SystemV); + sig.returns.push(AbiParam::new(I32)); + sig.params.push(AbiParam::new(I32)); + let mut fn_builder_ctx = FunctionBuilderContext::::new(); + let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig); + { + let mut builder = FunctionBuilder::::new(&mut func, &mut fn_builder_ctx); + + let block0 = builder.create_ebb(); + let x = Variable::new(0); + let y = Variable::new(1); + let z = Variable::new(2); + builder.declare_var(x, I32); + builder.declare_var(y, I32); + builder.declare_var(z, I32); + builder.append_ebb_params_for_function_params(block0); + + builder.switch_to_block(block0); + builder.seal_block(block0); + { + let tmp = builder.ebb_params(block0)[0]; // the first function parameter + builder.def_var(x, tmp); + } + { + let tmp = builder.ins().iconst(I32, 1); + builder.def_var(y, tmp); + } + { + let arg1 = builder.use_var(x); + let arg2 = builder.use_var(y); + let tmp = builder.ins().iadd(arg1, arg2); + builder.def_var(z, tmp); + } + { + let arg = builder.use_var(z); + builder.ins().return_(&[arg]); + } + + builder.finalize(); + } + + let flags = settings::Flags::new(settings::builder()); + let res = verify_function(&func, &flags); + println!("{}", func.display(None)); + if let Err(errors) = res { + panic!("{}", errors); + } + + Ok(func) + /* + let mut sig = Signature::new(CallConv::SystemV); + sig.returns.push(AbiParam::new(I32)); + sig.params.push(AbiParam::new(I32)); + let mut fn_builder_ctx = FunctionBuilderContext::::new(); + let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig); + { + let mut builder = FunctionBuilder::::new(&mut func, &mut fn_builder_ctx); + + let block0 = builder.create_ebb(); + let block1 = builder.create_ebb(); + let block2 = builder.create_ebb(); + let x = Variable::new(0); + let y = Variable::new(1); + let z = Variable::new(2); + builder.declare_var(x, I32); + builder.declare_var(y, 
I32); + builder.declare_var(z, I32); + builder.append_ebb_params_for_function_params(block0); + + builder.switch_to_block(block0); + builder.seal_block(block0); + { + let tmp = builder.ebb_params(block0)[0]; // the first function parameter + builder.def_var(x, tmp); + } + { + let tmp = builder.ins().iconst(I32, 2); + builder.def_var(y, tmp); + } + { + let arg1 = builder.use_var(x); + let arg2 = builder.use_var(y); + let tmp = builder.ins().iadd(arg1, arg2); + builder.def_var(z, tmp); + } + builder.ins().jump(block1, &[]); + + builder.switch_to_block(block1); + { + let arg1 = builder.use_var(y); + let arg2 = builder.use_var(z); + let tmp = builder.ins().iadd(arg1, arg2); + builder.def_var(z, tmp); + } + { + let arg = builder.use_var(y); + builder.ins().brnz(arg, block2, &[]); + } + { + let arg1 = builder.use_var(z); + let arg2 = builder.use_var(x); + let tmp = builder.ins().isub(arg1, arg2); + builder.def_var(z, tmp); + } + { + let arg = builder.use_var(y); + builder.ins().return_(&[arg]); + } + + builder.switch_to_block(block2); + builder.seal_block(block2); + + { + let arg1 = builder.use_var(y); + let arg2 = builder.use_var(x); + let tmp = builder.ins().isub(arg1, arg2); + builder.def_var(y, tmp); + } + builder.ins().jump(block1, &[]); + builder.seal_block(block1); + + builder.finalize(); + } + + let flags = settings::Flags::new(settings::builder()); + let res = verify_function(&func, &flags); + println!("{}", func.display(None)); + if let Err(errors) = res { + panic!("{}", errors); + } + + Ok(func) + */ +} diff --git a/default.nix b/default.nix index 197395e..da7acd6 100644 --- a/default.nix +++ b/default.nix @@ -1,11 +1,11 @@ -{ stdenv, rust }: +{ stdenv, rust, python }: stdenv.mkDerivation rec { version = "0.0.0"; name = "holyjit-${version}"; - buildInputs = [ rust ]; + buildInputs = [ rust python ]; shellHook = " export RUSTC_WRAPPER=$PWD/rustc.sh export RUST_BACKTRACE=1 "; -} \ No newline at end of file +} diff --git a/lir/Cargo.toml b/lir/Cargo.toml new file 
mode 100644 index 0000000..5485ef8 --- /dev/null +++ b/lir/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "holyjit_lir" +version = "0.0.0" +authors = [ "Nicolas B. Pierron " ] + +# Lock dynasm and serde_derive in order to build against old versions of +# rustc. +[dependencies] +serde = "1.0" +serde_derive = "=1.0.12" +bincode = "0.8" + +[lib] +name = "holyjit_lir" +path = "src/lib.rs" + diff --git a/lir/src/lib.rs b/lir/src/lib.rs new file mode 100644 index 0000000..9bdbfd9 --- /dev/null +++ b/lir/src/lib.rs @@ -0,0 +1,495 @@ +/// The LIR format is used for encoding the Rust code which would be optimized +/// at compile time and the also at runtime. It should mostly be generated by +/// the holyjit driver, and consumed by the holyjit compiler. +/// +/// This format is made to optimize for recompilation. Optimizing for +/// recompilation is extremely interesting for a JIT as a JIT work is mostly +/// composed of recompilation. For example, inlining is recompiling a compiled +/// function in another and an invalidation causes a few optimization to be +/// discarded when the same function is recompiled. Optimizing for recompilation +/// implies that we apply versionning ideas on this format, such that the cost +/// of a recompilation is less than the cost of the initial compilation. +/// +/// This format is serializable because it has to be saved by the Rust driver, +/// in the data section of the generated program, and read at runtime by the +/// HolyJIT library. +/// +/// The LIR is represented as a mix between a Sea-Of-Nodes and SSA, which uses +/// Instruction hashes as SSA indexes. Hashes are used as a way to have a +/// position independent representation to make the versionning effective. The +/// mixed sea-of-nodes approach is used as a way to reduce the number of blocks +/// mutations until we reach the Scheduler. 
+ +// Serde is used for serializing and deserializing the LIR which is stored +// by the driver in a constant, and deserialized by the JIT compiler in +// order to be manipulated. +#[macro_use] +extern crate serde_derive; +extern crate serde; +extern crate bincode; + +/// Automatically derive a hashing function for each type, to make sure that we +/// can apply patches to a subset of instructions. +use std::hash::{Hash, Hasher}; + +/// A LIR Unit is a connected set of basic blocks with an entry and exit blocks. +/// This might correspond to a Rust function, a subset of a Rust function which +/// corresponds to an opcode or inline caches, or to an target specific +/// intrinsic abstract code. A Unit contains the set of instructions and blocks +/// which are indexing the instruction in the order in which they are expected +/// to be executed. +#[derive(Serialize, Deserialize, Debug)] +pub struct Unit { + /// Unique Unit Identifier. + pub id: UnitId, + + /// Data flow, contains all the instructions and their operands, as well as the potentially memory dependencies. + pub data_flow: DataFlow, + + /// Control flow, contains all the blocks which are making references to the + /// data flow instructions, and also the control flow instructions. + pub control_flow: ControlFlow, +} + +/// A Data flow contains all the instructions from one Unit, and they describe +/// how data are flowing through instructions. +#[derive(Serialize, Deserialize, Debug)] +pub struct DataFlow { + /// Set of instruction, where each instruction should be referenced by at + /// least a Block using their ValueHash. + pub instructions: Vec, +} + +/// A Control flow contains all the blocks and how they flow from one to +/// another. They reference data flow instructions to determine the order in +/// which side-effectful instructions are expected. They are ending with a +/// control flow instruction. +#[derive(Serialize, Deserialize, Debug)] +pub struct ControlFlow { + /// List of basic blocks. 
+ pub blocks: Vec, + /// Index of the entry block. + pub entry: BlockIndex, + /// Set of exit blocks indexes. + pub exit: Vec, +} + +/// Unique Unit identifier of an intrinsic. +type IntrinsicId = usize; +/// Unique unit identifier of a function. +type FunctionId = usize; +/// Unique unit identifier of a sub-set of a function. +type SubSetId = usize; + +/// Unique Unit identifier. +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy, PartialEq, Eq)] +pub enum UnitId { + /// Identifier of a pseudo-code of an intrinsic used to represent the + /// equivalent LIR of a target specific optimization. Intrisic do not have + /// support for unwinding. + Intrinsic(IntrinsicId), + + /// Identifier of a callable function. + Function(FunctionId), + + /// Identifier of a sub-set of a Rust function. + SubSet(SubSetId) +} + + +/// A LIR Block Index is an integer which corresponds to an index within a given Unit. +#[derive(Serialize, Deserialize, Debug, Hash)] +pub struct BlockIndex { + pub value: usize, +} + +/// A LIR Block is a sequence of computation of value and a Terminator. +#[derive(Serialize, Deserialize, Debug, Hash)] +pub struct Block { + /// Sequence of instructions contained in the current block in the order in + /// which they have to be executed. + pub sequence: Vec, + /// Control flow instruction. + pub terminator: Value, + + /// Goto's branch, Call's return location and Switch default case. + pub default: Option, + /// Error handling block, if any error happens during the control flow + /// instruction. + pub unwind: Option, + /// Switch targets. + pub targets: Option>, +} + +/// A LIR Instruction is a single operation which aggregates operands and an +/// operation to produce a ValueHash. +#[derive(Serialize, Deserialize, Debug)] /* derive(Hash)-manually */ +pub struct Instruction { + /// Opcode of the instruction. + pub opcode: Opcode, + /// Ordered list of operands of the instruction. 
+ pub operands: Vec, + /// Set of previous instructions which might change the memory read by this + /// instruction. + pub dependencies: Vec, + /// Set if this instruction got replaced by another. This is not taken into + /// account while computing the hash of an instruction. + pub replaced_by: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Copy)] /* derive(Hash)-manually */ +pub enum ConstData { + Unsigned(u64), + Signed(i64), + Float(f64), +} +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub struct SwitchData { + pub low: i32, + pub high: i32, +} + +/// An Opcode is an instruction which contains basic operations. This list of +/// opcode is minimal and contains all the building blocks to express higher +/// level IR instructions which are platform specific as intrinsic units. +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub enum Opcode { + /// WithHash is a way to wrap a Value with another ValueHash which will be + /// used either for loop-back or to limit the amount of context referrenced + /// in a patch. + /// (1 operand) + Rehash(usize), + + /// Phi is an instruction which merges values from different blocks. Note + /// that this LIR uses Phi instead of extended basic block, in order to + /// avoid carrying around too many variables to the next block, which would + /// imply additional rewrite of the graphs on inlining. + /// (multiple operands) + Phi, + + /// Encode a constant. + /// (0 operand + data_index to ConstData) + Const(ConstData), + + /// Cast is used to change the type interpretation of a Value without any content checks. + /// (1 operand + data_index to CastData) + Cast(ComplexTypeId), + + /// Extract overflow flag from the operation which created the value. + /// (0 operand, 1 dependency) + OverflowFlag, + + /// Addition. (2 operands) + Add(NumberType), + /// Substraction. (2 operands: result = lhs - rhs) + Sub(NumberType), + /// Multiplication. (2 operands) + Mul(NumberType), + /// Division. 
(2 operands: result = lhs / rhs) + Div(NumberType), + /// Remainder. (2 operands: result = lhs % rhs) + Rem(NumberType), + /// Sign-extend. (1 operand) + SignExt(SignedType), + /// Zero-extend. (1 operand) + ZeroExt(IntType), + + /// Truncate. (round towards zero) (1 operand) + Truncate(FloatType), + /// Round. (round towards nearest) (1 operand) + Round(FloatType), + /// Floor. (round towards -Inf) (1 operand) + Floor(FloatType), + /// Ceil. (round towards +Inf) (1 operand) + Ceil(FloatType), + + /// Bitwise exclusive or. (2 operands) + BwXor(IntType), + /// Bitwise And. (2 operands) + BwAnd(IntType), + /// Bitwise Or. (2 operands) + BwOr(IntType), + /// Bitwise Not. (2 operands) + BwNot(IntType), + /// Shift left. (2 operands: result = lhs << rhs) + ShiftLeft(IntType), + /// Shift right. (2 operands: result = lhs >> rhs) + ShiftRight(SignedType), + + /// Equal. (2 operands) + Eq(NumberType), + /// Less than. (2 operands: result = lhs < rhs) + Lt(NumberType), + /// Less than or equal. (2 operands: result = lhs <= rhs) + Le(NumberType), + /// Not equal. (2 operands) + Ne(NumberType), + /// Greather than. (2 operands: result = lhs > rhs) + Gt(NumberType), + // Greather than or equal. (2 operands: result = lhs >= rhs) + Ge(NumberType), + + /// StaticAddress is used to refer to data which is not yet known at compile + /// time, but known at the execution, such as function pointer addresses. + /// (0 operand) + StaticAddress, + /// CPUAddress is used to refer to internal CPU data, and help the compiler + /// reason about the aliasing intrinsic using CPU data, such as flags and + /// cpuid. + /// (0 operand) + CPUAddress, + /// Get the address of where the input operand is stored. At the end of the + /// pipeline, if any of these instructions remain it enforces the data to + /// live in memory at least as long as the address exists. + /// (1 operand) + Address, + + /// Load content from the address. 
(1 operand: result = *input, data_index) + Load(ComplexTypeId), + /// Store content to the address. (2 operands: *lhs = rhs, data_index) + Store(ComplexTypeId), + + // Acquire = LoadFence{Load, Store} + // Release = {Load, Store}FenceStore + + /// LoadFenceLoad or read barrier implies that all loads must be completed + /// before proceeding with any loads. (Prevent the compiler from moving load + /// instructions) (0 operand) + LoadFenceLoad, + /// LoadFenceStore implies that all loads must be completed before + /// proceeding with any stores. (Prevent the compiler from moving load and + /// store instructions) (0 operand) + LoadFenceStore, + /// StoreFenceLoad implies that all stores must be completed before + /// proceeding with any loads. (Prevent the compiler from moving load and + /// store instructions) (0 operand) + StoreFenceLoad, + /// StoreFenceStore or write barrier implies that all stores must be + /// completed before proceeding with any stores. (Prevent the compiler from + /// moving store instructions) (0 operand) + StoreFenceStore, + + /// Unit is used for non fallible unit. For example, this can be used for + /// non-inlinable and non-optimizable intrinsics which are expressed in + /// terms of the minimal set of instructions. This is used to provide target + /// specific instructions such as SIMD, locked-instructions or cpuid which + /// are not represented in this LIR. (maybe operands) + Unit(UnitId), + + // + // List of Control instructions. + // + + /// Return the value computed by the instruction behind Value. This + /// corresponds either to the returned value of a function, or to the result + /// of an expression for subset of Rust functions. + /// (1 operand) + Return, + + /// Unwind ends the control flow, and unwind everything. + Unwind, + + /// Unreachable is used either as an assertion / optimization mechanism. + Unreachable, + + /// Goto is an unconditional jump to another basic block in the same Unit. 
+ /// (0 operand, default target) + Goto, + + /// Switch is a conditional branch implemented as a switch case over + /// variable ranges of integer values. This is used even for simple if/else + /// branches. + /// (1 operand, maybe default target, targets) + Switch(SwitchData), + + /// Call implements a function call, such as any Rust function, an assertion + /// or a drop function. + /// (many operands: function + arguments, maybe default target, maybe unwind target) + Call, + + /// CallUnit implements an internal Unit call or inline. + /// (many operands: arguments, maybe default target, maybe unwind target) + CallUnit(UnitId), +} + +/// NumberType are used for math and bitwise operators. +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub enum NumberType { + I(SignedType), + F(FloatType), +} +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub enum IntType { + I8, I16, I32, I64, +} +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub enum SignedType { + S(IntType), + U(IntType), +} +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub enum FloatType { + F32, F64, +} + +/// ComplexType indexes are used to index an arithmetic type or an aggregated +/// type, such as tuples, enums or structures within the Assembly. +type ComplexTypeId = usize; + +/// A LIR Value corresponds to the computation of either instructions or +/// terminator. As opposed to ordinary SSA notation, we use a hash instead of an +/// instruction index, in order to be able to generate position-independent +/// patches for each Unit. 
+#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub struct Value { + pub hash: u64, + pub index: usize, +} + +impl Opcode { + pub fn is_control(self) -> bool { + match self { + Opcode::Return | + Opcode::Unwind | + Opcode::Unreachable | + Opcode::Goto | + Opcode::Switch(_) | + Opcode::Call | + Opcode::CallUnit(_) => true, + _ => false, + } + } +} + +impl Hash for ConstData { + fn hash(&self, state: &mut H) { + use std::mem; + mem::discriminant(self).hash(state); + match self { + &ConstData::Unsigned(v) => v.hash(state), + &ConstData::Signed(v) => v.hash(state), + &ConstData::Float(v) => { + assert_eq!(mem::size_of::(), mem::size_of::()); + assert_eq!(mem::align_of::(), mem::align_of::()); + let v : u64 = unsafe { mem::transmute_copy(&v) }; + v.hash(state); + } + } + } +} + +impl Hash for Instruction { + fn hash(&self, state: &mut H) { + self.opcode.hash(state); + self.operands.hash(state); + self.dependencies.hash(state); + // Exclude self.replaced_by. + } +} + +impl<'a> From<&'a Instruction> for u64 { + fn from(ins: &'a Instruction) -> u64 { + use std::collections::hash_map::DefaultHasher; + let mut hasher = DefaultHasher::new(); + ins.hash(&mut hasher); + hasher.finish() + } +} + +impl Unit { + pub fn new(id: UnitId) -> Unit { + Unit { + id, + data_flow: DataFlow::new(), + control_flow: ControlFlow::new(), + } + } +} + +impl ControlFlow { + pub fn new() -> ControlFlow { + ControlFlow { + blocks: vec![], + entry: BlockIndex { value: 0 }, + exit: vec![], + } + } +} + +impl DataFlow { + pub fn new() -> DataFlow { + DataFlow { instructions: vec![] } + } + + fn get_value(&self, index: usize) -> Value { + Value { hash: (&self.instructions[index]).into(), index } + } + + pub fn add_ins(&mut self, ins: Instruction) -> Value { + // TODO: Ensure that if the instruction already exists, then it is not + // being added a second time, and the returned Value output correspond + // to the existing Instruction. 
+ // TODO: Add consistency checks that all value references are indeed in + // the current DataFlow structure. + self.instructions.push(ins); + self.get_value(self.instructions.len() - 1) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn check_rehash() { + let mut df = DataFlow::new(); + let v0 = df.add_ins(Instruction { + opcode: Opcode::Const(ConstData::Unsigned(1024)), + operands: vec![], + dependencies: vec![], + replaced_by: None, + }); + let v1 = df.add_ins(Instruction { + opcode: Opcode::Rehash(21), + operands: vec![v0], + dependencies: vec![], + replaced_by: None, + }); + let v2 = df.add_ins(Instruction { + opcode: Opcode::Rehash(69), + operands: vec![v0], + dependencies: vec![], + replaced_by: None, + }); + // Rehash opcode compute a different hash value based on the number + // which is given as argument. This is used to handle loops. + assert_ne!(v1.hash, v2.hash); + } + + #[test] + fn check_replaced_by() { + let mut df = DataFlow::new(); + let v0 = df.add_ins(Instruction { + opcode: Opcode::Const(ConstData::Unsigned(1)), + operands: vec![], + dependencies: vec![], + replaced_by: None, + }); + let v1 = df.add_ins(Instruction { + opcode: Opcode::Add(NumberType::I(SignedType::U(IntType::I32))), + operands: vec![v0, v0], + dependencies: vec![], + replaced_by: None, + }); + let v2 = df.add_ins(Instruction { + opcode: Opcode::Const(ConstData::Unsigned(2)), + operands: vec![v0], + dependencies: vec![], + replaced_by: None, + }); + df.instructions[v1.index].replaced_by = Some(v2); + // When setting the replaced_by field, the hash of an instruction should + // not change. + assert_eq!(v1.hash, df.get_value(v1.index).hash); + } +} From 06e63dd642bc6995842d409e4dda414339e95c02 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Thu, 16 Aug 2018 23:53:12 +0200 Subject: [PATCH 02/32] Move NumberType and NumberValue to a dedicated module. 
--- lir/src/lib.rs | 106 ++++++++++++++------------------------------- lir/src/number.rs | 108 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 141 insertions(+), 73 deletions(-) create mode 100644 lir/src/number.rs diff --git a/lir/src/lib.rs b/lir/src/lib.rs index 9bdbfd9..0e97a88 100644 --- a/lir/src/lib.rs +++ b/lir/src/lib.rs @@ -28,6 +28,8 @@ extern crate serde_derive; extern crate serde; extern crate bincode; +pub mod number; + /// Automatically derive a hashing function for each type, to make sure that we /// can apply patches to a subset of instructions. use std::hash::{Hash, Hasher}; @@ -43,7 +45,8 @@ pub struct Unit { /// Unique Unit Identifier. pub id: UnitId, - /// Data flow, contains all the instructions and their operands, as well as the potentially memory dependencies. + /// Data flow, contains all the instructions and their operands, as well as + /// the potentially memory dependencies. pub data_flow: DataFlow, /// Control flow, contains all the blocks which are making references to the @@ -137,12 +140,6 @@ pub struct Instruction { pub replaced_by: Option, } -#[derive(Serialize, Deserialize, Debug, Clone, Copy)] /* derive(Hash)-manually */ -pub enum ConstData { - Unsigned(u64), - Signed(i64), - Float(f64), -} #[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] pub struct SwitchData { pub low: i32, @@ -168,8 +165,8 @@ pub enum Opcode { Phi, /// Encode a constant. - /// (0 operand + data_index to ConstData) - Const(ConstData), + /// (0 operand) + Const(number::NumberValue), /// Cast is used to change the type interpretation of a Value without any content checks. /// (1 operand + data_index to CastData) @@ -180,54 +177,54 @@ pub enum Opcode { OverflowFlag, /// Addition. (2 operands) - Add(NumberType), + Add(number::NumberType), /// Substraction. (2 operands: result = lhs - rhs) - Sub(NumberType), + Sub(number::NumberType), /// Multiplication. (2 operands) - Mul(NumberType), + Mul(number::NumberType), /// Division. 
(2 operands: result = lhs / rhs) - Div(NumberType), + Div(number::NumberType), /// Remainder. (2 operands: result = lhs % rhs) - Rem(NumberType), + Rem(number::NumberType), /// Sign-extend. (1 operand) - SignExt(SignedType), + SignExt(number::SignedType), /// Zero-extend. (1 operand) - ZeroExt(IntType), + ZeroExt(number::IntType), /// Truncate. (round towards zero) (1 operand) - Truncate(FloatType), + Truncate(number::FloatType), /// Round. (round towards nearest) (1 operand) - Round(FloatType), + Round(number::FloatType), /// Floor. (round towards -Inf) (1 operand) - Floor(FloatType), + Floor(number::FloatType), /// Ceil. (round towards +Inf) (1 operand) - Ceil(FloatType), + Ceil(number::FloatType), /// Bitwise exclusive or. (2 operands) - BwXor(IntType), + BwXor(number::IntType), /// Bitwise And. (2 operands) - BwAnd(IntType), + BwAnd(number::IntType), /// Bitwise Or. (2 operands) - BwOr(IntType), + BwOr(number::IntType), /// Bitwise Not. (2 operands) - BwNot(IntType), + BwNot(number::IntType), /// Shift left. (2 operands: result = lhs << rhs) - ShiftLeft(IntType), + ShiftLeft(number::IntType), /// Shift right. (2 operands: result = lhs >> rhs) - ShiftRight(SignedType), + ShiftRight(number::SignedType), /// Equal. (2 operands) - Eq(NumberType), + Eq(number::NumberType), /// Less than. (2 operands: result = lhs < rhs) - Lt(NumberType), + Lt(number::NumberType), /// Less than or equal. (2 operands: result = lhs <= rhs) - Le(NumberType), + Le(number::NumberType), /// Not equal. (2 operands) - Ne(NumberType), + Ne(number::NumberType), /// Greather than. (2 operands: result = lhs > rhs) - Gt(NumberType), + Gt(number::NumberType), // Greather than or equal. (2 operands: result = lhs >= rhs) - Ge(NumberType), + Ge(number::NumberType), /// StaticAddress is used to refer to data which is not yet known at compile /// time, but known at the execution, such as function pointer addresses. 
@@ -312,26 +309,6 @@ pub enum Opcode { CallUnit(UnitId), } -/// NumberType are used for math and bitwise operators. -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] -pub enum NumberType { - I(SignedType), - F(FloatType), -} -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] -pub enum IntType { - I8, I16, I32, I64, -} -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] -pub enum SignedType { - S(IntType), - U(IntType), -} -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] -pub enum FloatType { - F32, F64, -} - /// ComplexType indexes are used to index an arithmetic type or an aggregated /// type, such as tuples, enums or structures within the Assembly. type ComplexTypeId = usize; @@ -361,23 +338,6 @@ impl Opcode { } } -impl Hash for ConstData { - fn hash(&self, state: &mut H) { - use std::mem; - mem::discriminant(self).hash(state); - match self { - &ConstData::Unsigned(v) => v.hash(state), - &ConstData::Signed(v) => v.hash(state), - &ConstData::Float(v) => { - assert_eq!(mem::size_of::(), mem::size_of::()); - assert_eq!(mem::align_of::(), mem::align_of::()); - let v : u64 = unsafe { mem::transmute_copy(&v) }; - v.hash(state); - } - } - } -} - impl Hash for Instruction { fn hash(&self, state: &mut H) { self.opcode.hash(state); @@ -444,7 +404,7 @@ mod tests { fn check_rehash() { let mut df = DataFlow::new(); let v0 = df.add_ins(Instruction { - opcode: Opcode::Const(ConstData::Unsigned(1024)), + opcode: Opcode::Const(number::NumberValue::U32(1024)), operands: vec![], dependencies: vec![], replaced_by: None, @@ -470,19 +430,19 @@ mod tests { fn check_replaced_by() { let mut df = DataFlow::new(); let v0 = df.add_ins(Instruction { - opcode: Opcode::Const(ConstData::Unsigned(1)), + opcode: Opcode::Const(number::NumberValue::I32(1)), operands: vec![], dependencies: vec![], replaced_by: None, }); let v1 = df.add_ins(Instruction { - opcode: Opcode::Add(NumberType::I(SignedType::U(IntType::I32))), + opcode: 
Opcode::Add(number::NumberType::I32), operands: vec![v0, v0], dependencies: vec![], replaced_by: None, }); let v2 = df.add_ins(Instruction { - opcode: Opcode::Const(ConstData::Unsigned(2)), + opcode: Opcode::Const(number::NumberValue::I32(2)), operands: vec![v0], dependencies: vec![], replaced_by: None, diff --git a/lir/src/number.rs b/lir/src/number.rs new file mode 100644 index 0000000..2709d24 --- /dev/null +++ b/lir/src/number.rs @@ -0,0 +1,108 @@ +/// This module implement all Number types, to specialize operations for a given +/// numerical type, and to encode constant with the proper numerical type too. +use std::hash::{Hash, Hasher}; + +/// NumberType are used for math and bitwise operators. All other number types +/// can be convert to this one, including NumberValue, using the `into()` +/// method. +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy, PartialEq, Eq)] +pub enum NumberType { + U8, U16, U32, U64, + I8, I16, I32, I64, + F32, F64, +} +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub enum IntType { + U8, U16, U32, U64, +} +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub enum SignedType { + U8, U16, U32, U64, + I8, I16, I32, I64, +} +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub enum FloatType { + F32, F64, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Copy)] /* derive(Hash)-manually */ +pub enum NumberValue { + U8(u8), U16(u16), U32(u32), U64(u64), + I8(i8), I16(i16), I32(i32), I64(i64), + F32(f32), F64(f64), +} + +macro_rules! 
from_with_same_prefix { + (impl From<$from:ident> for $to:ident => $($as:ident,)*) => { + impl From<$from> for $to { + fn from(input: $from) -> $to { + match input { + $($from::$as => $to::$as),* + } + } + } + } +} + +from_with_same_prefix!(impl From for SignedType => + U8, U16, U32, U64, +); +from_with_same_prefix!(impl From for NumberType => + U8, U16, U32, U64, +); +from_with_same_prefix!(impl From for NumberType => + U8, U16, U32, U64, + I8, I16, I32, I64, +); +from_with_same_prefix!(impl From for NumberType => + F32, F64, +); + +macro_rules! from_with_same_prefix_remove_parent { + (impl From<$from:ident> for $to:ident => $($as:ident,)* ) => { + impl From<$from> for $to { + fn from(input: $from) -> $to { + match input { + $($from::$as(_) => $to::$as,)* + } + } + } + } +} + +from_with_same_prefix_remove_parent!(impl From for NumberType => + U8, U16, U32, U64, + I8, I16, I32, I64, + F32, F64, +); + + +impl Hash for NumberValue { + fn hash(&self, state: &mut H) { + use std::mem; + mem::discriminant(self).hash(state); + match self { + &NumberValue::U8(v) => v.hash(state), + &NumberValue::U16(v) => v.hash(state), + &NumberValue::U32(v) => v.hash(state), + &NumberValue::U64(v) => v.hash(state), + &NumberValue::I8(v) => v.hash(state), + &NumberValue::I16(v) => v.hash(state), + &NumberValue::I32(v) => v.hash(state), + &NumberValue::I64(v) => v.hash(state), + &NumberValue::F32(v) => { + assert_eq!(mem::size_of::(), mem::size_of::()); + assert_eq!(mem::align_of::(), mem::align_of::()); + let v : u32 = unsafe { mem::transmute_copy(&v) }; + v.hash(state); + } + &NumberValue::F64(v) => { + assert_eq!(mem::size_of::(), mem::size_of::()); + assert_eq!(mem::align_of::(), mem::align_of::()); + let v : u64 = unsafe { mem::transmute_copy(&v) }; + v.hash(state); + } + } + } +} + From c45626b6f096d7825b20e1fbb1de5eb364ae7e06 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 18 Aug 2018 17:56:11 +0200 Subject: [PATCH 03/32] Improve the LIR for the codegen test case. 
--- codegen/src/lib.rs | 33 ++- codegen/src/lower.rs | 9 +- lir/src/builder.rs | 219 ++++++++++++++++++++ lir/src/context.rs | 48 +++++ lir/src/control_flow.rs | 83 ++++++++ lir/src/data_flow.rs | 383 ++++++++++++++++++++++++++++++++++ lir/src/lib.rs | 445 ++-------------------------------------- lir/src/number.rs | 8 +- lir/src/types.rs | 40 ++++ lir/src/unit.rs | 69 +++++++ 10 files changed, 897 insertions(+), 440 deletions(-) create mode 100644 lir/src/builder.rs create mode 100644 lir/src/context.rs create mode 100644 lir/src/control_flow.rs create mode 100644 lir/src/data_flow.rs create mode 100644 lir/src/types.rs create mode 100644 lir/src/unit.rs diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index a872d35..b6347a0 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -56,7 +56,7 @@ impl Context { /// Given an HolyJIT LIR Unit, convert it to a Cranelift function in order /// to generate the corresponding bytes, then allocate memory pages and map /// them as executable. - pub fn compile(&mut self, unit: &lir::Unit) -> error::LowerResult { + pub fn compile(&mut self, unit: &lir::unit::Unit) -> error::LowerResult { let &mut Context { ref mut ctx, ref isa, .. } = self; ctx.func = lower::convert(unit)?; let mut reloc_sink = exec_alloc::NullRelocSink {}; @@ -82,6 +82,13 @@ mod tests { use super::*; use std::mem; + use lir::unit::*; + use lir::data_flow::*; + use lir::number::*; + use lir::builder::*; + use lir::types::*; + + #[test] fn check_create_context() { let _ctx = Context::new(); @@ -90,9 +97,29 @@ mod tests { #[test] fn check_add1_unit() { + let mut ctx_bld = ContextBuilder::new(); + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); + let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v0]); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v1], + dependencies: vec![], + replaced_by: None, + }) + } + let add1_unit = bld.finish(); + let mut ctx = Context::new(); - let simple_unit = lir::Unit::new(lir::UnitId::Function(0)); - let code = ctx.compile(&simple_unit).unwrap(); + let code = ctx.compile(&add1_unit).unwrap(); let add1 : fn(i32) -> i32 = unsafe { mem::transmute(code.as_ptr()) }; diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 6d6ac72..7d1b0b0 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -6,11 +6,12 @@ use codegen::ir::types::*; use codegen::settings::{self, CallConv}; use codegen::verifier::verify_function; -use lir; +use lir::unit::Unit; use error; + /// Convert a LIR Unit into a Cranelift IR (Function). 
-pub fn convert(_unit: &lir::Unit) -> error::LowerResult { +pub fn convert(_unit: &Unit) -> error::LowerResult { let mut sig = Signature::new(CallConv::SystemV); sig.returns.push(AbiParam::new(I32)); sig.params.push(AbiParam::new(I32)); @@ -22,10 +23,8 @@ pub fn convert(_unit: &lir::Unit) -> error::LowerResult { let block0 = builder.create_ebb(); let x = Variable::new(0); let y = Variable::new(1); - let z = Variable::new(2); builder.declare_var(x, I32); builder.declare_var(y, I32); - builder.declare_var(z, I32); builder.append_ebb_params_for_function_params(block0); builder.switch_to_block(block0); @@ -38,6 +37,8 @@ pub fn convert(_unit: &lir::Unit) -> error::LowerResult { let tmp = builder.ins().iconst(I32, 1); builder.def_var(y, tmp); } + let z = Variable::new(2); + builder.declare_var(z, I32); { let arg1 = builder.use_var(x); let arg2 = builder.use_var(y); diff --git a/lir/src/builder.rs b/lir/src/builder.rs new file mode 100644 index 0000000..b7f0d02 --- /dev/null +++ b/lir/src/builder.rs @@ -0,0 +1,219 @@ +/// This module contains everything need for constructing a Unit, with its data +/// flow graph and its control flow graph. + +use std::collections::HashMap; + +use unit::{Unit, UnitId}; +use data_flow::{Instruction, Opcode, Value}; +use control_flow::{Sequence, SequenceIndex, SuccessorIndex}; +use types::{ComplexType, ComplexTypeId}; +use context; + +/// A UnitContext should be used across multiple `UnitBuilder`, in order to have +/// different indexes and hashes for identical data. While this is not a +/// critical issue, this would cause additional hash collisions on Rehash +/// instructions when inlining. +pub struct ContextBuilder { + /// Context which is being built by the context builder. + ctx: context::Context, + + /// This HashMap is made to avoid the duplication of too many types, by + /// making sure we have a unique identifier in the vector of types. 
+ types_lookup: HashMap, +} + +pub struct UnitBuilder<'a> { + /// Identifier of the constructed unit. + unit: Unit, + /// Context used for building the current unit. + ctx: &'a mut ContextBuilder, + /// Sequence which is currently being editted. + sequence: Option, +} + +impl ContextBuilder { + pub fn new() -> ContextBuilder { + ContextBuilder { + ctx: context::Context::new(), + types_lookup: HashMap::new() + } + } + + pub fn finish(self) -> context::Context { + self.ctx + } + + pub fn get_rehash(&mut self) -> Opcode { + Opcode::Rehash(self.ctx.get_hash_seed()) + } + pub fn get_newhash(&mut self) -> Opcode { + Opcode::Newhash(self.ctx.get_hash_seed()) + } + + /// Add a type and reuse a type which already got registered if any. + pub fn add_type(&mut self, ty: ComplexType) -> ComplexTypeId { + match self.types_lookup.get(&ty) { + Some(id) => return *id, + None => (), + }; + let id = self.ctx.add_type(ty); + // TODO: Can we avoid cloning here? Or maybe restrict it only to scalar + // and vector types. + let ty = self.ctx.get_type(id).clone(); + self.types_lookup.insert(ty, id); + id + } + + /// Add a type which would not be handled by the hash table, but would add + /// the ability to be replaced with get_type_mut later. + pub fn add_type_unshared(&mut self, ty: ComplexType) -> ComplexTypeId { + self.ctx.add_type(ty) + } + + /// Get a reference to the type corresponding to the given id. + pub fn get_type(&self, id: ComplexTypeId) -> &ComplexType { + self.ctx.get_type(id) + } + +} + +impl<'a> UnitBuilder<'a> { + /// Create a new UnitBuilder, which will populate the Unit information + /// incrementally. + pub fn new(id: UnitId, ctx: &'a mut ContextBuilder) -> UnitBuilder<'a> { + UnitBuilder { + unit: Unit::new(id), + ctx, + sequence: None, + } + } + + /// Context accessor in order to be able to add additional types without + /// repeating each ContextBuilder function. 
+ pub fn ctx(&mut self) -> &mut ContextBuilder { self.ctx } + + /// Set the signature of the Unit, and allocate the corresponding Values. + /// This function does not create SSA bindings for the arguments. + pub fn set_signature(&mut self, signature: ComplexTypeId) { + self.unit.sig = signature; + let ty = self.ctx.get_type(signature); + let (ins, outs) = match ty { + &ComplexType::Function(ref ins, ref outs, _) => (ins, outs), + _ => panic!("Unit signatures are expected to be a Function.") + }; + self.unit.inputs = ins.iter().map(|_| Value::dummy()).collect(); + self.unit.outputs = outs.iter().map(|_| Value::dummy()).collect(); + } + + /// Once the signature is defined with `set_signature`, and a block is + /// entered. This function can be used to create a new SSA value for the + /// argument at the index `arg_index`. If the argument was created + /// previously, it would be reused. + pub fn unit_arg(&mut self, arg_index: usize) -> Value { + let arg = self.unit.inputs[arg_index]; + if !arg.is_dummy() { + return arg + } + let opcode = self.ctx.get_newhash(); + self.dfg_add_ins(Instruction { + opcode, + operands: vec![], + dependencies: vec![], + replaced_by: None, + }); + *(&mut self.unit.inputs[arg_index]) = arg; + arg + } + + /// Add one instruction in the data flow graph. + fn dfg_add_ins(&mut self, ins: Instruction) -> Value { + self.unit.dfg.add_ins(ins) + } + + /// Create a new sequence to hold phi instructions and any effectful or + /// garded instructions. + pub fn create_sequence(&mut self) -> SequenceIndex { + self.unit.cfg.sequences.push(Sequence::new()); + SequenceIndex(self.unit.cfg.sequences.len() - 1) + } + + /// Switch to a sequence of code, such that newly added instructions are + /// going to be added to this block by default. 
+ pub fn switch_to_sequence(&mut self, seq: SequenceIndex) { + // TODO: Assert that the previous sequence is properly ended with a + // control instruction, and a number of successors matching the number + // the expected successors of the control instruction. + self.sequence = Some(seq); + } + + /// Add an instruction to both the data flow graph and the active sequence + /// of the control flow graph. + pub fn add_ins(&mut self, ins: Instruction) -> Value { + let value = self.dfg_add_ins(ins); + let SequenceIndex(index) = self.sequence.unwrap(); + assert!(self.unit.cfg.sequences[index].control.is_dummy()); + self.unit.cfg.sequences[index].sequence.push(value); + value + } + + /// Add an instruction based only on its opcode, this function creates a + /// conservative aliasing between load, store, calls and units. + pub fn add_op(&mut self, opcode: Opcode, operands: &[Value]) -> Value { + self.add_ins(Instruction { + opcode, + operands: operands.iter().map(|x| *x).collect(), + dependencies: vec![], + replaced_by: None, + }) + } + + /// Add a control flow instruction to end the current sequence. + pub fn end_sequence(&mut self, ins: Instruction) { + assert!(ins.is_control()); + let is_return = ins.opcode.is_return(); + let value = self.dfg_add_ins(ins); + { + let SequenceIndex(index) = self.sequence.unwrap(); + let edit = &mut self.unit.cfg.sequences[index]; + assert!(edit.control.is_dummy()); + edit.control = value; + } + // If the last instruction is a return statement, then add this return + // statement in the list of outputs of the unit. + if is_return { + self.unit.outputs.push(value); + } + } + + /// Set conditional branch. 
+ pub fn sequence_value_jump(&mut self, value: isize, seq: SequenceIndex) { + let SequenceIndex(index) = self.sequence.unwrap(); + let edit = &mut self.unit.cfg.sequences[index]; + edit.successors.push(seq); + let succ_idx = SuccessorIndex(edit.successors.len() - 1); + assert!(!edit.targets.iter().any(|&(v, _)| v == value)); + edit.targets.push((value, succ_idx)); + } + /// Set default branch. + pub fn sequence_default_jump(&mut self, seq: SequenceIndex) { + let SequenceIndex(index) = self.sequence.unwrap(); + let edit = &mut self.unit.cfg.sequences[index]; + edit.successors.push(seq); + let succ_idx = SuccessorIndex(edit.successors.len() - 1); + assert_eq!(edit.default, None); + edit.default = Some(succ_idx); + } + /// Set unwind branch. + pub fn sequence_unwind_jump(&mut self, seq: SequenceIndex) { + let SequenceIndex(index) = self.sequence.unwrap(); + let edit = &mut self.unit.cfg.sequences[index]; + edit.successors.push(seq); + let succ_idx = SuccessorIndex(edit.successors.len() - 1); + assert_eq!(edit.unwind, None); + edit.unwind = Some(succ_idx); + } + + pub fn finish(self) -> Unit { + self.unit + } +} diff --git a/lir/src/context.rs b/lir/src/context.rs new file mode 100644 index 0000000..bb68ce8 --- /dev/null +++ b/lir/src/context.rs @@ -0,0 +1,48 @@ +use types::{ComplexType, ComplexTypeId}; + +/// A context is a structure which centralize all the data necessary for the +/// execution of any Unit. It holds the collection of complex types, and any +/// counter related to having unique identifiers. +pub struct Context { + /// This counter is used for both Rehash instructions and Newhash + /// instructions. It holds the next value to be allocated if any of these + /// instruction should be added to the graph. + wrapper_seed: usize, + + /// This vector holds the list of types references by all Unit associated to + /// this context. Any ComplexTypeId is an index in this Vector. + types: Vec, +} + +impl Context { + /// Create a new Context. 
This function should not be used externally, + /// instead use a ContextBuilder to build a context for you. + pub fn new() -> Context { + Context { + wrapper_seed: 0, + types: vec![], + } + } + + /// Create a new hash seed, such that we can avoid aliasing of hash values. + /// This function is used by the ContextBuilder for creating new Rehash / + /// Newhash instructions. + pub fn get_hash_seed(&mut self) -> usize { + let value = self.wrapper_seed; + self.wrapper_seed += 1; + value + } + + /// Add a new complex type in the list of known types. This function is used + /// by the ContextBuilder to register types seen while generating Units. + pub fn add_type(&mut self, ty: ComplexType) -> ComplexTypeId { + self.types.push(ty); + ComplexTypeId(self.types.len() - 1) + } + + /// Given a ComplexTypeId, returns the associated type. + pub fn get_type(&self, id: ComplexTypeId) -> &ComplexType { + let ComplexTypeId(index) = id; + &self.types[index] + } +} diff --git a/lir/src/control_flow.rs b/lir/src/control_flow.rs new file mode 100644 index 0000000..ea98336 --- /dev/null +++ b/lir/src/control_flow.rs @@ -0,0 +1,83 @@ +/// This module contains the definition of the control flow. The control flow + +use data_flow::Value; + +/// A Control flow contains all the sequences and how they flow from one to +/// another. They reference data flow instructions to determine the order in +/// which side-effectful instructions are expected. They are ending with a +/// control flow instruction. +#[derive(Serialize, Deserialize, Debug)] +pub struct ControlFlow { + /// List of basic sequences. + pub sequences: Vec, + /// Index of the entry sequence. + pub entry: SequenceIndex, + /// Set of exit sequences indexes. + pub exit: Vec, +} + +/// A LIR Sequence Index is an integer which corresponds to an index within a given Unit. +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub struct SequenceIndex(pub usize); + +/// Index within the list of successors. 
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub struct SuccessorIndex(pub usize); + +/// A LIR Sequence is a sequence of computation of value and a control value, +/// used to decide how the evaluation change sequences. Instead of the common +/// wording of "block", the term sequence is used to express sequences of +/// instructions which are ending with a control instruction to jump to another +/// sequence of code. +#[derive(Serialize, Deserialize, Debug, Hash)] +pub struct Sequence { + /// Sequence of instructions in the order in which they have to be executed. + pub sequence: Vec, + /// Control instruction. + pub control: Value, + + /// List of sequences from which the control flow flowed into the current + /// block. This list is ordered such that it matches the order of Phi + /// instructions' operands. + pub predecessors: Vec<(SequenceIndex, SuccessorIndex)>, + + /// List of sequences the control flow instruction can flow into. + pub successors: Vec, + + /// Goto's branch, Call's return location and Switch default case. + pub default: Option, + /// Error handling sequence, if any error happens during the control flow + /// instruction. + pub unwind: Option, + /// Switch targets. 
+ pub targets: Vec<(isize, SuccessorIndex)>, +} + +impl ControlFlow { + pub fn new() -> ControlFlow { + ControlFlow { + sequences: vec![], + entry: SequenceIndex::dummy(), + exit: vec![], + } + } +} + +impl SequenceIndex { + pub fn dummy() -> Self { SequenceIndex(usize::max_value()) } + pub fn is_dummy(self) -> bool { self == SequenceIndex(usize::max_value()) } +} + +impl Sequence { + pub fn new() -> Self { + Sequence { + sequence: vec![], + control: Value::dummy(), + predecessors: vec![], + successors: vec![], + default: None, + unwind: None, + targets: vec![], + } + } +} diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs new file mode 100644 index 0000000..eefc1b3 --- /dev/null +++ b/lir/src/data_flow.rs @@ -0,0 +1,383 @@ +/// The data flow is a graph of operation described by their opcodes and memory +/// dependencies. The data flow express how the data are flowing from the Unit +/// inputs to the Unit output. The data flow cannot be evaluated with the +/// addition of a control flow, which describes the interpretation of Phi nodes +/// operands. + +/// Automatically derive a hashing function for each type, to make sure that we +/// can apply patches to a subset of instructions. +use std::hash::{Hash, Hasher}; + +use number; +use unit; +use types::ComplexTypeId; + +/// A Data flow contains all the instructions from one Unit, and they describe +/// how data are flowing through instructions. +#[derive(Serialize, Deserialize, Debug)] +pub struct DataFlow { + /// Set of instruction, where each instruction should be referenced by at + /// least a Block using their ValueHash. + pub instructions: Vec, +} + +/// A LIR Value corresponds to the computation of either instructions or +/// terminator. As opposed to ordinary SSA notation, we use a hash instead of an +/// instruction index, in order to be able to generate position-independent +/// patches for each Unit. 
+#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub struct Value { + pub hash: u64, + pub index: usize, +} + +/// A LIR Instruction is a single operation which aggregates operands and an +/// operation to produce a ValueHash. +#[derive(Serialize, Deserialize, Debug)] /* derive(Hash)-manually */ +pub struct Instruction { + /// Opcode of the instruction. + pub opcode: Opcode, + /// Ordered list of operands of the instruction. + pub operands: Vec, + /// Set of previous instructions which might change the memory read by this + /// instruction. + pub dependencies: Vec, + /// Set if this instruction got replaced by another. This is not taken into + /// account while computing the hash of an instruction. + pub replaced_by: Option, +} + +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub struct SwitchData { + pub low: i32, + pub high: i32, +} + +/// An Opcode is an instruction which contains basic operations. This list of +/// opcode is minimal and contains all the building blocks to express higher +/// level IR instructions which are platform specific as intrinsic units. +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +pub enum Opcode { + /// Denote the Entry point of the given Unit. This instruction should be + /// replaced when the data flow graph is inlined in another as the entry + /// should not alias the entry of the caller. + Entry(unit::UnitId), + + /// Rehash is a way to wrap a Value with another ValueHash which will be + /// used either for loop-back or to limit the amount of context referrenced + /// in a patch. + /// (1 operand) + Rehash(usize), + + /// Same as Rehash except that we are wrapping an unknown value which does + /// not exists yet. This is useful for mapping arguments of a function, or + /// for extra entry points. + Newhash(usize), + + /// Phi is an instruction which merges values from different blocks. 
Note + /// that this LIR uses Phi instead of extended basic block, in order to + /// avoid carrying around too many variables to the next block, which would + /// imply additional rewrite of the graphs on inlining. + /// (multiple operands) + Phi, + + /// Encode a constant. + /// (0 operand) + Const(number::NumberValue), + + /// Cast is used to change the type interpretation of a Value without any content checks. + /// (1 operand) + Cast(ComplexTypeId), + + /// Extract overflow flag from the operation which created the value. + /// (0 operand, 1 dependency) + OverflowFlag, + + /// Addition. (2 operands) + Add(number::NumberType), + /// Substraction. (2 operands: result = lhs - rhs) + Sub(number::NumberType), + /// Multiplication. (2 operands) + Mul(number::NumberType), + /// Division. (2 operands: result = lhs / rhs) + Div(number::NumberType), + /// Remainder. (2 operands: result = lhs % rhs) + Rem(number::NumberType), + /// Sign-extend. (1 operand) + SignExt(number::SignedType), + /// Zero-extend. (1 operand) + ZeroExt(number::IntType), + + /// Truncate. (round towards zero) (1 operand) + Truncate(number::FloatType), + /// Round. (round towards nearest) (1 operand) + Round(number::FloatType), + /// Floor. (round towards -Inf) (1 operand) + Floor(number::FloatType), + /// Ceil. (round towards +Inf) (1 operand) + Ceil(number::FloatType), + + /// Bitwise exclusive or. (2 operands) + BwXor(number::IntType), + /// Bitwise And. (2 operands) + BwAnd(number::IntType), + /// Bitwise Or. (2 operands) + BwOr(number::IntType), + /// Bitwise Not. (2 operands) + BwNot(number::IntType), + /// Shift left. (2 operands: result = lhs << rhs) + ShiftLeft(number::IntType), + /// Shift right. (2 operands: result = lhs >> rhs) + ShiftRight(number::SignedType), + + /// Equal. (2 operands) + Eq(number::NumberType), + /// Less than. (2 operands: result = lhs < rhs) + Lt(number::NumberType), + /// Less than or equal. 
(2 operands: result = lhs <= rhs) + Le(number::NumberType), + /// Not equal. (2 operands) + Ne(number::NumberType), + /// Greather than. (2 operands: result = lhs > rhs) + Gt(number::NumberType), + // Greather than or equal. (2 operands: result = lhs >= rhs) + Ge(number::NumberType), + + /// StaticAddress is used to refer to data which is not yet known at compile + /// time, but known at the execution, such as function pointer addresses. + /// (0 operand) + StaticAddress, + /// CPUAddress is used to refer to internal CPU data, and help the compiler + /// reason about the aliasing intrinsic using CPU data, such as flags and + /// cpuid. + /// (0 operand) + CPUAddress, + /// Get the address of where the input operand is stored. At the end of the + /// pipeline, if any of these instructions remain it enforces the data to + /// live in memory at least as long as the address exists. + /// (1 operand) + Address, + + /// Load content from the address. (1 operand: result = *input) + Load(ComplexTypeId), + /// Store content to the address. (2 operands: *lhs = rhs) + Store(ComplexTypeId), + + // Acquire = LoadFence{Load, Store} + // Release = {Load, Store}FenceStore + + /// LoadFenceLoad or read barrier implies that all loads must be completed + /// before proceeding with any loads. (Prevent the compiler from moving load + /// instructions) (0 operand) + LoadFenceLoad, + /// LoadFenceStore implies that all loads must be completed before + /// proceeding with any stores. (Prevent the compiler from moving load and + /// store instructions) (0 operand) + LoadFenceStore, + /// StoreFenceLoad implies that all stores must be completed before + /// proceeding with any loads. (Prevent the compiler from moving load and + /// store instructions) (0 operand) + StoreFenceLoad, + /// StoreFenceStore or write barrier implies that all stores must be + /// completed before proceeding with any stores. 
(Prevent the compiler from + /// moving store instructions) (0 operand) + StoreFenceStore, + + /// Unit is used for non fallible unit. For example, this can be used for + /// non-inlinable and non-optimizable intrinsics which are expressed in + /// terms of the minimal set of instructions. This is used to provide target + /// specific instructions such as SIMD, locked-instructions or cpuid which + /// are not represented in this LIR. (maybe operands) + Unit(unit::UnitId), + + // + // List of Control instructions. + // + + /// Return the value computed by the instruction behind Value. This + /// corresponds either to the returned value of a function, or to the result + /// of an expression for subset of Rust functions. Note, the number of + /// operands of any return opcode should match the number of outputs + /// described in the signature of the Unit. + /// (? operand) + Return, + + /// Unwind ends the control flow, and unwind everything. + Unwind, + + /// Unreachable is used either as an assertion / optimization mechanism. + Unreachable, + + /// Goto is an unconditional jump to another basic block in the same Unit. + /// (0 operand, default target) + Goto, + + /// Switch is a conditional branch implemented as a switch case over + /// variable ranges of integer values. This is used even for simple if/else + /// branches. + /// (1 operand, maybe default target, targets) + Switch(SwitchData), + + /// Call implements a function call, such as any Rust function, an assertion + /// or a drop function. The argument correspond to the signature of the + /// function being called. (many operands: function + arguments, maybe + /// default target, maybe unwind target) + Call(ComplexTypeId), + + /// CallUnit implements an internal Unit call or inline. 
+ /// (many operands: arguments, maybe default target, maybe unwind target) + CallUnit(unit::UnitId), +} + +impl Opcode { + pub fn is_control(self) -> bool { + match self { + Opcode::Return | + Opcode::Unwind | + Opcode::Unreachable | + Opcode::Goto | + Opcode::Switch(_) | + Opcode::Call(_) | + Opcode::CallUnit(_) => true, + _ => false, + } + } + + pub fn is_return(self) -> bool { + match self { + Opcode::Return => true, + _ => false, + } + } +} + +impl Instruction { + pub fn is_control(&self) -> bool { self.opcode.is_control() } +} + +impl Hash for Instruction { + fn hash(&self, state: &mut H) { + self.opcode.hash(state); + self.operands.hash(state); + self.dependencies.hash(state); + // Exclude self.replaced_by. + } +} + +impl<'a> From<&'a Instruction> for u64 { + fn from(ins: &'a Instruction) -> u64 { + use std::collections::hash_map::DefaultHasher; + let mut hasher = DefaultHasher::new(); + ins.hash(&mut hasher); + hasher.finish() + } +} + +impl DataFlow { + pub fn new() -> DataFlow { + DataFlow { instructions: vec![] } + } + + fn get_value(&self, index: usize) -> Value { + Value { hash: (&self.instructions[index]).into(), index } + } + + pub fn add_ins(&mut self, ins: Instruction) -> Value { + // TODO: Ensure that if the instruction already exists, then it is not + // being added a second time, and the returned Value output correspond + // to the existing Instruction. + // TODO: Add consistency checks that all value references are indeed in + // the current DataFlow structure. 
+ self.instructions.push(ins); + self.get_value(self.instructions.len() - 1) + } +} + +impl Value { + pub fn dummy() -> Value { + Value { hash: 0, index: usize::max_value() } + } + pub fn is_dummy(self) -> bool { + self.index == usize::max_value() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn rehash() { + let mut df = DataFlow::new(); + let v0 = df.add_ins(Instruction { + opcode: Opcode::Const(number::NumberValue::U32(1024)), + operands: vec![], + dependencies: vec![], + replaced_by: None, + }); + let v1 = df.add_ins(Instruction { + opcode: Opcode::Rehash(21), + operands: vec![v0], + dependencies: vec![], + replaced_by: None, + }); + let v2 = df.add_ins(Instruction { + opcode: Opcode::Rehash(69), + operands: vec![v0], + dependencies: vec![], + replaced_by: None, + }); + // Rehash opcode compute a different hash value based on the number + // which is given as argument. This is used to handle loops. + assert_ne!(v1.hash, v2.hash); + } + + #[test] + fn stable_hash() { + // TODO: We should add test cases like that for each instruction, to + // ensure that 2 identical instructions are given the same hash. + let mut df = DataFlow::new(); + let v0 = df.add_ins(Instruction { + opcode: Opcode::Const(number::NumberValue::U32(1024)), + operands: vec![], + dependencies: vec![], + replaced_by: None, + }); + let v1 = df.add_ins(Instruction { + opcode: Opcode::Const(number::NumberValue::U32(1024)), + operands: vec![], + dependencies: vec![], + replaced_by: None, + }); + // Rehash opcode compute a different hash value based on the number + // which is given as argument. This is used to handle loops. 
+ assert_eq!(v0.hash, v1.hash); + } + + #[test] + fn replaced_by() { + let mut df = DataFlow::new(); + let v0 = df.add_ins(Instruction { + opcode: Opcode::Const(number::NumberValue::I32(1)), + operands: vec![], + dependencies: vec![], + replaced_by: None, + }); + let v1 = df.add_ins(Instruction { + opcode: Opcode::Add(number::NumberType::I32), + operands: vec![v0, v0], + dependencies: vec![], + replaced_by: None, + }); + let v2 = df.add_ins(Instruction { + opcode: Opcode::Const(number::NumberValue::I32(2)), + operands: vec![v0], + dependencies: vec![], + replaced_by: None, + }); + df.instructions[v1.index].replaced_by = Some(v2); + // When setting the replaced_by field, the hash of an instruction should + // not change. + assert_eq!(v1.hash, df.get_value(v1.index).hash); + } +} diff --git a/lir/src/lib.rs b/lir/src/lib.rs index 0e97a88..83941a7 100644 --- a/lir/src/lib.rs +++ b/lir/src/lib.rs @@ -14,11 +14,16 @@ /// in the data section of the generated program, and read at runtime by the /// HolyJIT library. /// -/// The LIR is represented as a mix between a Sea-Of-Nodes and SSA, which uses -/// Instruction hashes as SSA indexes. Hashes are used as a way to have a -/// position independent representation to make the versionning effective. The -/// mixed sea-of-nodes approach is used as a way to reduce the number of blocks -/// mutations until we reach the Scheduler. +/// The LIR is represented as a mix between a Sea-Of-Nodes and SSA, i-e that the +/// data-flow is the primary graph in which instructions are manipulated, and +/// then we have a control-flow graph which specify the ownership of Phi nodes +/// and control instruction. +/// +/// One addition compared to the SSA notation and the sea-of-nodes approach for +/// manipulating the graph, is that this SSA notation is not using any +/// integer/pointer as indexes, but an index based on a stable hashing mechanism +/// such that we can patch the graph with the result of previous compilation. 
+// TODO: Add references to each part of the code in the previous comment. // Serde is used for serializing and deserializing the LIR which is stored // by the driver in a constant, and deserialized by the JIT compiler in @@ -28,428 +33,10 @@ extern crate serde_derive; extern crate serde; extern crate bincode; +pub mod unit; pub mod number; - -/// Automatically derive a hashing function for each type, to make sure that we -/// can apply patches to a subset of instructions. -use std::hash::{Hash, Hasher}; - -/// A LIR Unit is a connected set of basic blocks with an entry and exit blocks. -/// This might correspond to a Rust function, a subset of a Rust function which -/// corresponds to an opcode or inline caches, or to an target specific -/// intrinsic abstract code. A Unit contains the set of instructions and blocks -/// which are indexing the instruction in the order in which they are expected -/// to be executed. -#[derive(Serialize, Deserialize, Debug)] -pub struct Unit { - /// Unique Unit Identifier. - pub id: UnitId, - - /// Data flow, contains all the instructions and their operands, as well as - /// the potentially memory dependencies. - pub data_flow: DataFlow, - - /// Control flow, contains all the blocks which are making references to the - /// data flow instructions, and also the control flow instructions. - pub control_flow: ControlFlow, -} - -/// A Data flow contains all the instructions from one Unit, and they describe -/// how data are flowing through instructions. -#[derive(Serialize, Deserialize, Debug)] -pub struct DataFlow { - /// Set of instruction, where each instruction should be referenced by at - /// least a Block using their ValueHash. - pub instructions: Vec, -} - -/// A Control flow contains all the blocks and how they flow from one to -/// another. They reference data flow instructions to determine the order in -/// which side-effectful instructions are expected. They are ending with a -/// control flow instruction. 
-#[derive(Serialize, Deserialize, Debug)] -pub struct ControlFlow { - /// List of basic blocks. - pub blocks: Vec, - /// Index of the entry block. - pub entry: BlockIndex, - /// Set of exit blocks indexes. - pub exit: Vec, -} - -/// Unique Unit identifier of an intrinsic. -type IntrinsicId = usize; -/// Unique unit identifier of a function. -type FunctionId = usize; -/// Unique unit identifier of a sub-set of a function. -type SubSetId = usize; - -/// Unique Unit identifier. -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy, PartialEq, Eq)] -pub enum UnitId { - /// Identifier of a pseudo-code of an intrinsic used to represent the - /// equivalent LIR of a target specific optimization. Intrisic do not have - /// support for unwinding. - Intrinsic(IntrinsicId), - - /// Identifier of a callable function. - Function(FunctionId), - - /// Identifier of a sub-set of a Rust function. - SubSet(SubSetId) -} - - -/// A LIR Block Index is an integer which corresponds to an index within a given Unit. -#[derive(Serialize, Deserialize, Debug, Hash)] -pub struct BlockIndex { - pub value: usize, -} - -/// A LIR Block is a sequence of computation of value and a Terminator. -#[derive(Serialize, Deserialize, Debug, Hash)] -pub struct Block { - /// Sequence of instructions contained in the current block in the order in - /// which they have to be executed. - pub sequence: Vec, - /// Control flow instruction. - pub terminator: Value, - - /// Goto's branch, Call's return location and Switch default case. - pub default: Option, - /// Error handling block, if any error happens during the control flow - /// instruction. - pub unwind: Option, - /// Switch targets. - pub targets: Option>, -} - -/// A LIR Instruction is a single operation which aggregates operands and an -/// operation to produce a ValueHash. -#[derive(Serialize, Deserialize, Debug)] /* derive(Hash)-manually */ -pub struct Instruction { - /// Opcode of the instruction. 
- pub opcode: Opcode, - /// Ordered list of operands of the instruction. - pub operands: Vec, - /// Set of previous instructions which might change the memory read by this - /// instruction. - pub dependencies: Vec, - /// Set if this instruction got replaced by another. This is not taken into - /// account while computing the hash of an instruction. - pub replaced_by: Option, -} - -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] -pub struct SwitchData { - pub low: i32, - pub high: i32, -} - -/// An Opcode is an instruction which contains basic operations. This list of -/// opcode is minimal and contains all the building blocks to express higher -/// level IR instructions which are platform specific as intrinsic units. -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] -pub enum Opcode { - /// WithHash is a way to wrap a Value with another ValueHash which will be - /// used either for loop-back or to limit the amount of context referrenced - /// in a patch. - /// (1 operand) - Rehash(usize), - - /// Phi is an instruction which merges values from different blocks. Note - /// that this LIR uses Phi instead of extended basic block, in order to - /// avoid carrying around too many variables to the next block, which would - /// imply additional rewrite of the graphs on inlining. - /// (multiple operands) - Phi, - - /// Encode a constant. - /// (0 operand) - Const(number::NumberValue), - - /// Cast is used to change the type interpretation of a Value without any content checks. - /// (1 operand + data_index to CastData) - Cast(ComplexTypeId), - - /// Extract overflow flag from the operation which created the value. - /// (0 operand, 1 dependency) - OverflowFlag, - - /// Addition. (2 operands) - Add(number::NumberType), - /// Substraction. (2 operands: result = lhs - rhs) - Sub(number::NumberType), - /// Multiplication. (2 operands) - Mul(number::NumberType), - /// Division. (2 operands: result = lhs / rhs) - Div(number::NumberType), - /// Remainder. 
(2 operands: result = lhs % rhs) - Rem(number::NumberType), - /// Sign-extend. (1 operand) - SignExt(number::SignedType), - /// Zero-extend. (1 operand) - ZeroExt(number::IntType), - - /// Truncate. (round towards zero) (1 operand) - Truncate(number::FloatType), - /// Round. (round towards nearest) (1 operand) - Round(number::FloatType), - /// Floor. (round towards -Inf) (1 operand) - Floor(number::FloatType), - /// Ceil. (round towards +Inf) (1 operand) - Ceil(number::FloatType), - - /// Bitwise exclusive or. (2 operands) - BwXor(number::IntType), - /// Bitwise And. (2 operands) - BwAnd(number::IntType), - /// Bitwise Or. (2 operands) - BwOr(number::IntType), - /// Bitwise Not. (2 operands) - BwNot(number::IntType), - /// Shift left. (2 operands: result = lhs << rhs) - ShiftLeft(number::IntType), - /// Shift right. (2 operands: result = lhs >> rhs) - ShiftRight(number::SignedType), - - /// Equal. (2 operands) - Eq(number::NumberType), - /// Less than. (2 operands: result = lhs < rhs) - Lt(number::NumberType), - /// Less than or equal. (2 operands: result = lhs <= rhs) - Le(number::NumberType), - /// Not equal. (2 operands) - Ne(number::NumberType), - /// Greather than. (2 operands: result = lhs > rhs) - Gt(number::NumberType), - // Greather than or equal. (2 operands: result = lhs >= rhs) - Ge(number::NumberType), - - /// StaticAddress is used to refer to data which is not yet known at compile - /// time, but known at the execution, such as function pointer addresses. - /// (0 operand) - StaticAddress, - /// CPUAddress is used to refer to internal CPU data, and help the compiler - /// reason about the aliasing intrinsic using CPU data, such as flags and - /// cpuid. - /// (0 operand) - CPUAddress, - /// Get the address of where the input operand is stored. At the end of the - /// pipeline, if any of these instructions remain it enforces the data to - /// live in memory at least as long as the address exists. 
- /// (1 operand) - Address, - - /// Load content from the address. (1 operand: result = *input, data_index) - Load(ComplexTypeId), - /// Store content to the address. (2 operands: *lhs = rhs, data_index) - Store(ComplexTypeId), - - // Acquire = LoadFence{Load, Store} - // Release = {Load, Store}FenceStore - - /// LoadFenceLoad or read barrier implies that all loads must be completed - /// before proceeding with any loads. (Prevent the compiler from moving load - /// instructions) (0 operand) - LoadFenceLoad, - /// LoadFenceStore implies that all loads must be completed before - /// proceeding with any stores. (Prevent the compiler from moving load and - /// store instructions) (0 operand) - LoadFenceStore, - /// StoreFenceLoad implies that all stores must be completed before - /// proceeding with any loads. (Prevent the compiler from moving load and - /// store instructions) (0 operand) - StoreFenceLoad, - /// StoreFenceStore or write barrier implies that all stores must be - /// completed before proceeding with any stores. (Prevent the compiler from - /// moving store instructions) (0 operand) - StoreFenceStore, - - /// Unit is used for non fallible unit. For example, this can be used for - /// non-inlinable and non-optimizable intrinsics which are expressed in - /// terms of the minimal set of instructions. This is used to provide target - /// specific instructions such as SIMD, locked-instructions or cpuid which - /// are not represented in this LIR. (maybe operands) - Unit(UnitId), - - // - // List of Control instructions. - // - - /// Return the value computed by the instruction behind Value. This - /// corresponds either to the returned value of a function, or to the result - /// of an expression for subset of Rust functions. - /// (1 operand) - Return, - - /// Unwind ends the control flow, and unwind everything. - Unwind, - - /// Unreachable is used either as an assertion / optimization mechanism. 
- Unreachable, - - /// Goto is an unconditional jump to another basic block in the same Unit. - /// (0 operand, default target) - Goto, - - /// Switch is a conditional branch implemented as a switch case over - /// variable ranges of integer values. This is used even for simple if/else - /// branches. - /// (1 operand, maybe default target, targets) - Switch(SwitchData), - - /// Call implements a function call, such as any Rust function, an assertion - /// or a drop function. - /// (many operands: function + arguments, maybe default target, maybe unwind target) - Call, - - /// CallUnit implements an internal Unit call or inline. - /// (many operands: arguments, maybe default target, maybe unwind target) - CallUnit(UnitId), -} - -/// ComplexType indexes are used to index an arithmetic type or an aggregated -/// type, such as tuples, enums or structures within the Assembly. -type ComplexTypeId = usize; - -/// A LIR Value corresponds to the computation of either instructions or -/// terminator. As opposed to ordinary SSA notation, we use a hash instead of an -/// instruction index, in order to be able to generate position-independent -/// patches for each Unit. -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] -pub struct Value { - pub hash: u64, - pub index: usize, -} - -impl Opcode { - pub fn is_control(self) -> bool { - match self { - Opcode::Return | - Opcode::Unwind | - Opcode::Unreachable | - Opcode::Goto | - Opcode::Switch(_) | - Opcode::Call | - Opcode::CallUnit(_) => true, - _ => false, - } - } -} - -impl Hash for Instruction { - fn hash(&self, state: &mut H) { - self.opcode.hash(state); - self.operands.hash(state); - self.dependencies.hash(state); - // Exclude self.replaced_by. 
- } -} - -impl<'a> From<&'a Instruction> for u64 { - fn from(ins: &'a Instruction) -> u64 { - use std::collections::hash_map::DefaultHasher; - let mut hasher = DefaultHasher::new(); - ins.hash(&mut hasher); - hasher.finish() - } -} - -impl Unit { - pub fn new(id: UnitId) -> Unit { - Unit { - id, - data_flow: DataFlow::new(), - control_flow: ControlFlow::new(), - } - } -} - -impl ControlFlow { - pub fn new() -> ControlFlow { - ControlFlow { - blocks: vec![], - entry: BlockIndex { value: 0 }, - exit: vec![], - } - } -} - -impl DataFlow { - pub fn new() -> DataFlow { - DataFlow { instructions: vec![] } - } - - fn get_value(&self, index: usize) -> Value { - Value { hash: (&self.instructions[index]).into(), index } - } - - pub fn add_ins(&mut self, ins: Instruction) -> Value { - // TODO: Ensure that if the instruction already exists, then it is not - // being added a second time, and the returned Value output correspond - // to the existing Instruction. - // TODO: Add consistency checks that all value references are indeed in - // the current DataFlow structure. - self.instructions.push(ins); - self.get_value(self.instructions.len() - 1) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn check_rehash() { - let mut df = DataFlow::new(); - let v0 = df.add_ins(Instruction { - opcode: Opcode::Const(number::NumberValue::U32(1024)), - operands: vec![], - dependencies: vec![], - replaced_by: None, - }); - let v1 = df.add_ins(Instruction { - opcode: Opcode::Rehash(21), - operands: vec![v0], - dependencies: vec![], - replaced_by: None, - }); - let v2 = df.add_ins(Instruction { - opcode: Opcode::Rehash(69), - operands: vec![v0], - dependencies: vec![], - replaced_by: None, - }); - // Rehash opcode compute a different hash value based on the number - // which is given as argument. This is used to handle loops. 
- assert_ne!(v1.hash, v2.hash); - } - - #[test] - fn check_replaced_by() { - let mut df = DataFlow::new(); - let v0 = df.add_ins(Instruction { - opcode: Opcode::Const(number::NumberValue::I32(1)), - operands: vec![], - dependencies: vec![], - replaced_by: None, - }); - let v1 = df.add_ins(Instruction { - opcode: Opcode::Add(number::NumberType::I32), - operands: vec![v0, v0], - dependencies: vec![], - replaced_by: None, - }); - let v2 = df.add_ins(Instruction { - opcode: Opcode::Const(number::NumberValue::I32(2)), - operands: vec![v0], - dependencies: vec![], - replaced_by: None, - }); - df.instructions[v1.index].replaced_by = Some(v2); - // When setting the replaced_by field, the hash of an instruction should - // not change. - assert_eq!(v1.hash, df.get_value(v1.index).hash); - } -} +pub mod types; +pub mod data_flow; +pub mod control_flow; +pub mod context; +pub mod builder; diff --git a/lir/src/number.rs b/lir/src/number.rs index 2709d24..77e8f49 100644 --- a/lir/src/number.rs +++ b/lir/src/number.rs @@ -5,22 +5,22 @@ use std::hash::{Hash, Hasher}; /// NumberType are used for math and bitwise operators. All other number types /// can be convert to this one, including NumberValue, using the `into()` /// method. 
-#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy, PartialEq, Eq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum NumberType { U8, U16, U32, U64, I8, I16, I32, I64, F32, F64, } -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum IntType { U8, U16, U32, U64, } -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum SignedType { U8, U16, U32, U64, I8, I16, I32, I64, } -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum FloatType { F32, F64, } diff --git a/lir/src/types.rs b/lir/src/types.rs new file mode 100644 index 0000000..3b91a9e --- /dev/null +++ b/lir/src/types.rs @@ -0,0 +1,40 @@ +/// Complex types are used to help analysis running on the data flow graph solve +/// problem such as aliassing. Using types to dissambiguate aliasing issues is +/// useful as it reduces the depenency graph between load and store +/// instructions. +use number; + +/// ComplexType indexes are used to index an arithmetic type or an aggregated +/// type, such as tuples, enums or structures within the Assembly. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Clone, Copy)] +pub struct ComplexTypeId(pub usize); + +/// Offset within an aggregated type. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Clone, Copy)] +pub struct Offset(pub usize); + +/// Determine if a given function can unwind or not, If not no unwind successors +/// would be exepected. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Clone, Copy)] +pub struct CanUnwind(pub bool); + +/// A complex type is either a function signature, an structure, an union, a +/// pointer, a scalar or a vector of scalar. 
All these types should be aggregaed +/// globally, such that that can be used across multiple Units. +#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone)] +pub enum ComplexType { + /// Functions are used to express the signature of Unit and external + /// functions. + Function(Vec, Vec, CanUnwind), + /// Structures are used to map each offsets with its corresponding type. + Structure(Vec<(Offset, ComplexTypeId)>), + /// Unions are used to select between multiple structures. + Union(Vec), + /// Untyped pointers, the type is carried by the load and store operations. + /// This simplify the problem by not having to handle recursive types. + Pointer, + /// A Scalar represents a number. + Scalar(number::NumberType), + /// A Vector represents an aggregation of Scalar. + Vector(number::NumberType, usize), +} diff --git a/lir/src/unit.rs b/lir/src/unit.rs new file mode 100644 index 0000000..658c7f5 --- /dev/null +++ b/lir/src/unit.rs @@ -0,0 +1,69 @@ +use data_flow::{DataFlow, Value}; +use control_flow::ControlFlow; +use types::ComplexTypeId; + +/// A LIR Unit is a connected set of basic blocks with an entry and exit blocks. +/// This might correspond to a Rust function, a subset of a Rust function which +/// corresponds to an opcode or inline caches, or to an target specific +/// intrinsic abstract code. A Unit contains the set of instructions and blocks +/// which are indexing the instruction in the order in which they are expected +/// to be executed. +#[derive(Serialize, Deserialize, Debug)] +pub struct Unit { + /// Unique Unit Identifier. + pub id: UnitId, + + /// Data flow, contains all the instructions and their operands, as well as + /// the potentially memory dependencies. + pub dfg: DataFlow, + + /// Control flow, contains all the blocks which are making references to the + /// data flow instructions, and also the control flow instructions. + pub cfg: ControlFlow, + + /// Signature of the current unit. 
+ pub sig: ComplexTypeId, + + /// Value corresponding to the arguments. (Uniquely identified Rehash values) + pub inputs: Vec, + + // Set of Value corresponding to the returned value. (Return opcode) + pub outputs: Vec, +} + +/// Unique Unit identifier of an intrinsic. +type IntrinsicId = usize; +/// Unique unit identifier of a function. +type FunctionId = usize; +/// Unique unit identifier of a sub-set of a function. +type SubSetId = usize; + +/// Unique Unit identifier. +#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy, PartialEq, Eq)] +pub enum UnitId { + /// Identifier of a pseudo-code of an intrinsic used to represent the + /// equivalent LIR of a target specific optimization. Intrisic do not have + /// support for unwinding. + Intrinsic(IntrinsicId), + + /// Identifier of a callable function. + Function(FunctionId), + + /// Identifier of a sub-set of a Rust function. + SubSet(SubSetId) +} + +impl Unit { + /// Create a new Unit. It is recommended to use the `UnitBuilder` to + /// construct the data flow and control flow graph of the Unit. + pub fn new(id: UnitId) -> Unit { + Unit { + id, + dfg: DataFlow::new(), + cfg: ControlFlow::new(), + sig: ComplexTypeId(usize::max_value()), + inputs: vec![], + outputs: vec![], + } + } +} From c93f2d092c978b62fe2902fd10bb377d5660f55a Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 18 Aug 2018 18:52:01 +0200 Subject: [PATCH 04/32] Extract Cranelift signature from the LIR. 
--- codegen/src/error.rs | 2 ++ codegen/src/lib.rs | 66 ++++++++++++++++++++++++-------------------- codegen/src/lower.rs | 57 ++++++++++++++++++++++++++++++++++---- lir/src/builder.rs | 10 ++++--- 4 files changed, 96 insertions(+), 39 deletions(-) diff --git a/codegen/src/error.rs b/codegen/src/error.rs index 3e08f40..fa34754 100644 --- a/codegen/src/error.rs +++ b/codegen/src/error.rs @@ -12,6 +12,8 @@ pub enum LowerError { CodeGen(CodegenError), Map(MapError), Protect, + UnitIsNotAFunction, + ParameterTypeNotLowered, } // TODO: impl Error for LowerError diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index b6347a0..0e2176a 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -12,9 +12,12 @@ pub mod error; use codegen::settings::Configurable; use exec_alloc::{WrittableCode, ExecutableCode}; -/// This is a code generator context, which is used to lower a LIR Unit into +use lir::unit; +use lir::context; + +/// This is a reusable code generator, which is used to compile a LIR Unit to /// machine code. -pub struct Context { +pub struct CodeGenerator { ctx: codegen::Context, isa: Box, } @@ -24,7 +27,7 @@ pub struct JitCode { code: ExecutableCode, } -impl Context { +impl CodeGenerator { /// Create a lowering (code generator and executable page allocation) /// context for the architecture on which this code is running. pub fn new() -> Self { @@ -56,9 +59,9 @@ impl Context { /// Given an HolyJIT LIR Unit, convert it to a Cranelift function in order /// to generate the corresponding bytes, then allocate memory pages and map /// them as executable. - pub fn compile(&mut self, unit: &lir::unit::Unit) -> error::LowerResult { - let &mut Context { ref mut ctx, ref isa, .. } = self; - ctx.func = lower::convert(unit)?; + pub fn compile(&mut self, lir_ctx: &context::Context, unit: &unit::Unit) -> error::LowerResult { + let &mut CodeGenerator { ref mut ctx, ref isa, .. 
} = self; + ctx.func = lower::convert(isa.as_ref(), lir_ctx, unit)?; let mut reloc_sink = exec_alloc::NullRelocSink {}; let mut trap_sink = exec_alloc::NullTrapSink {}; let code_size = ctx.compile(isa.as_ref())?; @@ -90,36 +93,39 @@ mod tests { #[test] - fn check_create_context() { - let _ctx = Context::new(); + fn create_code_generator() { + let _cg = CodeGenerator::new(); assert!(true); } #[test] - fn check_add1_unit() { + fn add1_unit() { let mut ctx_bld = ContextBuilder::new(); - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let v0 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); - let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v0]); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v1], - dependencies: vec![], - replaced_by: None, - }) - } - let add1_unit = bld.finish(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); + let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v0]); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v1], + dependencies: vec![], + replaced_by: None, + }) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); - let mut ctx = Context::new(); - let code = ctx.compile(&add1_unit).unwrap(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); let add1 : fn(i32) -> i32 = unsafe { mem::transmute(code.as_ptr()) }; diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 7d1b0b0..aa5daef 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -3,18 +3,65 @@ use frontend::{FunctionBuilderContext, FunctionBuilder, Variable}; use codegen::entity::EntityRef; use codegen::ir::{ExternalName, Function, Signature, AbiParam, InstBuilder}; use codegen::ir::types::*; +use codegen::ir::types; use codegen::settings::{self, CallConv}; use codegen::verifier::verify_function; +use codegen::isa::TargetIsa; use lir::unit::Unit; -use error; +use lir::context::Context; +use lir::types::{ComplexTypeId, ComplexType}; +use lir::number::{NumberType}; +use error::{LowerResult, LowerError}; + +#[derive(Copy, Clone)] +struct CtxIsa<'a> { + pub isa: &'a TargetIsa, + pub ctx: &'a Context, +} +fn abiparam<'a>(ci: CtxIsa<'a>, ty: ComplexTypeId) -> LowerResult { + use self::ComplexType::*; + use self::NumberType::*; + let ty = ci.ctx.get_type(ty); + let ty = match ty { + &Pointer => ci.isa.pointer_type(), + &Scalar(U8) | &Scalar(I8) => types::I8, + &Scalar(U16) | &Scalar(I16) => types::I16, + &Scalar(U32) | &Scalar(I32) => types::I32, + &Scalar(U64) | 
&Scalar(I64) => types::I64, + &Vector(_, _) => unimplemented!(), + _ => return Err(LowerError::ParameterTypeNotLowered), + }; + Ok(AbiParam::new(ty)) +} -/// Convert a LIR Unit into a Cranelift IR (Function). -pub fn convert(_unit: &Unit) -> error::LowerResult { +/// Unit have a signature expressed as a type, we have to convert this signature +/// into simpler types understood by Cranelift. +fn signature<'a>(ci: CtxIsa<'a>, unit: &Unit) -> LowerResult{ + let ty = ci.ctx.get_type(unit.sig); + let (ins, outs) = match ty { + &ComplexType::Function(ref ins, ref outs, ref _unwind) => (ins, outs), + _ => return Err(LowerError::UnitIsNotAFunction), + }; + + // At the moment, assume that all Units are going to be called with Rust + // calling convention. let mut sig = Signature::new(CallConv::SystemV); - sig.returns.push(AbiParam::new(I32)); - sig.params.push(AbiParam::new(I32)); + + for &ty in ins.iter() { + sig.params.push(abiparam(ci, ty)?); + } + for &ty in outs.iter() { + sig.returns.push(abiparam(ci, ty)?); + } + Ok(sig) +} + +/// Convert a LIR Unit into a Cranelift IR (Function). +pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult { + let ci = CtxIsa { ctx, isa }; + let sig = signature(ci, unit)?; let mut fn_builder_ctx = FunctionBuilderContext::::new(); let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig); { diff --git a/lir/src/builder.rs b/lir/src/builder.rs index b7f0d02..b0b4af1 100644 --- a/lir/src/builder.rs +++ b/lir/src/builder.rs @@ -39,10 +39,6 @@ impl ContextBuilder { } } - pub fn finish(self) -> context::Context { - self.ctx - } - pub fn get_rehash(&mut self) -> Opcode { Opcode::Rehash(self.ctx.get_hash_seed()) } @@ -75,6 +71,11 @@ impl ContextBuilder { self.ctx.get_type(id) } + /// Finalize and return the context which hold the type information of + /// multiple Units. 
+ pub fn finish(self) -> context::Context { + self.ctx + } } impl<'a> UnitBuilder<'a> { @@ -213,6 +214,7 @@ impl<'a> UnitBuilder<'a> { edit.unwind = Some(succ_idx); } + /// Finalize and (TODO) assert that the generate Unit is valid. pub fn finish(self) -> Unit { self.unit } From 9524d25577f54997d6e6587794a9d1f5955836c3 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 18 Aug 2018 18:53:06 +0200 Subject: [PATCH 05/32] Add a Nix expression to remove extra nix-shell arguments. --- shell.nix | 1 + 1 file changed, 1 insertion(+) create mode 100644 shell.nix diff --git a/shell.nix b/shell.nix new file mode 100644 index 0000000..6a8d4b8 --- /dev/null +++ b/shell.nix @@ -0,0 +1 @@ +(import ./release.nix {}).holyjit From eb6d6de0c8ba51ec4484fce1bb5d6cea83b4b07f Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sun, 19 Aug 2018 20:58:42 +0200 Subject: [PATCH 06/32] Replace the codegen add1 test case by one produced from the LIR. --- codegen/src/error.rs | 2 +- codegen/src/lib.rs | 1 + codegen/src/lower.rs | 336 ++++++++++++++++++++++++++++++++++++------- lir/src/builder.rs | 19 ++- lir/src/data_flow.rs | 66 +++++++++ lir/src/unit.rs | 6 +- 6 files changed, 366 insertions(+), 64 deletions(-) diff --git a/codegen/src/error.rs b/codegen/src/error.rs index fa34754..c0cc836 100644 --- a/codegen/src/error.rs +++ b/codegen/src/error.rs @@ -13,7 +13,7 @@ pub enum LowerError { Map(MapError), Protect, UnitIsNotAFunction, - ParameterTypeNotLowered, + ComplexTypeNotLowered, } // TODO: impl Error for LowerError diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index 0e2176a..1cb30f6 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -110,6 +110,7 @@ mod tests { let s0 = bld.create_sequence(); { bld.switch_to_sequence(s0); + bld.set_entry(); let a0 = bld.unit_arg(0); let v0 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v0]); diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 
aa5daef..d8d5c5d 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -1,17 +1,22 @@ +use std::mem; + use frontend::{FunctionBuilderContext, FunctionBuilder, Variable}; use codegen::entity::EntityRef; -use codegen::ir::{ExternalName, Function, Signature, AbiParam, InstBuilder}; +use codegen::ir::{Ebb, ExternalName, Function, Signature, AbiParam, InstBuilder, TrapCode}; +use codegen::ir::immediates::{Ieee32, Ieee64}; use codegen::ir::types::*; use codegen::ir::types; use codegen::settings::{self, CallConv}; use codegen::verifier::verify_function; use codegen::isa::TargetIsa; -use lir::unit::Unit; +use lir::unit::{Unit, UnitId}; use lir::context::Context; use lir::types::{ComplexTypeId, ComplexType}; -use lir::number::{NumberType}; +use lir::number::{NumberType, NumberValue}; +use lir::control_flow::{Sequence, SequenceIndex, SuccessorIndex}; +use lir::data_flow::{Opcode, Instruction, ValueType}; use error::{LowerResult, LowerError}; #[derive(Copy, Clone)] @@ -20,30 +25,48 @@ struct CtxIsa<'a> { pub ctx: &'a Context, } -fn abiparam<'a>(ci: CtxIsa<'a>, ty: ComplexTypeId) -> LowerResult { +fn convert_number_type(ty: NumberType) -> types::Type { + match ty { + NumberType::I8 | NumberType::U8 => types::I8, + NumberType::I16 | NumberType::U16 => types::I16, + NumberType::I32 | NumberType::U32 => types::I32, + NumberType::I64 | NumberType::U64 => types::I64, + NumberType::F32 => types::F32, + NumberType::F64 => types::F64, + } +} + +fn convert_type<'a>(ci: CtxIsa<'a>, ty: ComplexTypeId) -> LowerResult { use self::ComplexType::*; use self::NumberType::*; let ty = ci.ctx.get_type(ty); - let ty = match ty { - &Pointer => ci.isa.pointer_type(), - &Scalar(U8) | &Scalar(I8) => types::I8, - &Scalar(U16) | &Scalar(I16) => types::I16, - &Scalar(U32) | &Scalar(I32) => types::I32, - &Scalar(U64) | &Scalar(I64) => types::I64, + match ty { + &Pointer => Ok(ci.isa.pointer_type()), + &Scalar(U8) | &Scalar(I8) => Ok(types::I8), + &Scalar(U16) | &Scalar(I16) => Ok(types::I16), + 
&Scalar(U32) | &Scalar(I32) => Ok(types::I32), + &Scalar(U64) | &Scalar(I64) => Ok(types::I64), &Vector(_, _) => unimplemented!(), - _ => return Err(LowerError::ParameterTypeNotLowered), - }; - Ok(AbiParam::new(ty)) + _ => Err(LowerError::ComplexTypeNotLowered), + } +} + +fn abiparam<'a>(ci: CtxIsa<'a>, ty: ComplexTypeId) -> LowerResult { + Ok(AbiParam::new(convert_type(ci, ty)?)) +} + +fn signature_io<'a>(ci: CtxIsa<'a>, sig: ComplexTypeId) -> LowerResult<(&'a Vec, &'a Vec)> { + let ty = ci.ctx.get_type(sig); + match ty { + &ComplexType::Function(ref ins, ref outs, ref _unwind) => Ok((ins, outs)), + _ => Err(LowerError::UnitIsNotAFunction), + } } /// Unit have a signature expressed as a type, we have to convert this signature /// into simpler types understood by Cranelift. -fn signature<'a>(ci: CtxIsa<'a>, unit: &Unit) -> LowerResult{ - let ty = ci.ctx.get_type(unit.sig); - let (ins, outs) = match ty { - &ComplexType::Function(ref ins, ref outs, ref _unwind) => (ins, outs), - _ => return Err(LowerError::UnitIsNotAFunction), - }; +fn signature<'a>(ci: CtxIsa<'a>, unit: &Unit) -> LowerResult { + let (ins, outs) = signature_io(ci, unit.sig)?; // At the moment, assume that all Units are going to be called with Rust // calling convention. @@ -58,45 +81,248 @@ fn signature<'a>(ci: CtxIsa<'a>, unit: &Unit) -> LowerResult{ Ok(sig) } -/// Convert a LIR Unit into a Cranelift IR (Function). 
-pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult { - let ci = CtxIsa { ctx, isa }; - let sig = signature(ci, unit)?; - let mut fn_builder_ctx = FunctionBuilderContext::::new(); - let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig); - { - let mut builder = FunctionBuilder::::new(&mut func, &mut fn_builder_ctx); +fn external_name(id: UnitId) -> ExternalName { + let (d, i) = match id { + UnitId::Intrinsic(i) => (0, i), + UnitId::Function(i) => (1, i), + UnitId::SubSet(i) => (2, i) + }; + ExternalName::user(d, i) +} - let block0 = builder.create_ebb(); - let x = Variable::new(0); - let y = Variable::new(1); - builder.declare_var(x, I32); - builder.declare_var(y, I32); - builder.append_ebb_params_for_function_params(block0); +type OptVarType = Option<(Variable, types::Type)>; - builder.switch_to_block(block0); - builder.seal_block(block0); - { - let tmp = builder.ebb_params(block0)[0]; // the first function parameter - builder.def_var(x, tmp); +/// Identify the type of each instructions and declare a variable with the same +/// offset as the instruction in the data flow graph of the Unit. +fn declare_vars<'a>(ci: CtxIsa<'a>, unit: &Unit, bld: &mut FunctionBuilder) -> LowerResult> { + // Use pointer as a dummy type. + let mut types : Vec<_> = unit.dfg.instructions.iter().map(|_| None).collect(); + + // Give a type to the arguments of the Unit. + let (params, _) = signature_io(ci, unit.sig)?; + for (value, &ty) in unit.inputs.iter().zip(params.iter()) { + let index = value.index(); + let v = Variable::new(index); + let ty = convert_type(ci, ty)?; + bld.declare_var(v, ty); + types[index] = Some((v, ty)); + } + + // Infer type from the operation. 
+ for (index, ref ins) in unit.dfg.instructions.iter().enumerate() { + let ty = match ins.opcode.result_type() { + ValueType::Boolean => types::B1, + ValueType::Pointer => ci.isa.pointer_type(), + ValueType::Number(n) => convert_number_type(n), + ValueType::Complex(id) => convert_type(ci, id)?, + ValueType::ResultOfUnit(_unit) => unimplemented!(), + ValueType::ResultOfSig(id) => { + let (_, out) = signature_io(ci, id)?; + match out.len() { + 0 => continue, + 1 => convert_type(ci, out[0])?, + _ => unimplemented!(), + } + } + ValueType::InheritFromOperands | + ValueType::None => continue, + }; + let v = Variable::new(index); + bld.declare_var(v, ty); + types[index] = Some((v, ty)); + } + + // Infer type from the operands. + for (index, ref ins) in unit.dfg.instructions.iter().enumerate() { + match ins.opcode.result_type() { + ValueType::InheritFromOperands => (), + _ => continue, + }; + assert!(ins.operands.len() >= 1); + let ty = match types[ins.operands[0].index] { + Some((_, ty)) => ty, + None => continue, + }; + let v = Variable::new(index); + bld.declare_var(v, ty); + types[index] = Some((v, ty)); + } + + Ok(types) +} + +fn convert_ins(idx: usize, ins: &Instruction, seq: &Sequence, ebbs: &Vec, bld: &mut FunctionBuilder) -> LowerResult<()> { + use self::Opcode::*; + match ins.opcode { + Entry(_) => (), + Newhash(_) => (), + Rehash(_) => { + let a0 = bld.use_var(Variable::new(ins.operands[0].index)); + let res = bld.ins().copy(a0); + bld.def_var(Variable::new(idx), res); } - { - let tmp = builder.ins().iconst(I32, 1); - builder.def_var(y, tmp); + Phi => (), // Phi are handled at the end of predecessor blocks. 
+ Const(val) => { + use self::NumberValue::*; + let res = match val { + I8(i) => bld.ins().iconst(types::I8, i as i64), + I16(i) => bld.ins().iconst(types::I16, i as i64), + I32(i) => bld.ins().iconst(types::I32, i as i64), + I64(i) => bld.ins().iconst(types::I64, i), + U8(i) => bld.ins().iconst(types::I8, i as i64), + U16(i) => bld.ins().iconst(types::I16, i as i64), + U32(i) => bld.ins().iconst(types::I32, i as i64), + U64(i) => bld.ins().iconst(types::I64, i as i64), + F32(f) => bld.ins().f32const(Ieee32::with_float(f)), + F64(f) => bld.ins().f64const(Ieee64::with_float(f)), + }; + bld.def_var(Variable::new(idx), res); + }, + Cast(_id) => unimplemented!(), + OverflowFlag => (), + Add(n) => { + // TODO: If any overflow flag depends on this instruction, we should + // change the encoding of this instruction to emit a carry bits. + let a0 = bld.use_var(Variable::new(ins.operands[0].index)); + let a1 = bld.use_var(Variable::new(ins.operands[1].index)); + let res = match n { + NumberType::F32 | NumberType::F64 => bld.ins().fadd(a0, a1), + _ => bld.ins().iadd(a0, a1), + }; + bld.def_var(Variable::new(idx), res); } - let z = Variable::new(2); - builder.declare_var(z, I32); - { - let arg1 = builder.use_var(x); - let arg2 = builder.use_var(y); - let tmp = builder.ins().iadd(arg1, arg2); - builder.def_var(z, tmp); + Sub(_n) | + Mul(_n) | + Div(_n) | + Rem(_n) => unimplemented!(), + SignExt(_n) => unimplemented!(), + ZeroExt(_n) => unimplemented!(), + Truncate(_f) | + Round(_f) | + Floor(_f) | + Ceil(_f) => unimplemented!(), + BwXor(_b) | + BwAnd(_b) | + BwOr(_b) | + BwNot(_b) => unimplemented!(), + ShiftLeft(_i) => unimplemented!(), + ShiftRight(_i) => unimplemented!(), + Eq(_) | Lt(_) | Le(_) | + Ne(_) | Gt(_) | Ge(_) => unimplemented!(), + StaticAddress | + Address => unimplemented!(), + CPUAddress => unimplemented!(), + Load(_) => unimplemented!(), + Store(_ty) => unimplemented!(), + LoadFenceLoad | + LoadFenceStore | + StoreFenceLoad | + StoreFenceStore => 
unimplemented!(), + Unit(_id) => unimplemented!(), + Return => { + let args : Vec<_> = ins.operands.iter().map(|v| bld.use_var(Variable::new(v.index))).collect(); + bld.ins().return_(&args); + }, + Unwind | // TODO: properly implement unwind. + Unreachable => { + bld.ins().trap(TrapCode::User(0)); } - { - let arg = builder.use_var(z); - builder.ins().return_(&[arg]); + Goto => { + let SuccessorIndex(idx) = seq.default.unwrap(); + let SequenceIndex(idx) = seq.successors[idx]; + bld.ins().jump(ebbs[idx], &[]); } + Switch(_) => unimplemented!(), + Call(_id) => unimplemented!(), + CallUnit(_id) => unimplemented!(), + }; + + Ok(()) +} + +/// Convert the sequences of the control flow graph into a graph of extended +/// blocks expected by Cranelift. +fn convert_cfg<'a>(ci: CtxIsa<'a>, unit: &Unit, bld: &mut FunctionBuilder) -> LowerResult<()> { + let _types = declare_vars(ci, unit, bld)?; + + // TODO: Create better extended basic blocks instead of creating a 1:1 + // mapping with the LIR. + let ebbs : Vec<_> = unit.cfg.sequences.iter().map(|_| bld.create_ebb()).collect(); + + // Anchor argument processing to the entry EBB. + let SequenceIndex(entry) = unit.cfg.entry; + bld.append_ebb_params_for_function_params(ebbs[entry]); + + // This is a queue of block which are waiting for all their inputs to be + // finalized before sealing them. + let mut seal_queue = vec![]; + + // Iterate over each sequence and convert the instruction in each of them to + // a Cranelift instruction. + for (i, ref seq) in unit.cfg.sequences.iter().enumerate() { + // Switch to the block in which we emit the sequence of instructions. + bld.switch_to_block(ebbs[i]); + // Seal the block, unless we are waiting for upcoming sequences to be + // converted before freezing the list of predecessors. 
+ let get_seq_index = |pred : &(SequenceIndex, SuccessorIndex)| -> usize { + let &(SequenceIndex(idx), _) = pred; + idx + }; + match seq.predecessors.iter().map(get_seq_index).max() { + None => bld.seal_block(ebbs[i]), + Some(idx) => { + if idx < i { + bld.seal_block(ebbs[i]); + } else { + seal_queue.push((idx, i)); + } + } + }; + // Bind arguments in the entry block. + if SequenceIndex(i) == unit.cfg.entry { + for (a, value) in unit.inputs.iter().enumerate() { + let arg = bld.ebb_params(ebbs[i])[a]; + bld.def_var(Variable::new(value.index), arg); + } + } + + // Convert instructions. NOTE: Assumes that all the instructions are + // scheduled properly in the control flow graph. + for value in seq.sequence.iter() { + convert_ins(value.index, &unit.dfg.instructions[value.index], seq, &ebbs, bld)?; + } + + // Set variables corresponding to the Phi of the following blocks. + // TODO! + + // Add conditional and jump instruction. + let value = seq.control; + convert_ins(value.index, &unit.dfg.instructions[value.index], seq, &ebbs, bld)?; + + // Seal blocks which were waiting on the current block to be ended. + let rest = mem::replace(&mut seal_queue, vec![]); + let (to_seal, rest) : (Vec<_>, _) = + rest.into_iter().partition(|&(idx, _)| idx <= i); + seal_queue = rest; + for (_, j) in to_seal.into_iter() { + bld.seal_block(ebbs[j]) + } + } + + Ok(()) +} + +/// Convert a LIR Unit into a Cranelift IR (Function). 
+pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult { + let ci = CtxIsa { ctx, isa }; + println!("{:?}", unit); + let sig = signature(ci, unit)?; + let mut fn_builder_ctx = FunctionBuilderContext::::new(); + let mut func = Function::with_name_signature(external_name(unit.id), sig); + { + let mut builder = FunctionBuilder::::new(&mut func, &mut fn_builder_ctx); + convert_cfg(ci, unit, &mut builder)?; builder.finalize(); } @@ -108,7 +334,12 @@ pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult LowerResult { let mut sig = Signature::new(CallConv::SystemV); sig.returns.push(AbiParam::new(I32)); sig.params.push(AbiParam::new(I32)); @@ -191,5 +422,4 @@ pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult UnitBuilder<'a> { return arg } let opcode = self.ctx.get_newhash(); - self.dfg_add_ins(Instruction { + let arg = self.dfg_add_ins(Instruction { opcode, operands: vec![], dependencies: vec![], @@ -152,7 +152,7 @@ impl<'a> UnitBuilder<'a> { pub fn add_ins(&mut self, ins: Instruction) -> Value { let value = self.dfg_add_ins(ins); let SequenceIndex(index) = self.sequence.unwrap(); - assert!(self.unit.cfg.sequences[index].control.is_dummy()); + debug_assert!(self.unit.cfg.sequences[index].control.is_dummy()); self.unit.cfg.sequences[index].sequence.push(value); value } @@ -170,13 +170,13 @@ impl<'a> UnitBuilder<'a> { /// Add a control flow instruction to end the current sequence. 
pub fn end_sequence(&mut self, ins: Instruction) { - assert!(ins.is_control()); + debug_assert!(ins.is_control()); let is_return = ins.opcode.is_return(); let value = self.dfg_add_ins(ins); { let SequenceIndex(index) = self.sequence.unwrap(); let edit = &mut self.unit.cfg.sequences[index]; - assert!(edit.control.is_dummy()); + debug_assert!(edit.control.is_dummy()); edit.control = value; } // If the last instruction is a return statement, then add this return @@ -186,13 +186,18 @@ impl<'a> UnitBuilder<'a> { } } + pub fn set_entry(&mut self) { + debug_assert!(self.unit.cfg.entry.is_dummy()); + self.unit.cfg.entry = self.sequence.unwrap(); + } + /// Set conditional branch. pub fn sequence_value_jump(&mut self, value: isize, seq: SequenceIndex) { let SequenceIndex(index) = self.sequence.unwrap(); let edit = &mut self.unit.cfg.sequences[index]; edit.successors.push(seq); let succ_idx = SuccessorIndex(edit.successors.len() - 1); - assert!(!edit.targets.iter().any(|&(v, _)| v == value)); + debug_assert!(!edit.targets.iter().any(|&(v, _)| v == value)); edit.targets.push((value, succ_idx)); } /// Set default branch. @@ -201,7 +206,7 @@ impl<'a> UnitBuilder<'a> { let edit = &mut self.unit.cfg.sequences[index]; edit.successors.push(seq); let succ_idx = SuccessorIndex(edit.successors.len() - 1); - assert_eq!(edit.default, None); + debug_assert_eq!(edit.default, None); edit.default = Some(succ_idx); } /// Set unwind branch. 
@@ -210,7 +215,7 @@ impl<'a> UnitBuilder<'a> { let edit = &mut self.unit.cfg.sequences[index]; edit.successors.push(seq); let succ_idx = SuccessorIndex(edit.successors.len() - 1); - assert_eq!(edit.unwind, None); + debug_assert_eq!(edit.unwind, None); edit.unwind = Some(succ_idx); } diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index eefc1b3..4e8bfee 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -229,6 +229,17 @@ pub enum Opcode { CallUnit(unit::UnitId), } +pub enum ValueType { + Boolean, + Pointer, + Number(number::NumberType), + Complex(ComplexTypeId), + ResultOfUnit(unit::UnitId), + ResultOfSig(ComplexTypeId), + InheritFromOperands, + None, +} + impl Opcode { pub fn is_control(self) -> bool { match self { @@ -249,6 +260,55 @@ impl Opcode { _ => false, } } + + pub fn result_type(self) -> ValueType { + use self::Opcode::*; + match self { + Entry(_) | + Newhash(_) => ValueType::None, + Rehash(_) | + Phi => ValueType::InheritFromOperands, + Const(val) => ValueType::Number(val.into()), + Cast(id) => ValueType::Complex(id), + OverflowFlag => ValueType::Boolean, + Add(n) | + Sub(n) | + Mul(n) | + Div(n) | + Rem(n) => ValueType::Number(n), + SignExt(n) => ValueType::Number(n.into()), + ZeroExt(n) => ValueType::Number(n.into()), + Truncate(f) | + Round(f) | + Floor(f) | + Ceil(f) => ValueType::Number(f.into()), + BwXor(b) | + BwAnd(b) | + BwOr(b) | + BwNot(b) => ValueType::Number(b.into()), + ShiftLeft(i) => ValueType::Number(i.into()), + ShiftRight(i) => ValueType::Number(i.into()), + Eq(_) | Lt(_) | Le(_) | + Ne(_) | Gt(_) | Ge(_) => ValueType::Boolean, + StaticAddress | + Address => ValueType::Pointer, + CPUAddress => ValueType::None, + Load(_) => ValueType::None, + Store(ty) => ValueType::Complex(ty), + LoadFenceLoad | + LoadFenceStore | + StoreFenceLoad | + StoreFenceStore => ValueType::None, + Unit(id) => ValueType::ResultOfUnit(id), + Return | + Unwind | + Unreachable | + Goto | + Switch(_) => ValueType::None, + Call(id) => 
ValueType::ResultOfSig(id), + CallUnit(id) => ValueType::ResultOfUnit(id), + } + } } impl Instruction { @@ -294,12 +354,18 @@ impl DataFlow { } impl Value { + // Create a Dummy value which should fail any validation test. pub fn dummy() -> Value { Value { hash: 0, index: usize::max_value() } } + // Check if this is the value created as a dummy. pub fn is_dummy(self) -> bool { self.index == usize::max_value() } + pub fn index(self) -> usize { + debug_assert!(!self.is_dummy()); + self.index + } } #[cfg(test)] diff --git a/lir/src/unit.rs b/lir/src/unit.rs index 658c7f5..1f6e401 100644 --- a/lir/src/unit.rs +++ b/lir/src/unit.rs @@ -32,11 +32,11 @@ pub struct Unit { } /// Unique Unit identifier of an intrinsic. -type IntrinsicId = usize; +type IntrinsicId = u32; /// Unique unit identifier of a function. -type FunctionId = usize; +type FunctionId = u32; /// Unique unit identifier of a sub-set of a function. -type SubSetId = usize; +type SubSetId = u32; /// Unique Unit identifier. #[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy, PartialEq, Eq)] From 9ea666900d466e6b33a0f82be865ff0bbd8efea1 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 25 Aug 2018 15:12:23 +0200 Subject: [PATCH 07/32] LIR: Add carry flag next to the overflow flag. --- codegen/src/lower.rs | 6 ++++-- lir/src/data_flow.rs | 10 +++++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index d8d5c5d..8527f42 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -180,9 +180,11 @@ fn convert_ins(idx: usize, ins: &Instruction, seq: &Sequence, ebbs: &Vec, b }, Cast(_id) => unimplemented!(), OverflowFlag => (), + CarryFlag => (), Add(n) => { - // TODO: If any overflow flag depends on this instruction, we should - // change the encoding of this instruction to emit a carry bits. 
+ // TODO: If any overflow/carry flag depends on this instruction, we + // should change the encoding of this instruction to emit a carry + // bits. let a0 = bld.use_var(Variable::new(ins.operands[0].index)); let a1 = bld.use_var(Variable::new(ins.operands[1].index)); let res = match n { diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index 4e8bfee..11344b7 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -89,9 +89,12 @@ pub enum Opcode { /// (1 operand) Cast(ComplexTypeId), - /// Extract overflow flag from the operation which created the value. - /// (0 operand, 1 dependency) + /// Extract overflow flag from the operation on which this instruction + /// depends on. (0 operand, 1 dependency) OverflowFlag, + /// Extract carry flag from the operation on which this instruction depends + /// on. (0 operand, 1 dependency) + CarryFlag, /// Addition. (2 operands) Add(number::NumberType), @@ -270,7 +273,8 @@ impl Opcode { Phi => ValueType::InheritFromOperands, Const(val) => ValueType::Number(val.into()), Cast(id) => ValueType::Complex(id), - OverflowFlag => ValueType::Boolean, + OverflowFlag | + CarryFlag => ValueType::Boolean, Add(n) | Sub(n) | Mul(n) | From 1f534900a843ec70bd6c2071bb4a620bf2e91193 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 25 Aug 2018 16:02:10 +0200 Subject: [PATCH 08/32] codegen: Add a Convert context to hold the common state. 
--- codegen/src/error.rs | 9 + codegen/src/lower.rs | 548 ++++++++++++++++++++++--------------------- 2 files changed, 286 insertions(+), 271 deletions(-) diff --git a/codegen/src/error.rs b/codegen/src/error.rs index c0cc836..53dda6d 100644 --- a/codegen/src/error.rs +++ b/codegen/src/error.rs @@ -1,3 +1,4 @@ +use codegen::verifier::VerifierError; use codegen::CodegenError; use mmap::MapError; use region; @@ -10,6 +11,7 @@ pub type LowerResult = Result; #[derive(Debug)] pub enum LowerError { CodeGen(CodegenError), + Verifier(VerifierError), Map(MapError), Protect, UnitIsNotAFunction, @@ -26,6 +28,13 @@ impl From for LowerError { } } +impl From for LowerError { + /// Implictly convert Cranelift codegen errors into LowerError. + fn from(err: VerifierError) -> LowerError { + LowerError::Verifier(err) + } +} + impl From for LowerError { /// Implictly convert mmap errors into LowerError. fn from(err: MapError) -> LowerError { diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 8527f42..07b15c9 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -20,321 +20,327 @@ use lir::data_flow::{Opcode, Instruction, ValueType}; use error::{LowerResult, LowerError}; #[derive(Copy, Clone)] -struct CtxIsa<'a> { +struct ConvertCtx<'a> { pub isa: &'a TargetIsa, pub ctx: &'a Context, + pub unit: &'a Unit, } -fn convert_number_type(ty: NumberType) -> types::Type { - match ty { - NumberType::I8 | NumberType::U8 => types::I8, - NumberType::I16 | NumberType::U16 => types::I16, - NumberType::I32 | NumberType::U32 => types::I32, - NumberType::I64 | NumberType::U64 => types::I64, - NumberType::F32 => types::F32, - NumberType::F64 => types::F64, +type OptVarType = Option<(Variable, types::Type)>; + +impl<'a> ConvertCtx<'a> { + fn number_type(&self, ty: NumberType) -> types::Type { + match ty { + NumberType::I8 | NumberType::U8 => types::I8, + NumberType::I16 | NumberType::U16 => types::I16, + NumberType::I32 | NumberType::U32 => types::I32, + NumberType::I64 | 
NumberType::U64 => types::I64, + NumberType::F32 => types::F32, + NumberType::F64 => types::F64, + } } -} -fn convert_type<'a>(ci: CtxIsa<'a>, ty: ComplexTypeId) -> LowerResult { - use self::ComplexType::*; - use self::NumberType::*; - let ty = ci.ctx.get_type(ty); - match ty { - &Pointer => Ok(ci.isa.pointer_type()), - &Scalar(U8) | &Scalar(I8) => Ok(types::I8), - &Scalar(U16) | &Scalar(I16) => Ok(types::I16), - &Scalar(U32) | &Scalar(I32) => Ok(types::I32), - &Scalar(U64) | &Scalar(I64) => Ok(types::I64), - &Vector(_, _) => unimplemented!(), - _ => Err(LowerError::ComplexTypeNotLowered), + fn cltype(&self, ty: ComplexTypeId) -> LowerResult { + use self::ComplexType::*; + use self::NumberType::*; + let ty = self.ctx.get_type(ty); + match ty { + &Pointer => Ok(self.isa.pointer_type()), + &Scalar(U8) | &Scalar(I8) => Ok(types::I8), + &Scalar(U16) | &Scalar(I16) => Ok(types::I16), + &Scalar(U32) | &Scalar(I32) => Ok(types::I32), + &Scalar(U64) | &Scalar(I64) => Ok(types::I64), + &Vector(_, _) => unimplemented!(), + _ => Err(LowerError::ComplexTypeNotLowered), + } } -} -fn abiparam<'a>(ci: CtxIsa<'a>, ty: ComplexTypeId) -> LowerResult { - Ok(AbiParam::new(convert_type(ci, ty)?)) -} + fn abiparam(&self, ty: ComplexTypeId) -> LowerResult { + Ok(AbiParam::new(self.cltype(ty)?)) + } -fn signature_io<'a>(ci: CtxIsa<'a>, sig: ComplexTypeId) -> LowerResult<(&'a Vec, &'a Vec)> { - let ty = ci.ctx.get_type(sig); - match ty { - &ComplexType::Function(ref ins, ref outs, ref _unwind) => Ok((ins, outs)), - _ => Err(LowerError::UnitIsNotAFunction), + fn signature_io(&self, sig: ComplexTypeId) -> LowerResult<(&'a Vec, &'a Vec)> { + let ty = self.ctx.get_type(sig); + match ty { + &ComplexType::Function(ref ins, ref outs, ref _unwind) => Ok((ins, outs)), + _ => Err(LowerError::UnitIsNotAFunction), + } } -} -/// Unit have a signature expressed as a type, we have to convert this signature -/// into simpler types understood by Cranelift. 
-fn signature<'a>(ci: CtxIsa<'a>, unit: &Unit) -> LowerResult { - let (ins, outs) = signature_io(ci, unit.sig)?; + /// Unit have a signature expressed as a type, we have to convert this signature + /// into simpler types understood by Cranelift. + fn signature(&self) -> LowerResult { + let (ins, outs) = self.signature_io(self.unit.sig)?; - // At the moment, assume that all Units are going to be called with Rust - // calling convention. - let mut sig = Signature::new(CallConv::SystemV); + // At the moment, assume that all Units are going to be called with Rust + // calling convention. + let mut sig = Signature::new(CallConv::SystemV); - for &ty in ins.iter() { - sig.params.push(abiparam(ci, ty)?); + for &ty in ins.iter() { + sig.params.push(self.abiparam(ty)?); + } + for &ty in outs.iter() { + sig.returns.push(self.abiparam(ty)?); + } + Ok(sig) } - for &ty in outs.iter() { - sig.returns.push(abiparam(ci, ty)?); + + // Generate external name indexes based on the UnitId. + fn external_name(&self, id: UnitId) -> ExternalName { + let (d, i) = match id { + UnitId::Intrinsic(i) => (0, i), + UnitId::Function(i) => (1, i), + UnitId::SubSet(i) => (2, i) + }; + ExternalName::user(d, i) } - Ok(sig) -} -fn external_name(id: UnitId) -> ExternalName { - let (d, i) = match id { - UnitId::Intrinsic(i) => (0, i), - UnitId::Function(i) => (1, i), - UnitId::SubSet(i) => (2, i) - }; - ExternalName::user(d, i) -} + /// Identify the type of each instructions and declare a variable with the same + /// offset as the instruction in the data flow graph of the Unit. + fn declare_vars(&self, bld: &mut FunctionBuilder) -> LowerResult> { + // Use pointer as a dummy type. + let mut types : Vec<_> = self.unit.dfg.instructions.iter().map(|_| None).collect(); + + // Give a type to the arguments of the Unit. 
+ let (params, _) = self.signature_io(self.unit.sig)?; + for (value, &ty) in self.unit.inputs.iter().zip(params.iter()) { + let index = value.index(); + let v = Variable::new(index); + let ty = self.cltype(ty)?; + bld.declare_var(v, ty); + types[index] = Some((v, ty)); + } -type OptVarType = Option<(Variable, types::Type)>; + // Infer type from the operation. + for (index, ref ins) in self.unit.dfg.instructions.iter().enumerate() { + let ty = match ins.opcode.result_type() { + ValueType::Boolean => types::B1, + ValueType::Pointer => self.isa.pointer_type(), + ValueType::Number(n) => self.number_type(n), + ValueType::Complex(id) => self.cltype(id)?, + ValueType::ResultOfUnit(_unit) => unimplemented!(), + ValueType::ResultOfSig(id) => { + let (_, out) = self.signature_io(id)?; + match out.len() { + 0 => continue, + 1 => self.cltype(out[0])?, + _ => unimplemented!(), + } + } + ValueType::InheritFromOperands | + ValueType::None => continue, + }; + let v = Variable::new(index); + bld.declare_var(v, ty); + types[index] = Some((v, ty)); + } + + // Infer type from the operands. + for (index, ref ins) in self.unit.dfg.instructions.iter().enumerate() { + match ins.opcode.result_type() { + ValueType::InheritFromOperands => (), + _ => continue, + }; + assert!(ins.operands.len() >= 1); + let ty = match types[ins.operands[0].index] { + Some((_, ty)) => ty, + None => continue, + }; + let v = Variable::new(index); + bld.declare_var(v, ty); + types[index] = Some((v, ty)); + } -/// Identify the type of each instructions and declare a variable with the same -/// offset as the instruction in the data flow graph of the Unit. -fn declare_vars<'a>(ci: CtxIsa<'a>, unit: &Unit, bld: &mut FunctionBuilder) -> LowerResult> { - // Use pointer as a dummy type. - let mut types : Vec<_> = unit.dfg.instructions.iter().map(|_| None).collect(); - - // Give a type to the arguments of the Unit. 
- let (params, _) = signature_io(ci, unit.sig)?; - for (value, &ty) in unit.inputs.iter().zip(params.iter()) { - let index = value.index(); - let v = Variable::new(index); - let ty = convert_type(ci, ty)?; - bld.declare_var(v, ty); - types[index] = Some((v, ty)); + Ok(types) } - // Infer type from the operation. - for (index, ref ins) in unit.dfg.instructions.iter().enumerate() { - let ty = match ins.opcode.result_type() { - ValueType::Boolean => types::B1, - ValueType::Pointer => ci.isa.pointer_type(), - ValueType::Number(n) => convert_number_type(n), - ValueType::Complex(id) => convert_type(ci, id)?, - ValueType::ResultOfUnit(_unit) => unimplemented!(), - ValueType::ResultOfSig(id) => { - let (_, out) = signature_io(ci, id)?; - match out.len() { - 0 => continue, - 1 => convert_type(ci, out[0])?, - _ => unimplemented!(), - } + fn instruction(&self, idx: usize, ins: &Instruction, seq: &Sequence, ebbs: &Vec, bld: &mut FunctionBuilder) -> LowerResult<()> { + use self::Opcode::*; + match ins.opcode { + Entry(_) => (), + Newhash(_) => (), + Rehash(_) => { + let a0 = bld.use_var(Variable::new(ins.operands[0].index)); + let res = bld.ins().copy(a0); + bld.def_var(Variable::new(idx), res); + } + Phi => (), // Phi are handled at the end of predecessor blocks. 
+ Const(val) => { + use self::NumberValue::*; + let res = match val { + I8(i) => bld.ins().iconst(types::I8, i as i64), + I16(i) => bld.ins().iconst(types::I16, i as i64), + I32(i) => bld.ins().iconst(types::I32, i as i64), + I64(i) => bld.ins().iconst(types::I64, i), + U8(i) => bld.ins().iconst(types::I8, i as i64), + U16(i) => bld.ins().iconst(types::I16, i as i64), + U32(i) => bld.ins().iconst(types::I32, i as i64), + U64(i) => bld.ins().iconst(types::I64, i as i64), + F32(f) => bld.ins().f32const(Ieee32::with_float(f)), + F64(f) => bld.ins().f64const(Ieee64::with_float(f)), + }; + bld.def_var(Variable::new(idx), res); + }, + Cast(_id) => unimplemented!(), + OverflowFlag => (), + CarryFlag => (), + Add(n) => { + // TODO: If any overflow/carry flag depends on this instruction, we + // should change the encoding of this instruction to emit a carry + // bits. + let a0 = bld.use_var(Variable::new(ins.operands[0].index)); + let a1 = bld.use_var(Variable::new(ins.operands[1].index)); + let res = match n { + NumberType::F32 | NumberType::F64 => bld.ins().fadd(a0, a1), + _ => bld.ins().iadd(a0, a1), + }; + bld.def_var(Variable::new(idx), res); + } + Sub(_n) | + Mul(_n) | + Div(_n) | + Rem(_n) => unimplemented!(), + SignExt(_n) => unimplemented!(), + ZeroExt(_n) => unimplemented!(), + Truncate(_f) | + Round(_f) | + Floor(_f) | + Ceil(_f) => unimplemented!(), + BwXor(_b) | + BwAnd(_b) | + BwOr(_b) | + BwNot(_b) => unimplemented!(), + ShiftLeft(_i) => unimplemented!(), + ShiftRight(_i) => unimplemented!(), + Eq(_) | Lt(_) | Le(_) | + Ne(_) | Gt(_) | Ge(_) => unimplemented!(), + StaticAddress | + Address => unimplemented!(), + CPUAddress => unimplemented!(), + Load(_) => unimplemented!(), + Store(_ty) => unimplemented!(), + LoadFenceLoad | + LoadFenceStore | + StoreFenceLoad | + StoreFenceStore => unimplemented!(), + Unit(_id) => unimplemented!(), + Return => { + let args : Vec<_> = ins.operands.iter().map(|v| bld.use_var(Variable::new(v.index))).collect(); + 
bld.ins().return_(&args); + }, + Unwind | // TODO: properly implement unwind. + Unreachable => { + bld.ins().trap(TrapCode::User(0)); } - ValueType::InheritFromOperands | - ValueType::None => continue, + Goto => { + let SuccessorIndex(idx) = seq.default.unwrap(); + let SequenceIndex(idx) = seq.successors[idx]; + bld.ins().jump(ebbs[idx], &[]); + } + Switch(_) => unimplemented!(), + Call(_id) => unimplemented!(), + CallUnit(_id) => unimplemented!(), }; - let v = Variable::new(index); - bld.declare_var(v, ty); - types[index] = Some((v, ty)); - } - // Infer type from the operands. - for (index, ref ins) in unit.dfg.instructions.iter().enumerate() { - match ins.opcode.result_type() { - ValueType::InheritFromOperands => (), - _ => continue, - }; - assert!(ins.operands.len() >= 1); - let ty = match types[ins.operands[0].index] { - Some((_, ty)) => ty, - None => continue, - }; - let v = Variable::new(index); - bld.declare_var(v, ty); - types[index] = Some((v, ty)); + Ok(()) } - Ok(types) -} - -fn convert_ins(idx: usize, ins: &Instruction, seq: &Sequence, ebbs: &Vec, bld: &mut FunctionBuilder) -> LowerResult<()> { - use self::Opcode::*; - match ins.opcode { - Entry(_) => (), - Newhash(_) => (), - Rehash(_) => { - let a0 = bld.use_var(Variable::new(ins.operands[0].index)); - let res = bld.ins().copy(a0); - bld.def_var(Variable::new(idx), res); - } - Phi => (), // Phi are handled at the end of predecessor blocks. 
- Const(val) => { - use self::NumberValue::*; - let res = match val { - I8(i) => bld.ins().iconst(types::I8, i as i64), - I16(i) => bld.ins().iconst(types::I16, i as i64), - I32(i) => bld.ins().iconst(types::I32, i as i64), - I64(i) => bld.ins().iconst(types::I64, i), - U8(i) => bld.ins().iconst(types::I8, i as i64), - U16(i) => bld.ins().iconst(types::I16, i as i64), - U32(i) => bld.ins().iconst(types::I32, i as i64), - U64(i) => bld.ins().iconst(types::I64, i as i64), - F32(f) => bld.ins().f32const(Ieee32::with_float(f)), - F64(f) => bld.ins().f64const(Ieee64::with_float(f)), + /// Convert the sequences of the control flow graph into a graph of extended + /// blocks expected by Cranelift. + fn cfg(&self, bld: &mut FunctionBuilder) -> LowerResult<()> { + let _types = self.declare_vars(bld)?; + + // TODO: Create better extended basic blocks instead of creating a 1:1 + // mapping with the LIR. + let ebbs : Vec<_> = self.unit.cfg.sequences.iter().map(|_| bld.create_ebb()).collect(); + + // Anchor argument processing to the entry EBB. + let SequenceIndex(entry) = self.unit.cfg.entry; + bld.append_ebb_params_for_function_params(ebbs[entry]); + + // This is a queue of block which are waiting for all their inputs to be + // finalized before sealing them. + let mut seal_queue = vec![]; + + // Iterate over each sequence and convert the instruction in each of them to + // a Cranelift instruction. + for (i, ref seq) in self.unit.cfg.sequences.iter().enumerate() { + // Switch to the block in which we emit the sequence of instructions. + bld.switch_to_block(ebbs[i]); + // Seal the block, unless we are waiting for upcoming sequences to be + // converted before freezing the list of predecessors. 
+ let get_seq_index = |pred : &(SequenceIndex, SuccessorIndex)| -> usize { + let &(SequenceIndex(idx), _) = pred; + idx }; - bld.def_var(Variable::new(idx), res); - }, - Cast(_id) => unimplemented!(), - OverflowFlag => (), - CarryFlag => (), - Add(n) => { - // TODO: If any overflow/carry flag depends on this instruction, we - // should change the encoding of this instruction to emit a carry - // bits. - let a0 = bld.use_var(Variable::new(ins.operands[0].index)); - let a1 = bld.use_var(Variable::new(ins.operands[1].index)); - let res = match n { - NumberType::F32 | NumberType::F64 => bld.ins().fadd(a0, a1), - _ => bld.ins().iadd(a0, a1), + match seq.predecessors.iter().map(get_seq_index).max() { + None => bld.seal_block(ebbs[i]), + Some(idx) => { + if idx < i { + bld.seal_block(ebbs[i]); + } else { + seal_queue.push((idx, i)); + } + } }; - bld.def_var(Variable::new(idx), res); - } - Sub(_n) | - Mul(_n) | - Div(_n) | - Rem(_n) => unimplemented!(), - SignExt(_n) => unimplemented!(), - ZeroExt(_n) => unimplemented!(), - Truncate(_f) | - Round(_f) | - Floor(_f) | - Ceil(_f) => unimplemented!(), - BwXor(_b) | - BwAnd(_b) | - BwOr(_b) | - BwNot(_b) => unimplemented!(), - ShiftLeft(_i) => unimplemented!(), - ShiftRight(_i) => unimplemented!(), - Eq(_) | Lt(_) | Le(_) | - Ne(_) | Gt(_) | Ge(_) => unimplemented!(), - StaticAddress | - Address => unimplemented!(), - CPUAddress => unimplemented!(), - Load(_) => unimplemented!(), - Store(_ty) => unimplemented!(), - LoadFenceLoad | - LoadFenceStore | - StoreFenceLoad | - StoreFenceStore => unimplemented!(), - Unit(_id) => unimplemented!(), - Return => { - let args : Vec<_> = ins.operands.iter().map(|v| bld.use_var(Variable::new(v.index))).collect(); - bld.ins().return_(&args); - }, - Unwind | // TODO: properly implement unwind. 
- Unreachable => { - bld.ins().trap(TrapCode::User(0)); - } - Goto => { - let SuccessorIndex(idx) = seq.default.unwrap(); - let SequenceIndex(idx) = seq.successors[idx]; - bld.ins().jump(ebbs[idx], &[]); - } - Switch(_) => unimplemented!(), - Call(_id) => unimplemented!(), - CallUnit(_id) => unimplemented!(), - }; - - Ok(()) -} -/// Convert the sequences of the control flow graph into a graph of extended -/// blocks expected by Cranelift. -fn convert_cfg<'a>(ci: CtxIsa<'a>, unit: &Unit, bld: &mut FunctionBuilder) -> LowerResult<()> { - let _types = declare_vars(ci, unit, bld)?; - - // TODO: Create better extended basic blocks instead of creating a 1:1 - // mapping with the LIR. - let ebbs : Vec<_> = unit.cfg.sequences.iter().map(|_| bld.create_ebb()).collect(); - - // Anchor argument processing to the entry EBB. - let SequenceIndex(entry) = unit.cfg.entry; - bld.append_ebb_params_for_function_params(ebbs[entry]); - - // This is a queue of block which are waiting for all their inputs to be - // finalized before sealing them. - let mut seal_queue = vec![]; - - // Iterate over each sequence and convert the instruction in each of them to - // a Cranelift instruction. - for (i, ref seq) in unit.cfg.sequences.iter().enumerate() { - // Switch to the block in which we emit the sequence of instructions. - bld.switch_to_block(ebbs[i]); - // Seal the block, unless we are waiting for upcoming sequences to be - // converted before freezing the list of predecessors. - let get_seq_index = |pred : &(SequenceIndex, SuccessorIndex)| -> usize { - let &(SequenceIndex(idx), _) = pred; - idx - }; - match seq.predecessors.iter().map(get_seq_index).max() { - None => bld.seal_block(ebbs[i]), - Some(idx) => { - if idx < i { - bld.seal_block(ebbs[i]); - } else { - seal_queue.push((idx, i)); + // Bind arguments in the entry block. 
+ if SequenceIndex(i) == self.unit.cfg.entry { + for (a, value) in self.unit.inputs.iter().enumerate() { + let arg = bld.ebb_params(ebbs[i])[a]; + bld.def_var(Variable::new(value.index), arg); } } - }; - // Bind arguments in the entry block. - if SequenceIndex(i) == unit.cfg.entry { - for (a, value) in unit.inputs.iter().enumerate() { - let arg = bld.ebb_params(ebbs[i])[a]; - bld.def_var(Variable::new(value.index), arg); + // Convert instructions. NOTE: Assumes that all the instructions are + // scheduled properly in the control flow graph. + for value in seq.sequence.iter() { + self.instruction(value.index, &self.unit.dfg.instructions[value.index], seq, &ebbs, bld)?; } - } - // Convert instructions. NOTE: Assumes that all the instructions are - // scheduled properly in the control flow graph. - for value in seq.sequence.iter() { - convert_ins(value.index, &unit.dfg.instructions[value.index], seq, &ebbs, bld)?; - } - - // Set variables corresponding to the Phi of the following blocks. - // TODO! + // Set variables corresponding to the Phi of the following blocks. + // TODO! - // Add conditional and jump instruction. - let value = seq.control; - convert_ins(value.index, &unit.dfg.instructions[value.index], seq, &ebbs, bld)?; + // Add conditional and jump instruction. + let value = seq.control; + self.instruction(value.index, &self.unit.dfg.instructions[value.index], seq, &ebbs, bld)?; - // Seal blocks which were waiting on the current block to be ended. - let rest = mem::replace(&mut seal_queue, vec![]); - let (to_seal, rest) : (Vec<_>, _) = - rest.into_iter().partition(|&(idx, _)| idx <= i); - seal_queue = rest; - for (_, j) in to_seal.into_iter() { - bld.seal_block(ebbs[j]) + // Seal blocks which were waiting on the current block to be ended. 
+ let rest = mem::replace(&mut seal_queue, vec![]); + let (to_seal, rest) : (Vec<_>, _) = + rest.into_iter().partition(|&(idx, _)| idx <= i); + seal_queue = rest; + for (_, j) in to_seal.into_iter() { + bld.seal_block(ebbs[j]) + } } + + Ok(()) } - Ok(()) + /// Convert a LIR Unit into a Cranelift IR (Function). + fn convert(&self) -> LowerResult { + let sig = self.signature()?; + let mut fn_builder_ctx = FunctionBuilderContext::::new(); + let mut func = Function::with_name_signature(self.external_name(self.unit.id), sig); + { + let mut builder = FunctionBuilder::::new(&mut func, &mut fn_builder_ctx); + self.cfg(&mut builder)?; + builder.finalize(); + } + + let flags = settings::Flags::new(settings::builder()); + verify_function(&func, &flags)?; + Ok(func) + } } /// Convert a LIR Unit into a Cranelift IR (Function). pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult { - let ci = CtxIsa { ctx, isa }; + let cc = ConvertCtx { ctx, isa, unit }; println!("{:?}", unit); - let sig = signature(ci, unit)?; - let mut fn_builder_ctx = FunctionBuilderContext::::new(); - let mut func = Function::with_name_signature(external_name(unit.id), sig); - { - let mut builder = FunctionBuilder::::new(&mut func, &mut fn_builder_ctx); - convert_cfg(ci, unit, &mut builder)?; - builder.finalize(); - } - - let flags = settings::Flags::new(settings::builder()); - let res = verify_function(&func, &flags); + let func = cc.convert()?; println!("{}", func.display(None)); - if let Err(errors) = res { - panic!("{}", errors); - } - Ok(func) } From 1254bbc14b1601a1ab5a394d7b6bbd39cb702d95 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sun, 26 Aug 2018 00:08:40 +0200 Subject: [PATCH 09/32] codegen: Add a new test case to cover more LIR uses. 
--- codegen/src/lib.rs | 68 ++++++++++++++++++++++- codegen/src/lower.rs | 129 ++++++++++++++++++++++++++++++++++++------- lir/src/builder.rs | 48 +++++++++++----- lir/src/data_flow.rs | 6 +- 4 files changed, 213 insertions(+), 38 deletions(-) diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index 1cb30f6..44b9f0b 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -99,7 +99,7 @@ mod tests { } #[test] - fn add1_unit() { + fn add1_test() { let mut ctx_bld = ContextBuilder::new(); let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); @@ -130,6 +130,72 @@ mod tests { let add1 : fn(i32) -> i32 = unsafe { mem::transmute(code.as_ptr()) }; + assert_eq!(add1(-5), -4); assert_eq!(add1(12), 13); } + + #[test] + fn round_odd_up_test() { + let mut ctx_bld = ContextBuilder::new(); + let round_odd_up_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); // test x % 2 == 0 + let s1 = bld.create_sequence(); // x += 1 + let s2 = bld.create_sequence(); // return x + + // [sequence 0] + bld.switch_to_sequence(s0); + bld.set_entry(); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Const(NumberValue::I32(2)), &[]); + let v1 = bld.add_op(Opcode::Rem(SignedType::I32), &[a0, v0]); + bld.end_sequence(Instruction { + opcode: Opcode::Switch(SwitchData { low: 0, high: 1 }), + operands: vec![v1], + dependencies: vec![], + replaced_by: None, + }); + bld.sequence_value_jump(0, s2); + bld.sequence_value_jump(1, s1); + + // [sequence 1] + bld.switch_to_sequence(s1); + let v2 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); + let v3 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v2]); + bld.end_sequence(Instruction { + opcode: Opcode::Goto, + operands: 
vec![], + dependencies: vec![], + replaced_by: None, + }); + bld.sequence_default_jump(s2); + + // [sequence 2] + bld.switch_to_sequence(s2); + let v4 = bld.add_op(Opcode::Phi, &[a0, v3]); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v4], + dependencies: vec![], + replaced_by: None, + }); + + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &round_odd_up_unit).unwrap(); + let round_odd_up : fn(i32) -> i32 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(round_odd_up(0), 0); + assert_eq!(round_odd_up(9654), 9654); + assert_eq!(round_odd_up(1618033), 1618034); + assert_eq!(round_odd_up(-5), -4); + } } diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 07b15c9..7e9c09a 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -4,7 +4,8 @@ use frontend::{FunctionBuilderContext, FunctionBuilder, Variable}; use codegen::entity::EntityRef; use codegen::ir::{Ebb, ExternalName, Function, Signature, AbiParam, InstBuilder, TrapCode}; -use codegen::ir::immediates::{Ieee32, Ieee64}; +use codegen::ir::immediates::{Ieee32, Ieee64, Imm64}; +use codegen::ir::condcodes::IntCC; use codegen::ir::types::*; use codegen::ir::types; use codegen::settings::{self, CallConv}; @@ -14,16 +15,16 @@ use codegen::isa::TargetIsa; use lir::unit::{Unit, UnitId}; use lir::context::Context; use lir::types::{ComplexTypeId, ComplexType}; -use lir::number::{NumberType, NumberValue}; +use lir::number::{NumberType, SignedType, NumberValue}; use lir::control_flow::{Sequence, SequenceIndex, SuccessorIndex}; use lir::data_flow::{Opcode, Instruction, ValueType}; use error::{LowerResult, LowerError}; -#[derive(Copy, Clone)] struct ConvertCtx<'a> { pub isa: &'a TargetIsa, pub ctx: &'a Context, pub unit: &'a Unit, + pub var_types: Vec, } type OptVarType = Option<(Variable, types::Type)>; @@ -97,7 +98,7 @@ impl<'a> ConvertCtx<'a> { /// Identify the type of each instructions and declare a 
variable with the same /// offset as the instruction in the data flow graph of the Unit. - fn declare_vars(&self, bld: &mut FunctionBuilder) -> LowerResult> { + fn declare_vars(&mut self, bld: &mut FunctionBuilder) -> LowerResult<()> { // Use pointer as a dummy type. let mut types : Vec<_> = self.unit.dfg.instructions.iter().map(|_| None).collect(); @@ -151,10 +152,35 @@ impl<'a> ConvertCtx<'a> { types[index] = Some((v, ty)); } - Ok(types) + self.var_types = types; + Ok(()) } - fn instruction(&self, idx: usize, ins: &Instruction, seq: &Sequence, ebbs: &Vec, bld: &mut FunctionBuilder) -> LowerResult<()> { + /// We are about to jump to the next block, convert the Phi operands into + /// variable definitions, such that the FunctionBuilder can convert these + /// into arguments of extended basic blocks. + fn bind_phis(&self, idx: SequenceIndex, seq: &Sequence, succ: SuccessorIndex, bld: &mut FunctionBuilder) { + let SuccessorIndex(sidx) = succ; + let SequenceIndex(sseq) = seq.successors[sidx]; + let sseq = &self.unit.cfg.sequences[sseq]; + // Find the index of this successor in the list of predecessors. + let (pidx, _) = sseq.predecessors.iter().enumerate().find(|p| p.1 == &(idx, succ)).unwrap(); + + // For each Phi, bind the variable corresponding to the Phi, with the + // variable of the current sequence. 
+ for value in sseq.sequence.iter() { + let ins = &self.unit.dfg.instructions[value.index]; + match ins.opcode { + Opcode::Phi => (), + _ => continue, + }; + let input = bld.use_var(Variable::new(ins.operands[pidx].index)); + let res = bld.ins().copy(input); + bld.def_var(Variable::new(value.index), res); + } + } + + fn instruction(&mut self, idx: usize, ins: &Instruction, sidx: SequenceIndex, seq: &Sequence, ebbs: &Vec, bld: &mut FunctionBuilder) -> LowerResult<()> { use self::Opcode::*; match ins.opcode { Entry(_) => (), @@ -196,10 +222,22 @@ impl<'a> ConvertCtx<'a> { }; bld.def_var(Variable::new(idx), res); } - Sub(_n) | - Mul(_n) | - Div(_n) | - Rem(_n) => unimplemented!(), + Sub(_n) => unimplemented!(), + Mul(_n) => unimplemented!(), + Div(_n) => unimplemented!(), + Rem(n) => { + let a0 = bld.use_var(Variable::new(ins.operands[0].index)); + let a1 = bld.use_var(Variable::new(ins.operands[1].index)); + let res = match n { + SignedType::I8 | SignedType::I16 | + SignedType::I32 | SignedType::I64 + => bld.ins().srem(a0, a1), + SignedType::U8 | SignedType::U16 | + SignedType::U32 | SignedType::U64 + => bld.ins().urem(a0, a1), + }; + bld.def_var(Variable::new(idx), res); + }, SignExt(_n) => unimplemented!(), ZeroExt(_n) => unimplemented!(), Truncate(_f) | @@ -234,10 +272,66 @@ impl<'a> ConvertCtx<'a> { } Goto => { let SuccessorIndex(idx) = seq.default.unwrap(); + self.bind_phis(sidx, seq, SuccessorIndex(idx), bld); let SequenceIndex(idx) = seq.successors[idx]; bld.ins().jump(ebbs[idx], &[]); } - Switch(_) => unimplemented!(), + Switch(_) => { + // TODO: use data field and the number of successors to + // determine if we should generate a jump table. 
+ let a0 = bld.use_var(Variable::new(ins.operands[0].index)); + assert!(seq.unwind == None, "Switch statements are cannot unwind"); + let nb_succ = seq.targets.len() + seq.default.map_or(0, |_| 1); + assert!(nb_succ > 0, "Switch statement should at least have a successor,"); + match nb_succ { + 0 => unreachable!(), + 1 => { + // Unconditional jump to the only branch. + let SuccessorIndex(idx) = seq.default.unwrap_or_else(|| seq.targets[0].1); + self.bind_phis(sidx, seq, SuccessorIndex(idx), bld); + let SequenceIndex(idx) = seq.successors[idx]; + bld.ins().jump(ebbs[idx], &[]); + } + 2 => { + let (tv, t, f) = match seq.default { + Some(f) => (seq.targets[0].0, seq.targets[0].1, f), + None => { + let (tv, t) = seq.targets[0]; + let (fv, f) = seq.targets[1]; + assert_ne!(tv, fv); + if (0 <= tv && tv < fv) || (fv < tv && tv <= 0) { + (tv, t, f) + } else { + (fv, f, t) + } + } + }; + + // Conditional jump to the first branch. + let SuccessorIndex(t) = t; + self.bind_phis(sidx, seq, SuccessorIndex(t), bld); + let SequenceIndex(t) = seq.successors[t]; + if tv == 0 { + bld.ins().brz(a0, ebbs[t], &[]); + } else { + let t0 = Variable::new(self.var_types.len()); + self.var_types.push(Some((t0, types::B1))); + bld.declare_var(t0, types::B1); + let cmp_res = bld.ins().icmp_imm(IntCC::Equal, a0, Imm64::new(tv as i64)); + bld.def_var(t0, cmp_res); + let t0 = bld.use_var(t0); + bld.ins().brnz(t0, ebbs[t], &[]); + } + + // Unconditional jump to the second branch. + let SuccessorIndex(f) = f; + self.bind_phis(sidx, seq, SuccessorIndex(f), bld); + let SequenceIndex(f) = seq.successors[f]; + bld.ins().jump(ebbs[f], &[]); + } + _ => unimplemented!("Switch with more than 2 branches"), + } + } Call(_id) => unimplemented!(), CallUnit(_id) => unimplemented!(), }; @@ -247,7 +341,7 @@ impl<'a> ConvertCtx<'a> { /// Convert the sequences of the control flow graph into a graph of extended /// blocks expected by Cranelift. 
- fn cfg(&self, bld: &mut FunctionBuilder) -> LowerResult<()> { + fn cfg(&mut self, bld: &mut FunctionBuilder) -> LowerResult<()> { let _types = self.declare_vars(bld)?; // TODO: Create better extended basic blocks instead of creating a 1:1 @@ -295,15 +389,12 @@ impl<'a> ConvertCtx<'a> { // Convert instructions. NOTE: Assumes that all the instructions are // scheduled properly in the control flow graph. for value in seq.sequence.iter() { - self.instruction(value.index, &self.unit.dfg.instructions[value.index], seq, &ebbs, bld)?; + self.instruction(value.index, &self.unit.dfg.instructions[value.index], SequenceIndex(i), seq, &ebbs, bld)?; } - // Set variables corresponding to the Phi of the following blocks. - // TODO! - // Add conditional and jump instruction. let value = seq.control; - self.instruction(value.index, &self.unit.dfg.instructions[value.index], seq, &ebbs, bld)?; + self.instruction(value.index, &self.unit.dfg.instructions[value.index], SequenceIndex(i), seq, &ebbs, bld)?; // Seal blocks which were waiting on the current block to be ended. let rest = mem::replace(&mut seal_queue, vec![]); @@ -319,7 +410,7 @@ impl<'a> ConvertCtx<'a> { } /// Convert a LIR Unit into a Cranelift IR (Function). - fn convert(&self) -> LowerResult { + fn convert(&mut self) -> LowerResult { let sig = self.signature()?; let mut fn_builder_ctx = FunctionBuilderContext::::new(); let mut func = Function::with_name_signature(self.external_name(self.unit.id), sig); @@ -337,7 +428,7 @@ impl<'a> ConvertCtx<'a> { /// Convert a LIR Unit into a Cranelift IR (Function). 
pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult { - let cc = ConvertCtx { ctx, isa, unit }; + let mut cc = ConvertCtx { ctx, isa, unit, var_types: vec![] }; println!("{:?}", unit); let func = cc.convert()?; println!("{}", func.display(None)); diff --git a/lir/src/builder.rs b/lir/src/builder.rs index 0ffe2b5..301faa5 100644 --- a/lir/src/builder.rs +++ b/lir/src/builder.rs @@ -194,29 +194,47 @@ impl<'a> UnitBuilder<'a> { /// Set conditional branch. pub fn sequence_value_jump(&mut self, value: isize, seq: SequenceIndex) { let SequenceIndex(index) = self.sequence.unwrap(); - let edit = &mut self.unit.cfg.sequences[index]; - edit.successors.push(seq); - let succ_idx = SuccessorIndex(edit.successors.len() - 1); - debug_assert!(!edit.targets.iter().any(|&(v, _)| v == value)); - edit.targets.push((value, succ_idx)); + let succ_idx = { + let edit = &mut self.unit.cfg.sequences[index]; + edit.successors.push(seq); + let succ_idx = SuccessorIndex(edit.successors.len() - 1); + debug_assert!(!edit.targets.iter().any(|&(v, _)| v == value)); + edit.targets.push((value, succ_idx)); + succ_idx + }; + let SequenceIndex(seq) = seq; + let edit = &mut self.unit.cfg.sequences[seq]; + edit.predecessors.push((SequenceIndex(index), succ_idx)); } /// Set default branch. 
pub fn sequence_default_jump(&mut self, seq: SequenceIndex) { let SequenceIndex(index) = self.sequence.unwrap(); - let edit = &mut self.unit.cfg.sequences[index]; - edit.successors.push(seq); - let succ_idx = SuccessorIndex(edit.successors.len() - 1); - debug_assert_eq!(edit.default, None); - edit.default = Some(succ_idx); + let succ_idx = { + let edit = &mut self.unit.cfg.sequences[index]; + edit.successors.push(seq); + let succ_idx = SuccessorIndex(edit.successors.len() - 1); + debug_assert_eq!(edit.default, None); + edit.default = Some(succ_idx); + succ_idx + }; + let SequenceIndex(seq) = seq; + let edit = &mut self.unit.cfg.sequences[seq]; + edit.predecessors.push((SequenceIndex(index), succ_idx)); } /// Set unwind branch. pub fn sequence_unwind_jump(&mut self, seq: SequenceIndex) { let SequenceIndex(index) = self.sequence.unwrap(); - let edit = &mut self.unit.cfg.sequences[index]; - edit.successors.push(seq); - let succ_idx = SuccessorIndex(edit.successors.len() - 1); - debug_assert_eq!(edit.unwind, None); - edit.unwind = Some(succ_idx); + let succ_idx = { + let edit = &mut self.unit.cfg.sequences[index]; + edit.successors.push(seq); + let succ_idx = SuccessorIndex(edit.successors.len() - 1); + debug_assert_eq!(edit.unwind, None); + edit.unwind = Some(succ_idx); + succ_idx + }; + let SequenceIndex(seq) = seq; + let edit = &mut self.unit.cfg.sequences[seq]; + edit.predecessors.push((SequenceIndex(index), succ_idx)); } /// Finalize and (TODO) assert that the generate Unit is valid. diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index 11344b7..c9d309a 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -105,7 +105,7 @@ pub enum Opcode { /// Division. (2 operands: result = lhs / rhs) Div(number::NumberType), /// Remainder. (2 operands: result = lhs % rhs) - Rem(number::NumberType), + Rem(number::SignedType), /// Sign-extend. (1 operand) SignExt(number::SignedType), /// Zero-extend. 
(1 operand) @@ -278,8 +278,8 @@ impl Opcode { Add(n) | Sub(n) | Mul(n) | - Div(n) | - Rem(n) => ValueType::Number(n), + Div(n) => ValueType::Number(n), + Rem(n) => ValueType::Number(n.into()), SignExt(n) => ValueType::Number(n.into()), ZeroExt(n) => ValueType::Number(n.into()), Truncate(f) | From 045b87fca26ed67a11df3067b1cc80c7b95685b1 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Mon, 27 Aug 2018 00:17:53 +0200 Subject: [PATCH 10/32] Generate code for overflow and carry flags for additions. --- codegen/src/lib.rs | 364 +++++++++++++++++++++++++++++++++++++++++++ codegen/src/lower.rs | 113 +++++++++++--- lir/src/data_flow.rs | 2 +- lir/src/number.rs | 4 + 4 files changed, 464 insertions(+), 19 deletions(-) diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index 44b9f0b..a9386af 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -198,4 +198,368 @@ mod tests { assert_eq!(round_odd_up(1618033), 1618034); assert_eq!(round_odd_up(-5), -4); } + + #[test] + fn add_overflow_i32_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + bld.set_entry(); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); + let v2 = bld.add_ins(Instruction { + opcode: Opcode::OverflowFlag, + operands: vec![], + dependencies: vec![v1], + replaced_by: None, + }); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v2], + dependencies: vec![], + replaced_by: None, + }) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_overflow : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_overflow(0, 0), false); + assert_eq!(add_overflow(-1, 1), true); + assert_eq!(add_overflow(i32::max_value() - 1, 1), false); + assert_eq!(add_overflow(i32::max_value(), 1), true); + assert_eq!(add_overflow(i32::min_value(), -1), true); + } + + #[test] + fn add_overflow_u32_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + bld.set_entry(); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); + let v2 = bld.add_ins(Instruction { + opcode: Opcode::OverflowFlag, + operands: vec![], + dependencies: vec![v1], + replaced_by: None, + }); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v2], + dependencies: vec![], + replaced_by: None, + }) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_overflow : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_overflow(0, 0), false); + assert_eq!(add_overflow(u32::max_value() - 1, 1), false); + assert_eq!(add_overflow(u32::max_value() >> 1, 1), true); + assert_eq!(add_overflow(u32::max_value(), 1), true); + } + + #[test] + fn add_overflow_i64_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + bld.set_entry(); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); + let v2 = bld.add_ins(Instruction { + opcode: Opcode::OverflowFlag, + operands: vec![], + dependencies: vec![v1], + replaced_by: None, + }); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v2], + dependencies: vec![], + replaced_by: None, + }) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_overflow : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_overflow(0, 0), false); + assert_eq!(add_overflow(-1, 1), true); + assert_eq!(add_overflow(i64::max_value() - 1, 1), false); + assert_eq!(add_overflow(i64::max_value(), 1), true); + assert_eq!(add_overflow(i64::min_value(), -1), true); + } + + #[test] + fn add_overflow_u64_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + bld.set_entry(); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); + let v2 = bld.add_ins(Instruction { + opcode: Opcode::OverflowFlag, + operands: vec![], + dependencies: vec![v1], + replaced_by: None, + }); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v2], + dependencies: vec![], + replaced_by: None, + }) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_overflow : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_overflow(0, 0), false); + assert_eq!(add_overflow(u64::max_value() - 1, 1), false); + assert_eq!(add_overflow(u64::max_value() >> 1, 1), true); + assert_eq!(add_overflow(u64::max_value(), 1), true); + } + + #[test] + fn add_carry_i32_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + bld.set_entry(); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); + let v2 = bld.add_ins(Instruction { + opcode: Opcode::CarryFlag, + operands: vec![], + dependencies: vec![v1], + replaced_by: None, + }); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v2], + dependencies: vec![], + replaced_by: None, + }) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_carry : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_carry(0, 0), false); + assert_eq!(add_carry(-1, 1), true); + assert_eq!(add_carry(i32::max_value() - 1, 1), false); + assert_eq!(add_carry(i32::max_value(), 1), false); + assert_eq!(add_carry(i32::min_value(), -1), true); + } + + #[test] + fn add_carry_u32_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + bld.set_entry(); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); + let v2 = bld.add_ins(Instruction { + opcode: Opcode::CarryFlag, + operands: vec![], + dependencies: vec![v1], + replaced_by: None, + }); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v2], + dependencies: vec![], + replaced_by: None, + }) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_carry : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_carry(0, 0), false); + assert_eq!(add_carry(u32::max_value() - 1, 1), false); + assert_eq!(add_carry(u32::max_value() >> 1, 1), false); + assert_eq!(add_carry(u32::max_value(), 1), true); + } + + #[test] + fn add_carry_i64_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + bld.set_entry(); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); + let v2 = bld.add_ins(Instruction { + opcode: Opcode::CarryFlag, + operands: vec![], + dependencies: vec![v1], + replaced_by: None, + }); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v2], + dependencies: vec![], + replaced_by: None, + }) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_carry : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_carry(0, 0), false); + assert_eq!(add_carry(-1, 1), true); + assert_eq!(add_carry(i64::max_value() - 1, 1), false); + assert_eq!(add_carry(i64::max_value(), 1), false); + assert_eq!(add_carry(i64::min_value(), -1), true); + } + + #[test] + fn add_carry_u64_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.switch_to_sequence(s0); + bld.set_entry(); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); + let v2 = bld.add_ins(Instruction { + opcode: Opcode::CarryFlag, + operands: vec![], + dependencies: vec![v1], + replaced_by: None, + }); + bld.end_sequence(Instruction { + opcode: Opcode::Return, + operands: vec![v2], + dependencies: vec![], + replaced_by: None, + }) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_carry : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_carry(0, 0), false); + assert_eq!(add_carry(u64::max_value() - 1, 1), false); + assert_eq!(add_carry(u64::max_value() >> 1, 1), false); + assert_eq!(add_carry(u64::max_value(), 1), true); + } } diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 7e9c09a..432fe59 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -1,4 +1,5 @@ use std::mem; +use std::collections::HashMap; use frontend::{FunctionBuilderContext, FunctionBuilder, Variable}; @@ -17,21 +18,41 @@ use lir::context::Context; use lir::types::{ComplexTypeId, ComplexType}; use lir::number::{NumberType, SignedType, NumberValue}; use lir::control_flow::{Sequence, SequenceIndex, SuccessorIndex}; -use lir::data_flow::{Opcode, Instruction, ValueType}; +use lir::data_flow::{Opcode, Instruction, ValueType, Value}; use error::{LowerResult, LowerError}; struct ConvertCtx<'a> { + /// Used to know the size of a pointer. pub isa: &'a TargetIsa, + /// Context used to resove the LIR types information. 
pub ctx: &'a Context, + /// Unit of the function which is being compiled. pub unit: &'a Unit, + /// Map each variable known with the type declared to cranelift. This is + /// also used to allocate new temporary variables. pub var_types: Vec, + /// Map the math operation to the overflow variable. + pub overflow_map: HashMap, + /// Map the math operation to the carry variable. + pub carry_map: HashMap, } type OptVarType = Option<(Variable, types::Type)>; impl<'a> ConvertCtx<'a> { + fn sign_mask(&self, ty: NumberType) -> u64 { + match ty { + NumberType::F64 | NumberType::I64 | NumberType::U64 => 0x8000_0000_0000_0000, + NumberType::F32 | NumberType::I32 | NumberType::U32 => 0x8000_0000, + NumberType::I16 | NumberType::U16 => 0x8000, + NumberType::I8 | NumberType::U8 => 0x80, + NumberType::B1 => 1, + } + } + fn number_type(&self, ty: NumberType) -> types::Type { match ty { + NumberType::B1 => types::B1, NumberType::I8 | NumberType::U8 => types::I8, NumberType::I16 | NumberType::U16 => types::I16, NumberType::I32 | NumberType::U32 => types::I32, @@ -47,6 +68,7 @@ impl<'a> ConvertCtx<'a> { let ty = self.ctx.get_type(ty); match ty { &Pointer => Ok(self.isa.pointer_type()), + &Scalar(B1) => Ok(types::B1), &Scalar(U8) | &Scalar(I8) => Ok(types::I8), &Scalar(U16) | &Scalar(I16) => Ok(types::I16), &Scalar(U32) | &Scalar(I32) => Ok(types::I32), @@ -134,6 +156,18 @@ impl<'a> ConvertCtx<'a> { let v = Variable::new(index); bld.declare_var(v, ty); types[index] = Some((v, ty)); + + // Record Overflow and Carry dependency and map it to overflow / + // carry variable. Note, that we expect to have only a single + // overflow and carry per math operation. TODO: To handle multiple + // overflow flag and carry flag, should create new variable that + // would be copied on overflow/carry encoding. 
+ let ins_res = match ins.opcode { + Opcode::OverflowFlag => self.overflow_map.insert(ins.dependencies[0], v), + Opcode::CarryFlag => self.carry_map.insert(ins.dependencies[0], v), + _ => None, + }; + debug_assert!(ins_res == None); } // Infer type from the operands. @@ -180,20 +214,22 @@ impl<'a> ConvertCtx<'a> { } } - fn instruction(&mut self, idx: usize, ins: &Instruction, sidx: SequenceIndex, seq: &Sequence, ebbs: &Vec, bld: &mut FunctionBuilder) -> LowerResult<()> { + fn instruction(&mut self, val: Value, ins: &Instruction, sidx: SequenceIndex, seq: &Sequence, ebbs: &Vec, bld: &mut FunctionBuilder) -> LowerResult<()> { use self::Opcode::*; + let res_var = Variable::new(val.index); match ins.opcode { Entry(_) => (), Newhash(_) => (), Rehash(_) => { let a0 = bld.use_var(Variable::new(ins.operands[0].index)); let res = bld.ins().copy(a0); - bld.def_var(Variable::new(idx), res); + bld.def_var(Variable::new(val.index), res); } Phi => (), // Phi are handled at the end of predecessor blocks. Const(val) => { use self::NumberValue::*; let res = match val { + B1(i) => bld.ins().bconst(types::B1, i), I8(i) => bld.ins().iconst(types::I8, i as i64), I16(i) => bld.ins().iconst(types::I16, i as i64), I32(i) => bld.ins().iconst(types::I32, i as i64), @@ -205,7 +241,7 @@ impl<'a> ConvertCtx<'a> { F32(f) => bld.ins().f32const(Ieee32::with_float(f)), F64(f) => bld.ins().f64const(Ieee64::with_float(f)), }; - bld.def_var(Variable::new(idx), res); + bld.def_var(res_var, res); }, Cast(_id) => unimplemented!(), OverflowFlag => (), @@ -216,11 +252,47 @@ impl<'a> ConvertCtx<'a> { // bits. 
let a0 = bld.use_var(Variable::new(ins.operands[0].index)); let a1 = bld.use_var(Variable::new(ins.operands[1].index)); - let res = match n { - NumberType::F32 | NumberType::F64 => bld.ins().fadd(a0, a1), - _ => bld.ins().iadd(a0, a1), + let (of, cf) = (self.overflow_map.get(&val), + self.carry_map.get(&val)); + match (n, of, cf) { + (NumberType::F32, _, _) | + (NumberType::F64, _, _) => { + debug_assert!(of == None); + debug_assert!(cf == None); + let res = bld.ins().fadd(a0, a1); + bld.def_var(res_var, res); + } + (_, None, None) => { + let res = bld.ins().iadd(a0, a1); + bld.def_var(res_var, res); + } + (_, None, Some(cv)) => { + let (res, carry) = bld.ins().iadd_cout(a0, a1); + bld.def_var(res_var, res); + bld.def_var(*cv, carry); + } + (i, Some(ov), None) => { + let res = bld.ins().iadd(a0, a1); + bld.def_var(res_var, res); + // of = ((a0 | a1) ^ (a0 + a1)) > (max >> 1) + let bor = bld.ins().bor(a0, a1); + let xor = bld.ins().bxor(bor, res); + let sign_mask = self.sign_mask(i); + let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xor, Imm64::new(sign_mask as i64)); + bld.def_var(*ov, cmp); + } + (i, Some(ov), Some(cv)) => { + let (res, carry) = bld.ins().iadd_cout(a0, a1); + bld.def_var(res_var, res); + bld.def_var(*cv, carry); + // of = ((a0 | a1) ^ (a0 + a1)) > (max >> 1) + let bor = bld.ins().bor(a0, a1); + let xor = bld.ins().bxor(bor, res); + let sign_mask = self.sign_mask(i); + let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xor, Imm64::new(sign_mask as i64)); + bld.def_var(*ov, cmp); + } }; - bld.def_var(Variable::new(idx), res); } Sub(_n) => unimplemented!(), Mul(_n) => unimplemented!(), @@ -236,7 +308,7 @@ impl<'a> ConvertCtx<'a> { SignedType::U32 | SignedType::U64 => bld.ins().urem(a0, a1), }; - bld.def_var(Variable::new(idx), res); + bld.def_var(res_var, res); }, SignExt(_n) => unimplemented!(), ZeroExt(_n) => unimplemented!(), @@ -314,13 +386,13 @@ impl<'a> ConvertCtx<'a> { if tv == 0 { bld.ins().brz(a0, ebbs[t], &[]); 
} else { - let t0 = Variable::new(self.var_types.len()); - self.var_types.push(Some((t0, types::B1))); - bld.declare_var(t0, types::B1); + // let t0 = Variable::new(self.var_types.len()); + // self.var_types.push(Some((t0, types::B1))); + // bld.declare_var(t0, types::B1); let cmp_res = bld.ins().icmp_imm(IntCC::Equal, a0, Imm64::new(tv as i64)); - bld.def_var(t0, cmp_res); - let t0 = bld.use_var(t0); - bld.ins().brnz(t0, ebbs[t], &[]); + // bld.def_var(t0, cmp_res); + // let t0 = bld.use_var(t0); + bld.ins().brnz(cmp_res, ebbs[t], &[]); } // Unconditional jump to the second branch. @@ -389,12 +461,12 @@ impl<'a> ConvertCtx<'a> { // Convert instructions. NOTE: Assumes that all the instructions are // scheduled properly in the control flow graph. for value in seq.sequence.iter() { - self.instruction(value.index, &self.unit.dfg.instructions[value.index], SequenceIndex(i), seq, &ebbs, bld)?; + self.instruction(*value, &self.unit.dfg.instructions[value.index], SequenceIndex(i), seq, &ebbs, bld)?; } // Add conditional and jump instruction. let value = seq.control; - self.instruction(value.index, &self.unit.dfg.instructions[value.index], SequenceIndex(i), seq, &ebbs, bld)?; + self.instruction(value, &self.unit.dfg.instructions[value.index], SequenceIndex(i), seq, &ebbs, bld)?; // Seal blocks which were waiting on the current block to be ended. let rest = mem::replace(&mut seal_queue, vec![]); @@ -428,7 +500,12 @@ impl<'a> ConvertCtx<'a> { /// Convert a LIR Unit into a Cranelift IR (Function). 
pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult { - let mut cc = ConvertCtx { ctx, isa, unit, var_types: vec![] }; + let mut cc = ConvertCtx { + ctx, isa, unit, + var_types: vec![], + overflow_map: HashMap::new(), + carry_map: HashMap::new(), + }; println!("{:?}", unit); let func = cc.convert()?; println!("{}", func.display(None)); diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index c9d309a..caf577e 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -25,7 +25,7 @@ pub struct DataFlow { /// terminator. As opposed to ordinary SSA notation, we use a hash instead of an /// instruction index, in order to be able to generate position-independent /// patches for each Unit. -#[derive(Serialize, Deserialize, Debug, Hash, Clone, Copy)] +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Hash, Clone, Copy)] pub struct Value { pub hash: u64, pub index: usize, diff --git a/lir/src/number.rs b/lir/src/number.rs index 77e8f49..b545e7a 100644 --- a/lir/src/number.rs +++ b/lir/src/number.rs @@ -7,6 +7,7 @@ use std::hash::{Hash, Hasher}; /// method. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum NumberType { + B1, U8, U16, U32, U64, I8, I16, I32, I64, F32, F64, @@ -27,6 +28,7 @@ pub enum FloatType { #[derive(Serialize, Deserialize, Debug, Clone, Copy)] /* derive(Hash)-manually */ pub enum NumberValue { + B1(bool), U8(u8), U16(u16), U32(u32), U64(u64), I8(i8), I16(i16), I32(i32), I64(i64), F32(f32), F64(f64), @@ -71,6 +73,7 @@ macro_rules! 
from_with_same_prefix_remove_parent { } from_with_same_prefix_remove_parent!(impl From for NumberType => + B1, U8, U16, U32, U64, I8, I16, I32, I64, F32, F64, @@ -82,6 +85,7 @@ impl Hash for NumberValue { use std::mem; mem::discriminant(self).hash(state); match self { + &NumberValue::B1(v) => v.hash(state), &NumberValue::U8(v) => v.hash(state), &NumberValue::U16(v) => v.hash(state), &NumberValue::U32(v) => v.hash(state), From 81a7fba5f6e6f75adf386437edc468c8a8e1e283 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Fri, 28 Sep 2018 14:26:52 +0200 Subject: [PATCH 11/32] Add builders add_op_deps and corresponding end functions. --- codegen/src/lib.rs | 140 +++++++-------------------------------------- lir/src/builder.rs | 31 ++++++++-- 2 files changed, 46 insertions(+), 125 deletions(-) diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index a9386af..391be5b 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -114,12 +114,7 @@ mod tests { let a0 = bld.unit_arg(0); let v0 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v0]); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v1], - dependencies: vec![], - replaced_by: None, - }) + bld.end_ins(Opcode::Return, &[v1]); } bld.finish() }; @@ -153,12 +148,7 @@ mod tests { let a0 = bld.unit_arg(0); let v0 = bld.add_op(Opcode::Const(NumberValue::I32(2)), &[]); let v1 = bld.add_op(Opcode::Rem(SignedType::I32), &[a0, v0]); - bld.end_sequence(Instruction { - opcode: Opcode::Switch(SwitchData { low: 0, high: 1 }), - operands: vec![v1], - dependencies: vec![], - replaced_by: None, - }); + bld.end_op(Opcode::Switch(SwitchData { low: 0, high: 1 }), &[v1]); bld.sequence_value_jump(0, s2); bld.sequence_value_jump(1, s1); @@ -166,23 +156,13 @@ mod tests { bld.switch_to_sequence(s1); let v2 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); let v3 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v2]); - 
bld.end_sequence(Instruction { - opcode: Opcode::Goto, - operands: vec![], - dependencies: vec![], - replaced_by: None, - }); + bld.end_op(Opcode::Goto, &[]); bld.sequence_default_jump(s2); // [sequence 2] bld.switch_to_sequence(s2); let v4 = bld.add_op(Opcode::Phi, &[a0, v3]); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v4], - dependencies: vec![], - replaced_by: None, - }); + bld.end_op(Opcode::Return, &[v4]); bld.finish() }; @@ -216,18 +196,8 @@ mod tests { let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); - let v2 = bld.add_ins(Instruction { - opcode: Opcode::OverflowFlag, - operands: vec![], - dependencies: vec![v1], - replaced_by: None, - }); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v2], - dependencies: vec![], - replaced_by: None, - }) + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) } bld.finish() }; @@ -262,18 +232,8 @@ mod tests { let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); - let v2 = bld.add_ins(Instruction { - opcode: Opcode::OverflowFlag, - operands: vec![], - dependencies: vec![v1], - replaced_by: None, - }); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v2], - dependencies: vec![], - replaced_by: None, - }) + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) } bld.finish() }; @@ -307,18 +267,8 @@ mod tests { let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); - let v2 = bld.add_ins(Instruction { - opcode: Opcode::OverflowFlag, - operands: vec![], - dependencies: vec![v1], - replaced_by: None, - }); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v2], - dependencies: vec![], - replaced_by: None, - }) + let v2 = 
bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) } bld.finish() }; @@ -353,18 +303,8 @@ mod tests { let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); - let v2 = bld.add_ins(Instruction { - opcode: Opcode::OverflowFlag, - operands: vec![], - dependencies: vec![v1], - replaced_by: None, - }); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v2], - dependencies: vec![], - replaced_by: None, - }) + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) } bld.finish() }; @@ -398,18 +338,8 @@ mod tests { let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); - let v2 = bld.add_ins(Instruction { - opcode: Opcode::CarryFlag, - operands: vec![], - dependencies: vec![v1], - replaced_by: None, - }); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v2], - dependencies: vec![], - replaced_by: None, - }) + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) } bld.finish() }; @@ -444,18 +374,8 @@ mod tests { let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); - let v2 = bld.add_ins(Instruction { - opcode: Opcode::CarryFlag, - operands: vec![], - dependencies: vec![v1], - replaced_by: None, - }); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v2], - dependencies: vec![], - replaced_by: None, - }) + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) } bld.finish() }; @@ -489,18 +409,8 @@ mod tests { let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); - let v2 = bld.add_ins(Instruction { - opcode: Opcode::CarryFlag, - operands: vec![], - dependencies: vec![v1], - replaced_by: None, - }); - 
bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v2], - dependencies: vec![], - replaced_by: None, - }) + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) } bld.finish() }; @@ -535,18 +445,8 @@ mod tests { let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); - let v2 = bld.add_ins(Instruction { - opcode: Opcode::CarryFlag, - operands: vec![], - dependencies: vec![v1], - replaced_by: None, - }); - bld.end_sequence(Instruction { - opcode: Opcode::Return, - operands: vec![v2], - dependencies: vec![], - replaced_by: None, - }) + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) } bld.finish() }; diff --git a/lir/src/builder.rs b/lir/src/builder.rs index 301faa5..600ef40 100644 --- a/lir/src/builder.rs +++ b/lir/src/builder.rs @@ -157,19 +157,24 @@ impl<'a> UnitBuilder<'a> { value } - /// Add an instruction based only on its opcode, this function creates a - /// conservative aliasing between load, store, calls and units. - pub fn add_op(&mut self, opcode: Opcode, operands: &[Value]) -> Value { + /// Add an instruction based on its opcode, operands and dependencies. + pub fn add_op_deps(&mut self, opcode: Opcode, operands: &[Value], dependencies: &[Value]) -> Value { self.add_ins(Instruction { opcode, operands: operands.iter().map(|x| *x).collect(), - dependencies: vec![], + dependencies: dependencies.iter().map(|x| *x).collect(), replaced_by: None, }) } + /// Add an instruction based only on its opcode, this function creates a + /// conservative aliasing between load, store, calls and units. + pub fn add_op(&mut self, opcode: Opcode, operands: &[Value]) -> Value { + self.add_op_deps(opcode, operands, &[]) + } + /// Add a control flow instruction to end the current sequence. 
- pub fn end_sequence(&mut self, ins: Instruction) { + pub fn end_ins(&mut self, ins: Instruction) { debug_assert!(ins.is_control()); let is_return = ins.opcode.is_return(); let value = self.dfg_add_ins(ins); @@ -186,6 +191,22 @@ impl<'a> UnitBuilder<'a> { } } + // Add a control flow instruction based on its opcode, operands and dependencies. + pub fn end_op_deps(&mut self, opcode: Opcode, operands: &[Value], dependencies: &[Value]) { + self.end_ins(Instruction { + opcode, + operands: operands.iter().map(|x| *x).collect(), + dependencies: dependencies.iter().map(|x| *x).collect(), + replaced_by: None, + }) + } + + /// Add a control flow instruction based only on its opcode, this function + /// creates a conservative aliasing between load, store, calls and units. + pub fn end_op(&mut self, opcode: Opcode, operands: &[Value]) { + self.end_op_deps(opcode, operands, &[]) + } + pub fn set_entry(&mut self) { debug_assert!(self.unit.cfg.entry.is_dummy()); self.unit.cfg.entry = self.sequence.unwrap(); From 486bdd0567105b24b614b93e698eb025b9d61769 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Fri, 28 Sep 2018 14:27:52 +0200 Subject: [PATCH 12/32] Add builders the ability to know when we are done adding predecessors to a block. 
--- codegen/src/lib.rs | 2 ++ lir/src/builder.rs | 21 ++++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index 391be5b..3ad62f3 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -154,6 +154,7 @@ mod tests { // [sequence 1] bld.switch_to_sequence(s1); + bld.freeze_sequence_predecessors(s1); let v2 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); let v3 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v2]); bld.end_op(Opcode::Goto, &[]); @@ -161,6 +162,7 @@ mod tests { // [sequence 2] bld.switch_to_sequence(s2); + bld.freeze_sequence_predecessors(s2); let v4 = bld.add_op(Opcode::Phi, &[a0, v3]); bld.end_op(Opcode::Return, &[v4]); diff --git a/lir/src/builder.rs b/lir/src/builder.rs index 600ef40..ac8cd58 100644 --- a/lir/src/builder.rs +++ b/lir/src/builder.rs @@ -1,7 +1,7 @@ /// This module contains everything need for constructing a Unit, with its data /// flow graph and its control flow graph. -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use unit::{Unit, UnitId}; use data_flow::{Instruction, Opcode, Value}; @@ -29,6 +29,9 @@ pub struct UnitBuilder<'a> { ctx: &'a mut ContextBuilder, /// Sequence which is currently being editted. sequence: Option, + /// List of sequences which are frozen, which implies that no predecessors + /// should be added to them. + frozen_seqs: HashSet, } impl ContextBuilder { @@ -86,6 +89,7 @@ impl<'a> UnitBuilder<'a> { unit: Unit::new(id), ctx, sequence: None, + frozen_seqs: HashSet::new(), } } @@ -209,11 +213,14 @@ impl<'a> UnitBuilder<'a> { pub fn set_entry(&mut self) { debug_assert!(self.unit.cfg.entry.is_dummy()); - self.unit.cfg.entry = self.sequence.unwrap(); + let seq = self.sequence.unwrap(); + self.unit.cfg.entry = seq; + self.freeze_sequence_predecessors(seq); } /// Set conditional branch. 
pub fn sequence_value_jump(&mut self, value: isize, seq: SequenceIndex) { + debug_assert!(!self.frozen_seqs.contains(&seq)); let SequenceIndex(index) = self.sequence.unwrap(); let succ_idx = { let edit = &mut self.unit.cfg.sequences[index]; @@ -229,6 +236,7 @@ impl<'a> UnitBuilder<'a> { } /// Set default branch. pub fn sequence_default_jump(&mut self, seq: SequenceIndex) { + debug_assert!(!self.frozen_seqs.contains(&seq)); let SequenceIndex(index) = self.sequence.unwrap(); let succ_idx = { let edit = &mut self.unit.cfg.sequences[index]; @@ -244,6 +252,7 @@ impl<'a> UnitBuilder<'a> { } /// Set unwind branch. pub fn sequence_unwind_jump(&mut self, seq: SequenceIndex) { + debug_assert!(!self.frozen_seqs.contains(&seq)); let SequenceIndex(index) = self.sequence.unwrap(); let succ_idx = { let edit = &mut self.unit.cfg.sequences[index]; @@ -257,8 +266,14 @@ impl<'a> UnitBuilder<'a> { let edit = &mut self.unit.cfg.sequences[seq]; edit.predecessors.push((SequenceIndex(index), succ_idx)); } + /// Prevent the addition of any predecessors to the given sequence. This is + /// used for computing the automatic insertion of Phi instructions. + pub fn freeze_sequence_predecessors(&mut self, seq: SequenceIndex) { + self.frozen_seqs.insert(seq); + } + - /// Finalize and (TODO) assert that the generate Unit is valid. + /// Finalize and (TODO) assert that the generated Unit is valid. pub fn finish(self) -> Unit { self.unit } From 45d4c1f2ef06941e9005577ea8256ae95b2371a3 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Fri, 28 Sep 2018 17:15:55 +0200 Subject: [PATCH 13/32] Fix end_ins instruction. 
--- codegen/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index 3ad62f3..f9544b8 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -114,7 +114,7 @@ mod tests { let a0 = bld.unit_arg(0); let v0 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v0]); - bld.end_ins(Opcode::Return, &[v1]); + bld.end_op(Opcode::Return, &[v1]); } bld.finish() }; From a6657fd682cf277ec902f9f1d3bf1a6adc8c2ab1 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sun, 30 Sep 2018 23:55:40 +0200 Subject: [PATCH 14/32] Add SSA builder to automagically add Rehash & Phi instructions. --- codegen/src/error.rs | 1 + codegen/src/lib.rs | 89 +++++++++++++-- codegen/src/lower.rs | 91 ++++++++++++--- lir/src/bitset.rs | 9 ++ lir/src/builder.rs | 257 ++++++++++++++++++++++++++++++++++++++++--- lir/src/data_flow.rs | 57 +++++++++- lir/src/lib.rs | 2 + 7 files changed, 459 insertions(+), 47 deletions(-) create mode 100644 lir/src/bitset.rs diff --git a/codegen/src/error.rs b/codegen/src/error.rs index 53dda6d..1888763 100644 --- a/codegen/src/error.rs +++ b/codegen/src/error.rs @@ -16,6 +16,7 @@ pub enum LowerError { Protect, UnitIsNotAFunction, ComplexTypeNotLowered, + NoFixPointForVarTypes, } // TODO: impl Error for LowerError diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index f9544b8..c4798ef 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -109,8 +109,8 @@ mod tests { bld.set_signature(t_sig); let s0 = bld.create_sequence(); { + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let v0 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v0]); @@ -143,8 +143,8 @@ mod tests { let s2 = bld.create_sequence(); // return x // [sequence 0] + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let v0 = 
bld.add_op(Opcode::Const(NumberValue::I32(2)), &[]); let v1 = bld.add_op(Opcode::Rem(SignedType::I32), &[a0, v0]); @@ -153,16 +153,16 @@ mod tests { bld.sequence_value_jump(1, s1); // [sequence 1] - bld.switch_to_sequence(s1); bld.freeze_sequence_predecessors(s1); + bld.switch_to_sequence(s1); let v2 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); let v3 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v2]); bld.end_op(Opcode::Goto, &[]); bld.sequence_default_jump(s2); // [sequence 2] - bld.switch_to_sequence(s2); bld.freeze_sequence_predecessors(s2); + bld.switch_to_sequence(s2); let v4 = bld.add_op(Opcode::Phi, &[a0, v3]); bld.end_op(Opcode::Return, &[v4]); @@ -193,8 +193,8 @@ mod tests { bld.set_signature(t_sig); let s0 = bld.create_sequence(); { + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); @@ -229,8 +229,8 @@ mod tests { bld.set_signature(t_sig); let s0 = bld.create_sequence(); { + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); @@ -264,8 +264,8 @@ mod tests { bld.set_signature(t_sig); let s0 = bld.create_sequence(); { + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); @@ -300,8 +300,8 @@ mod tests { bld.set_signature(t_sig); let s0 = bld.create_sequence(); { + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); @@ -335,8 +335,8 @@ mod tests { bld.set_signature(t_sig); let s0 = bld.create_sequence(); { + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = 
bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); @@ -371,8 +371,8 @@ mod tests { bld.set_signature(t_sig); let s0 = bld.create_sequence(); { + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); @@ -406,8 +406,8 @@ mod tests { bld.set_signature(t_sig); let s0 = bld.create_sequence(); { + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); @@ -442,8 +442,8 @@ mod tests { bld.set_signature(t_sig); let s0 = bld.create_sequence(); { + bld.set_entry(s0); bld.switch_to_sequence(s0); - bld.set_entry(); let a0 = bld.unit_arg(0); let a1 = bld.unit_arg(1); let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); @@ -464,4 +464,69 @@ mod tests { assert_eq!(add_carry(u64::max_value() >> 1, 1), false); assert_eq!(add_carry(u64::max_value(), 1), true); } + + #[test] + fn sum_loop_test() { + let mut ctx_bld = ContextBuilder::new(); + let sum_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64], vec![t_u64], CanUnwind(true))); + let i = bld.new_var(); + let accu = bld.new_var(); + + let s0 = bld.create_sequence(); + let s1 = bld.create_sequence(); + let s2 = bld.create_sequence(); + + bld.set_signature(t_sig); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + bld.set_var(i, a0); + let v0 = bld.add_op(Opcode::Const(NumberValue::U64(0)), &[]); + bld.set_var(accu, v0); + bld.end_op(Opcode::Switch(SwitchData { low: 0, high: 0 }), &[a0]); + bld.sequence_value_jump(0, s2); + bld.sequence_default_jump(s1); + } + { + bld.switch_to_sequence(s1); + let v0 = bld.add_op(Opcode::Const(NumberValue::U64(1)), &[]); + let v1 = bld.use_var(i); + let v2 = bld.add_op(Opcode::Sub(NumberType::U64), &[v1, v0]); + bld.set_var(i, v2); + let v3 = bld.use_var(accu); + let v4 = bld.add_op(Opcode::Add(NumberType::U64), &[v1, v3]); + bld.set_var(accu, v4); + bld.end_op(Opcode::Switch(SwitchData { low: 0, high: 0 }), &[v2]); + bld.sequence_value_jump(0, s2); + bld.sequence_default_jump(s1); + bld.freeze_sequence_predecessors(s1); + } + { + bld.freeze_sequence_predecessors(s2); + bld.switch_to_sequence(s2); + bld.dead_var(i); + let v0 = bld.use_var(accu); + bld.dead_var(accu); + bld.end_op(Opcode::Return, &[v0]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sum_unit).unwrap(); + let sum : fn(u64) -> u64 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sum(0), 0); + assert_eq!(sum(1), 1); + assert_eq!(sum(2), 3); + assert_eq!(sum(3), 6); + assert_eq!(sum(4), 10); + } } diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 432fe59..c5bef62 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -170,20 +170,43 @@ impl<'a> ConvertCtx<'a> { debug_assert!(ins_res == None); } - // Infer type from the operands. 
-        for (index, ref ins) in self.unit.dfg.instructions.iter().enumerate() {
-            match ins.opcode.result_type() {
-                ValueType::InheritFromOperands => (),
-                _ => continue,
-            };
-            assert!(ins.operands.len() >= 1);
-            let ty = match types[ins.operands[0].index] {
-                Some((_, ty)) => ty,
-                None => continue,
-            };
-            let v = Variable::new(index);
-            bld.declare_var(v, ty);
-            types[index] = Some((v, ty));
+        // Infer type from the operands. Ideally instructions should be sorted
+        // within the sequences, but to satisfy cases where it is not we keep
+        // looping as long as we are able to find the type of more instructions.
+        let mut last_nb = usize::max_value();
+        let mut nb_unknown_types = usize::max_value();
+        while nb_unknown_types != 0 {
+            nb_unknown_types = 0;
+            for (index, ref ins) in self.unit.dfg.instructions.iter().enumerate() {
+                // Only loop over instructions which are inferred from their
+                // operands.
+                match ins.opcode.result_type() {
+                    ValueType::InheritFromOperands => (),
+                    _ => continue,
+                };
+                // In case of a fix-point just skip already typed instructions.
+                match types[index] {
+                    Some(_) => continue,
+                    None => (),
+                };
+                // Pick the first operand type and assign it to the current
+                // instruction.
+ assert!(ins.operands.len() >= 1); + let ty = match types[ins.operands[0].index] { + Some((_, ty)) => ty, + None => { + nb_unknown_types += 1; + continue + }, + }; + let v = Variable::new(index); + bld.declare_var(v, ty); + types[index] = Some((v, ty)); + } + if last_nb == nb_unknown_types { + return Err(LowerError::NoFixPointForVarTypes); + } + last_nb = nb_unknown_types; } self.var_types = types; @@ -221,6 +244,9 @@ impl<'a> ConvertCtx<'a> { Entry(_) => (), Newhash(_) => (), Rehash(_) => { + let op_ty = self.var_types[ins.operands[0].index].unwrap().1; + let rh_ty = self.var_types[val.index].unwrap().1; + debug_assert_eq!(op_ty, rh_ty); let a0 = bld.use_var(Variable::new(ins.operands[0].index)); let res = bld.ins().copy(a0); bld.def_var(Variable::new(val.index), res); @@ -247,9 +273,6 @@ impl<'a> ConvertCtx<'a> { OverflowFlag => (), CarryFlag => (), Add(n) => { - // TODO: If any overflow/carry flag depends on this instruction, we - // should change the encoding of this instruction to emit a carry - // bits. let a0 = bld.use_var(Variable::new(ins.operands[0].index)); let a1 = bld.use_var(Variable::new(ins.operands[1].index)); let (of, cf) = (self.overflow_map.get(&val), @@ -257,6 +280,7 @@ impl<'a> ConvertCtx<'a> { match (n, of, cf) { (NumberType::F32, _, _) | (NumberType::F64, _, _) => { + // TODO: Return an error instead. debug_assert!(of == None); debug_assert!(cf == None); let res = bld.ins().fadd(a0, a1); @@ -294,7 +318,38 @@ impl<'a> ConvertCtx<'a> { } }; } - Sub(_n) => unimplemented!(), + Sub(n) => { + // TODO: If any overflow/carry flag depends on this instruction, we + // should change the encoding of this instruction to emit a carry + // bits. 
+ let a0 = bld.use_var(Variable::new(ins.operands[0].index)); + let a1 = bld.use_var(Variable::new(ins.operands[1].index)); + let (of, cf) = (self.overflow_map.get(&val), + self.carry_map.get(&val)); + match (n, of, cf) { + (NumberType::F32, _, _) | + (NumberType::F64, _, _) => { + // TODO: Return an error instead. + debug_assert!(of == None); + debug_assert!(cf == None); + let res = bld.ins().fsub(a0, a1); + bld.def_var(res_var, res); + } + (_, None, None) => { + let res = bld.ins().isub(a0, a1); + bld.def_var(res_var, res); + } + (_, None, Some(_cv)) => { + unimplemented!(); + } + (_i, Some(_ov), None) => { + unimplemented!(); + } + (_i, Some(_ov), Some(_cv)) => { + unimplemented!(); + } + }; + }, Mul(_n) => unimplemented!(), Div(_n) => unimplemented!(), Rem(n) => { diff --git a/lir/src/bitset.rs b/lir/src/bitset.rs new file mode 100644 index 0000000..a293ae0 --- /dev/null +++ b/lir/src/bitset.rs @@ -0,0 +1,9 @@ +/// This module contains some data structure used for representing bit sets. + +use std::collections::BTreeSet; + +/// This implementation implement a set of bits which is encoded with 0 being +/// the most representated value. +// TODO: This implementation is very naive and might be optimized for a +// representation of bit set which is less sparsed. +pub type BitSet = BTreeSet; diff --git a/lir/src/builder.rs b/lir/src/builder.rs index ac8cd58..67b7ca1 100644 --- a/lir/src/builder.rs +++ b/lir/src/builder.rs @@ -8,6 +8,7 @@ use data_flow::{Instruction, Opcode, Value}; use control_flow::{Sequence, SequenceIndex, SuccessorIndex}; use types::{ComplexType, ComplexTypeId}; use context; +use bitset::BitSet; /// A UnitContext should be used across multiple `UnitBuilder`, in order to have /// different indexes and hashes for identical data. 
While this is not a @@ -22,6 +23,14 @@ pub struct ContextBuilder { types_lookup: HashMap, } +#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] +pub struct Variable(u32); + +/// This structure is used to capture variable states, and used to insert Phi +/// instructions if needed. +pub struct SSABuilder { +} + pub struct UnitBuilder<'a> { /// Identifier of the constructed unit. unit: Unit, @@ -32,6 +41,20 @@ pub struct UnitBuilder<'a> { /// List of sequences which are frozen, which implies that no predecessors /// should be added to them. frozen_seqs: HashSet, + + // [Build SSA values] + /// Number of variable which got allocated. + nb_vars: u32, + /// For each sequence, we have a list of variables which are live at the end + /// of each sequence. This is used for automatically inserting Phi + /// instructions in order to convert variables into an SSA form. + seq_live_vars: Vec, + /// For each live variable in a sequence, record which value it is + /// associated to. + seqvar_map: HashMap<(SequenceIndex, Variable), Value>, + /// List of Rehash instructions which have to be fixed by inserting Phi + /// instructions once all predecessors are known. 
+ rehash_map: HashMap>, } impl ContextBuilder { @@ -90,6 +113,10 @@ impl<'a> UnitBuilder<'a> { ctx, sequence: None, frozen_seqs: HashSet::new(), + nb_vars: 0, + seq_live_vars: vec![], + seqvar_map: HashMap::new(), + rehash_map: HashMap::new(), } } @@ -102,12 +129,12 @@ impl<'a> UnitBuilder<'a> { pub fn set_signature(&mut self, signature: ComplexTypeId) { self.unit.sig = signature; let ty = self.ctx.get_type(signature); - let (ins, outs) = match ty { + let (ins, _outs) = match ty { &ComplexType::Function(ref ins, ref outs, _) => (ins, outs), _ => panic!("Unit signatures are expected to be a Function.") }; self.unit.inputs = ins.iter().map(|_| Value::dummy()).collect(); - self.unit.outputs = outs.iter().map(|_| Value::dummy()).collect(); + self.unit.outputs = vec![]; } /// Once the signature is defined with `set_signature`, and a block is @@ -139,26 +166,47 @@ impl<'a> UnitBuilder<'a> { /// garded instructions. pub fn create_sequence(&mut self) -> SequenceIndex { self.unit.cfg.sequences.push(Sequence::new()); + self.seq_live_vars.push(BitSet::new()); SequenceIndex(self.unit.cfg.sequences.len() - 1) } /// Switch to a sequence of code, such that newly added instructions are /// going to be added to this block by default. pub fn switch_to_sequence(&mut self, seq: SequenceIndex) { - // TODO: Assert that the previous sequence is properly ended with a - // control instruction, and a number of successors matching the number - // the expected successors of the control instruction. self.sequence = Some(seq); + // Create Phi instructions for all live variables if needed and if all + // predecessors have a control instructions. + if self.unit.cfg.entry != seq { + self.create_seq_vars(); + } + } + + pub fn end_sequence(&mut self) { + // Assert that the previous sequence is properly ended with a control + // instruction, and a number of successors matching the number the + // expected successors of the control instruction. 
+ debug_assert!(!self.unit.cfg.sequences[self.current_sequence().0].control.is_dummy()); + self.sequence = None; + } + + fn current_sequence(&self) -> SequenceIndex { + self.sequence.expect(&"Should be between switch_to_sequence and end_sequence") + } + + /// Add an instruction to both the data flow graph and the control flow + /// sequence given as argument. + fn add_ins_in_seq(&mut self, ins: Instruction, si: SequenceIndex) -> Value { + debug_assert!(ins.is_phi() || self.unit.cfg.sequences[si.0].control.is_dummy()); + let value = self.dfg_add_ins(ins); + self.unit.cfg.sequences[si.0].sequence.push(value); + value } /// Add an instruction to both the data flow graph and the active sequence /// of the control flow graph. pub fn add_ins(&mut self, ins: Instruction) -> Value { - let value = self.dfg_add_ins(ins); - let SequenceIndex(index) = self.sequence.unwrap(); - debug_assert!(self.unit.cfg.sequences[index].control.is_dummy()); - self.unit.cfg.sequences[index].sequence.push(value); - value + let si = self.current_sequence(); + self.add_ins_in_seq(ins, si) } /// Add an instruction based on its opcode, operands and dependencies. @@ -183,7 +231,7 @@ impl<'a> UnitBuilder<'a> { let is_return = ins.opcode.is_return(); let value = self.dfg_add_ins(ins); { - let SequenceIndex(index) = self.sequence.unwrap(); + let SequenceIndex(index) = self.current_sequence(); let edit = &mut self.unit.cfg.sequences[index]; debug_assert!(edit.control.is_dummy()); edit.control = value; @@ -211,17 +259,20 @@ impl<'a> UnitBuilder<'a> { self.end_op_deps(opcode, operands, &[]) } - pub fn set_entry(&mut self) { + // Set the current sequence as being the entry point of the control flow + // graph. Also freeze the current sequence and assert that it has no + // predecessors. 
+ pub fn set_entry(&mut self, seq: SequenceIndex) { debug_assert!(self.unit.cfg.entry.is_dummy()); - let seq = self.sequence.unwrap(); self.unit.cfg.entry = seq; + debug_assert!(self.unit.cfg.sequences[seq.0].predecessors.is_empty()); self.freeze_sequence_predecessors(seq); } /// Set conditional branch. pub fn sequence_value_jump(&mut self, value: isize, seq: SequenceIndex) { debug_assert!(!self.frozen_seqs.contains(&seq)); - let SequenceIndex(index) = self.sequence.unwrap(); + let SequenceIndex(index) = self.current_sequence(); let succ_idx = { let edit = &mut self.unit.cfg.sequences[index]; edit.successors.push(seq); @@ -237,7 +288,7 @@ impl<'a> UnitBuilder<'a> { /// Set default branch. pub fn sequence_default_jump(&mut self, seq: SequenceIndex) { debug_assert!(!self.frozen_seqs.contains(&seq)); - let SequenceIndex(index) = self.sequence.unwrap(); + let SequenceIndex(index) = self.current_sequence(); let succ_idx = { let edit = &mut self.unit.cfg.sequences[index]; edit.successors.push(seq); @@ -253,7 +304,7 @@ impl<'a> UnitBuilder<'a> { /// Set unwind branch. pub fn sequence_unwind_jump(&mut self, seq: SequenceIndex) { debug_assert!(!self.frozen_seqs.contains(&seq)); - let SequenceIndex(index) = self.sequence.unwrap(); + let SequenceIndex(index) = self.current_sequence(); let succ_idx = { let edit = &mut self.unit.cfg.sequences[index]; edit.successors.push(seq); @@ -270,8 +321,182 @@ impl<'a> UnitBuilder<'a> { /// used for computing the automatic insertion of Phi instructions. pub fn freeze_sequence_predecessors(&mut self, seq: SequenceIndex) { self.frozen_seqs.insert(seq); + // If the sequence already got generated, then we inserted Rehash + // instruction for every live value. Call create_Seq_rehash_phis to set + // the Rehash operands to Phi instruction which are considering all + // predecessors. + if !self.unit.cfg.sequences[seq.0].control.is_dummy() { + self.create_seq_rehash_phis(seq) + } } + /// Allocate a new Variable. 
+ pub fn new_var(&mut self) -> Variable { + let id = self.nb_vars; + self.nb_vars += 1; + Variable(id) + } + + /// Within the current sequence, set the variable to a given value. + pub fn set_var(&mut self, var: Variable, val: Value) { + let SequenceIndex(index) = self.current_sequence(); + self.seq_live_vars[index].insert(var.0); + self.seqvar_map.insert((SequenceIndex(index), var), val); + } + + /// Within the current sequence, get the value corresponding to the given + /// variable. + pub fn use_var(&self, var: Variable) -> Value { + let si = self.current_sequence(); + debug_assert!(self.seq_live_vars[si.0].contains(&var.0)); + let val = self.seqvar_map.get(&(si, var)).expect(&"Variable is live in the current sequence, missing a set_var in predecessor sequences?"); + *val + } + + /// Declare the current variable as being no longer used. Calling this + /// function will avoid iterating over short-lived variables created with + /// set_var. + pub fn dead_var(&mut self, var: Variable) { + let SequenceIndex(index) = self.current_sequence(); + self.seq_live_vars[index].remove(&var.0); + } + + /// When switching to a new sequence, we look at all the predecessors to + /// resolve the Phi of live variables, and potentially to create rehash in + /// case of loops, where the list of predecessors is not yet known. + fn create_seq_vars(&mut self) { + let si = self.current_sequence(); + let create_phis = { + if self.frozen_seqs.contains(&si) { + // We know all the predecessors, check that they all have a + // control instruction. + self.unit.cfg.sequences[si.0].predecessors.iter() + .map(|&(pi, _)| pi).all( + |pseq| !self.unit.cfg.sequences[pseq.0].control.is_dummy() + ) + } else { + // We might add new predecessors, assume nothing, and use rehash + // instructions. 
+ false + } + }; + let live = self.entry_live_set(); + match create_phis { + true => self.create_seq_phis(&live), + false => self.create_seq_rehash(&live), + } + self.seq_live_vars[si.0] = live; + } + + /// Compute live variables as the intersection of all predecessors. + fn entry_live_set(&self) -> BitSet { + let si = self.current_sequence(); + let pred = &self.unit.cfg.sequences[si.0].predecessors; + debug_assert!(pred.len() >= 1); + let mut pred = pred.iter().map(|&(pred, _)| pred); + let first = match pred.next() { + None => return BitSet::new(), + Some(psi) => psi + }; + let mut live = self.seq_live_vars[first.0].clone(); + for psi in pred { + live = live.intersection(&self.seq_live_vars[psi.0]).cloned().collect(); + } + live + } + + /// All predecessors are known, create Phi instructions for all live + /// variables. + fn create_seq_phis(&mut self, live: &BitSet) { + // For each live variable add seqvar_map bindings. + let si = self.current_sequence(); + if self.unit.cfg.sequences[si.0].predecessors.len() == 1 { + // We have a single predecessor, inherit all the bindings without + // adding any Phi instruction. + let (psi, _) = self.unit.cfg.sequences[si.0].predecessors[0]; + for var in live { + let var = Variable(*var); + let val = *self.seqvar_map.get(&(psi, var)) + .expect(&"Impossible error: live set is the intersection of predecessors"); + self.seqvar_map.insert((si, var), val); + } + } else { + // For each variable, collect the corresponding value from each + // predecessor and add them as operands to a Phi instruction which + // itself corresponds to the value of the variable in the current + // sequence. + for var in live { + let var = Variable(*var); + let vals : Vec<_> = { + let pred = self.unit.cfg.sequences[si.0].predecessors.iter().map(|&(psi, _)| psi); + let vals = pred.map(|psi| *self.seqvar_map.get(&(psi, var)).unwrap()); + vals.collect() + }; + // Do not insert a Phi instruction if all the predecessors are + // identical. 
+ let val = if vals.iter().all(|v| v == &vals[0]) { + vals[0] + } else { + self.add_op(Opcode::Phi, &vals) + }; + self.seqvar_map.insert((si, var), val); + } + } + } + + /// Some predecessors might be added later, create Rehash instructions for + /// all live variables. + fn create_seq_rehash(&mut self, live: &BitSet) { + // For each variable known to be live, add a Rehash instruction with a + // dummy value operand. + let si = self.current_sequence(); + let mut rehash_list = Vec::with_capacity(live.len()); + for var in live { + let var = Variable(*var); + let op = self.ctx.get_rehash(); + let val = self.add_op(op, &[Value::dummy()]); + self.seqvar_map.insert((si, var), val); + rehash_list.push((var, val)); + } + self.rehash_map.insert(si, rehash_list); + } + + /// We finally freeze a sequence after having generated rehash instructions + /// in it. + fn create_seq_rehash_phis(&mut self, si: SequenceIndex) { + let rehash_list = self.rehash_map.remove(&si) + .expect(&"create_seq_rehash should be called before, and the current fucntion should only be called once per sequence."); + let mut inserted_phis = 0; + for (var, rehash) in rehash_list { + let vals : Vec<_> = { + let pred = self.unit.cfg.sequences[si.0].predecessors.iter().map(|&(psi, _)| psi); + let vals = pred.map(|psi| *self.seqvar_map.get(&(psi, var)).unwrap()); + vals.collect() + }; + // Do not insert a Phi instruction if all the predecessors are + // identical. + let val = if vals.iter().all(|v| v == &vals[0]) { + vals[0] + } else { + inserted_phis += 1; + self.add_ins_in_seq(Instruction { + opcode: Opcode::Phi, + operands: vals, + dependencies: vec![], + replaced_by: None, + }, si) + }; + + // Replace the Rehash operand by the newly computed value/phi. 
+ let rehash = &mut self.unit.dfg.instructions[rehash.index]; + debug_assert!(rehash.is_rehash()); + debug_assert!(rehash.operands.len() == 1); + debug_assert!(rehash.operands[0].is_dummy()); + rehash.operands[0] = val; + } + // Move the inserted phis to the beginning of the sequence. + self.unit.cfg.sequences[si.0].sequence.rotate_right(inserted_phis); + } /// Finalize and (TODO) assert that the generated Unit is valid. pub fn finish(self) -> Unit { diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index caf577e..ae5372d 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -264,6 +264,20 @@ impl Opcode { } } + pub fn is_phi(self) -> bool { + match self { + Opcode::Phi => true, + _ => false, + } + } + + pub fn is_rehash(self) -> bool { + match self { + Opcode::Rehash(_) => true, + _ => false, + } + } + pub fn result_type(self) -> ValueType { use self::Opcode::*; match self { @@ -317,12 +331,21 @@ impl Opcode { impl Instruction { pub fn is_control(&self) -> bool { self.opcode.is_control() } + pub fn is_phi(&self) -> bool { self.opcode.is_phi() } + pub fn is_rehash(&self) -> bool { self.opcode.is_rehash() } } impl Hash for Instruction { fn hash(&self, state: &mut H) { self.opcode.hash(state); - self.operands.hash(state); + if !self.is_rehash() { + // Rehash ignores the hash of its operands in order to properly + // handle loops without having circular computation of hashes. Note, + // we could use the replace_by field to implement Rehash based on + // Newhash, but we might want to have the ability to remove all + // replace_by fields when needed. + self.operands.hash(state); + } self.dependencies.hash(state); // Exclude self.replaced_by. 
} @@ -402,6 +425,38 @@ mod tests { assert_ne!(v1.hash, v2.hash); } + #[test] + fn rehash_ignore_operands() { + let mut df = DataFlow::new(); + let v0 = df.add_ins(Instruction { + opcode: Opcode::Const(number::NumberValue::U32(1024)), + operands: vec![], + dependencies: vec![], + replaced_by: None, + }); + let v1 = df.add_ins(Instruction { + opcode: Opcode::Const(number::NumberValue::U32(512)), + operands: vec![], + dependencies: vec![], + replaced_by: None, + }); + let v2 = df.add_ins(Instruction { + opcode: Opcode::Rehash(51), + operands: vec![v0], + dependencies: vec![], + replaced_by: None, + }); + let v3 = df.add_ins(Instruction { + opcode: Opcode::Rehash(51), + operands: vec![v1], + dependencies: vec![], + replaced_by: None, + }); + // Rehash opcode compute a hash which is independent of the operand. + // This is used to avoid loops in hashes computations. + assert_eq!(v2.hash, v3.hash); + } + #[test] fn stable_hash() { // TODO: We should add test cases like that for each instruction, to diff --git a/lir/src/lib.rs b/lir/src/lib.rs index 83941a7..298ae58 100644 --- a/lir/src/lib.rs +++ b/lir/src/lib.rs @@ -1,3 +1,4 @@ +#![feature(slice_rotate)] /// The LIR format is used for encoding the Rust code which would be optimized /// at compile time and the also at runtime. It should mostly be generated by /// the holyjit driver, and consumed by the holyjit compiler. @@ -40,3 +41,4 @@ pub mod data_flow; pub mod control_flow; pub mod context; pub mod builder; +mod bitset; From a10a5a988a7f162a8c8dfa5bb06fe13ede0ba40c Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sun, 7 Oct 2018 18:18:00 +0200 Subject: [PATCH 15/32] Update Readme roadmap section with the latest milestone. 
(v01 and v0.2) --- README.md | 50 ++++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 43d187c..500857c 100644 --- a/README.md +++ b/README.md @@ -108,30 +108,32 @@ currently at a prototype stage, and most of the code & dependencies present today were made only as a proof of concept and not as a definitive implementation design. -## HolyJit Roadmap for 0.0.0 +## [HolyJit Roadmap](https://github.com/nbp/holyjit/milestones) -The current goal is to make a proof of concept which highlights the main -feature, i.e. being trivial to integrate into an existing code base and to -have a running JIT compiler. +### v0.2.0: Unroll Interpreter loops (future) -As of today, HolyJit contains a draft of what the interface might look like, -and is able to generate code for the example present in the repository. +Interpreters are most of the time written as a loop over a byte-code. This goal +is about unrolling interpreter loops at JIT-compile time based on a limited set +of guarded/assumed constant inputs. + +At this stage: We should have a macro interfaces to annotate variables which +should be used as guarded or assumed constants. We should remove the dispatch +cost of an interpreter. + +### v0.1.0: Cranelift + LIR (in progress) + +The goal is to replace the quick and dirty solution by a solution with a proper +code generator and a proper intermediate representation. -- [x] Create Rust library - - [x] Allocate pages and map them as executable. - - [x] Add a way to call either a dynamically compiled function or a - statically compiled function. - - [x] Add a `jit!` macro, to make calls transparent from the usage point of - view. - - [x] Create a JitContext class, and use it to request JIT compiled code. - - [x] Create a graph representation. - - [x] Consume the graph to generate code. - -- [x] Create a MIR plugin - - [x] Detect locations which have to be patched. 
- - [x] Find functions which have to be converted. - - [x] Inject a generated vector in the binary content. - - [x] Inject static variables as a tuple. - - [x] Collect static variable references. - - [x] Convert the MIR (from the Rust compiler) to the library graph - representation. +At this stage, the LIR (Low-level Intermediate Represenation) should be +documented, generated by the Rustc driver and consumed to generate Cranelift IR. +The Cranelift IR should be used to generate machine code. + +### v0.0.0: Proof of concept + +This goal is to make a proof of concept which highlights the main feature, i.e. +being trivial to integrate into an existing code base and to have a running +dummy-JIT compiler. + +At this stage, HolyJit contains a draft of what the interface might look like, +and is able to generate code for the example present in the repository. From 1e9ed950acdb38a415b439e973bf962242234c54 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 20 Oct 2018 18:42:39 +0200 Subject: [PATCH 16/32] Generate code for carry and overflow flag for substraction --- codegen/src/lib.rs | 276 +++++++++++++++++++++++++++++++++++++++++++ codegen/src/lower.rs | 38 ++++-- 2 files changed, 303 insertions(+), 11 deletions(-) diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index c4798ef..d7c9133 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -465,6 +465,282 @@ mod tests { assert_eq!(add_carry(u64::max_value(), 1), true); } + #[test] + fn sub_overflow_i32_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::I32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_overflow : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_overflow(0, 0), false); + assert_eq!(sub_overflow(i32::min_value() + 1, 1), false); + assert_eq!(sub_overflow(i32::min_value(), 1), true); + assert_eq!(sub_overflow(i32::max_value(), -1), true); + } + + #[test] + fn sub_overflow_u32_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::U32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_overflow : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_overflow(0, 0), false); + assert_eq!(sub_overflow(u32::min_value() + 1, 1), true); + assert_eq!(sub_overflow(u32::min_value(), 1), false); + } + + #[test] + fn sub_overflow_i64_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::I64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_overflow : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_overflow(0, 0), false); + assert_eq!(sub_overflow(i64::min_value() + 1, 1), false); + assert_eq!(sub_overflow(i64::min_value(), 1), true); + assert_eq!(sub_overflow(i64::max_value(), -1), true); + } + + #[test] + fn sub_overflow_u64_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::U64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_overflow : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_overflow(0, 0), false); + assert_eq!(sub_overflow(u64::min_value() + 1, 1), true); + assert_eq!(sub_overflow(u64::min_value(), 1), false); + } + + #[test] + fn sub_carry_i32_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::I32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_carry : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_carry(0, 0), false); + assert_eq!(sub_carry(i32::min_value() + 1, 1), false); + assert_eq!(sub_carry(i32::min_value(), 1), false); + assert_eq!(sub_carry(i32::max_value(), -1), true); + } + + #[test] + fn sub_carry_u32_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::U32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_carry : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_carry(0, 0), false); + assert_eq!(sub_carry(u32::min_value() + 1, 1), false); + assert_eq!(sub_carry(u32::min_value(), 1), true); + } + + #[test] + fn sub_carry_i64_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::I64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_carry : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_carry(0, 0), false); + assert_eq!(sub_carry(i64::min_value() + 1, 1), false); + assert_eq!(sub_carry(i64::min_value(), 1), false); + assert_eq!(sub_carry(i64::max_value(), -1), true); + } + + #[test] + fn sub_carry_u64_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::U64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_carry : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_carry(0, 0), false); + assert_eq!(sub_carry(u64::min_value() + 1, 1), false); + assert_eq!(sub_carry(u64::min_value(), 1), true); + } + #[test] fn sum_loop_test() { let mut ctx_bld = ContextBuilder::new(); diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index c5bef62..d3e5c7e 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -298,7 +298,7 @@ impl<'a> ConvertCtx<'a> { (i, Some(ov), None) => { let res = bld.ins().iadd(a0, a1); bld.def_var(res_var, res); - // of = ((a0 | a1) ^ (a0 + a1)) > (max >> 1) + // of = ((a0 | a1) ^ (a0 + a1)) >= sign_mask let bor = bld.ins().bor(a0, a1); let xor = bld.ins().bxor(bor, res); let sign_mask = self.sign_mask(i); @@ -309,7 +309,7 @@ impl<'a> ConvertCtx<'a> { let (res, carry) = bld.ins().iadd_cout(a0, a1); bld.def_var(res_var, res); bld.def_var(*cv, carry); - // of = ((a0 | a1) ^ (a0 + a1)) > (max >> 1) + // of = ((a0 | a1) ^ (a0 + a1)) >= sign_mask let bor = bld.ins().bor(a0, a1); let xor = bld.ins().bxor(bor, res); let sign_mask = self.sign_mask(i); @@ -319,9 +319,6 @@ impl<'a> ConvertCtx<'a> { }; } Sub(n) => { - // TODO: If any overflow/carry flag depends on this instruction, we - // should 
change the encoding of this instruction to emit a carry - // bits. let a0 = bld.use_var(Variable::new(ins.operands[0].index)); let a1 = bld.use_var(Variable::new(ins.operands[1].index)); let (of, cf) = (self.overflow_map.get(&val), @@ -339,14 +336,33 @@ impl<'a> ConvertCtx<'a> { let res = bld.ins().isub(a0, a1); bld.def_var(res_var, res); } - (_, None, Some(_cv)) => { - unimplemented!(); + (_, None, Some(cv)) => { + let (res, borrow) = bld.ins().isub_bout(a0, a1); + bld.def_var(res_var, res); + bld.def_var(*cv, borrow); } - (_i, Some(_ov), None) => { - unimplemented!(); + (i, Some(ov), None) => { + let res = bld.ins().isub(a0, a1); + bld.def_var(res_var, res); + // of = ((a0 | ~(a1 - 1)) ^ (a0 - a1)) >= sign_mask + let sign_mask = self.sign_mask(i) as u64; + let v2 = bld.ins().iadd_imm(a1, Imm64::new((sign_mask - 1 | sign_mask) as i64)); + let bor = bld.ins().bor_not(a0, v2); + let xor = bld.ins().bxor(bor, res); + let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xor, Imm64::new(sign_mask as i64)); + bld.def_var(*ov, cmp); } - (_i, Some(_ov), Some(_cv)) => { - unimplemented!(); + (i, Some(ov), Some(cv)) => { + let (res, borrow) = bld.ins().isub_bout(a0, a1); + bld.def_var(res_var, res); + bld.def_var(*cv, borrow); + // of = ((a0 | ~(a1 - 1)) ^ (a0 - a1)) >= sign_mask + let sign_mask = self.sign_mask(i) as u64; + let v2 = bld.ins().iadd_imm(a1, Imm64::new((sign_mask - 1 | sign_mask) as i64)); + let bor = bld.ins().bor_not(a0, v2); + let xor = bld.ins().bxor(bor, res); + let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xor, Imm64::new(sign_mask as i64)); + bld.def_var(*ov, cmp); } }; }, From a52764146c1981a733754d4251d3cce99b43d33f Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 20 Oct 2018 19:28:34 +0200 Subject: [PATCH 17/32] Move tests from condegen/src/lib.rs to codegen/tests directory. 
--- codegen/src/lib.rs | 727 ---------------------------------- codegen/tests/add.rs | 326 +++++++++++++++ codegen/tests/control_flow.rs | 134 +++++++ codegen/tests/sub.rs | 287 ++++++++++++++ 4 files changed, 747 insertions(+), 727 deletions(-) create mode 100644 codegen/tests/add.rs create mode 100644 codegen/tests/control_flow.rs create mode 100644 codegen/tests/sub.rs diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index d7c9133..43fd884 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -79,730 +79,3 @@ impl JitCode { self.code.as_ptr() } } - -#[cfg(test)] -mod tests { - use super::*; - use std::mem; - - use lir::unit::*; - use lir::data_flow::*; - use lir::number::*; - use lir::builder::*; - use lir::types::*; - - - #[test] - fn create_code_generator() { - let _cg = CodeGenerator::new(); - assert!(true); - } - - #[test] - fn add1_test() { - let mut ctx_bld = ContextBuilder::new(); - let add1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. 
- let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let v0 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); - let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v0]); - bld.end_op(Opcode::Return, &[v1]); - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &add1_unit).unwrap(); - let add1 : fn(i32) -> i32 = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(add1(-5), -4); - assert_eq!(add1(12), 13); - } - - #[test] - fn round_odd_up_test() { - let mut ctx_bld = ContextBuilder::new(); - let round_odd_up_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); // test x % 2 == 0 - let s1 = bld.create_sequence(); // x += 1 - let s2 = bld.create_sequence(); // return x - - // [sequence 0] - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let v0 = bld.add_op(Opcode::Const(NumberValue::I32(2)), &[]); - let v1 = bld.add_op(Opcode::Rem(SignedType::I32), &[a0, v0]); - bld.end_op(Opcode::Switch(SwitchData { low: 0, high: 1 }), &[v1]); - bld.sequence_value_jump(0, s2); - bld.sequence_value_jump(1, s1); - - // [sequence 1] - bld.freeze_sequence_predecessors(s1); - bld.switch_to_sequence(s1); - let v2 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); - let v3 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v2]); - bld.end_op(Opcode::Goto, &[]); - bld.sequence_default_jump(s2); - - // [sequence 2] - 
bld.freeze_sequence_predecessors(s2); - bld.switch_to_sequence(s2); - let v4 = bld.add_op(Opcode::Phi, &[a0, v3]); - bld.end_op(Opcode::Return, &[v4]); - - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &round_odd_up_unit).unwrap(); - let round_odd_up : fn(i32) -> i32 = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(round_odd_up(0), 0); - assert_eq!(round_odd_up(9654), 9654); - assert_eq!(round_odd_up(1618033), 1618034); - assert_eq!(round_odd_up(-5), -4); - } - - #[test] - fn add_overflow_i32_test() { - let mut ctx_bld = ContextBuilder::new(); - let add1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &add1_unit).unwrap(); - let add_overflow : fn(i32, i32) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(add_overflow(0, 0), false); - assert_eq!(add_overflow(-1, 1), true); - assert_eq!(add_overflow(i32::max_value() - 1, 1), false); - assert_eq!(add_overflow(i32::max_value(), 1), true); - assert_eq!(add_overflow(i32::min_value(), -1), true); - } - - #[test] - fn add_overflow_u32_test() { - let mut ctx_bld = ContextBuilder::new(); - let add1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut 
ctx_bld); - // Add the function signature. - let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &add1_unit).unwrap(); - let add_overflow : fn(u32, u32) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(add_overflow(0, 0), false); - assert_eq!(add_overflow(u32::max_value() - 1, 1), false); - assert_eq!(add_overflow(u32::max_value() >> 1, 1), true); - assert_eq!(add_overflow(u32::max_value(), 1), true); - } - - #[test] - fn add_overflow_i64_test() { - let mut ctx_bld = ContextBuilder::new(); - let add1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &add1_unit).unwrap(); - let add_overflow : fn(i64, i64) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(add_overflow(0, 0), false); - assert_eq!(add_overflow(-1, 1), true); - assert_eq!(add_overflow(i64::max_value() - 1, 1), false); - assert_eq!(add_overflow(i64::max_value(), 1), true); - assert_eq!(add_overflow(i64::min_value(), -1), true); - } - - #[test] - fn add_overflow_u64_test() { - let mut ctx_bld = ContextBuilder::new(); - let add1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &add1_unit).unwrap(); - let add_overflow : fn(u64, u64) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(add_overflow(0, 0), false); - assert_eq!(add_overflow(u64::max_value() - 1, 1), false); - assert_eq!(add_overflow(u64::max_value() >> 1, 1), true); - assert_eq!(add_overflow(u64::max_value(), 1), true); - } - - #[test] - fn add_carry_i32_test() { - let mut ctx_bld = ContextBuilder::new(); - let add1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. 
- let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &add1_unit).unwrap(); - let add_carry : fn(i32, i32) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(add_carry(0, 0), false); - assert_eq!(add_carry(-1, 1), true); - assert_eq!(add_carry(i32::max_value() - 1, 1), false); - assert_eq!(add_carry(i32::max_value(), 1), false); - assert_eq!(add_carry(i32::min_value(), -1), true); - } - - #[test] - fn add_carry_u32_test() { - let mut ctx_bld = ContextBuilder::new(); - let add1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. 
- let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &add1_unit).unwrap(); - let add_carry : fn(u32, u32) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(add_carry(0, 0), false); - assert_eq!(add_carry(u32::max_value() - 1, 1), false); - assert_eq!(add_carry(u32::max_value() >> 1, 1), false); - assert_eq!(add_carry(u32::max_value(), 1), true); - } - - #[test] - fn add_carry_i64_test() { - let mut ctx_bld = ContextBuilder::new(); - let add1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &add1_unit).unwrap(); - let add_carry : fn(i64, i64) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(add_carry(0, 0), false); - assert_eq!(add_carry(-1, 1), true); - assert_eq!(add_carry(i64::max_value() - 1, 1), false); - assert_eq!(add_carry(i64::max_value(), 1), false); - assert_eq!(add_carry(i64::min_value(), -1), true); - } - - #[test] - fn add_carry_u64_test() { - let mut ctx_bld = ContextBuilder::new(); - let add1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &add1_unit).unwrap(); - let add_carry : fn(u64, u64) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(add_carry(0, 0), false); - assert_eq!(add_carry(u64::max_value() - 1, 1), false); - assert_eq!(add_carry(u64::max_value() >> 1, 1), false); - assert_eq!(add_carry(u64::max_value(), 1), true); - } - - #[test] - fn sub_overflow_i32_test() { - let mut ctx_bld = ContextBuilder::new(); - let sub1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Sub the function signature. 
- let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Sub(NumberType::I32), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &sub1_unit).unwrap(); - let sub_overflow : fn(i32, i32) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(sub_overflow(0, 0), false); - assert_eq!(sub_overflow(i32::min_value() + 1, 1), false); - assert_eq!(sub_overflow(i32::min_value(), 1), true); - assert_eq!(sub_overflow(i32::max_value(), -1), true); - } - - #[test] - fn sub_overflow_u32_test() { - let mut ctx_bld = ContextBuilder::new(); - let sub1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Sub the function signature. 
- let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Sub(NumberType::U32), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &sub1_unit).unwrap(); - let sub_overflow : fn(u32, u32) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(sub_overflow(0, 0), false); - assert_eq!(sub_overflow(u32::min_value() + 1, 1), true); - assert_eq!(sub_overflow(u32::min_value(), 1), false); - } - - #[test] - fn sub_overflow_i64_test() { - let mut ctx_bld = ContextBuilder::new(); - let sub1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Sub the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Sub(NumberType::I64), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &sub1_unit).unwrap(); - let sub_overflow : fn(i64, i64) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(sub_overflow(0, 0), false); - assert_eq!(sub_overflow(i64::min_value() + 1, 1), false); - assert_eq!(sub_overflow(i64::min_value(), 1), true); - assert_eq!(sub_overflow(i64::max_value(), -1), true); - } - - #[test] - fn sub_overflow_u64_test() { - let mut ctx_bld = ContextBuilder::new(); - let sub1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Sub the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Sub(NumberType::U64), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &sub1_unit).unwrap(); - let sub_overflow : fn(u64, u64) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(sub_overflow(0, 0), false); - assert_eq!(sub_overflow(u64::min_value() + 1, 1), true); - assert_eq!(sub_overflow(u64::min_value(), 1), false); - } - - #[test] - fn sub_carry_i32_test() { - let mut ctx_bld = ContextBuilder::new(); - let sub1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Sub the function signature. 
- let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Sub(NumberType::I32), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &sub1_unit).unwrap(); - let sub_carry : fn(i32, i32) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(sub_carry(0, 0), false); - assert_eq!(sub_carry(i32::min_value() + 1, 1), false); - assert_eq!(sub_carry(i32::min_value(), 1), false); - assert_eq!(sub_carry(i32::max_value(), -1), true); - } - - #[test] - fn sub_carry_u32_test() { - let mut ctx_bld = ContextBuilder::new(); - let sub1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Sub the function signature. 
- let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Sub(NumberType::U32), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &sub1_unit).unwrap(); - let sub_carry : fn(u32, u32) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(sub_carry(0, 0), false); - assert_eq!(sub_carry(u32::min_value() + 1, 1), false); - assert_eq!(sub_carry(u32::min_value(), 1), true); - } - - #[test] - fn sub_carry_i64_test() { - let mut ctx_bld = ContextBuilder::new(); - let sub1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Sub the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Sub(NumberType::I64), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &sub1_unit).unwrap(); - let sub_carry : fn(i64, i64) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(sub_carry(0, 0), false); - assert_eq!(sub_carry(i64::min_value() + 1, 1), false); - assert_eq!(sub_carry(i64::min_value(), 1), false); - assert_eq!(sub_carry(i64::max_value(), -1), true); - } - - #[test] - fn sub_carry_u64_test() { - let mut ctx_bld = ContextBuilder::new(); - let sub1_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Sub the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); - bld.set_signature(t_sig); - let s0 = bld.create_sequence(); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - let a1 = bld.unit_arg(1); - let v1 = bld.add_op(Opcode::Sub(NumberType::U64), &[a0, a1]); - let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); - bld.end_op(Opcode::Return, &[v2]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &sub1_unit).unwrap(); - let sub_carry : fn(u64, u64) -> bool = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(sub_carry(0, 0), false); - assert_eq!(sub_carry(u64::min_value() + 1, 1), false); - assert_eq!(sub_carry(u64::min_value(), 1), true); - } - - #[test] - fn sum_loop_test() { - let mut ctx_bld = ContextBuilder::new(); - let sum_unit = { - let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); - // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64], vec![t_u64], CanUnwind(true))); - let i = bld.new_var(); - let accu = bld.new_var(); - - let s0 = bld.create_sequence(); - let s1 = bld.create_sequence(); - let s2 = bld.create_sequence(); - - bld.set_signature(t_sig); - { - bld.set_entry(s0); - bld.switch_to_sequence(s0); - let a0 = bld.unit_arg(0); - bld.set_var(i, a0); - let v0 = bld.add_op(Opcode::Const(NumberValue::U64(0)), &[]); - bld.set_var(accu, v0); - bld.end_op(Opcode::Switch(SwitchData { low: 0, high: 0 }), &[a0]); - bld.sequence_value_jump(0, s2); - bld.sequence_default_jump(s1); - } - { - bld.switch_to_sequence(s1); - let v0 = bld.add_op(Opcode::Const(NumberValue::U64(1)), &[]); - let v1 = bld.use_var(i); - let v2 = bld.add_op(Opcode::Sub(NumberType::U64), &[v1, v0]); - bld.set_var(i, v2); - let v3 = bld.use_var(accu); - let v4 = bld.add_op(Opcode::Add(NumberType::U64), &[v1, v3]); - bld.set_var(accu, v4); - bld.end_op(Opcode::Switch(SwitchData { low: 0, high: 0 }), &[v2]); - bld.sequence_value_jump(0, s2); - bld.sequence_default_jump(s1); - bld.freeze_sequence_predecessors(s1); - } - { - bld.freeze_sequence_predecessors(s2); - bld.switch_to_sequence(s2); - bld.dead_var(i); - let v0 = bld.use_var(accu); - bld.dead_var(accu); - bld.end_op(Opcode::Return, &[v0]) - } - bld.finish() - }; - let ctx = ctx_bld.finish(); - - let mut cg = CodeGenerator::new(); - let code = cg.compile(&ctx, &sum_unit).unwrap(); - let sum : fn(u64) -> u64 = unsafe { - mem::transmute(code.as_ptr()) - }; - assert_eq!(sum(0), 0); - assert_eq!(sum(1), 1); - assert_eq!(sum(2), 3); - assert_eq!(sum(3), 6); - assert_eq!(sum(4), 10); - } -} diff --git a/codegen/tests/add.rs b/codegen/tests/add.rs new file mode 100644 index 0000000..0911e58 --- /dev/null +++ b/codegen/tests/add.rs @@ -0,0 +1,326 @@ +extern crate holyjit_codegen as codegen; +extern crate holyjit_lir as lir; + +use codegen::*; +use 
std::mem; + +use lir::unit::*; +use lir::data_flow::*; +use lir::number::*; +use lir::builder::*; +use lir::types::*; + +#[test] +fn add1_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); + let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v0]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add1 : fn(i32) -> i32 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add1(-5), -4); + assert_eq!(add1(12), 13); +} + +#[test] +fn add_overflow_i32_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_overflow : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_overflow(0, 0), false); + assert_eq!(add_overflow(-1, 1), true); + assert_eq!(add_overflow(i32::max_value() - 1, 1), false); + assert_eq!(add_overflow(i32::max_value(), 1), true); + assert_eq!(add_overflow(i32::min_value(), -1), true); +} + +#[test] +fn add_overflow_u32_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_overflow : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_overflow(0, 0), false); + assert_eq!(add_overflow(u32::max_value() - 1, 1), false); + assert_eq!(add_overflow(u32::max_value() >> 1, 1), true); + assert_eq!(add_overflow(u32::max_value(), 1), true); +} + +#[test] +fn add_overflow_i64_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_overflow : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_overflow(0, 0), false); + assert_eq!(add_overflow(-1, 1), true); + assert_eq!(add_overflow(i64::max_value() - 1, 1), false); + assert_eq!(add_overflow(i64::max_value(), 1), true); + assert_eq!(add_overflow(i64::min_value(), -1), true); +} + +#[test] +fn add_overflow_u64_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_overflow : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_overflow(0, 0), false); + assert_eq!(add_overflow(u64::max_value() - 1, 1), false); + assert_eq!(add_overflow(u64::max_value() >> 1, 1), true); + assert_eq!(add_overflow(u64::max_value(), 1), true); +} + +#[test] +fn add_carry_i32_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_carry : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_carry(0, 0), false); + assert_eq!(add_carry(-1, 1), true); + assert_eq!(add_carry(i32::max_value() - 1, 1), false); + assert_eq!(add_carry(i32::max_value(), 1), false); + assert_eq!(add_carry(i32::min_value(), -1), true); +} + +#[test] +fn add_carry_u32_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::U32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_carry : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_carry(0, 0), false); + assert_eq!(add_carry(u32::max_value() - 1, 1), false); + assert_eq!(add_carry(u32::max_value() >> 1, 1), false); + assert_eq!(add_carry(u32::max_value(), 1), true); +} + +#[test] +fn add_carry_i64_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::I64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_carry : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_carry(0, 0), false); + assert_eq!(add_carry(-1, 1), true); + assert_eq!(add_carry(i64::max_value() - 1, 1), false); + assert_eq!(add_carry(i64::max_value(), 1), false); + assert_eq!(add_carry(i64::min_value(), -1), true); +} + +#[test] +fn add_carry_u64_test() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Add(NumberType::U64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let add_carry : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(add_carry(0, 0), false); + assert_eq!(add_carry(u64::max_value() - 1, 1), false); + assert_eq!(add_carry(u64::max_value() >> 1, 1), false); + assert_eq!(add_carry(u64::max_value(), 1), true); +} diff --git a/codegen/tests/control_flow.rs b/codegen/tests/control_flow.rs new file mode 100644 index 0000000..a816113 --- /dev/null +++ b/codegen/tests/control_flow.rs @@ -0,0 +1,134 @@ +extern crate holyjit_codegen as codegen; +extern crate holyjit_lir as lir; + +use codegen::*; +use std::mem; + +use lir::unit::*; +use lir::data_flow::*; +use lir::number::*; +use lir::builder::*; +use lir::types::*; + +#[test] +fn create_code_generator() { + let _cg = CodeGenerator::new(); + assert!(true); +} + +#[test] +fn round_odd_up_test() { + let mut ctx_bld = ContextBuilder::new(); + let round_odd_up_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); // test x % 2 == 0 + let s1 = bld.create_sequence(); // x += 1 + let s2 = bld.create_sequence(); // return x + + // [sequence 0] + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Const(NumberValue::I32(2)), &[]); + let v1 = bld.add_op(Opcode::Rem(SignedType::I32), &[a0, v0]); + bld.end_op(Opcode::Switch(SwitchData { low: 0, high: 1 }), &[v1]); + bld.sequence_value_jump(0, s2); + bld.sequence_value_jump(1, s1); + + // [sequence 1] + bld.freeze_sequence_predecessors(s1); + bld.switch_to_sequence(s1); + let v2 = bld.add_op(Opcode::Const(NumberValue::I32(1)), &[]); + let v3 = bld.add_op(Opcode::Add(NumberType::I32), &[a0, v2]); + bld.end_op(Opcode::Goto, &[]); + bld.sequence_default_jump(s2); + + // [sequence 2] + bld.freeze_sequence_predecessors(s2); + bld.switch_to_sequence(s2); + let v4 = bld.add_op(Opcode::Phi, &[a0, v3]); + bld.end_op(Opcode::Return, &[v4]); + + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &round_odd_up_unit).unwrap(); + let round_odd_up : fn(i32) -> i32 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(round_odd_up(0), 0); + assert_eq!(round_odd_up(9654), 9654); + assert_eq!(round_odd_up(1618033), 1618034); + assert_eq!(round_odd_up(-5), -4); +} + +#[test] +fn sum_loop_test() { + let mut ctx_bld = ContextBuilder::new(); + let sum_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64], vec![t_u64], CanUnwind(true))); + let i = bld.new_var(); + let accu = bld.new_var(); + + let s0 = bld.create_sequence(); + let s1 = bld.create_sequence(); + let s2 = bld.create_sequence(); + + bld.set_signature(t_sig); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + bld.set_var(i, a0); + let v0 = bld.add_op(Opcode::Const(NumberValue::U64(0)), &[]); + bld.set_var(accu, v0); + bld.end_op(Opcode::Switch(SwitchData { low: 0, high: 0 }), &[a0]); + bld.sequence_value_jump(0, s2); + bld.sequence_default_jump(s1); + } + { + bld.switch_to_sequence(s1); + let v0 = bld.add_op(Opcode::Const(NumberValue::U64(1)), &[]); + let v1 = bld.use_var(i); + let v2 = bld.add_op(Opcode::Sub(NumberType::U64), &[v1, v0]); + bld.set_var(i, v2); + let v3 = bld.use_var(accu); + let v4 = bld.add_op(Opcode::Add(NumberType::U64), &[v1, v3]); + bld.set_var(accu, v4); + bld.end_op(Opcode::Switch(SwitchData { low: 0, high: 0 }), &[v2]); + bld.sequence_value_jump(0, s2); + bld.sequence_default_jump(s1); + bld.freeze_sequence_predecessors(s1); + } + { + bld.freeze_sequence_predecessors(s2); + bld.switch_to_sequence(s2); + bld.dead_var(i); + let v0 = bld.use_var(accu); + bld.dead_var(accu); + bld.end_op(Opcode::Return, &[v0]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sum_unit).unwrap(); + let sum : fn(u64) -> u64 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sum(0), 0); + assert_eq!(sum(1), 1); + assert_eq!(sum(2), 3); + assert_eq!(sum(3), 6); + assert_eq!(sum(4), 10); +} diff --git a/codegen/tests/sub.rs b/codegen/tests/sub.rs new file mode 100644 index 0000000..012079d --- /dev/null +++ b/codegen/tests/sub.rs @@ -0,0 +1,287 @@ +extern crate holyjit_codegen as codegen; +extern crate holyjit_lir as lir; + +use codegen::*; +use 
std::mem; + +use lir::unit::*; +use lir::data_flow::*; +use lir::number::*; +use lir::builder::*; +use lir::types::*; + +#[test] +fn sub_overflow_i32_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. + let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::I32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_overflow : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_overflow(0, 0), false); + assert_eq!(sub_overflow(i32::min_value() + 1, 1), false); + assert_eq!(sub_overflow(i32::min_value(), 1), true); + assert_eq!(sub_overflow(i32::max_value(), -1), true); +} + +#[test] +fn sub_overflow_u32_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::U32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_overflow : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_overflow(0, 0), false); + assert_eq!(sub_overflow(u32::min_value() + 1, 1), true); + assert_eq!(sub_overflow(u32::min_value(), 1), false); +} + +#[test] +fn sub_overflow_i64_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::I64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_overflow : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_overflow(0, 0), false); + assert_eq!(sub_overflow(i64::min_value() + 1, 1), false); + assert_eq!(sub_overflow(i64::min_value(), 1), true); + assert_eq!(sub_overflow(i64::max_value(), -1), true); +} + +#[test] +fn sub_overflow_u64_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::U64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::OverflowFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_overflow : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_overflow(0, 0), false); + assert_eq!(sub_overflow(u64::min_value() + 1, 1), true); + assert_eq!(sub_overflow(u64::min_value(), 1), false); +} + +#[test] +fn sub_carry_i32_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::I32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_carry : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_carry(0, 0), false); + assert_eq!(sub_carry(i32::min_value() + 1, 1), false); + assert_eq!(sub_carry(i32::min_value(), 1), false); + assert_eq!(sub_carry(i32::max_value(), -1), true); +} + +#[test] +fn sub_carry_u32_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::U32), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_carry : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_carry(0, 0), false); + assert_eq!(sub_carry(u32::min_value() + 1, 1), false); + assert_eq!(sub_carry(u32::min_value(), 1), true); +} + +#[test] +fn sub_carry_i64_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::I64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_carry : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_carry(0, 0), false); + assert_eq!(sub_carry(i64::min_value() + 1, 1), false); + assert_eq!(sub_carry(i64::min_value(), 1), false); + assert_eq!(sub_carry(i64::max_value(), -1), true); +} + +#[test] +fn sub_carry_u64_test() { + let mut ctx_bld = ContextBuilder::new(); + let sub1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Sub the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Sub(NumberType::U64), &[a0, a1]); + let v2 = bld.add_op_deps(Opcode::CarryFlag, &[], &[v1]); + bld.end_op(Opcode::Return, &[v2]) + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &sub1_unit).unwrap(); + let sub_carry : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(sub_carry(0, 0), false); + assert_eq!(sub_carry(u64::min_value() + 1, 1), false); + assert_eq!(sub_carry(u64::min_value(), 1), true); +} From 5a075bc022f73c8e31b249b66bfb1de023789da8 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 20 Oct 2018 23:46:46 +0200 Subject: [PATCH 18/32] Fix Add/Sub overflow computation, and check results against rust library. 
--- codegen/src/lower.rs | 42 ++++++++++--------- codegen/tests/add.rs | 87 +++++++++++++++++++++++----------------- codegen/tests/lib/mod.rs | 41 +++++++++++++++++++ codegen/tests/sub.rs | 75 +++++++++++++++++++++++----------- lir/src/data_flow.rs | 6 ++- 5 files changed, 169 insertions(+), 82 deletions(-) create mode 100644 codegen/tests/lib/mod.rs diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index d3e5c7e..3c5fbfb 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -298,22 +298,24 @@ impl<'a> ConvertCtx<'a> { (i, Some(ov), None) => { let res = bld.ins().iadd(a0, a1); bld.def_var(res_var, res); - // of = ((a0 | a1) ^ (a0 + a1)) >= sign_mask - let bor = bld.ins().bor(a0, a1); - let xor = bld.ins().bxor(bor, res); + // of = (a0 ^ res) & (a1 ^ res) >= sign_mask + let x0 = bld.ins().bxor(res, a0); + let x1 = bld.ins().bxor(res, a1); + let xx = bld.ins().band(x0, x1); let sign_mask = self.sign_mask(i); - let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xor, Imm64::new(sign_mask as i64)); + let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xx, Imm64::new(sign_mask as i64)); bld.def_var(*ov, cmp); } (i, Some(ov), Some(cv)) => { let (res, carry) = bld.ins().iadd_cout(a0, a1); bld.def_var(res_var, res); bld.def_var(*cv, carry); - // of = ((a0 | a1) ^ (a0 + a1)) >= sign_mask - let bor = bld.ins().bor(a0, a1); - let xor = bld.ins().bxor(bor, res); + // of = (a0 ^ res) & (a1 ^ res) >= sign_mask + let x0 = bld.ins().bxor(res, a0); + let x1 = bld.ins().bxor(res, a1); + let xx = bld.ins().band(x0, x1); let sign_mask = self.sign_mask(i); - let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xor, Imm64::new(sign_mask as i64)); + let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xx, Imm64::new(sign_mask as i64)); bld.def_var(*ov, cmp); } }; @@ -344,24 +346,24 @@ impl<'a> ConvertCtx<'a> { (i, Some(ov), None) => { let res = bld.ins().isub(a0, a1); bld.def_var(res_var, res); - // of = ((a0 | ~(a1 - 1)) ^ 
(a0 - a1)) >= sign_mask - let sign_mask = self.sign_mask(i) as u64; - let v2 = bld.ins().iadd_imm(a1, Imm64::new((sign_mask - 1 | sign_mask) as i64)); - let bor = bld.ins().bor_not(a0, v2); - let xor = bld.ins().bxor(bor, res); - let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xor, Imm64::new(sign_mask as i64)); + // of = (a0 ^ res) & (~a1 ^ res) >= sign_mask + let x0 = bld.ins().bxor(res, a0); + let x1 = bld.ins().bxor_not(res, a1); + let xx = bld.ins().band(x0, x1); + let sign_mask = self.sign_mask(i); + let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xx, Imm64::new(sign_mask as i64)); bld.def_var(*ov, cmp); } (i, Some(ov), Some(cv)) => { let (res, borrow) = bld.ins().isub_bout(a0, a1); bld.def_var(res_var, res); bld.def_var(*cv, borrow); - // of = ((a0 | ~(a1 - 1)) ^ (a0 - a1)) >= sign_mask - let sign_mask = self.sign_mask(i) as u64; - let v2 = bld.ins().iadd_imm(a1, Imm64::new((sign_mask - 1 | sign_mask) as i64)); - let bor = bld.ins().bor_not(a0, v2); - let xor = bld.ins().bxor(bor, res); - let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xor, Imm64::new(sign_mask as i64)); + // of = (a0 ^ res) & (~a1 ^ res) >= sign_mask + let x0 = bld.ins().bxor(res, a0); + let x1 = bld.ins().bxor_not(res, a1); + let xx = bld.ins().band(x0, x1); + let sign_mask = self.sign_mask(i); + let cmp = bld.ins().icmp_imm(IntCC::UnsignedGreaterThanOrEqual, xx, Imm64::new(sign_mask as i64)); bld.def_var(*ov, cmp); } }; diff --git a/codegen/tests/add.rs b/codegen/tests/add.rs index 0911e58..2f4883e 100644 --- a/codegen/tests/add.rs +++ b/codegen/tests/add.rs @@ -10,6 +10,9 @@ use lir::number::*; use lir::builder::*; use lir::types::*; +mod lib; +use lib::*; + #[test] fn add1_test() { let mut ctx_bld = ContextBuilder::new(); @@ -70,11 +73,12 @@ fn add_overflow_i32_test() { let add_overflow : fn(i32, i32) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(add_overflow(0, 0), false); - assert_eq!(add_overflow(-1, 1), true); - 
assert_eq!(add_overflow(i32::max_value() - 1, 1), false); - assert_eq!(add_overflow(i32::max_value(), 1), true); - assert_eq!(add_overflow(i32::min_value(), -1), true); + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("add_overflow({}, {}) == {}", i, j, add_overflow(i, j)); + assert_eq!(add_overflow(i, j), i.overflowing_add(j).1) + } + } } #[test] @@ -106,10 +110,12 @@ fn add_overflow_u32_test() { let add_overflow : fn(u32, u32) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(add_overflow(0, 0), false); - assert_eq!(add_overflow(u32::max_value() - 1, 1), false); - assert_eq!(add_overflow(u32::max_value() >> 1, 1), true); - assert_eq!(add_overflow(u32::max_value(), 1), true); + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("add_overflow({}, {}) == {}", i, j, add_overflow(i, j)); + assert_eq!(add_overflow(i, j), (i as i32).overflowing_add(j as i32).1) + } + } } #[test] @@ -141,11 +147,12 @@ fn add_overflow_i64_test() { let add_overflow : fn(i64, i64) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(add_overflow(0, 0), false); - assert_eq!(add_overflow(-1, 1), true); - assert_eq!(add_overflow(i64::max_value() - 1, 1), false); - assert_eq!(add_overflow(i64::max_value(), 1), true); - assert_eq!(add_overflow(i64::min_value(), -1), true); + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("add_overflow({}, {}) == {}", i, j, add_overflow(i, j)); + assert_eq!(add_overflow(i, j), i.overflowing_add(j).1) + } + } } #[test] @@ -177,10 +184,12 @@ fn add_overflow_u64_test() { let add_overflow : fn(u64, u64) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(add_overflow(0, 0), false); - assert_eq!(add_overflow(u64::max_value() - 1, 1), false); - assert_eq!(add_overflow(u64::max_value() >> 1, 1), true); - assert_eq!(add_overflow(u64::max_value(), 1), true); + for i in u64_values().into_iter() { + for j in 
u64_values().into_iter() { + println!("add_overflow({}, {}) == {}", i, j, add_overflow(i, j)); + assert_eq!(add_overflow(i, j), (i as i64).overflowing_add(j as i64).1) + } + } } #[test] @@ -212,11 +221,12 @@ fn add_carry_i32_test() { let add_carry : fn(i32, i32) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(add_carry(0, 0), false); - assert_eq!(add_carry(-1, 1), true); - assert_eq!(add_carry(i32::max_value() - 1, 1), false); - assert_eq!(add_carry(i32::max_value(), 1), false); - assert_eq!(add_carry(i32::min_value(), -1), true); + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("add_carry({}, {}) == {}", i, j, add_carry(i, j)); + assert_eq!(add_carry(i, j), (i as u32).overflowing_add(j as u32).1) + } + } } #[test] @@ -248,10 +258,12 @@ fn add_carry_u32_test() { let add_carry : fn(u32, u32) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(add_carry(0, 0), false); - assert_eq!(add_carry(u32::max_value() - 1, 1), false); - assert_eq!(add_carry(u32::max_value() >> 1, 1), false); - assert_eq!(add_carry(u32::max_value(), 1), true); + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("add_carry({}, {}) == {}", i, j, add_carry(i, j)); + assert_eq!(add_carry(i, j), i.overflowing_add(j).1) + } + } } #[test] @@ -283,11 +295,12 @@ fn add_carry_i64_test() { let add_carry : fn(i64, i64) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(add_carry(0, 0), false); - assert_eq!(add_carry(-1, 1), true); - assert_eq!(add_carry(i64::max_value() - 1, 1), false); - assert_eq!(add_carry(i64::max_value(), 1), false); - assert_eq!(add_carry(i64::min_value(), -1), true); + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("add_carry({}, {}) == {}", i, j, add_carry(i, j)); + assert_eq!(add_carry(i, j), (i as u64).overflowing_add(j as u64).1) + } + } } #[test] @@ -319,8 +332,10 @@ fn add_carry_u64_test() { let add_carry : fn(u64, u64) -> 
bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(add_carry(0, 0), false); - assert_eq!(add_carry(u64::max_value() - 1, 1), false); - assert_eq!(add_carry(u64::max_value() >> 1, 1), false); - assert_eq!(add_carry(u64::max_value(), 1), true); + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("add_carry({}, {}) == {}", i, j, add_carry(i, j)); + assert_eq!(add_carry(i, j), i.overflowing_add(j).1) + } + } } diff --git a/codegen/tests/lib/mod.rs b/codegen/tests/lib/mod.rs new file mode 100644 index 0000000..c99244a --- /dev/null +++ b/codegen/tests/lib/mod.rs @@ -0,0 +1,41 @@ +/// Iterate over interesting u32 numbers. +pub fn u32_values() -> Vec { + let mut res = vec![u32::min_value(), u32::max_value()]; + for i in [0u32, 1, 2, 29, 30, 31].into_iter() { + res.push(1u32 << i); + for j in [0u32, 1, 2, 29, 30, 31].into_iter() { + res.push((1u32 << i).wrapping_sub(1u32 << j)); + res.push((1u32 << i).wrapping_add(1u32 << j)); + } + res.push((1u32 << i).wrapping_neg()); + } + res.sort(); + res.dedup(); + res +} + +/// Iterate over interesting i32 numbers. +pub fn i32_values() -> Vec { + u32_values().into_iter().map(|x| x as i32).collect() +} + +/// Iterate over interesting u64 numbers. 
+pub fn u64_values() -> Vec { + let mut res = vec![u64::min_value(), u64::max_value()]; + for i in [0u64, 1, 2, 61, 62, 63].into_iter() { + res.push(1u64 << i); + for j in [0u64, 1, 2, 61, 62, 63].into_iter() { + res.push((1u64 << i).wrapping_sub(1u64 << j)); + res.push((1u64 << i).wrapping_add(1u64 << j)); + } + res.push((1u64 << i).wrapping_neg()); + } + res.sort(); + res.dedup(); + res +} + +pub fn i64_values() -> Vec { + u64_values().into_iter().map(|x| x as i64).collect() +} + diff --git a/codegen/tests/sub.rs b/codegen/tests/sub.rs index 012079d..7174489 100644 --- a/codegen/tests/sub.rs +++ b/codegen/tests/sub.rs @@ -10,6 +10,9 @@ use lir::number::*; use lir::builder::*; use lir::types::*; +mod lib; +use lib::*; + #[test] fn sub_overflow_i32_test() { let mut ctx_bld = ContextBuilder::new(); @@ -39,10 +42,12 @@ fn sub_overflow_i32_test() { let sub_overflow : fn(i32, i32) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(sub_overflow(0, 0), false); - assert_eq!(sub_overflow(i32::min_value() + 1, 1), false); - assert_eq!(sub_overflow(i32::min_value(), 1), true); - assert_eq!(sub_overflow(i32::max_value(), -1), true); + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("sub_overflow({}, {}) == {}", i, j, sub_overflow(i, j)); + assert_eq!(sub_overflow(i, j), i.overflowing_sub(j).1) + } + } } #[test] @@ -74,9 +79,12 @@ fn sub_overflow_u32_test() { let sub_overflow : fn(u32, u32) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(sub_overflow(0, 0), false); - assert_eq!(sub_overflow(u32::min_value() + 1, 1), true); - assert_eq!(sub_overflow(u32::min_value(), 1), false); + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("sub_overflow({}, {}) == {}", i, j, sub_overflow(i, j)); + assert_eq!(sub_overflow(i, j), (i as i32).overflowing_sub(j as i32).1) + } + } } #[test] @@ -108,10 +116,12 @@ fn sub_overflow_i64_test() { let sub_overflow : fn(i64, i64) -> bool = unsafe { 
mem::transmute(code.as_ptr()) }; - assert_eq!(sub_overflow(0, 0), false); - assert_eq!(sub_overflow(i64::min_value() + 1, 1), false); - assert_eq!(sub_overflow(i64::min_value(), 1), true); - assert_eq!(sub_overflow(i64::max_value(), -1), true); + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("sub_overflow({}, {}) == {}", i, j, sub_overflow(i, j)); + assert_eq!(sub_overflow(i, j), i.overflowing_sub(j).1) + } + } } #[test] @@ -143,9 +153,12 @@ fn sub_overflow_u64_test() { let sub_overflow : fn(u64, u64) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(sub_overflow(0, 0), false); - assert_eq!(sub_overflow(u64::min_value() + 1, 1), true); - assert_eq!(sub_overflow(u64::min_value(), 1), false); + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("sub_overflow({}, {}) == {}", i, j, sub_overflow(i, j)); + assert_eq!(sub_overflow(i, j), (i as i64).overflowing_sub(j as i64).1) + } + } } #[test] @@ -177,10 +190,12 @@ fn sub_carry_i32_test() { let sub_carry : fn(i32, i32) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(sub_carry(0, 0), false); - assert_eq!(sub_carry(i32::min_value() + 1, 1), false); - assert_eq!(sub_carry(i32::min_value(), 1), false); - assert_eq!(sub_carry(i32::max_value(), -1), true); + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("sub_carry({}, {}) == {}", i, j, sub_carry(i, j)); + assert_eq!(sub_carry(i, j), (i as u32).overflowing_sub(j as u32).1) + } + } } #[test] @@ -212,9 +227,12 @@ fn sub_carry_u32_test() { let sub_carry : fn(u32, u32) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(sub_carry(0, 0), false); - assert_eq!(sub_carry(u32::min_value() + 1, 1), false); - assert_eq!(sub_carry(u32::min_value(), 1), true); + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("sub_carry({}, {}) == {}", i, j, sub_carry(i, j)); + assert_eq!(sub_carry(i, j), 
i.overflowing_sub(j).1) + } + } } #[test] @@ -246,6 +264,12 @@ fn sub_carry_i64_test() { let sub_carry : fn(i64, i64) -> bool = unsafe { mem::transmute(code.as_ptr()) }; + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("sub_carry({}, {}) == {}", i, j, sub_carry(i, j)); + assert_eq!(sub_carry(i, j), (i as u64).overflowing_sub(j as u64).1) + } + } assert_eq!(sub_carry(0, 0), false); assert_eq!(sub_carry(i64::min_value() + 1, 1), false); assert_eq!(sub_carry(i64::min_value(), 1), false); @@ -281,7 +305,10 @@ fn sub_carry_u64_test() { let sub_carry : fn(u64, u64) -> bool = unsafe { mem::transmute(code.as_ptr()) }; - assert_eq!(sub_carry(0, 0), false); - assert_eq!(sub_carry(u64::min_value() + 1, 1), false); - assert_eq!(sub_carry(u64::min_value(), 1), true); + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("sub_carry({}, {}) == {}", i, j, sub_carry(i, j)); + assert_eq!(sub_carry(i, j), i.overflowing_sub(j).1) + } + } } diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index ae5372d..b7c157c 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -90,10 +90,12 @@ pub enum Opcode { Cast(ComplexTypeId), /// Extract overflow flag from the operation on which this instruction - /// depends on. (0 operand, 1 dependency) + /// depends on. The overflow flag indicate an overflow/underflow in signed + /// addition and substraction. (0 operand, 1 dependency) OverflowFlag, /// Extract carry flag from the operation on which this instruction depends - /// on. (0 operand, 1 dependency) + /// on. The carry flag indicate an overflow/underflow in unsigned + /// additions and substraction. (0 operand, 1 dependency) CarryFlag, /// Addition. (2 operands) From 93d6de3cdaf5676c0c8b7f53358649232bd8262e Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 27 Oct 2018 16:30:15 +0200 Subject: [PATCH 19/32] Generate code for load and store instructions. 
--- codegen/src/lower.rs | 18 +- codegen/tests/lib/mod.rs | 13 + codegen/tests/load-store.rs | 542 ++++++++++++++++++++++++++++++++++++ 3 files changed, 570 insertions(+), 3 deletions(-) create mode 100644 codegen/tests/load-store.rs diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 3c5fbfb..f2e76f7 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use frontend::{FunctionBuilderContext, FunctionBuilder, Variable}; use codegen::entity::EntityRef; -use codegen::ir::{Ebb, ExternalName, Function, Signature, AbiParam, InstBuilder, TrapCode}; +use codegen::ir::{Ebb, ExternalName, Function, Signature, AbiParam, InstBuilder, TrapCode, MemFlags}; use codegen::ir::immediates::{Ieee32, Ieee64, Imm64}; use codegen::ir::condcodes::IntCC; use codegen::ir::types::*; @@ -400,8 +400,20 @@ impl<'a> ConvertCtx<'a> { StaticAddress | Address => unimplemented!(), CPUAddress => unimplemented!(), - Load(_) => unimplemented!(), - Store(_ty) => unimplemented!(), + Load(ty) => { + let addr = bld.use_var(Variable::new(ins.operands[0].index)); + let mut mf = MemFlags::new(); + mf.set_notrap(); + let res = bld.ins().load(self.cltype(ty)?, mf, addr, 0); + bld.def_var(res_var, res); + } + Store(_ty) => { + let addr = bld.use_var(Variable::new(ins.operands[0].index)); + let val = bld.use_var(Variable::new(ins.operands[1].index)); + let mut mf = MemFlags::new(); + mf.set_notrap(); + bld.ins().store(mf, val, addr, 0); + } LoadFenceLoad | LoadFenceStore | StoreFenceLoad | diff --git a/codegen/tests/lib/mod.rs b/codegen/tests/lib/mod.rs index c99244a..84faf2b 100644 --- a/codegen/tests/lib/mod.rs +++ b/codegen/tests/lib/mod.rs @@ -1,3 +1,7 @@ +#![allow(dead_code)] +use std::mem::size_of; +use lir::number::*; + /// Iterate over interesting u32 numbers. 
pub fn u32_values() -> Vec { let mut res = vec![u32::min_value(), u32::max_value()]; @@ -39,3 +43,12 @@ pub fn i64_values() -> Vec { u64_values().into_iter().map(|x| x as i64).collect() } +pub fn addr_type() -> NumberType { + match size_of::() { + 1 => NumberType::U8, + 2 => NumberType::U16, + 4 => NumberType::U32, + 8 => NumberType::U64, + _ => panic!("Pointer size is not yet supported") + } +} diff --git a/codegen/tests/load-store.rs b/codegen/tests/load-store.rs new file mode 100644 index 0000000..84e6fb4 --- /dev/null +++ b/codegen/tests/load-store.rs @@ -0,0 +1,542 @@ +extern crate holyjit_codegen as codegen; +extern crate holyjit_lir as lir; + +use codegen::*; +use std::mem; + +use lir::unit::*; +use lir::data_flow::*; +use lir::number::*; +use lir::builder::*; +use lir::types::*; + +mod lib; +use lib::*; + +#[test] +fn load_u8() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_u8], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Load(t_u8), &[a0]); + bld.end_op(Opcode::Return, &[v0]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&u8) -> u8 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(&u8::min_value()), u8::min_value()); + assert_eq!(f(&u8::max_value()), u8::max_value()); +} + +#[test] +fn load_u16() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_u16 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U16)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_u16], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Load(t_u16), &[a0]); + bld.end_op(Opcode::Return, &[v0]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&u16) -> u16 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(&u16::min_value()), u16::min_value()); + assert_eq!(f(&u16::max_value()), u16::max_value()); +} + +#[test] +fn load_u32() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_u32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Load(t_u32), &[a0]); + bld.end_op(Opcode::Return, &[v0]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&u32) -> u32 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(&u32::min_value()), u32::min_value()); + assert_eq!(f(&u32::max_value()), u32::max_value()); +} + +#[test] +fn load_u64() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_u64], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Load(t_u64), &[a0]); + bld.end_op(Opcode::Return, &[v0]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&u64) -> u64 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(&u64::min_value()), u64::min_value()); + assert_eq!(f(&u64::max_value()), u64::max_value()); +} + +#[test] +fn load_i8() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I8)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_i8], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Load(t_i8), &[a0]); + bld.end_op(Opcode::Return, &[v0]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&i8) -> i8 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(&i8::min_value()), i8::min_value()); + assert_eq!(f(&i8::max_value()), i8::max_value()); +} + +#[test] +fn load_i16() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_i16 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I16)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_i16], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Load(t_i16), &[a0]); + bld.end_op(Opcode::Return, &[v0]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&i16) -> i16 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(&i16::min_value()), i16::min_value()); + assert_eq!(f(&i16::max_value()), i16::max_value()); +} + +#[test] +fn load_i32() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_i32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Load(t_i32), &[a0]); + bld.end_op(Opcode::Return, &[v0]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&i32) -> i32 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(&i32::min_value()), i32::min_value()); + assert_eq!(f(&i32::max_value()), i32::max_value()); +} + +#[test] +fn load_i64() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_i64], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v0 = bld.add_op(Opcode::Load(t_i64), &[a0]); + bld.end_op(Opcode::Return, &[v0]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&i64) -> i64 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(&i64::min_value()), i64::min_value()); + assert_eq!(f(&i64::max_value()), i64::max_value()); +} + +#[test] +fn store_u8() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_u8], vec![], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + bld.add_op(Opcode::Store(t_u8), &[a0, a1]); + bld.end_op(Opcode::Return, &[]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&mut u8, u8) = unsafe { + mem::transmute(code.as_ptr()) + }; + let mut val : u8 = 0; + f(&mut val, u8::min_value()); + assert_eq!(val, u8::min_value()); + f(&mut val, u8::max_value()); + assert_eq!(val, u8::max_value()); +} + +#[test] +fn store_u16() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u16 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U16)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_u16], vec![], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + bld.add_op(Opcode::Store(t_u16), &[a0, a1]); + bld.end_op(Opcode::Return, &[]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&mut u16, u16) = unsafe { + mem::transmute(code.as_ptr()) + }; + let mut val : u16 = 0; + f(&mut val, u16::min_value()); + assert_eq!(val, u16::min_value()); + f(&mut val, u16::max_value()); + assert_eq!(val, u16::max_value()); +} + +#[test] +fn store_u32() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_u32], vec![], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + bld.add_op(Opcode::Store(t_u32), &[a0, a1]); + bld.end_op(Opcode::Return, &[]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&mut u32, u32) = unsafe { + mem::transmute(code.as_ptr()) + }; + let mut val : u32 = 0; + f(&mut val, u32::min_value()); + assert_eq!(val, u32::min_value()); + f(&mut val, u32::max_value()); + assert_eq!(val, u32::max_value()); +} + +#[test] +fn store_u64() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_u64], vec![], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + bld.add_op(Opcode::Store(t_u64), &[a0, a1]); + bld.end_op(Opcode::Return, &[]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&mut u64, u64) = unsafe { + mem::transmute(code.as_ptr()) + }; + let mut val : u64 = 0; + f(&mut val, u64::min_value()); + assert_eq!(val, u64::min_value()); + f(&mut val, u64::max_value()); + assert_eq!(val, u64::max_value()); +} + +#[test] +fn store_i8() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I8)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_i8], vec![], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + bld.add_op(Opcode::Store(t_i8), &[a0, a1]); + bld.end_op(Opcode::Return, &[]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&mut i8, i8) = unsafe { + mem::transmute(code.as_ptr()) + }; + let mut val : i8 = 0; + f(&mut val, i8::min_value()); + assert_eq!(val, i8::min_value()); + f(&mut val, i8::max_value()); + assert_eq!(val, i8::max_value()); +} + +#[test] +fn store_i16() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i16 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I16)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_i16], vec![], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + bld.add_op(Opcode::Store(t_i16), &[a0, a1]); + bld.end_op(Opcode::Return, &[]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&mut i16, i16) = unsafe { + mem::transmute(code.as_ptr()) + }; + let mut val : i16 = 0; + f(&mut val, i16::min_value()); + assert_eq!(val, i16::min_value()); + f(&mut val, i16::max_value()); + assert_eq!(val, i16::max_value()); +} + +#[test] +fn store_i32() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_i32], vec![], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + bld.add_op(Opcode::Store(t_i32), &[a0, a1]); + bld.end_op(Opcode::Return, &[]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&mut i32, i32) = unsafe { + mem::transmute(code.as_ptr()) + }; + let mut val : i32 = 0; + f(&mut val, i32::min_value()); + assert_eq!(val, i32::min_value()); + f(&mut val, i32::max_value()); + assert_eq!(val, i32::max_value()); +} + +#[test] +fn store_i64() { + let mut ctx_bld = ContextBuilder::new(); + let add1_unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_i64], vec![], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + bld.add_op(Opcode::Store(t_i64), &[a0, a1]); + bld.end_op(Opcode::Return, &[]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &add1_unit).unwrap(); + let f : fn(&mut i64, i64) = unsafe { + mem::transmute(code.as_ptr()) + }; + let mut val : i64 = 0; + f(&mut val, i64::min_value()); + assert_eq!(val, i64::min_value()); + f(&mut val, i64::max_value()); + assert_eq!(val, i64::max_value()); +} From ffe89ce5548b0b3427c58aca5a8bc37e31d45937 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 3 Nov 2018 18:33:10 +0100 Subject: [PATCH 20/32] Fix result type of Load and Store instructions. --- lir/src/data_flow.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index b7c157c..98b14d1 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -311,10 +311,10 @@ impl Opcode { Eq(_) | Lt(_) | Le(_) | Ne(_) | Gt(_) | Ge(_) => ValueType::Boolean, StaticAddress | - Address => ValueType::Pointer, - CPUAddress => ValueType::None, - Load(_) => ValueType::None, - Store(ty) => ValueType::Complex(ty), + Address | + CPUAddress => ValueType::Pointer, + Load(ty) => ValueType::Complex(ty), + Store(_ty) => ValueType::None, LoadFenceLoad | LoadFenceStore | StoreFenceLoad | From e6c072fe187a3714360466583f98c7c1b4de3730 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 3 Nov 2018 18:34:29 +0100 Subject: [PATCH 21/32] Records static references on the context and generate code for StaticAddress instructions. 
--- codegen/src/lower.rs | 14 ++++- codegen/tests/static-address.rs | 90 +++++++++++++++++++++++++++++++++ lir/src/builder.rs | 29 +++++++++++ lir/src/context.rs | 60 ++++++++++++++++++++++ lir/src/number.rs | 18 +++++++ 5 files changed, 210 insertions(+), 1 deletion(-) create mode 100644 codegen/tests/static-address.rs diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index f2e76f7..be719e3 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -397,7 +397,19 @@ impl<'a> ConvertCtx<'a> { ShiftRight(_i) => unimplemented!(), Eq(_) | Lt(_) | Le(_) | Ne(_) | Gt(_) | Ge(_) => unimplemented!(), - StaticAddress | + StaticAddress => { + let refs = self.ctx.get_static_refs_address() as i64; + let res = match mem::size_of::() { + 1 => bld.ins().iconst(types::I8, refs), + 2 => bld.ins().iconst(types::I16, refs), + 4 => bld.ins().iconst(types::I32, refs), + 8 => bld.ins().iconst(types::I64, refs), + // TODO: Panic with a documented error code, or an explicit message + // explaining how to fix this issue. + _ => panic!("Pointer size is not yet supported") + }; + bld.def_var(res_var, res); + } Address => unimplemented!(), CPUAddress => unimplemented!(), Load(ty) => { diff --git a/codegen/tests/static-address.rs b/codegen/tests/static-address.rs new file mode 100644 index 0000000..29e65b3 --- /dev/null +++ b/codegen/tests/static-address.rs @@ -0,0 +1,90 @@ +extern crate holyjit_codegen as codegen; +extern crate holyjit_lir as lir; + +use codegen::*; +use std::mem; + +use lir::unit::*; +use lir::data_flow::*; +use lir::number::*; +use lir::builder::*; +use lir::types::*; + +mod lib; +use lib::*; + +// Emulate what is expected to be generated by HolyJIT for resolving symbols +// addresses, such as functions. 
+const CST : u8 = 42; +const JIT_REFS : (u8, &'static u8) = (51, &CST); + +#[test] +fn load_first_static_field() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![], vec![t_u8], CanUnwind(true))); + let ref0 = bld.ctx().add_typed_ref::(); + let _ref1 = bld.ctx().add_typed_ref::<&u8>(); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let v0 = bld.add_op(Opcode::StaticAddress, &[]); + let v1 = bld.add_op(Opcode::Const((addr_type(), ref0).into()), &[]); + let v2 = bld.add_op(Opcode::Add(addr_type()), &[v0, v1]); + let v3 = bld.add_op(Opcode::Load(t_u8), &[v2]); + bld.end_op(Opcode::Return, &[v3]); + } + bld.finish() + }; + let mut ctx = ctx_bld.finish(); + ctx.set_static_refs(&JIT_REFS); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn() -> u8 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(), JIT_REFS.0); +} + +#[test] +fn load_second_static_field() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); + let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![], vec![t_u8], CanUnwind(true))); + let _ref0 = bld.ctx().add_typed_ref::(); + let ref1 = bld.ctx().add_typed_ref::<&u8>(); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let v0 = bld.add_op(Opcode::StaticAddress, &[]); + let v1 = bld.add_op(Opcode::Const((addr_type(), ref1).into()), &[]); + let v2 = bld.add_op(Opcode::Add(addr_type()), &[v0, v1]); + let v3 = bld.add_op(Opcode::Load(t_ptr), &[v2]); + let v4 = bld.add_op(Opcode::Load(t_u8), &[v3]); + bld.end_op(Opcode::Return, &[v4]); + } + bld.finish() + }; + let mut ctx = ctx_bld.finish(); + ctx.set_static_refs(&JIT_REFS); + + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn() -> u8 = unsafe { + mem::transmute(code.as_ptr()) + }; + assert_eq!(f(), *JIT_REFS.1); +} + diff --git a/lir/src/builder.rs b/lir/src/builder.rs index 67b7ca1..427a7d8 100644 --- a/lir/src/builder.rs +++ b/lir/src/builder.rs @@ -2,6 +2,7 @@ /// flow graph and its control flow graph. use std::collections::{HashMap, HashSet}; +use std::mem::{align_of, size_of}; use unit::{Unit, UnitId}; use data_flow::{Instruction, Opcode, Value}; @@ -97,6 +98,34 @@ impl ContextBuilder { self.ctx.get_type(id) } + /// Records the position of a static reference in the tuple with which this + /// context is expected to be intialized with. Any references added to the + /// context with this function should also be mirrored in the tuple which + /// would be used to initialize the Context at runtime. + /// + /// Based on the alignment and size of the added value, this function + /// reserves space and returns the offset of the StaticAddress from which + /// the tuple value can be read. 
+    pub fn add_untyped_ref(&mut self, align: usize, size: usize) -> usize {
+        let rest = self.ctx.expected_refs_size % align;
+        let padding = if rest == 0 { 0 } else { align - rest };
+        let base_offset = self.ctx.expected_refs_size + padding;
+        self.ctx.expected_refs_size = base_offset + size;
+        base_offset
+    }
+
+    /// Records the position of a static reference in the tuple with which this
+    /// context is expected to be initialized. Any references added to the
+    /// context with this function should also be mirrored in the tuple which
+    /// would be used to initialize the Context at runtime.
+    ///
+    /// Based on the type of the added value, this function reserves space and
+    /// returns the offset of the StaticAddress from which the tuple value can
+    /// be read.
+    pub fn add_typed_ref<T>(&mut self) -> usize {
+        self.add_untyped_ref(align_of::<T>(), size_of::<T>())
+    }
+
     /// Finalize and return the context which hold the type information of
     /// multiple Units.
     pub fn finish(self) -> context::Context {
diff --git a/lir/src/context.rs b/lir/src/context.rs
index bb68ce8..d01b1e3 100644
--- a/lir/src/context.rs
+++ b/lir/src/context.rs
@@ -1,5 +1,11 @@
+use std::{ptr, mem};
 use types::{ComplexType, ComplexTypeId};
 
+// Pointer to the static memory. Note, this is not a u8 slice because this
+// contains a heterogeneous list of references, symbols and values which might
+// be of different types and sizes.
+type StaticStorage = *const ();
+
 /// A context is a structure which centralize all the data necessary for the
 /// execution of any Unit. It holds the collection of complex types, and any
 /// counter related to having unique identifiers.
@@ -12,6 +18,24 @@ pub struct Context {
     /// This vector holds the list of types references by all Unit associated to
     /// this context. Any ComplexTypeId is an index in this Vector.
     types: Vec<ComplexType>,
+
+    /// If any, this is the pointer to the memory which contains static
+    /// information filled by the static compiler with all the symbol references
+    /// or values. This field should be set with the function
+    /// `set_static_refs` on the constructed or deserialized Context. Once set,
+    /// it is not allowed to change. Attempting to build any unit without
+    /// setting this value will cause a compilation error if the Unit uses any
+    /// StaticAddress-es.
+    refs_ptr: StaticStorage,
+
+    /// When a Context is created with the ContextBuilder, this field is mutated
+    /// to account for the expected size of the type which contains all the
+    /// references to symbols compiled by the static compiler (LLVM backend). As
+    /// the refs_ptr value cannot be deserialized, it has to be initialized at
+    /// runtime, and as such this field is used to ensure that we are not going
+    /// to do any out-of-bounds memory read when reading StaticAddress-es.
+    // TODO: Remove unnecessary public fields by moving the Context builder to this file.
+    pub expected_refs_size: usize,
 }
 
 impl Context {
@@ -21,6 +45,8 @@ impl Context {
         Context {
             wrapper_seed: 0,
             types: vec![],
+            refs_ptr: ptr::null(),
+            expected_refs_size: 0,
         }
     }
@@ -45,4 +71,38 @@ impl Context {
         let ComplexTypeId(index) = id;
         &self.types[index]
     }
+
+    /// Register the constant tuple of references which are used by all units
+    /// which are built with this context. This function is only allowed to be
+    /// called once per Context; calling it more than once would cause this
+    /// function to panic.
+    pub fn set_static_refs<T>(&mut self, refs: &'static T) {
+        let refs = refs as *const _ as *const();
+        if self.refs_ptr != ptr::null() {
+            // TODO: Panic with a documented error code, or an explicit message
+            // explaining how to fix this issue.
+            panic!("set_static_refs can only be called once per context.")
+        }
+        if self.expected_refs_size != mem::size_of::<T>() {
+            // TODO: Panic with a documented error code, or an explicit message
+            // explaining how to fix this issue.
+            panic!("set_static_refs called with a tuple of unexpected size.")
+        }
+        self.refs_ptr = refs;
+    }
+
+    /// Return the pointer to the list of symbol references or values as an
+    /// unsigned value which should be used for converting StaticAddress-es
+    /// opcodes.
+    ///
+    /// This function panics if `set_static_refs` has not been called before
+    /// calling this function.
+    pub fn get_static_refs_address(&self) -> usize {
+        if self.refs_ptr == ptr::null() {
+            // TODO: Panic with a documented error code, or an explicit message
+            // explaining how to fix this issue.
+            panic!("set_static_refs was not called when initializing the Context.")
+        }
+        self.refs_ptr as usize
+    }
 }
diff --git a/lir/src/number.rs b/lir/src/number.rs
index b545e7a..ea0dc05 100644
--- a/lir/src/number.rs
+++ b/lir/src/number.rs
@@ -110,3 +110,21 @@ impl Hash for NumberValue {
     }
 }
 
+impl From<(NumberType, usize)> for NumberValue {
+    fn from(input: (NumberType, usize)) -> NumberValue {
+        let val = input.1;
+        match input.0 {
+            NumberType::B1 => NumberValue::B1(val != 0),
+            NumberType::U8 => NumberValue::U8(val as u8),
+            NumberType::U16 => NumberValue::U16(val as u16),
+            NumberType::U32 => NumberValue::U32(val as u32),
+            NumberType::U64 => NumberValue::U64(val as u64),
+            NumberType::I8 => NumberValue::I8(val as i8),
+            NumberType::I16 => NumberValue::I16(val as i16),
+            NumberType::I32 => NumberValue::I32(val as i32),
+            NumberType::I64 => NumberValue::I64(val as i64),
+            NumberType::F32 => NumberValue::F32(val as f32),
+            NumberType::F64 => NumberValue::F64(val as f64),
+        }
+    }
+}
From bb7f00f8b1d5af52cf59f928083736cc7be8d409 Mon Sep 17 00:00:00 2001
From: "Nicolas B.
Pierron" Date: Fri, 23 Nov 2018 17:47:19 +0100 Subject: [PATCH 22/32] Generate code for calling function, with multiple arguments and return values. --- bin/src/main.rs | 5 ++ codegen/src/lower.rs | 126 +++++++++++++++++++++++++++++----- codegen/tests/add.rs | 16 ++--- codegen/tests/control_flow.rs | 2 +- codegen/tests/sub.rs | 16 ++--- lir/src/builder.rs | 10 +-- lir/src/data_flow.rs | 14 +++- lir/src/types.rs | 3 +- 8 files changed, 149 insertions(+), 43 deletions(-) diff --git a/bin/src/main.rs b/bin/src/main.rs index 74e7c63..2a35a9d 100644 --- a/bin/src/main.rs +++ b/bin/src/main.rs @@ -183,6 +183,11 @@ pub fn main() { .collect() }; + // TODO: HolyJIT does not yet supports unwinding as this would require + // generating ELF code with eh_frame section, to encode the block address + // where to resume in case of unwinding. TODO: In the mean time we should + // enforce usage of "-C panic=abort", and tell the user about it to avoid + // surprises. let holyjit_enabled = true; let mut holyjit_cc = HolyJitCompilerCalls::new(holyjit_enabled); rustc_driver::run(move || { diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index be719e3..6cdde18 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use frontend::{FunctionBuilderContext, FunctionBuilder, Variable}; use codegen::entity::EntityRef; -use codegen::ir::{Ebb, ExternalName, Function, Signature, AbiParam, InstBuilder, TrapCode, MemFlags}; +use codegen::ir::{Ebb, ExternalName, Function, Signature, AbiParam, InstBuilder, TrapCode, MemFlags, SigRef}; use codegen::ir::immediates::{Ieee32, Ieee64, Imm64}; use codegen::ir::condcodes::IntCC; use codegen::ir::types::*; @@ -35,6 +35,8 @@ struct ConvertCtx<'a> { pub overflow_map: HashMap, /// Map the math operation to the carry variable. pub carry_map: HashMap, + /// Map the unit, call or callunit to its list of Nth accessors. 
+ pub nth_map: HashMap>, } type OptVarType = Option<(Variable, types::Type)>; @@ -78,10 +80,14 @@ impl<'a> ConvertCtx<'a> { } } + /// From any scalar type, returns the Cranelift AbiParam to use when + /// building a function signature. fn abiparam(&self, ty: ComplexTypeId) -> LowerResult { Ok(AbiParam::new(self.cltype(ty)?)) } + /// From a complex type id representing a function signature, return the + /// list of inputs and outputs which are corresponding to this signature. fn signature_io(&self, sig: ComplexTypeId) -> LowerResult<(&'a Vec, &'a Vec)> { let ty = self.ctx.get_type(sig); match ty { @@ -92,8 +98,8 @@ impl<'a> ConvertCtx<'a> { /// Unit have a signature expressed as a type, we have to convert this signature /// into simpler types understood by Cranelift. - fn signature(&self) -> LowerResult { - let (ins, outs) = self.signature_io(self.unit.sig)?; + fn signature(&self, sig: ComplexTypeId) -> LowerResult { + let (ins, outs) = self.signature_io(sig)?; // At the moment, assume that all Units are going to be called with Rust // calling convention. @@ -108,6 +114,20 @@ impl<'a> ConvertCtx<'a> { Ok(sig) } + /// Returns the Cranelift Signature of the compiled Unit. + fn unit_signature(&self) -> LowerResult { + self.signature(self.unit.sig) + } + + /// Calls to an external function implies that we have to import the + /// function signature, and annotate the call instruction with the function + /// signature index. This function returns the signature reference expected + /// by the call_indirect instruction of Cranelift. + fn sigref(&self, bld: &mut FunctionBuilder, sig: ComplexTypeId) -> LowerResult { + let sig = self.signature(sig)?; + Ok(bld.import_signature(sig)) + } + // Generate external name indexes based on the UnitId. 
fn external_name(&self, id: UnitId) -> ExternalName { let (d, i) = match id { @@ -147,7 +167,11 @@ impl<'a> ConvertCtx<'a> { match out.len() { 0 => continue, 1 => self.cltype(out[0])?, - _ => unimplemented!(), + // For cases which are larger than 1, we do not have any + // simple types. This is resolved with the addition of + // the Nth opcode, which would split the output tuple + // and extract the type of each opcode. + _ => continue, } } ValueType::InheritFromOperands | @@ -160,7 +184,7 @@ impl<'a> ConvertCtx<'a> { // Record Overflow and Carry dependency and map it to overflow / // carry variable. Note, that we expect to have only a single // overflow and carry per math operation. TODO: To handle multiple - // overflow flag and carry flag, should create new variable that + // overflow flag and carry flag, we should create new variable that // would be copied on overflow/carry encoding. let ins_res = match ins.opcode { Opcode::OverflowFlag => self.overflow_map.insert(ins.dependencies[0], v), @@ -184,22 +208,51 @@ impl<'a> ConvertCtx<'a> { ValueType::InheritFromOperands => (), _ => continue, }; - // In case of a fix-point just skip already types instructions. + // In case of a fix-point just skip instructions which already + // have a type. match types[index] { Some(_) => continue, None => (), }; - // Pick the first operand type and assign it to the current - // instruction. - assert!(ins.operands.len() >= 1); - let ty = match types[ins.operands[0].index] { - Some((_, ty)) => ty, - None => { - nb_unknown_types += 1; - continue - }, - }; + // Based on the opcode, record and infer the type inherited from + // its operands. Skip by using continue if the operands do not + // have a type yet. A fixed-point is used to ensure we revisit + // instruction which have missing types. 
let v = Variable::new(index); + let ty = match ins.opcode { + Opcode::Rehash(_) | + Opcode::Phi => { + assert!(ins.operands.len() >= 1); + match types[ins.operands[0].index] { + Some((_, ty)) => ty, + None => { + nb_unknown_types += 1; + continue + }, + } + } + Opcode::Nth(n) => { + assert!(ins.operands.len() == 1); + let ty = { + let call = &self.unit.dfg.instructions[ins.operands[0].index]; + let id = match call.opcode.result_type() { + ValueType::ResultOfSig(id) => id, + // TODO: Raise a LowerError. + _ => panic!("Opcode::Nth applied to something which does not have a signature type") + }; + let (_, out) = self.signature_io(id)?; + assert!(out.len() >= 2); + assert!((n as usize) < out.len()); // TODO: Raise a LowerError + let id = out[n as usize]; + self.cltype(id)? + }; + let nths = self.nth_map.entry(ins.operands[0]).or_insert(vec![]); + nths.push((n, v)); + ty + } + // TODO: LowerError: Unexpected InheritFromOperand opcode. + _ => unimplemented!(), + }; bld.declare_var(v, ty); types[index] = Some((v, ty)); } @@ -252,6 +305,7 @@ impl<'a> ConvertCtx<'a> { bld.def_var(Variable::new(val.index), res); } Phi => (), // Phi are handled at the end of predecessor blocks. + Nth(_) => (), // Nth are handled by declare_var function. Const(val) => { use self::NumberValue::*; let res = match val { @@ -501,7 +555,42 @@ impl<'a> ConvertCtx<'a> { _ => unimplemented!("Switch with more than 2 branches"), } } - Call(_id) => unimplemented!(), + Call(type_id) => { + // Generate the function call. 
+ let fun = bld.use_var(Variable::new(ins.operands[0].index)); + let args : Vec<_> = + ins.operands.iter().skip(1) + .map(|op| bld.use_var(Variable::new(op.index))) + .collect(); + let sig = self.sigref(bld, type_id)?; + let call = bld.ins().call_indirect(sig, fun, &args); + let results : Vec<_> = bld.inst_results(call).iter().cloned().collect(); + assert_eq!(results.len(), self.signature_io(type_id)?.1.len()); + match results.len() { + 0 => (), + 1 => bld.def_var(res_var, results[0]), + _ => { + match self.nth_map.get(&val) { + Some(list) => { + for &(n, var) in list { + bld.def_var(var, results[n as usize]); + } + }, + None => (), + }; + }, + }; + + match (seq.default, seq.unwind) { + (None, None) => { bld.ins().trap(TrapCode::User(0)); }, + (Some(SuccessorIndex(succ)), None) | + (None, Some(SuccessorIndex(succ))) => { + let SequenceIndex(seq) = seq.successors[succ]; + bld.ins().jump(ebbs[seq], &[]); + } + (Some(_), Some(_)) => unimplemented!("We need to implement the unwinding logic."), + }; + }, CallUnit(_id) => unimplemented!(), }; @@ -580,7 +669,7 @@ impl<'a> ConvertCtx<'a> { /// Convert a LIR Unit into a Cranelift IR (Function). fn convert(&mut self) -> LowerResult { - let sig = self.signature()?; + let sig = self.unit_signature()?; let mut fn_builder_ctx = FunctionBuilderContext::::new(); let mut func = Function::with_name_signature(self.external_name(self.unit.id), sig); { @@ -602,6 +691,7 @@ pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult UnitBuilder<'a> { } /// Add a control flow instruction to end the current sequence. - pub fn end_ins(&mut self, ins: Instruction) { + pub fn end_ins(&mut self, ins: Instruction) -> Value { debug_assert!(ins.is_control()); let is_return = ins.opcode.is_return(); let value = self.dfg_add_ins(ins); @@ -269,11 +269,13 @@ impl<'a> UnitBuilder<'a> { // statement in the list of outputs of the unit. 
if is_return { self.unit.outputs.push(value); - } + }; + + value } // Add a control flow instruction based on its opcode, operands and dependencies. - pub fn end_op_deps(&mut self, opcode: Opcode, operands: &[Value], dependencies: &[Value]) { + pub fn end_op_deps(&mut self, opcode: Opcode, operands: &[Value], dependencies: &[Value]) -> Value { self.end_ins(Instruction { opcode, operands: operands.iter().map(|x| *x).collect(), @@ -284,7 +286,7 @@ impl<'a> UnitBuilder<'a> { /// Add a control flow instruction based only on its opcode, this function /// creates a conservative aliasing between load, store, calls and units. - pub fn end_op(&mut self, opcode: Opcode, operands: &[Value]) { + pub fn end_op(&mut self, opcode: Opcode, operands: &[Value]) -> Value { self.end_op_deps(opcode, operands, &[]) } diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index 98b14d1..6fc9cbd 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -97,6 +97,12 @@ pub enum Opcode { /// on. The carry flag indicate an overflow/underflow in unsigned /// additions and substraction. (0 operand, 1 dependency) CarryFlag, + /// Extract the returned value 'n' from its operand. This is used as a way + /// to extract the one returned value from a tuple of returned values. This + /// is made to be used with unit, call and callunit when they have tuples of + /// returned values. When lowered each would get its own binding, and be + /// handled separately. + Nth(u8), /// Addition. (2 operands) Add(number::NumberType), @@ -224,13 +230,14 @@ pub enum Opcode { Switch(SwitchData), /// Call implements a function call, such as any Rust function, an assertion - /// or a drop function. The argument correspond to the signature of the + /// or a drop function. The argument corresponds to the signature of the /// function being called. (many operands: function + arguments, maybe /// default target, maybe unwind target) Call(ComplexTypeId), - /// CallUnit implements an internal Unit call or inline. 
- /// (many operands: arguments, maybe default target, maybe unwind target) + /// CallUnit implements an internal Unit call or inline, such as another + /// Function which is encoded in LIR format. (many operands: arguments, + /// maybe default target, maybe unwind target) CallUnit(unit::UnitId), } @@ -285,6 +292,7 @@ impl Opcode { match self { Entry(_) | Newhash(_) => ValueType::None, + Nth(_) | Rehash(_) | Phi => ValueType::InheritFromOperands, Const(val) => ValueType::Number(val.into()), diff --git a/lir/src/types.rs b/lir/src/types.rs index 3b91a9e..6dc16b0 100644 --- a/lir/src/types.rs +++ b/lir/src/types.rs @@ -24,7 +24,8 @@ pub struct CanUnwind(pub bool); #[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone)] pub enum ComplexType { /// Functions are used to express the signature of Unit and external - /// functions. + /// functions. At the moment, all functions are assumed to follow the same + /// calling convention as rust functions. Function(Vec, Vec, CanUnwind), /// Structures are used to map each offsets with its corresponding type. Structure(Vec<(Offset, ComplexTypeId)>), From 5dc0e118d6a033e3f37a28199be89dade637227c Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 24 Nov 2018 12:39:15 +0100 Subject: [PATCH 23/32] Add test cases to verify calls with multiple arguments and returned values. 
--- codegen/tests/call.rs | 170 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 170 insertions(+) create mode 100644 codegen/tests/call.rs diff --git a/codegen/tests/call.rs b/codegen/tests/call.rs new file mode 100644 index 0000000..201caa8 --- /dev/null +++ b/codegen/tests/call.rs @@ -0,0 +1,170 @@ +extern crate holyjit_codegen as codegen; +extern crate holyjit_lir as lir; + +use codegen::*; +use std::mem; + +use lir::unit::*; +use lir::data_flow::*; +use lir::number::*; +use lir::builder::*; +use lir::types::*; + +mod lib; +use lib::*; + +fn ret_tuple(x : u8) -> (u8, u8) { + sub_add(x, 1) +} + +fn sub_add(x : u8, y: u8) -> (u8, u8) { + (x.wrapping_sub(y), x.wrapping_add(y)) +} + +fn diff_sub_add(x : u8, y : u8) -> u8 { + let (a, b) = sub_add(x, y); + b.wrapping_sub(a) +} + + +#[test] +fn call_ret_tuple() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u8], vec![t_u8], CanUnwind(true))); + // TODO: Unwinding is not supported without DWARF at the moment and + // eh_frame describing where to resume the execution when unwinding. 
+ let t_ret_tuple = bld.ctx().add_type(ComplexType::Function(vec![t_u8], vec![t_u8, t_u8], CanUnwind(false))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + let s1 = bld.create_sequence(); + let r0 = { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let v1 = bld.add_op(Opcode::Const((addr_type(), ret_tuple as usize).into()), &[]); + let r0 = bld.end_op(Opcode::Call(t_ret_tuple), &[v1, a0]); + bld.sequence_default_jump(s1); + r0 + }; + { + bld.freeze_sequence_predecessors(s1); + bld.switch_to_sequence(s1); + let v1 = bld.add_op(Opcode::Nth(0), &[r0]); + let v2 = bld.add_op(Opcode::Nth(1), &[r0]); + let v3 = bld.add_op(Opcode::Sub(NumberType::U8), &[v2, v1]); + bld.end_op(Opcode::Return, &[v3]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u8) -> u8 = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + println!("f({}) == {}", i, f(i as u8)); + assert_eq!(f(i as u8), 2); + } +} + +#[test] +fn call_sub_add() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u8, t_u8], vec![t_u8], CanUnwind(true))); + // TODO: Unwinding is not supported without DWARF at the moment and + // eh_frame describing where to resume the execution when unwinding. 
+ let t_fun = bld.ctx().add_type(ComplexType::Function(vec![t_u8, t_u8], vec![t_u8, t_u8], CanUnwind(false))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + let s1 = bld.create_sequence(); + let r0 = { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Const((addr_type(), sub_add as usize).into()), &[]); + let r0 = bld.end_op(Opcode::Call(t_fun), &[v1, a0, a1]); + bld.sequence_default_jump(s1); + r0 + }; + { + bld.freeze_sequence_predecessors(s1); + bld.switch_to_sequence(s1); + let v1 = bld.add_op(Opcode::Nth(0), &[r0]); + let v2 = bld.add_op(Opcode::Nth(1), &[r0]); + let v3 = bld.add_op(Opcode::Sub(NumberType::U8), &[v2, v1]); + bld.end_op(Opcode::Return, &[v3]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u8, u8) -> u8 = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + let i = i as u8; + for j in u32_values().into_iter() { + let j = j as u8; + println!("f({}, {}) == {} (expect {})", i, j, f(i, j), j.wrapping_mul(2)); + assert_eq!(f(i, j), j.wrapping_mul(2)); + } + } +} + +#[test] +fn call_diff_sub_add() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u8, t_u8], vec![t_u8], CanUnwind(true))); + // TODO: Unwinding is not supported without DWARF at the moment and + // eh_frame describing where to resume the execution when unwinding. 
+ let t_fun = bld.ctx().add_type(ComplexType::Function(vec![t_u8, t_u8], vec![t_u8], CanUnwind(false))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + let s1 = bld.create_sequence(); + let r0 = { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Const((addr_type(), diff_sub_add as usize).into()), &[]); + let r0 = bld.end_op(Opcode::Call(t_fun), &[v1, a0, a1]); + bld.sequence_default_jump(s1); + r0 + }; + { + bld.freeze_sequence_predecessors(s1); + bld.switch_to_sequence(s1); + bld.end_op(Opcode::Return, &[r0]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u8, u8) -> u8 = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + let i = i as u8; + for j in u32_values().into_iter() { + let j = j as u8; + println!("f({}, {}) == {} (expect {})", i, j, f(i, j), j.wrapping_mul(2)); + assert_eq!(f(i, j), j.wrapping_mul(2)); + } + } +} From af1c2cbbd0a8a49165963a48019a8cc6a9d8028f Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 24 Nov 2018 12:40:37 +0100 Subject: [PATCH 24/32] Generate code to handle conditions: Eq, Ne, Lt, Le, Gt, Ge and Ord. 
--- codegen/src/lower.rs | 82 +- codegen/tests/compare.rs | 1920 ++++++++++++++++++++++++++++++++++++++ codegen/tests/lib/mod.rs | 10 +- lir/src/data_flow.rs | 17 +- lir/src/number.rs | 26 + 5 files changed, 2043 insertions(+), 12 deletions(-) create mode 100644 codegen/tests/compare.rs diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 6cdde18..240876e 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -6,7 +6,7 @@ use frontend::{FunctionBuilderContext, FunctionBuilder, Variable}; use codegen::entity::EntityRef; use codegen::ir::{Ebb, ExternalName, Function, Signature, AbiParam, InstBuilder, TrapCode, MemFlags, SigRef}; use codegen::ir::immediates::{Ieee32, Ieee64, Imm64}; -use codegen::ir::condcodes::IntCC; +use codegen::ir::condcodes::{IntCC, FloatCC}; use codegen::ir::types::*; use codegen::ir::types; use codegen::settings::{self, CallConv}; @@ -16,7 +16,7 @@ use codegen::isa::TargetIsa; use lir::unit::{Unit, UnitId}; use lir::context::Context; use lir::types::{ComplexTypeId, ComplexType}; -use lir::number::{NumberType, SignedType, NumberValue}; +use lir::number::{NumberType, SignedType, OrderedType, NumberValue}; use lir::control_flow::{Sequence, SequenceIndex, SuccessorIndex}; use lir::data_flow::{Opcode, Instruction, ValueType, Value}; use error::{LowerResult, LowerError}; @@ -41,6 +41,70 @@ struct ConvertCtx<'a> { type OptVarType = Option<(Variable, types::Type)>; +enum IFCond { + IntCond(IntCC), + FloatCond(FloatCC), +} +fn opcode_to_cond(op: Opcode) -> IFCond { + use self::Opcode::*; + use self::OrderedType::*; + use self::IFCond::*; + match op { + Ord(Ordered(t)) => { + assert_eq!(t.is_float(), true); + FloatCond(FloatCC::Ordered) + } + Eq(Ordered(t)) => { + match t.is_float() { + true => FloatCond(FloatCC::Equal), + false => IntCond(IntCC::Equal), + } + } + Ne(Ordered(t)) => { + match t.is_float() { + true => FloatCond(FloatCC::OrderedNotEqual), + false => IntCond(IntCC::NotEqual), + } + } + Lt(Ordered(t)) => { + match 
(t.is_float(), t.is_signed()) { + (true, _) => FloatCond(FloatCC::LessThan), + (false, true) => IntCond(IntCC::SignedLessThan), + (false, false) => IntCond(IntCC::UnsignedLessThan), + } + } + Ge(Ordered(t)) => { + match (t.is_float(), t.is_signed()) { + (true, _) => FloatCond(FloatCC::GreaterThanOrEqual), + (false, true) => IntCond(IntCC::SignedGreaterThanOrEqual), + (false, false) => IntCond(IntCC::UnsignedGreaterThanOrEqual), + } + } + Le(Ordered(t)) => { + match (t.is_float(), t.is_signed()) { + (true, _) => FloatCond(FloatCC::LessThanOrEqual), + (false, true) => IntCond(IntCC::SignedLessThanOrEqual), + (false, false) => IntCond(IntCC::UnsignedLessThanOrEqual), + } + } + Gt(Ordered(t)) => { + match (t.is_float(), t.is_signed()) { + (true, _) => FloatCond(FloatCC::GreaterThan), + (false, true) => IntCond(IntCC::SignedGreaterThan), + (false, false) => IntCond(IntCC::UnsignedGreaterThan), + } + } + Ord(Unordered(_)) => FloatCond(FloatCC::Unordered), + Eq(Unordered(_)) => FloatCond(FloatCC::UnorderedOrEqual), + Ne(Unordered(_)) => FloatCond(FloatCC::NotEqual), + Lt(Unordered(_)) => FloatCond(FloatCC::UnorderedOrLessThan), + Ge(Unordered(_)) => FloatCond(FloatCC::UnorderedOrGreaterThanOrEqual), + Le(Unordered(_)) => FloatCond(FloatCC::UnorderedOrLessThanOrEqual), + Gt(Unordered(_)) => FloatCond(FloatCC::UnorderedOrGreaterThan), + _ => panic!("Unexpected conditional opcode") + } +} + impl<'a> ConvertCtx<'a> { fn sign_mask(&self, ty: NumberType) -> u64 { match ty { @@ -75,6 +139,8 @@ impl<'a> ConvertCtx<'a> { &Scalar(U16) | &Scalar(I16) => Ok(types::I16), &Scalar(U32) | &Scalar(I32) => Ok(types::I32), &Scalar(U64) | &Scalar(I64) => Ok(types::I64), + &Scalar(F32) => Ok(types::F32), + &Scalar(F64) => Ok(types::F64), &Vector(_, _) => unimplemented!(), _ => Err(LowerError::ComplexTypeNotLowered), } @@ -449,8 +515,16 @@ impl<'a> ConvertCtx<'a> { BwNot(_b) => unimplemented!(), ShiftLeft(_i) => unimplemented!(), ShiftRight(_i) => unimplemented!(), - Eq(_) | Lt(_) | Le(_) | - 
Ne(_) | Gt(_) | Ge(_) => unimplemented!(), + Ord(_) | Eq(_) | Lt(_) | Le(_) | + Ne(_) | Gt(_) | Ge(_) => { + let a0 = bld.use_var(Variable::new(ins.operands[0].index)); + let a1 = bld.use_var(Variable::new(ins.operands[1].index)); + let res = match opcode_to_cond(ins.opcode) { + IFCond::IntCond(cc) => bld.ins().icmp(cc, a0, a1), + IFCond::FloatCond(cc) => bld.ins().fcmp(cc, a0, a1), + }; + bld.def_var(res_var, res); + } StaticAddress => { let refs = self.ctx.get_static_refs_address() as i64; let res = match mem::size_of::() { diff --git a/codegen/tests/compare.rs b/codegen/tests/compare.rs new file mode 100644 index 0000000..834ac69 --- /dev/null +++ b/codegen/tests/compare.rs @@ -0,0 +1,1920 @@ +extern crate holyjit_codegen as codegen; +extern crate holyjit_lir as lir; + +use codegen::*; +use std::mem; +use std::cmp::Ordering; + +use lir::unit::*; +use lir::data_flow::*; +use lir::number::*; +use lir::builder::*; +use lir::types::*; + +mod lib; +use lib::*; + + +#[test] +fn eq_u32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Eq(OrderedType::Ordered(NumberType::U32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("f({}, {}) == {}", i, j, f(i, j)); + assert_eq!(f(i, j), i == j); + } + } +} + +#[test] +fn ne_u32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ne(OrderedType::Ordered(NumberType::U32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("f({}, {}) != {}", i, j, f(i, j)); + assert_eq!(f(i, j), i != j); + } + } +} + +#[test] +fn lt_u32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Lt(OrderedType::Ordered(NumberType::U32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("f({}, {}) < {}", i, j, f(i, j)); + assert_eq!(f(i, j), i < j); + } + } +} + +#[test] +fn le_u32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Le(OrderedType::Ordered(NumberType::U32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("f({}, {}) <= {}", i, j, f(i, j)); + assert_eq!(f(i, j), i <= j); + } + } +} + +#[test] +fn gt_u32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Gt(OrderedType::Ordered(NumberType::U32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("f({}, {}) > {}", i, j, f(i, j)); + assert_eq!(f(i, j), i > j); + } + } +} + +#[test] +fn ge_u32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ge(OrderedType::Ordered(NumberType::U32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u32, u32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("f({}, {}) >= {}", i, j, f(i, j)); + assert_eq!(f(i, j), i >= j); + } + } +} + +#[test] +fn eq_i32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Eq(OrderedType::Ordered(NumberType::I32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("f({}, {}) == {}", i, j, f(i, j)); + assert_eq!(f(i, j), i == j); + } + } +} + +#[test] +fn ne_i32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ne(OrderedType::Ordered(NumberType::I32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("f({}, {}) != {}", i, j, f(i, j)); + assert_eq!(f(i, j), i != j); + } + } +} + +#[test] +fn lt_i32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Lt(OrderedType::Ordered(NumberType::I32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("f({}, {}) < {}", i, j, f(i, j)); + assert_eq!(f(i, j), i < j); + } + } +} + +#[test] +fn le_i32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Le(OrderedType::Ordered(NumberType::I32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("f({}, {}) <= {}", i, j, f(i, j)); + assert_eq!(f(i, j), i <= j); + } + } +} + +#[test] +fn gt_i32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Gt(OrderedType::Ordered(NumberType::I32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("f({}, {}) > {}", i, j, f(i, j)); + assert_eq!(f(i, j), i > j); + } + } +} + +#[test] +fn ge_i32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ge(OrderedType::Ordered(NumberType::I32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i32, i32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("f({}, {}) >= {}", i, j, f(i, j)); + assert_eq!(f(i, j), i >= j); + } + } +} + +#[test] +fn eq_u64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Eq(OrderedType::Ordered(NumberType::U64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("f({}, {}) == {}", i, j, f(i, j)); + assert_eq!(f(i, j), i == j); + } + } +} + +#[test] +fn ne_u64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ne(OrderedType::Ordered(NumberType::U64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("f({}, {}) != {}", i, j, f(i, j)); + assert_eq!(f(i, j), i != j); + } + } +} + +#[test] +fn lt_u64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Lt(OrderedType::Ordered(NumberType::U64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("f({}, {}) < {}", i, j, f(i, j)); + assert_eq!(f(i, j), i < j); + } + } +} + +#[test] +fn le_u64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Le(OrderedType::Ordered(NumberType::U64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("f({}, {}) <= {}", i, j, f(i, j)); + assert_eq!(f(i, j), i <= j); + } + } +} + +#[test] +fn gt_u64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Gt(OrderedType::Ordered(NumberType::U64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("f({}, {}) > {}", i, j, f(i, j)); + assert_eq!(f(i, j), i > j); + } + } +} + +#[test] +fn ge_u64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ge(OrderedType::Ordered(NumberType::U64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u64, u64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("f({}, {}) >= {}", i, j, f(i, j)); + assert_eq!(f(i, j), i >= j); + } + } +} + +#[test] +fn eq_i64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Eq(OrderedType::Ordered(NumberType::I64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("f({}, {}) == {}", i, j, f(i, j)); + assert_eq!(f(i, j), i == j); + } + } +} + +#[test] +fn ne_i64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ne(OrderedType::Ordered(NumberType::I64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("f({}, {}) != {}", i, j, f(i, j)); + assert_eq!(f(i, j), i != j); + } + } +} + +#[test] +fn lt_i64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Lt(OrderedType::Ordered(NumberType::I64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("f({}, {}) < {}", i, j, f(i, j)); + assert_eq!(f(i, j), i < j); + } + } +} + +#[test] +fn le_i64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Le(OrderedType::Ordered(NumberType::I64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("f({}, {}) <= {}", i, j, f(i, j)); + assert_eq!(f(i, j), i <= j); + } + } +} + +#[test] +fn gt_i64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Gt(OrderedType::Ordered(NumberType::I64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("f({}, {}) > {}", i, j, f(i, j)); + assert_eq!(f(i, j), i > j); + } + } +} + +#[test] +fn ge_i64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ge(OrderedType::Ordered(NumberType::I64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i64, i64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("f({}, {}) >= {}", i, j, f(i, j)); + assert_eq!(f(i, j), i >= j); + } + } +} + +#[test] +fn ord_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ord(OrderedType::Ordered(NumberType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(_) => true, + None => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn oeq_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Eq(OrderedType::Ordered(NumberType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Equal) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn one_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ne(OrderedType::Ordered(NumberType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Equal) | None => false, + _ => true + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn olt_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Lt(OrderedType::Ordered(NumberType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Less) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ole_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Le(OrderedType::Ordered(NumberType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Less) | Some(Ordering::Equal) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ogt_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Gt(OrderedType::Ordered(NumberType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Greater) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn oge_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ge(OrderedType::Ordered(NumberType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Greater) | Some(Ordering::Equal) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn unord_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ord(OrderedType::Unordered(FloatType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(_) => false, + None => true + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ueq_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Eq(OrderedType::Unordered(FloatType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Equal) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn une_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ne(OrderedType::Unordered(FloatType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Equal) => false, + _ => true + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ult_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Lt(OrderedType::Unordered(FloatType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Less) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ule_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Le(OrderedType::Unordered(FloatType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Less) | Some(Ordering::Equal) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ugt_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Gt(OrderedType::Unordered(FloatType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Greater) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn uge_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ge(OrderedType::Unordered(FloatType::F32)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Greater) | Some(Ordering::Equal) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ord_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ord(OrderedType::Ordered(NumberType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(_) => true, + None => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn oeq_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Eq(OrderedType::Ordered(NumberType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Equal) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn one_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ne(OrderedType::Ordered(NumberType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Equal) | None => false, + _ => true + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn olt_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Lt(OrderedType::Ordered(NumberType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Less) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ole_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Le(OrderedType::Ordered(NumberType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Less) | Some(Ordering::Equal) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ogt_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Gt(OrderedType::Ordered(NumberType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Greater) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn oge_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ge(OrderedType::Ordered(NumberType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Greater) | Some(Ordering::Equal) => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn unord_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ord(OrderedType::Unordered(FloatType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(_) => false, + None => true + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ueq_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Eq(OrderedType::Unordered(FloatType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Equal) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn une_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ne(OrderedType::Unordered(FloatType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Equal) => false, + _ => true + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ult_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Lt(OrderedType::Unordered(FloatType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Less) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ule_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Le(OrderedType::Unordered(FloatType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Less) | Some(Ordering::Equal) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn ugt_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Gt(OrderedType::Unordered(FloatType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Greater) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} + +#[test] +fn uge_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Ge(OrderedType::Unordered(FloatType::F64)), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> bool = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + let expect = match i.partial_cmp(&j) { + Some(Ordering::Greater) | Some(Ordering::Equal) | None => true, + _ => false + }; + assert_eq!(f(i, j), expect); + } + } +} diff --git a/codegen/tests/lib/mod.rs b/codegen/tests/lib/mod.rs index 84faf2b..21af1ef 100644 --- a/codegen/tests/lib/mod.rs +++ b/codegen/tests/lib/mod.rs @@ -1,5 +1,5 @@ #![allow(dead_code)] -use std::mem::size_of; +use std::mem::{size_of, transmute}; use lir::number::*; /// Iterate over interesting u32 numbers. @@ -43,6 +43,14 @@ pub fn i64_values() -> Vec { u64_values().into_iter().map(|x| x as i64).collect() } +pub fn f32_values() -> Vec { + u32_values().into_iter().map(|x| unsafe { transmute::(x) }).collect() +} + +pub fn f64_values() -> Vec { + u64_values().into_iter().map(|x| unsafe { transmute::(x) }).collect() +} + pub fn addr_type() -> NumberType { match size_of::() { 1 => NumberType::U8, diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index 6fc9cbd..5673f9a 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -141,18 +141,21 @@ pub enum Opcode { /// Shift right. (2 operands: result = lhs >> rhs) ShiftRight(number::SignedType), + /// Ordered. 
(2 operands) + /// Test if the operands are comparable. + Ord(number::OrderedType), /// Equal. (2 operands) - Eq(number::NumberType), + Eq(number::OrderedType), /// Less than. (2 operands: result = lhs < rhs) - Lt(number::NumberType), + Lt(number::OrderedType), /// Less than or equal. (2 operands: result = lhs <= rhs) - Le(number::NumberType), + Le(number::OrderedType), /// Not equal. (2 operands) - Ne(number::NumberType), + Ne(number::OrderedType), /// Greather than. (2 operands: result = lhs > rhs) - Gt(number::NumberType), + Gt(number::OrderedType), // Greather than or equal. (2 operands: result = lhs >= rhs) - Ge(number::NumberType), + Ge(number::OrderedType), /// StaticAddress is used to refer to data which is not yet known at compile /// time, but known at the execution, such as function pointer addresses. @@ -316,7 +319,7 @@ impl Opcode { BwNot(b) => ValueType::Number(b.into()), ShiftLeft(i) => ValueType::Number(i.into()), ShiftRight(i) => ValueType::Number(i.into()), - Eq(_) | Lt(_) | Le(_) | + Ord(_) | Eq(_) | Lt(_) | Le(_) | Ne(_) | Gt(_) | Ge(_) => ValueType::Boolean, StaticAddress | Address | diff --git a/lir/src/number.rs b/lir/src/number.rs index ea0dc05..85f0398 100644 --- a/lir/src/number.rs +++ b/lir/src/number.rs @@ -26,6 +26,15 @@ pub enum FloatType { F32, F64, } +/// When comparing numerical types such as Floating point numbers, another +/// aspect appear which is called "Ordered", which basically means that value +/// can be compared. Unordered values would be values such as NaN. 
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub enum OrderedType { + Ordered(NumberType), + Unordered(FloatType) +} + #[derive(Serialize, Deserialize, Debug, Clone, Copy)] /* derive(Hash)-manually */ pub enum NumberValue { B1(bool), @@ -128,3 +137,20 @@ impl From<(NumberType, usize)> for NumberValue { } } } + +impl NumberType { + pub fn is_float(self) -> bool { + use self::NumberType::*; + match self { + F32 | F64 => true, + _ => false, + } + } + pub fn is_signed(self) -> bool { + use self::NumberType::*; + match self { + F32 | F64 | I8 | I16 | I32 | I64 => true, + _ => false, + } + } +} From a7fb48ce5da1f4594066247346033841a168a7ce Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sat, 24 Nov 2018 17:35:31 +0100 Subject: [PATCH 25/32] Generate code for Muliplication without overflow handling. --- codegen/src/lower.rs | 22 +++- codegen/tests/lib/mod.rs | 6 +- codegen/tests/mul.rs | 222 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 246 insertions(+), 4 deletions(-) create mode 100644 codegen/tests/mul.rs diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 240876e..e979a8d 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -488,7 +488,27 @@ impl<'a> ConvertCtx<'a> { } }; }, - Mul(_n) => unimplemented!(), + Mul(n) => { + let a0 = bld.use_var(Variable::new(ins.operands[0].index)); + let a1 = bld.use_var(Variable::new(ins.operands[1].index)); + let (of, cf) = (self.overflow_map.get(&val), + self.carry_map.get(&val)); + match (n, of, cf) { + (NumberType::F32, _, _) | + (NumberType::F64, _, _) => { + // TODO: Return an error instead. 
+ debug_assert!(of == None); + debug_assert!(cf == None); + let res = bld.ins().fmul(a0, a1); + bld.def_var(res_var, res); + } + (_, None, None) => { + let res = bld.ins().imul(a0, a1); + bld.def_var(res_var, res); + } + _ => unimplemented!("TODO: No support for overflow or carry flag yet."), + } + }, Div(_n) => unimplemented!(), Rem(n) => { let a0 = bld.use_var(Variable::new(ins.operands[0].index)); diff --git a/codegen/tests/lib/mod.rs b/codegen/tests/lib/mod.rs index 21af1ef..a201d57 100644 --- a/codegen/tests/lib/mod.rs +++ b/codegen/tests/lib/mod.rs @@ -1,5 +1,5 @@ #![allow(dead_code)] -use std::mem::{size_of, transmute}; +use std::mem::size_of; use lir::number::*; /// Iterate over interesting u32 numbers. @@ -44,11 +44,11 @@ pub fn i64_values() -> Vec { } pub fn f32_values() -> Vec { - u32_values().into_iter().map(|x| unsafe { transmute::(x) }).collect() + u32_values().into_iter().map(|x| f32::from_bits(x)).collect() } pub fn f64_values() -> Vec { - u64_values().into_iter().map(|x| unsafe { transmute::(x) }).collect() + u64_values().into_iter().map(|x| f64::from_bits(x)).collect() } pub fn addr_type() -> NumberType { diff --git a/codegen/tests/mul.rs b/codegen/tests/mul.rs new file mode 100644 index 0000000..47e04f0 --- /dev/null +++ b/codegen/tests/mul.rs @@ -0,0 +1,222 @@ +extern crate holyjit_codegen as codegen; +extern crate holyjit_lir as lir; + +use codegen::*; +use std::mem; + +use lir::unit::*; +use lir::data_flow::*; +use lir::number::*; +use lir::builder::*; +use lir::types::*; + +mod lib; +use lib::*; + +#[test] +fn mul_u32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_u32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Mul(NumberType::U32), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u32, u32) -> u32 = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u32_values().into_iter() { + for j in u32_values().into_iter() { + println!("f({}, {}) == {}", i, j, f(i, j)); + assert_eq!(f(i, j), i.wrapping_mul(j)); + } + } +} + +#[test] +fn mul_i32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_i32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Mul(NumberType::I32), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i32, i32) -> i32 = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i32_values().into_iter() { + for j in i32_values().into_iter() { + println!("f({}, {}) == {}", i, j, f(i, j)); + assert_eq!(f(i, j), i.wrapping_mul(j)); + } + } +} + +#[test] +fn mul_u64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_u64], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Mul(NumberType::U64), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(u64, u64) -> u64 = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in u64_values().into_iter() { + for j in u64_values().into_iter() { + println!("f({}, {}) == {}", i, j, f(i, j)); + assert_eq!(f(i, j), i.wrapping_mul(j)); + } + } +} + +#[test] +fn mul_i64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. 
+ let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_i64], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Mul(NumberType::I64), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(i64, i64) -> i64 = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in i64_values().into_iter() { + for j in i64_values().into_iter() { + println!("f({}, {}) == {}", i, j, f(i, j)); + assert_eq!(f(i, j), i.wrapping_mul(j)); + } + } +} + +#[test] +fn mul_f32() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_f32], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Mul(NumberType::F32), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f32, f32) -> f32 = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f32_values().into_iter() { + for j in f32_values().into_iter() { + println!("f({}, {}) == {}" ,i, j, f(i, j)); + // NOTE: We cannot compare NaN against another NaN with assert_eq! + // macro, unless we compare the raw bits of the NaN values. 
+ assert_eq!(f(i, j).to_bits(), (i * j).to_bits()); + } + } +} + +#[test] +fn mul_f64() { + let mut ctx_bld = ContextBuilder::new(); + let unit = { + let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); + // Add the function signature. + let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); + let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_f64], CanUnwind(true))); + bld.set_signature(t_sig); + let s0 = bld.create_sequence(); + { + bld.set_entry(s0); + bld.switch_to_sequence(s0); + let a0 = bld.unit_arg(0); + let a1 = bld.unit_arg(1); + let v1 = bld.add_op(Opcode::Mul(NumberType::F64), &[a0, a1]); + bld.end_op(Opcode::Return, &[v1]); + } + bld.finish() + }; + let ctx = ctx_bld.finish(); + let mut cg = CodeGenerator::new(); + let code = cg.compile(&ctx, &unit).unwrap(); + let f : fn(f64, f64) -> f64 = unsafe { + mem::transmute(code.as_ptr()) + }; + for i in f64_values().into_iter() { + for j in f64_values().into_iter() { + println!("f({}, {}) == {}", i, j, f(i, j)); + // NOTE: We cannot compare NaN against another NaN with assert_eq! + // macro, unless we compare the raw bits of the NaN values. + assert_eq!(f(i, j).to_bits(), (i * j).to_bits()); + } + } +} From eec51ca2fd496b91832518253e7f00ad0f58de33 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sun, 2 Dec 2018 16:59:49 +0100 Subject: [PATCH 26/32] Use assert! instead of assert_eq! 
when checking a precondition in opcode_to_cond --- codegen/src/lower.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index e979a8d..7f5c233 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -51,7 +51,7 @@ fn opcode_to_cond(op: Opcode) -> IFCond { use self::IFCond::*; match op { Ord(Ordered(t)) => { - assert_eq!(t.is_float(), true); + assert!(t.is_float()); FloatCond(FloatCC::Ordered) } Eq(Ordered(t)) => { From 0417a725efe2ce1db7517611de1955b056040ee9 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sun, 30 Dec 2018 20:11:14 +0100 Subject: [PATCH 27/32] Add StackAddress to reserve explicit stack slots in the generated code. --- codegen/src/lower.rs | 41 ++++++++++++++++++++++++++++++++++------- lir/src/builder.rs | 3 +++ lir/src/context.rs | 41 ++++++++++++++++++++++++++++++++++++++--- lir/src/data_flow.rs | 21 +++++++++++++++------ 4 files changed, 90 insertions(+), 16 deletions(-) diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 7f5c233..881d434 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -4,9 +4,10 @@ use std::collections::HashMap; use frontend::{FunctionBuilderContext, FunctionBuilder, Variable}; use codegen::entity::EntityRef; -use codegen::ir::{Ebb, ExternalName, Function, Signature, AbiParam, InstBuilder, TrapCode, MemFlags, SigRef}; +use codegen::ir::{Ebb, ExternalName, Function, Signature, AbiParam, InstBuilder, TrapCode, MemFlags, SigRef, StackSlotData, StackSlotKind}; use codegen::ir::immediates::{Ieee32, Ieee64, Imm64}; use codegen::ir::condcodes::{IntCC, FloatCC}; +use codegen::ir::entities::StackSlot; use codegen::ir::types::*; use codegen::ir::types; use codegen::settings::{self, CallConv}; @@ -37,6 +38,8 @@ struct ConvertCtx<'a> { pub carry_map: HashMap, /// Map the unit, call or callunit to its list of Nth accessors. 
pub nth_map: HashMap>, + /// StackAddress to StackSlot mapping + pub stack_map: HashMap, } type OptVarType = Option<(Variable, types::Type)>; @@ -252,12 +255,30 @@ impl<'a> ConvertCtx<'a> { // overflow and carry per math operation. TODO: To handle multiple // overflow flag and carry flag, we should create new variable that // would be copied on overflow/carry encoding. - let ins_res = match ins.opcode { - Opcode::OverflowFlag => self.overflow_map.insert(ins.dependencies[0], v), - Opcode::CarryFlag => self.carry_map.insert(ins.dependencies[0], v), - _ => None, + // + // When seeing a StackAddress instruction, register a corresponding + // StackSlot in Cranelift function, which would later be used when + // loading the address. + match ins.opcode { + Opcode::OverflowFlag => { + let exists = self.overflow_map.insert(ins.dependencies[0], v); + debug_assert!(exists == None); + } + Opcode::CarryFlag => { + let exists = self.carry_map.insert(ins.dependencies[0], v); + debug_assert!(exists == None); + } + Opcode::StackAddress(id) => { + let size = { + let info = self.ctx.get_stack_info(id); + info.size as u32 + }; + let slot = bld.create_stack_slot(StackSlotData::new(StackSlotKind::ExplicitSlot, size)); + let exists = self.stack_map.insert(id, slot); + debug_assert!(exists == None); + } + _ => (), }; - debug_assert!(ins_res == None); } // Infer type from the operands. 
Ideally instructions should be sorted @@ -558,8 +579,13 @@ impl<'a> ConvertCtx<'a> { }; bld.def_var(res_var, res); } - Address => unimplemented!(), CPUAddress => unimplemented!(), + StackAddress(id) => { + let slot = self.stack_map.get(&id).clone(); + let slot = *slot.expect("StackAddress should have a registered slot."); + let res = bld.ins().stack_addr(self.isa.pointer_type(), slot, 0); + bld.def_var(res_var, res); + } Load(ty) => { let addr = bld.use_var(Variable::new(ins.operands[0].index)); let mut mf = MemFlags::new(); @@ -786,6 +812,7 @@ pub fn convert(isa: &TargetIsa, ctx: &Context, unit: &Unit) -> LowerResult Opcode { Opcode::Newhash(self.ctx.get_hash_seed()) } + pub fn get_stackaddress(&mut self, ty: ComplexTypeId, size: usize, align: usize) -> Opcode { + Opcode::StackAddress(self.ctx.add_stack_info(ty, size, align)) + } /// Add a type and reuse a type which already got registered if any. pub fn add_type(&mut self, ty: ComplexType) -> ComplexTypeId { diff --git a/lir/src/context.rs b/lir/src/context.rs index d01b1e3..22e2cec 100644 --- a/lir/src/context.rs +++ b/lir/src/context.rs @@ -1,11 +1,24 @@ use std::{ptr, mem}; use types::{ComplexType, ComplexTypeId}; -// Pointer to the static memory. Note, this is not a u8 slices because this -// contains an heterogeneous list of references, symbols and values which might -// be of different type and sizes. +/// Pointer to the static memory. Note, this is not a u8 slices because this +/// contains an heterogeneous list of references, symbols and values which might +/// be of different type and sizes. type StaticStorage = *const (); +/// Information stored in the context for each StackAddress instruction in the +/// data flow. This contains the ComplexTypeId, the size and its alignment. +pub struct StackAddressInfo { + /// Type to be stored in the space reserved for the given stack address. + pub ty: ComplexTypeId, + /// Size of the type which is being stored. TODO: This should be a property + /// of the type. 
+ pub size: usize, + /// Alignment required for the type stored on the stack. TODO: This should + /// be a property of the type. + pub align: usize, +} + /// A context is a structure which centralize all the data necessary for the /// execution of any Unit. It holds the collection of complex types, and any /// counter related to having unique identifiers. @@ -15,6 +28,12 @@ pub struct Context { /// instruction should be added to the graph. wrapper_seed: usize, + /// This vector is used for StackAddress instructions. It's length is used + /// to compute the next available stack address identifer. Each index + /// identify uniquely a stack address and as such prevent aliasing of Stack + /// spaces holding data of the same type. + stack_info: Vec, + /// This vector holds the list of types references by all Unit associated to /// this context. Any ComplexTypeId is an index in this Vector. types: Vec, @@ -44,6 +63,7 @@ impl Context { pub fn new() -> Context { Context { wrapper_seed: 0, + stack_info: vec![], types: vec![], refs_ptr: ptr::null(), expected_refs_size: 0, @@ -59,6 +79,21 @@ impl Context { value } + /// Create a new stack seed, such that we can avoid aliasing of stack + /// values. This function is used by the ContextBuilder for creating new + /// StackAddress instructions. + pub fn add_stack_info(&mut self, ty: ComplexTypeId, size: usize, align: usize) -> usize { + let value = self.stack_info.len(); + self.stack_info.push(StackAddressInfo { ty, size, align }); + value + } + + /// Extract the StackAddressInfo from the index of a StackAddress + /// instruction. + pub fn get_stack_info(&self, index: usize) -> &StackAddressInfo { + &self.stack_info[index] + } + /// Add a new complex type in the list of known types. This function is used /// by the ContextBuilder to register types seen while generating Units. 
pub fn add_type(&mut self, ty: ComplexType) -> ComplexTypeId { diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index 5673f9a..8c051d3 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -166,11 +166,20 @@ pub enum Opcode { /// cpuid. /// (0 operand) CPUAddress, - /// Get the address of where the input operand is stored. At the end of the - /// pipeline, if any of these instructions remain it enforces the data to - /// live in memory at least as long as the address exists. - /// (1 operand) - Address, + /// StackAddress is used for storing content on the stack. The |usize| + /// argument is used as a unique identifier for the reserved space, as we do + /// not want to share addresses by default. These address spaces are assumed + /// to be uninitialize when they are created and should be written down with + /// a Store operation. + /// + /// Each unique address unique identifer is accosicated a minimum size and + /// alignment as part of the Context, and not as part of the data-flow. + /// + /// NOTE: At the moment there is no support for dynamic stack space + /// allocation. (0 operand) + // TODO: Add DeadAddress to mark the space as non-initialized and free the + // stack address space. + StackAddress(usize), /// Load content from the address. (1 operand: result = *input) Load(ComplexTypeId), @@ -322,7 +331,7 @@ impl Opcode { Ord(_) | Eq(_) | Lt(_) | Le(_) | Ne(_) | Gt(_) | Ge(_) => ValueType::Boolean, StaticAddress | - Address | + StackAddress(_) | CPUAddress => ValueType::Pointer, Load(ty) => ValueType::Complex(ty), Store(_ty) => ValueType::None, From 37c787b773ab5dc630fce6b17aa89b2885440796 Mon Sep 17 00:00:00 2001 From: "Nicolas B. 
Pierron" Date: Sat, 27 Apr 2019 18:57:31 +0200 Subject: [PATCH 28/32] Replace HolyJit lib own definition of the LIR and Compiler --- codegen/src/lib.rs | 2 +- lib/Cargo.toml | 2 + lib/src/compile.rs | 2163 -------------------------------------------- lib/src/context.rs | 30 +- lib/src/lib.rs | 18 +- lib/src/lir.rs | 189 ---- lir/src/builder.rs | 5 - lir/src/context.rs | 9 +- lir/src/types.rs | 2 +- 9 files changed, 44 insertions(+), 2376 deletions(-) delete mode 100644 lib/src/compile.rs delete mode 100644 lib/src/lir.rs diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index 43fd884..f4e8543 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -75,7 +75,7 @@ impl CodeGenerator { } impl JitCode { - pub fn as_ptr(&self) -> *const u8 { + pub unsafe fn as_ptr(&self) -> *const u8 { self.code.as_ptr() } } diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 7252906..98bb142 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,6 +6,8 @@ authors = [ "Nicolas B. Pierron " ] # Lock dynasm and serde_derive in order to build against old versions of # rustc. [dependencies] +holyjit_lir = { path = "../lir" } +holyjit_codegen = { path = "../codegen" } dynasm = "=0.1.3" dynasmrt = "=0.1.3" serde = "1.0" diff --git a/lib/src/compile.rs b/lib/src/compile.rs deleted file mode 100644 index 104ebb5..0000000 --- a/lib/src/compile.rs +++ /dev/null @@ -1,2163 +0,0 @@ -//! Compiles the LIR representation into executable code using the dynasm -//! plugin and library. The use of dynasm is only for having a working proof -//! of concept, but is unlikely to be part of the final solution. - -use std::collections::HashMap; -use std::mem; - -use dynasmrt; -use dynasmrt::x64; -use dynasmrt::{DynasmApi, DynasmLabelApi}; - -use bincode; - -use lir; - -/// Structure used to store the generated code. 
-pub struct JitCode { - code: dynasmrt::ExecutableBuffer, - start: dynasmrt::AssemblyOffset, - -} - -impl JitCode { - /// Compile a function based on the vector of bytes and the table of - /// statics. - pub fn compile(bytes: &[u8], defs: *const ()) -> Result { - Compiler::compile(bytes, defs.clone()) - } - - /// Cast the current code into the proper fn-type given as parameter of - /// this generic function. - pub unsafe fn get_fn(&self) -> *const () { - mem::transmute(self.code.ptr(self.start)) - } -} - -#[derive(Debug)] -pub enum Error { - /// Issue caused when deserializing the buffer which contains the Lir. - Deserialize, - - /// The number of live registers exceeds the number of the architecture - /// registers. This should be handled by a better register allocator in - /// the future. - NotEnoughRegisters, - NotEnoughRegistersForMovGrp, - - /// Cannot finalize the code, and generate an executable page. - Finalize, - - /// Cannot reuse deleted register mapping - MissingRegisterMap, - - /// We cannot find an allocation corresponding to a given register. - AllocatedButNotRegistered, - - /// We were not able to find the offset in which the data should be - /// stored into. - StoreIntoMissTarget(u8), - - /// This is still a prototype. - NYI(&'static str), - - /// - WhatTheHell -} - -impl<'tcx> From> for Error { - fn from(err: Box) -> Error { - println!("bincode::ErrorKind = {}", err); - Error::Deserialize - } -} - -mod asm { - use dynasmrt; - use dynasmrt::DynasmApi; - - #[derive(Copy, Clone, Debug, PartialEq)] - #[repr(u8)] - pub enum Register { - Rax = 0, - Rcx, Rdx, Rbx, - Rsp, Rbp, Rsi, Rdi, - R8 , R9 , R10, R11, - R12, R13, R14, R15, - } - - // Conditions for CMP instructions (CMPSS, CMPSD, CMPPS, CMPPD, etc). 
- /* - #[derive(Copy, Clone, Debug)] - #[repr(u8)] - enum FpCondition { - Eq = 0x0, - Lt = 0x1, - Le = 0x2, - UnOrd = 0x3, - Neq = 0x4, - Nlt = 0x5, - Nle = 0x6, - Ord = 0x7, - } - */ - - #[allow(unused)] - #[derive(Copy, Clone, Debug)] - #[repr(u8)] - pub enum Condition { - O = 0x0, - NO, - B, // Carry - AE, // no Carry - E, - NE, - BE, - A, - S, - NS, - P, - NP, - L, - GE, - LE, - G, - } - - // These enumerated values are following the Intel documentation Volume 2C [1], - // Appendix A.2 and Appendix A.3. - // - // Operand size/types as listed in the Appendix A.2. Tables of the instructions - // and their operands can be found in the Appendix A.3. - // - // E = reg/mem - // G = reg (reg field of ModR/M) - // U = xmm (R/M field of ModR/M) - // V = xmm (reg field of ModR/M) - // W = xmm/mem64 - // I = immediate - // O = offset - // - // b = byte (8-bit) - // w = word (16-bit) - // v = register size - // d = double (32-bit) - // dq = double-quad (128-bit) (xmm) - // ss = scalar float 32 (xmm) - // ps = packed float 32 (xmm) - // sd = scalar double (xmm) - // pd = packed double (xmm) - // z = 16/32/64-bit - // vqp = (*) - // - // (*) Some website [2] provides a convenient list of all instructions, but be - // aware that they do not follow the Intel documentation naming, as the - // following enumeration does. Do not use these names as a reference for adding - // new instructions. 
- // - // [1] http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-manual-325462.html - // [2] http://ref.x86asm.net/geek.html - // - // OPn_NAME_DstSrc - #[allow(non_camel_case_types,unused)] // This is a prototype - #[derive(Copy, Clone, Debug)] - enum OneByteOpcodeID { - OP_NOP_00, - OP_ADD_EbGb, - OP_ADD_EvGv, - OP_ADD_GbEb, - OP_ADD_GvEv, - OP_ADD_EAXIv, - OP_OR_EbGb, - OP_OR_EvGv, - OP_OR_GvEv, - OP_OR_EAXIv, - OP_2BYTE_ESCAPE, - OP_NOP_0F, - OP_ADC_GvEv, - OP_SBB_GvEv, - OP_NOP_1F, - OP_AND_EbGb, - OP_AND_EvGv, - OP_AND_GvEv, - OP_AND_EAXIv, - OP_SUB_EbGb, - OP_SUB_EvGv, - OP_SUB_GbEb, - OP_SUB_GvEv, - OP_SUB_EAXIv, - PRE_PREDICT_BRANCH_NOT_TAKEN, - OP_XOR_EbGb, - OP_XOR_EvGv, - OP_XOR_GvEv, - OP_XOR_EAXIv, - OP_CMP_EbGb, - OP_CMP_EvGv, - OP_CMP_GbEb, - OP_CMP_GvEv, - OP_CMP_EAXIv, - PRE_REX, - OP_NOP_40, - OP_NOP_44, - OP_PUSH_EAX, - OP_POP_EAX, - OP_PUSHA, - OP_POPA, - OP_MOVSXD_GvEv, - PRE_OPERAND_SIZE, - PRE_SSE_66, - OP_NOP_66, - OP_PUSH_Iz, - OP_IMUL_GvEvIz, - OP_PUSH_Ib, - OP_IMUL_GvEvIb, - OP_JCC_rel8, - OP_GROUP1_EbIb, - OP_NOP_80, - OP_GROUP1_EvIz, - OP_GROUP1_EvIb, - OP_TEST_EbGb, - OP_NOP_84, - OP_TEST_EvGv, - OP_XCHG_GbEb, - OP_XCHG_GvEv, - OP_MOV_EbGv, - OP_MOV_EvGv, - OP_MOV_GvEb, - OP_MOV_GvEv, - OP_LEA, - OP_GROUP1A_Ev, - OP_NOP, - OP_PUSHFLAGS, - OP_POPFLAGS, - OP_CDQ, - OP_MOV_EAXOv, - OP_MOV_OvEAX, - OP_TEST_EAXIb, - OP_TEST_EAXIv, - OP_MOV_EbIb, - OP_MOV_EAXIv, - OP_GROUP2_EvIb, - OP_ADDP_ST0_ST1, - OP_RET_Iz, - PRE_VEX_C4, - PRE_VEX_C5, - OP_RET, - OP_GROUP11_EvIb, - OP_GROUP11_EvIz, - OP_INT3, - OP_GROUP2_Ev1, - OP_GROUP2_EvCL, - OP_FPU6, - OP_FPU6_F32, - OP_FPU6_ADDP, - OP_FILD, - OP_CALL_rel32, - OP_JMP_rel32, - OP_JMP_rel8, - PRE_LOCK, - PRE_SSE_F2, - PRE_SSE_F3, - OP_HLT, - OP_GROUP3_EbIb, - OP_GROUP3_Ev, - OP_GROUP3_EvIz, // OP_GROUP3_Ev has an immediate, when instruction is a test. 
- OP_GROUP5_Ev - } - - #[allow(non_camel_case_types,unused)] // This is a prototype - #[derive(Copy, Clone, Debug)] - enum TwoByteOpcodeID { - OP2_UD2, - OP2_MOVSD_VsdWsd, - OP2_MOVPS_VpsWps, - OP2_MOVSD_WsdVsd, - OP2_MOVPS_WpsVps, - OP2_MOVDDUP_VqWq, - OP2_MOVHLPS_VqUq, - OP2_MOVSLDUP_VpsWps, - OP2_UNPCKLPS_VsdWsd, - OP2_UNPCKHPS_VsdWsd, - OP2_MOVLHPS_VqUq, - OP2_MOVSHDUP_VpsWps, - OP2_MOVAPD_VsdWsd, - OP2_MOVAPS_VsdWsd, - OP2_MOVAPS_WsdVsd, - OP2_CVTSI2SD_VsdEd, - OP2_CVTTSD2SI_GdWsd, - OP2_UCOMISD_VsdWsd, - OP2_CMOVZ_GvEv, - OP2_MOVMSKPD_EdVd, - OP2_ANDPS_VpsWps, - OP2_ANDNPS_VpsWps, - OP2_ORPS_VpsWps, - OP2_XORPS_VpsWps, - OP2_ADDSD_VsdWsd, - OP2_ADDPS_VpsWps, - OP2_MULSD_VsdWsd, - OP2_MULPS_VpsWps, - OP2_CVTSS2SD_VsdEd, - OP2_CVTSD2SS_VsdEd, - OP2_CVTTPS2DQ_VdqWps, - OP2_CVTDQ2PS_VpsWdq, - OP2_SUBSD_VsdWsd, - OP2_SUBPS_VpsWps, - OP2_MINSD_VsdWsd, - OP2_MINSS_VssWss, - OP2_MINPS_VpsWps, - OP2_DIVSD_VsdWsd, - OP2_DIVPS_VpsWps, - OP2_MAXSD_VsdWsd, - OP2_MAXSS_VssWss, - OP2_MAXPS_VpsWps, - OP2_SQRTSD_VsdWsd, - OP2_SQRTSS_VssWss, - OP2_SQRTPS_VpsWps, - OP2_RSQRTPS_VpsWps, - OP2_RCPPS_VpsWps, - OP2_ANDPD_VpdWpd, - OP2_ORPD_VpdWpd, - OP2_XORPD_VpdWpd, - OP2_PUNPCKLDQ, - OP2_PCMPGTB_VdqWdq, - OP2_PCMPGTW_VdqWdq, - OP2_PCMPGTD_VdqWdq, - OP2_MOVD_VdEd, - OP2_MOVDQ_VsdWsd, - OP2_MOVDQ_VdqWdq, - OP2_PSHUFD_VdqWdqIb, - OP2_PSHUFLW_VdqWdqIb, - OP2_PSHUFHW_VdqWdqIb, - OP2_PSLLW_UdqIb, - OP2_PSRAW_UdqIb, - OP2_PSRLW_UdqIb, - OP2_PSLLD_UdqIb, - OP2_PSRAD_UdqIb, - OP2_PSRLD_UdqIb, - OP2_PSRLDQ_Vd, - OP2_PCMPEQB_VdqWdq, - OP2_PCMPEQW_VdqWdq, - OP2_PCMPEQD_VdqWdq, - OP2_HADDPD, - OP2_MOVD_EdVd, - OP2_MOVQ_VdWd, - OP2_MOVDQ_WdqVdq, - OP2_JCC_rel32, - OP_SETCC, - OP2_SHLD, - OP2_SHLD_GvEv, - OP2_SHRD, - OP2_SHRD_GvEv, - OP_FENCE, - OP2_IMUL_GvEv, - OP2_CMPXCHG_GvEb, - OP2_CMPXCHG_GvEw, - OP2_POPCNT_GvEv, - OP2_BSF_GvEv, - OP2_BSR_GvEv, - OP2_MOVSX_GvEb, - OP2_MOVSX_GvEw, - OP2_MOVZX_GvEb, - OP2_MOVZX_GvEw, - OP2_XADD_EbGb, - OP2_XADD_EvGv, - OP2_CMPPS_VpsWps, - OP2_PINSRW, - 
OP2_PEXTRW_GdUdIb, - OP2_SHUFPS_VpsWpsIb, - OP2_PSRLW_VdqWdq, - OP2_PSRLD_VdqWdq, - OP2_PMULLW_VdqWdq, - OP2_MOVQ_WdVd, - OP2_PMOVMSKB_EdVd, - OP2_PSUBUSB_VdqWdq, - OP2_PSUBUSW_VdqWdq, - OP2_PANDDQ_VdqWdq, - OP2_PADDUSB_VdqWdq, - OP2_PADDUSW_VdqWdq, - OP2_PANDNDQ_VdqWdq, - OP2_PSRAW_VdqWdq, - OP2_PSRAD_VdqWdq, - OP2_PSUBSB_VdqWdq, - OP2_PSUBSW_VdqWdq, - OP2_PORDQ_VdqWdq, - OP2_PADDSB_VdqWdq, - OP2_PADDSW_VdqWdq, - OP2_PXORDQ_VdqWdq, - OP2_PSLLW_VdqWdq, - OP2_PSLLD_VdqWdq, - OP2_PMULUDQ_VdqWdq, - OP2_PSUBB_VdqWdq, - OP2_PSUBW_VdqWdq, - OP2_PSUBD_VdqWdq, - OP2_PADDB_VdqWdq, - OP2_PADDW_VdqWdq, - OP2_PADDD_VdqWdq - } - - #[allow(non_camel_case_types,unused)] // This is a prototype - #[derive(Copy, Clone, Debug)] - enum ThreeByteOpcodeID { - OP3_PSHUFB_VdqWdq, - OP3_ROUNDSS_VsdWsd, - OP3_ROUNDSD_VsdWsd, - OP3_BLENDVPS_VdqWdq, - OP3_PEXTRB_EdVdqIb, - OP3_PEXTRD_EdVdqIb, - OP3_BLENDPS_VpsWpsIb, - OP3_PTEST_VdVd, - OP3_PINSRB_VdqEdIb, - OP3_INSERTPS_VpsUps, - OP3_PINSRD_VdqEdIb, - OP3_PMULLD_VdqWdq, - OP3_VBLENDVPS_VdqWdq - } - - #[allow(non_camel_case_types,unused)] // This is a prototype - #[derive(Copy, Clone, Debug)] - enum GroupOpcodeID { - NOGROUP_OP_SETCC, - - GROUP1_OP_ADD, - GROUP1_OP_OR, - GROUP1_OP_ADC, - GROUP1_OP_SBB, - GROUP1_OP_AND, - GROUP1_OP_SUB, - GROUP1_OP_XOR, - GROUP1_OP_CMP, - - GROUP1A_OP_POP, - - GROUP2_OP_ROL, - GROUP2_OP_ROR, - GROUP2_OP_SHL, - GROUP2_OP_SHR, - GROUP2_OP_SAR, - - GROUP3_OP_TEST, - GROUP3_OP_NOT, - GROUP3_OP_NEG, - GROUP3_OP_MUL, - GROUP3_OP_IMUL, - GROUP3_OP_DIV, - GROUP3_OP_IDIV, - - GROUP5_OP_INC, - GROUP5_OP_DEC, - GROUP5_OP_CALLN, - GROUP5_OP_JMPN, - GROUP5_OP_PUSH, - - FILD_OP_64, - - FPU6_OP_FLD, - FPU6_OP_FISTTP, - FPU6_OP_FSTP, - FPU6_OP_FLDCW, - FPU6_OP_FISTP, - - GROUP11_MOV - } - - impl Into for OneByteOpcodeID { - fn into(self) -> u8 { - use self::OneByteOpcodeID::*; - match self { - OP_NOP_00 => 0x00, - OP_ADD_EbGb => 0x00, - OP_ADD_EvGv => 0x01, - OP_ADD_GbEb => 0x02, - OP_ADD_GvEv => 0x03, - OP_ADD_EAXIv => 0x05, 
- OP_OR_EbGb => 0x08, - OP_OR_EvGv => 0x09, - OP_OR_GvEv => 0x0B, - OP_OR_EAXIv => 0x0D, - OP_2BYTE_ESCAPE => 0x0F, - OP_NOP_0F => 0x0F, - OP_ADC_GvEv => 0x13, - OP_SBB_GvEv => 0x1B, - OP_NOP_1F => 0x1F, - OP_AND_EbGb => 0x20, - OP_AND_EvGv => 0x21, - OP_AND_GvEv => 0x23, - OP_AND_EAXIv => 0x25, - OP_SUB_EbGb => 0x28, - OP_SUB_EvGv => 0x29, - OP_SUB_GbEb => 0x2A, - OP_SUB_GvEv => 0x2B, - OP_SUB_EAXIv => 0x2D, - PRE_PREDICT_BRANCH_NOT_TAKEN => 0x2E, - OP_XOR_EbGb => 0x30, - OP_XOR_EvGv => 0x31, - OP_XOR_GvEv => 0x33, - OP_XOR_EAXIv => 0x35, - OP_CMP_EbGb => 0x38, - OP_CMP_EvGv => 0x39, - OP_CMP_GbEb => 0x3A, - OP_CMP_GvEv => 0x3B, - OP_CMP_EAXIv => 0x3D, - PRE_REX => 0x40, - OP_NOP_40 => 0x40, - OP_NOP_44 => 0x44, - OP_PUSH_EAX => 0x50, - OP_POP_EAX => 0x58, - OP_PUSHA => 0x60, - OP_POPA => 0x61, - OP_MOVSXD_GvEv => 0x63, - PRE_OPERAND_SIZE => 0x66, - PRE_SSE_66 => 0x66, - OP_NOP_66 => 0x66, - OP_PUSH_Iz => 0x68, - OP_IMUL_GvEvIz => 0x69, - OP_PUSH_Ib => 0x6a, - OP_IMUL_GvEvIb => 0x6b, - OP_JCC_rel8 => 0x70, - OP_GROUP1_EbIb => 0x80, - OP_NOP_80 => 0x80, - OP_GROUP1_EvIz => 0x81, - OP_GROUP1_EvIb => 0x83, - OP_TEST_EbGb => 0x84, - OP_NOP_84 => 0x84, - OP_TEST_EvGv => 0x85, - OP_XCHG_GbEb => 0x86, - OP_XCHG_GvEv => 0x87, - OP_MOV_EbGv => 0x88, - OP_MOV_EvGv => 0x89, - OP_MOV_GvEb => 0x8A, - OP_MOV_GvEv => 0x8B, - OP_LEA => 0x8D, - OP_GROUP1A_Ev => 0x8F, - OP_NOP => 0x90, - OP_PUSHFLAGS => 0x9C, - OP_POPFLAGS => 0x9D, - OP_CDQ => 0x99, - OP_MOV_EAXOv => 0xA1, - OP_MOV_OvEAX => 0xA3, - OP_TEST_EAXIb => 0xA8, - OP_TEST_EAXIv => 0xA9, - OP_MOV_EbIb => 0xB0, - OP_MOV_EAXIv => 0xB8, - OP_GROUP2_EvIb => 0xC1, - OP_ADDP_ST0_ST1 => 0xC1, - OP_RET_Iz => 0xC2, - PRE_VEX_C4 => 0xC4, - PRE_VEX_C5 => 0xC5, - OP_RET => 0xC3, - OP_GROUP11_EvIb => 0xC6, - OP_GROUP11_EvIz => 0xC7, - OP_INT3 => 0xCC, - OP_GROUP2_Ev1 => 0xD1, - OP_GROUP2_EvCL => 0xD3, - OP_FPU6 => 0xDD, - OP_FPU6_F32 => 0xD9, - OP_FPU6_ADDP => 0xDE, - OP_FILD => 0xDF, - OP_CALL_rel32 => 0xE8, - OP_JMP_rel32 => 0xE9, - 
OP_JMP_rel8 => 0xEB, - PRE_LOCK => 0xF0, - PRE_SSE_F2 => 0xF2, - PRE_SSE_F3 => 0xF3, - OP_HLT => 0xF4, - OP_GROUP3_EbIb => 0xF6, - OP_GROUP3_Ev => 0xF7, - OP_GROUP3_EvIz => 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test. - OP_GROUP5_Ev => 0xFF - } - } - } - - impl Into for TwoByteOpcodeID { - fn into(self) -> u8 { - use self::TwoByteOpcodeID::*; - match self { - OP2_UD2 => 0x0B, - OP2_MOVSD_VsdWsd => 0x10, - OP2_MOVPS_VpsWps => 0x10, - OP2_MOVSD_WsdVsd => 0x11, - OP2_MOVPS_WpsVps => 0x11, - OP2_MOVDDUP_VqWq => 0x12, - OP2_MOVHLPS_VqUq => 0x12, - OP2_MOVSLDUP_VpsWps => 0x12, - OP2_UNPCKLPS_VsdWsd => 0x14, - OP2_UNPCKHPS_VsdWsd => 0x15, - OP2_MOVLHPS_VqUq => 0x16, - OP2_MOVSHDUP_VpsWps => 0x16, - OP2_MOVAPD_VsdWsd => 0x28, - OP2_MOVAPS_VsdWsd => 0x28, - OP2_MOVAPS_WsdVsd => 0x29, - OP2_CVTSI2SD_VsdEd => 0x2A, - OP2_CVTTSD2SI_GdWsd => 0x2C, - OP2_UCOMISD_VsdWsd => 0x2E, - OP2_CMOVZ_GvEv => 0x44, - OP2_MOVMSKPD_EdVd => 0x50, - OP2_ANDPS_VpsWps => 0x54, - OP2_ANDNPS_VpsWps => 0x55, - OP2_ORPS_VpsWps => 0x56, - OP2_XORPS_VpsWps => 0x57, - OP2_ADDSD_VsdWsd => 0x58, - OP2_ADDPS_VpsWps => 0x58, - OP2_MULSD_VsdWsd => 0x59, - OP2_MULPS_VpsWps => 0x59, - OP2_CVTSS2SD_VsdEd => 0x5A, - OP2_CVTSD2SS_VsdEd => 0x5A, - OP2_CVTTPS2DQ_VdqWps => 0x5B, - OP2_CVTDQ2PS_VpsWdq => 0x5B, - OP2_SUBSD_VsdWsd => 0x5C, - OP2_SUBPS_VpsWps => 0x5C, - OP2_MINSD_VsdWsd => 0x5D, - OP2_MINSS_VssWss => 0x5D, - OP2_MINPS_VpsWps => 0x5D, - OP2_DIVSD_VsdWsd => 0x5E, - OP2_DIVPS_VpsWps => 0x5E, - OP2_MAXSD_VsdWsd => 0x5F, - OP2_MAXSS_VssWss => 0x5F, - OP2_MAXPS_VpsWps => 0x5F, - OP2_SQRTSD_VsdWsd => 0x51, - OP2_SQRTSS_VssWss => 0x51, - OP2_SQRTPS_VpsWps => 0x51, - OP2_RSQRTPS_VpsWps => 0x52, - OP2_RCPPS_VpsWps => 0x53, - OP2_ANDPD_VpdWpd => 0x54, - OP2_ORPD_VpdWpd => 0x56, - OP2_XORPD_VpdWpd => 0x57, - OP2_PUNPCKLDQ => 0x62, - OP2_PCMPGTB_VdqWdq => 0x64, - OP2_PCMPGTW_VdqWdq => 0x65, - OP2_PCMPGTD_VdqWdq => 0x66, - OP2_MOVD_VdEd => 0x6E, - OP2_MOVDQ_VsdWsd => 0x6F, - OP2_MOVDQ_VdqWdq => 
0x6F, - OP2_PSHUFD_VdqWdqIb => 0x70, - OP2_PSHUFLW_VdqWdqIb => 0x70, - OP2_PSHUFHW_VdqWdqIb => 0x70, - OP2_PSLLW_UdqIb => 0x71, - OP2_PSRAW_UdqIb => 0x71, - OP2_PSRLW_UdqIb => 0x71, - OP2_PSLLD_UdqIb => 0x72, - OP2_PSRAD_UdqIb => 0x72, - OP2_PSRLD_UdqIb => 0x72, - OP2_PSRLDQ_Vd => 0x73, - OP2_PCMPEQB_VdqWdq => 0x74, - OP2_PCMPEQW_VdqWdq => 0x75, - OP2_PCMPEQD_VdqWdq => 0x76, - OP2_HADDPD => 0x7C, - OP2_MOVD_EdVd => 0x7E, - OP2_MOVQ_VdWd => 0x7E, - OP2_MOVDQ_WdqVdq => 0x7F, - OP2_JCC_rel32 => 0x80, - OP_SETCC => 0x90, - OP2_SHLD => 0xA4, - OP2_SHLD_GvEv => 0xA5, - OP2_SHRD => 0xAC, - OP2_SHRD_GvEv => 0xAD, - OP_FENCE => 0xAE, - OP2_IMUL_GvEv => 0xAF, - OP2_CMPXCHG_GvEb => 0xB0, - OP2_CMPXCHG_GvEw => 0xB1, - OP2_POPCNT_GvEv => 0xB8, - OP2_BSF_GvEv => 0xBC, - OP2_BSR_GvEv => 0xBD, - OP2_MOVSX_GvEb => 0xBE, - OP2_MOVSX_GvEw => 0xBF, - OP2_MOVZX_GvEb => 0xB6, - OP2_MOVZX_GvEw => 0xB7, - OP2_XADD_EbGb => 0xC0, - OP2_XADD_EvGv => 0xC1, - OP2_CMPPS_VpsWps => 0xC2, - OP2_PINSRW => 0xC4, - OP2_PEXTRW_GdUdIb => 0xC5, - OP2_SHUFPS_VpsWpsIb => 0xC6, - OP2_PSRLW_VdqWdq => 0xD1, - OP2_PSRLD_VdqWdq => 0xD2, - OP2_PMULLW_VdqWdq => 0xD5, - OP2_MOVQ_WdVd => 0xD6, - OP2_PMOVMSKB_EdVd => 0xD7, - OP2_PSUBUSB_VdqWdq => 0xD8, - OP2_PSUBUSW_VdqWdq => 0xD9, - OP2_PANDDQ_VdqWdq => 0xDB, - OP2_PADDUSB_VdqWdq => 0xDC, - OP2_PADDUSW_VdqWdq => 0xDD, - OP2_PANDNDQ_VdqWdq => 0xDF, - OP2_PSRAW_VdqWdq => 0xE1, - OP2_PSRAD_VdqWdq => 0xE2, - OP2_PSUBSB_VdqWdq => 0xE8, - OP2_PSUBSW_VdqWdq => 0xE9, - OP2_PORDQ_VdqWdq => 0xEB, - OP2_PADDSB_VdqWdq => 0xEC, - OP2_PADDSW_VdqWdq => 0xED, - OP2_PXORDQ_VdqWdq => 0xEF, - OP2_PSLLW_VdqWdq => 0xF1, - OP2_PSLLD_VdqWdq => 0xF2, - OP2_PMULUDQ_VdqWdq => 0xF4, - OP2_PSUBB_VdqWdq => 0xF8, - OP2_PSUBW_VdqWdq => 0xF9, - OP2_PSUBD_VdqWdq => 0xFA, - OP2_PADDB_VdqWdq => 0xFC, - OP2_PADDW_VdqWdq => 0xFD, - OP2_PADDD_VdqWdq => 0xFE - } - } - } - - impl Into for ThreeByteOpcodeID { - fn into(self) -> u8 { - use self::ThreeByteOpcodeID::*; - match self { - OP3_PSHUFB_VdqWdq => 
0x00, - OP3_ROUNDSS_VsdWsd => 0x0A, - OP3_ROUNDSD_VsdWsd => 0x0B, - OP3_BLENDVPS_VdqWdq => 0x14, - OP3_PEXTRB_EdVdqIb => 0x14, - OP3_PEXTRD_EdVdqIb => 0x16, - OP3_BLENDPS_VpsWpsIb => 0x0C, - OP3_PTEST_VdVd => 0x17, - OP3_PINSRB_VdqEdIb => 0x20, - OP3_INSERTPS_VpsUps => 0x21, - OP3_PINSRD_VdqEdIb => 0x22, - OP3_PMULLD_VdqWdq => 0x40, - OP3_VBLENDVPS_VdqWdq => 0x4A - } - } - } - - impl Into for GroupOpcodeID { - fn into(self) -> u8 { - use self::GroupOpcodeID::*; - match self { - NOGROUP_OP_SETCC => 0, - - GROUP1_OP_ADD => 0, - GROUP1_OP_OR => 1, - GROUP1_OP_ADC => 2, - GROUP1_OP_SBB => 3, - GROUP1_OP_AND => 4, - GROUP1_OP_SUB => 5, - GROUP1_OP_XOR => 6, - GROUP1_OP_CMP => 7, - - GROUP1A_OP_POP => 0, - - GROUP2_OP_ROL => 0, - GROUP2_OP_ROR => 1, - GROUP2_OP_SHL => 4, - GROUP2_OP_SHR => 5, - GROUP2_OP_SAR => 7, - - GROUP3_OP_TEST => 0, - GROUP3_OP_NOT => 2, - GROUP3_OP_NEG => 3, - GROUP3_OP_MUL => 4, - GROUP3_OP_IMUL => 5, - GROUP3_OP_DIV => 6, - GROUP3_OP_IDIV => 7, - - GROUP5_OP_INC => 0, - GROUP5_OP_DEC => 1, - GROUP5_OP_CALLN => 2, - GROUP5_OP_JMPN => 4, - GROUP5_OP_PUSH => 6, - - FILD_OP_64 => 5, - - FPU6_OP_FLD => 0, - FPU6_OP_FISTTP => 1, - FPU6_OP_FSTP => 3, - FPU6_OP_FLDCW => 5, - FPU6_OP_FISTP => 7, - - GROUP11_MOV => 0 - } - } - } - - #[derive(Copy, Clone, Debug, PartialEq)] - #[repr(u8)] - enum ModRm { - MemoryNoDisp = 0, - MemoryDisp8, - MemoryDisp32, - Register - } - - impl Register { - fn requires_rex(&self) -> bool { - *self as u8 >= Register::R8 as u8 - } - fn byte_requires_rex(&self) -> bool { - *self as u8 >= Register::Rsp as u8 - } - } - - fn can_sign_extend_from_i8(value: i32) -> bool { - value as i8 as i32 == value - } - - pub struct Assembler { - pub backend: dynasmrt::x64::Assembler, - } - - // TODO: constant `hasSib` should have an upper case name such as `HAS_SIB` - #[allow(non_upper_case_globals)] - const noBase : Register = Register::Rbp; - #[allow(non_upper_case_globals)] - const hasSib : Register = Register::Rsp; - 
#[allow(non_upper_case_globals)] - const noIndex : Register = Register::Rsp; - - #[allow(non_upper_case_globals)] - const noBase2 : Register = Register::R13; - #[allow(non_upper_case_globals)] - const hasSib2 : Register = Register::R12; - - impl Assembler { - fn emit_rex_u8(&mut self, w: bool, r: u8, x: u8, b: u8) { - let w = w as u8; - let pre_rex : u8 = OneByteOpcodeID::PRE_REX.into(); - self.backend.push(pre_rex | (w << 3) | ((r >> 3) << 2) | ((x >> 3) << 1) | (b >> 3)); - } - - fn emit_rex(&mut self, w: bool, r: Register, x: Register, b: Register) { - let r = r as u8; - let x = x as u8; - let b = b as u8; - self.emit_rex_u8(w, r, x, b); - } - fn emit_rex_w(&mut self, r: Register, x: Register, b: Register) { - self.emit_rex(true, r, x, b); - } - fn emit_rex_if(&mut self, cond: bool, r: Register, x: Register, b: Register) { - if cond || r.requires_rex() || x.requires_rex() || b.requires_rex() { - self.emit_rex(false, r, x, b); - } - } - fn emit_rex_if_needed(&mut self, r: Register, x: Register, b: Register) { - self.emit_rex_if(false, r, x, b); - } - - fn put_modrm_u8(&mut self, mode: ModRm, rm: u8, reg: u8) - { - let mut byte : u8 = (mode as u8) << 6; - byte = byte | (reg & 7) << 3; - byte = byte | rm & 7; - self.backend.push(byte); - } - - fn put_modrm(&mut self, mode: ModRm, rm: Register, reg: Register) - { - self.put_modrm_u8(mode, rm as u8, reg as u8) - } - - fn put_modrm_grp(&mut self, mode: ModRm, rm: Register, grp: GroupOpcodeID) - { - self.put_modrm_u8(mode, rm as u8, grp.into()) - } - - fn put_modrm_sib(&mut self, mode: ModRm, base: Register, index: Register, scale: u8, reg: Register) - { - assert_ne!(mode, ModRm::Register); - - self.put_modrm(mode, hasSib, reg); - let mut byte : u8 = scale << 6; - byte = byte | ((index as u8) & 7) << 3; - byte = byte | (base as u8) & 7; - self.backend.push(byte); - } - - fn register_modrm(&mut self, rm: Register, reg: Register) - { - self.put_modrm(ModRm::Register, rm, reg); - } - fn memory_modrm(&mut self, offset: 
i32, base: Register, reg: Register) - { - // A base of esp or r12 would be interpreted as a sib, so force a - // sib with no index & put the base in there. - if (base == hasSib) || (base == hasSib2) { - if offset == 0 { - // No need to check if the base is noBase, since we know it is hasSib! - self.put_modrm_sib(ModRm::MemoryNoDisp, base, noIndex, 0, reg); - } else if can_sign_extend_from_i8(offset) { - self.put_modrm_sib(ModRm::MemoryDisp8, base, noIndex, 0, reg); - self.backend.push_i8(offset as i8); - } else { - self.put_modrm_sib(ModRm::MemoryDisp32, base, noIndex, 0, reg); - self.backend.push_i32(offset); - } - } else { - if offset == 0 && (base != noBase) && (base != noBase2) { - self.put_modrm(ModRm::MemoryNoDisp, base, reg); - } else if can_sign_extend_from_i8(offset) { - self.put_modrm(ModRm::MemoryDisp8, base, reg); - self.backend.push_i8(offset as i8); - } else { - self.put_modrm(ModRm::MemoryDisp32, base, reg); - self.backend.push_i32(offset); - } - } - } - - fn one_op8_reg(&mut self, opcode: OneByteOpcodeID, reg: Register) { - self.emit_rex_if(reg.byte_requires_rex(), Register::Rax, Register::Rax, reg); - let mut opcode : u8 = opcode.into(); - opcode += (reg as u8) & 0x7; - self.backend.push(opcode); - } - fn one_op_reg(&mut self, opcode: OneByteOpcodeID, reg: Register) { - self.emit_rex_if_needed(Register::Rax, Register::Rax, reg); - let mut opcode : u8 = opcode.into(); - opcode += (reg as u8) & 0x7; - self.backend.push(opcode); - } - fn one_op64_reg(&mut self, opcode: OneByteOpcodeID, reg: Register) { - self.emit_rex_w(Register::Rax, Register::Rax, reg); - let mut opcode : u8 = opcode.into(); - opcode += (reg as u8) & 0x7; - self.backend.push(opcode) - } - - fn one_op_reg_grp(&mut self, opcode: OneByteOpcodeID, reg: Register, grp: GroupOpcodeID) { - { - let grp_u8 : u8 = grp.into(); - assert!(grp_u8 < 8); - } - self.emit_rex_if_needed(Register::Rax, Register::Rax, reg); - self.backend.push(opcode.into()); - self.put_modrm_grp(ModRm::Register, reg, 
grp); - } - fn one_op64_reg_grp(&mut self, opcode: OneByteOpcodeID, reg: Register, grp: GroupOpcodeID) { - { - let grp_u8 : u8 = grp.into(); - assert!(grp_u8 < 8); - } - self.emit_rex_w(Register::Rax, Register::Rax, reg); - self.backend.push(opcode.into()); - self.put_modrm_grp(ModRm::Register, reg, grp); - } - - fn one_op8_rm_reg(&mut self, opcode: OneByteOpcodeID, rm: Register, reg: Register) { - self.emit_rex_if(reg.byte_requires_rex() || rm.byte_requires_rex(), - reg, Register::Rax, rm); - self.backend.push(opcode.into()); - self.register_modrm(rm, reg) - } - fn one_op_rm_reg(&mut self, opcode: OneByteOpcodeID, rm: Register, reg: Register) { - self.emit_rex_if_needed(reg, Register::Rax, rm); - self.backend.push(opcode.into()); - self.register_modrm(rm, reg) - } - fn one_op64_rm_reg(&mut self, opcode: OneByteOpcodeID, rm: Register, reg: Register) { - self.emit_rex_w(reg, Register::Rax, rm); - self.backend.push(opcode.into()); - self.register_modrm(rm, reg) - } - - fn one_op8_mm_reg(&mut self, opcode: OneByteOpcodeID, base: Register, offset: i32, reg: Register) { - self.emit_rex_if(reg.byte_requires_rex(), reg, Register::Rax, base); - self.backend.push(opcode.into()); - self.memory_modrm(offset, base, reg) - } - fn one_op_mm_reg(&mut self, opcode: OneByteOpcodeID, base: Register, offset: i32, reg: Register) { - self.emit_rex_if_needed(reg, Register::Rax, base); - self.backend.push(opcode.into()); - self.memory_modrm(offset, base, reg) - } - fn one_op64_mm_reg(&mut self, opcode: OneByteOpcodeID, base: Register, offset: i32, reg: Register) { - self.emit_rex_w(reg, Register::Rax, base); - self.backend.push(opcode.into()); - self.memory_modrm(offset, base, reg) - } - - fn two_op8_cc_reg(&mut self, opcode: TwoByteOpcodeID, cond: Condition, rm: Register, grp: GroupOpcodeID) { - self.emit_rex_if(rm.byte_requires_rex(), Register::Rax, Register::Rax, rm); - self.backend.push(OneByteOpcodeID::OP_2BYTE_ESCAPE.into()); - let mut opcode : u8 = opcode.into(); - opcode += cond 
as u8; - self.backend.push(opcode); - self.put_modrm_grp(ModRm::Register, rm, grp); - } - fn two_op64_rm_reg(&mut self, opcode: TwoByteOpcodeID, rm: Register, reg: Register) { - self.emit_rex_w(reg, Register::Rax, rm); - self.backend.push(OneByteOpcodeID::OP_2BYTE_ESCAPE.into()); - self.backend.push(opcode.into()); - self.register_modrm(rm, reg); - } - - // Set of Instructions used by the compiler. - - pub fn andq_rr(&mut self, src: Register, dst: Register) { - self.one_op64_rm_reg(OneByteOpcodeID::OP_AND_GvEv, src, dst) - } - pub fn orq_rr(&mut self, src: Register, dst: Register) { - self.one_op64_rm_reg(OneByteOpcodeID::OP_OR_GvEv, src, dst) - } - pub fn xorq_rr(&mut self, src: Register, dst: Register) { - self.one_op64_rm_reg(OneByteOpcodeID::OP_XOR_GvEv, src, dst) - } - - pub fn addb_rr(&mut self, src: Register, dst: Register) { - self.one_op8_rm_reg(OneByteOpcodeID::OP_ADD_GbEb, src, dst) - } - pub fn addw_rr(&mut self, src: Register, dst: Register) { - self.backend.push(OneByteOpcodeID::PRE_OPERAND_SIZE.into()); - self.one_op_rm_reg(OneByteOpcodeID::OP_ADD_GvEv, src, dst) - } - pub fn addl_rr(&mut self, src: Register, dst: Register) { - self.one_op_rm_reg(OneByteOpcodeID::OP_ADD_GvEv, src, dst) - } - pub fn addq_rr(&mut self, src: Register, dst: Register) { - self.one_op64_rm_reg(OneByteOpcodeID::OP_ADD_GvEv, src, dst) - } - - pub fn subb_rr(&mut self, src: Register, dst: Register) { - self.one_op8_rm_reg(OneByteOpcodeID::OP_SUB_GbEb, src, dst) - } - pub fn subw_rr(&mut self, src: Register, dst: Register) { - self.backend.push(OneByteOpcodeID::PRE_OPERAND_SIZE.into()); - self.one_op_rm_reg(OneByteOpcodeID::OP_SUB_GvEv, src, dst) - } - pub fn subl_rr(&mut self, src: Register, dst: Register) { - self.one_op_rm_reg(OneByteOpcodeID::OP_SUB_GvEv, src, dst) - } - pub fn subq_rr(&mut self, src: Register, dst: Register) { - self.one_op64_rm_reg(OneByteOpcodeID::OP_SUB_GvEv, src, dst) - } - - pub fn imulq_rr(&mut self, src: Register, dst: Register) { - 
self.two_op64_rm_reg(TwoByteOpcodeID::OP2_IMUL_GvEv, src, dst) - } - - /// Compare 8 bits registers - pub fn cmpb_rr(&mut self, lhs: Register, rhs: Register) { - // Note, inverted arguments to make them logical. - self.one_op8_rm_reg(OneByteOpcodeID::OP_CMP_GbEb, rhs, lhs) - } - /// Compare 16 bits registers - pub fn cmpw_rr(&mut self, lhs: Register, rhs: Register) { - // Note, inverted arguments to make them logical. - self.backend.push(OneByteOpcodeID::PRE_OPERAND_SIZE.into()); - self.one_op_rm_reg(OneByteOpcodeID::OP_CMP_GvEv, rhs, lhs) - } - /// Compare 32 bits registers - pub fn cmpl_rr(&mut self, lhs: Register, rhs: Register) { - // Note, inverted arguments to make them logical. - self.one_op_rm_reg(OneByteOpcodeID::OP_CMP_GvEv, rhs, lhs) - } - /// Compare 64 bits registers - pub fn cmpq_rr(&mut self, lhs: Register, rhs: Register) { - // Note, inverted arguments to make them logical. - self.one_op64_rm_reg(OneByteOpcodeID::OP_CMP_GvEv, rhs, lhs) - } - - /// Copy 8 bits immediate - pub fn movb_ir(&mut self, value: i8, dst: Register) { - self.one_op8_reg(OneByteOpcodeID::OP_MOV_EbIb, dst); - self.backend.push_i8(value) - } - /// Copy 16 bits immediate - pub fn movw_ir(&mut self, value: i16, dst: Register) { - self.backend.push(OneByteOpcodeID::PRE_OPERAND_SIZE.into()); - self.one_op_reg(OneByteOpcodeID::OP_MOV_EAXIv, dst); - self.backend.push_i16(value) - } - /// Copy 32 bits immediate - pub fn movl_ir(&mut self, value: i32, dst: Register) { - self.one_op_reg(OneByteOpcodeID::OP_MOV_EAXIv, dst); - self.backend.push_i32(value) - } - /// Copy 64 bits immediate - pub fn movq_ir(&mut self, value: i64, dst: Register) { - self.one_op64_reg(OneByteOpcodeID::OP_MOV_EAXIv, dst); - self.backend.push_i64(value) - } - - /// Load 8 bits - pub fn movb_mr(&mut self, base: Register, offset: i32, dst: Register) { - self.one_op8_mm_reg(OneByteOpcodeID::OP_MOV_GvEb, base, offset, dst) - } - /// Load 16 bits - pub fn movw_mr(&mut self, base: Register, offset: i32, dst: Register) { 
- self.backend.push(OneByteOpcodeID::PRE_OPERAND_SIZE.into()); - self.one_op_mm_reg(OneByteOpcodeID::OP_MOV_GvEv, base, offset, dst) - } - /// Load 32 bits - pub fn movl_mr(&mut self, base: Register, offset: i32, dst: Register) { - self.one_op_mm_reg(OneByteOpcodeID::OP_MOV_GvEv, base, offset, dst) - } - /// Load 64 bits - pub fn movq_mr(&mut self, base: Register, offset: i32, dst: Register) { - self.one_op64_mm_reg(OneByteOpcodeID::OP_MOV_GvEv, base, offset, dst) - } - - /// Store 8 bits - pub fn movb_rm(&mut self, src: Register, base: Register, offset: i32) { - self.one_op8_mm_reg(OneByteOpcodeID::OP_MOV_EbGv, base, offset, src) - } - /// Store 16 bits - pub fn movw_rm(&mut self, src: Register, base: Register, offset: i32) { - self.backend.push(OneByteOpcodeID::PRE_OPERAND_SIZE.into()); - self.one_op_mm_reg(OneByteOpcodeID::OP_MOV_EvGv, base, offset, src) - } - /// Store 32 bits - pub fn movl_rm(&mut self, src: Register, base: Register, offset: i32) { - self.one_op_mm_reg(OneByteOpcodeID::OP_MOV_EvGv, base, offset, src) - } - /// Store 64 bits - pub fn movq_rm(&mut self, src: Register, base: Register, offset: i32) { - self.one_op64_mm_reg(OneByteOpcodeID::OP_MOV_EvGv, base, offset, src) - } - - /// Copy 8 bits - pub fn movb_rr(&mut self, src: Register, dst: Register) { - self.one_op8_rm_reg(OneByteOpcodeID::OP_MOV_GvEb, src, dst) - } - /// Copy 16 bits - pub fn movw_rr(&mut self, src: Register, dst: Register) { - self.backend.push(OneByteOpcodeID::PRE_OPERAND_SIZE.into()); - self.one_op_rm_reg(OneByteOpcodeID::OP_MOV_GvEv, src, dst) - } - /// Copy 32 bits - pub fn movl_rr(&mut self, src: Register, dst: Register) { - self.one_op_rm_reg(OneByteOpcodeID::OP_MOV_GvEv, src, dst) - } - /// Copy 64 bits - pub fn movq_rr(&mut self, src: Register, dst: Register) { - self.one_op64_rm_reg(OneByteOpcodeID::OP_MOV_GvEv, src, dst) - } - - /// Store condition flag in a 8 bits register, the upper bits of the - /// destination register remain unchanged. 
- pub fn setcc_r(&mut self, cc: Condition, out: Register) { - self.two_op8_cc_reg(TwoByteOpcodeID::OP_SETCC, cc, out, GroupOpcodeID::NOGROUP_OP_SETCC) - } - - /// Make a call to the address contained in the given register. - pub fn call_r(&mut self, dst: Register) { - self.one_op_reg_grp(OneByteOpcodeID::OP_GROUP5_Ev, dst, GroupOpcodeID::GROUP5_OP_CALLN) - } - - /// Arithmetic right shift. - pub fn sarq_ir(&mut self, imm: u8, dst: Register) { - match imm { - 0 => {} - 1 => self.one_op64_reg_grp(OneByteOpcodeID::OP_GROUP2_Ev1, dst, GroupOpcodeID::GROUP2_OP_SAR), - _ => { - self.one_op64_reg_grp(OneByteOpcodeID::OP_GROUP2_EvIb, dst, GroupOpcodeID::GROUP2_OP_SAR); - self.backend.push(imm); - } - } - } - /// Logical right shift - pub fn shrq_ir(&mut self, imm: u8, dst: Register) { - match imm { - 0 => {} - 1 => self.one_op64_reg_grp(OneByteOpcodeID::OP_GROUP2_Ev1, dst, GroupOpcodeID::GROUP2_OP_SHR), - _ => { - self.one_op64_reg_grp(OneByteOpcodeID::OP_GROUP2_EvIb, dst, GroupOpcodeID::GROUP2_OP_SHR); - self.backend.push(imm); - } - } - } - /// Logical left shift - pub fn shlq_ir(&mut self, imm: u8, dst: Register) { - match imm { - 0 => {} - 1 => self.one_op64_reg_grp(OneByteOpcodeID::OP_GROUP2_Ev1, dst, GroupOpcodeID::GROUP2_OP_SHL), - _ => { - self.one_op64_reg_grp(OneByteOpcodeID::OP_GROUP2_EvIb, dst, GroupOpcodeID::GROUP2_OP_SHL); - self.backend.push(imm); - } - } - } - } - - pub const ABI_ARGS : [Register; 6] = [ - Register::Rdi, - Register::Rsi, - Register::Rdx, - Register::Rcx, - Register::R8, - Register::R9, - ]; - pub const ABI_RET : [Register; 2] = [ - Register::Rax, - Register::Rdx, - ]; -} - -use self::asm::Assembler; -use self::asm::Register; -use self::asm::Condition; - -#[derive(Copy, Clone, Debug)] -struct AllocInfo { - reg: Register, - off: usize, - sz: usize, -} -type Allocation = Vec; - -/// The compiler state. -struct Compiler { - /// The underlying assembler. - asm: Assembler, - /// The offset of the starting instruction. 
- start: dynasmrt::AssemblyOffset, - /// Set of labels for linking blocks with each others. - bb_labels: Vec, - /// Register map - reg_map: HashMap, - free_regs: Vec, - /// List of static variables stored in a raw pointer. - statics: *const (), - /// List of static variables stored in a raw pointer. - stack_size: usize, -} - -impl Compiler { - fn new(stack_size: usize, statics: *const ()) -> Compiler { - use compile::asm::Register::*; - let asm = x64::Assembler::new(); - let start = asm.offset(); - Compiler { - asm: Assembler { backend: asm }, - start, - bb_labels: vec![], - reg_map: HashMap::new(), - // Rbp and Rsp are reserved as a frame pointer and the stack - // pointer. - free_regs: vec![Rax, Rcx, Rdx, Rbx, Rsi, Rdi, - R8 , R9 , R10, R11, R12, R13, R14, R15], - statics, - stack_size, - } - } - - fn compile(bytes: &[u8], statics: *const ()) -> Result { - let cu : lir::CompilationUnit = bincode::deserialize(bytes)?; - let compiler = Self::new(cu.stack_size, statics); - compiler.compile_cu(&cu) - } - - - fn compile_cu(mut self, cu : &lir::CompilationUnit) -> Result { - // For each block, create a new dynamic label which identify the - // entry of each block. - for _ in 0..cu.blocks.len() { - // Create a label for the block address. - let label = self.asm.backend.new_dynamic_label(); - self.bb_labels.push(label); - } - - if cu.args_defs.len() >= self::asm::ABI_ARGS.len() { - return Err(Error::NYI("Reading arguments from the stack.")); - }; - - // Consume all the registers which are by arguments. The arguments would - // be pushed on the stack as soon as we see SetFramePtr. - for (&_, reg) in cu.args_defs.iter().zip(self::asm::ABI_ARGS.iter()) { - // TODO: Hum ... do we have to support argument given over 2 registers here? - self.take(*reg); - } - - // Compile each basic block. - for (id, block) in cu.blocks.iter().enumerate() { - println!("Compile Block {}:", id); - self.compile_block(id, block, &cu.args_defs)? 
- } - - self.finalize() - } - - fn compile_block(&mut self, id: usize, block: &lir::BasicBlockData, args_defs: &Vec) -> Result<(), Error> { - let label = self.bb_labels[id]; - self.asm.backend.dynamic_label(label); - - if id > 0 { - // Note, normally we should support proper inputs for merge blocks, - // but at the moment there is simply nothing else than the returned - // values of a call. - let inputs = vec![Register::Rbp, Register::Rax, Register::Rdx].into_iter(); - - // skip(1) is used to avoid re-binding the frame pointer constantly. - let mut inputs = inputs.skip(1); - - for &(lir, size) in block.input_regs.iter().skip(1) { - let mut off = 0; - let mut allocs = vec![]; - while off < size { - let sz = match size - off { - 1 => 1, - 2 | 3 => 2, - 4 | 5 | 6 | 7 => 4, - 8 | _ => 8, - }; - let reg = inputs.next().unwrap(); - self.take(reg); - allocs.push(AllocInfo{ reg, off, sz }); - off += sz; - } - self.register(lir, allocs); - } - } - - for inst in block.insts.iter() { - println!(" Compile Inst: {:?}", inst); - self.compile_inst(inst, args_defs)? 
- } - - self.compile_terminator(&block.end)?; - println!(" Free Registers Available: {:?}", self.free_regs.len()); - - Ok(()) - } - - fn allocate(&mut self, _sz: usize) -> Result { - match self.free_regs.pop() { - Some(reg) => { - println!(" Allocate: {:?}", reg); - Ok(reg) - } - None => Err(Error::NotEnoughRegisters) - } - } - fn free(&mut self, reg: Register) -> () { - assert!(!self.free_regs.contains(®)); - self.free_regs.push(reg); - } - - fn take(&mut self, reg: Register) { - if let Some(i) = self.free_regs.iter().position(|r| *r == reg) { - self.free_regs.swap_remove(i); - } else { - panic!("Register {:?} is not available.", reg) - } - } - - fn register(&mut self, r: lir::Reg, alloc: Allocation) { - println!(" Register: {:?} --> {:?}", r, alloc); - self.reg_map.insert(r, alloc); - } - - fn unregister(&mut self, r: lir::Reg) { - match self.reg_map.remove(&r) { - Some(l) => { - println!(" Unregister: {:?} --> {:?}", r, l); - for alloc_info in l { - self.free(alloc_info.reg); - } - } - None => (), - } - } - - fn reuse(&mut self, from: lir::Reg, to: lir::Reg) -> Result<(), Error> { - match self.reg_map.remove(&from) { - Some(alloc) => { - println!(" Reuse: ({:?}, {:?}) --> {:?}", from, to, alloc); - self.register(to, alloc); - Ok(()) - } - None => Err(Error::MissingRegisterMap), - } - } - - fn reuse_append(&mut self, from: lir::Reg, to: lir::Reg, mut rest: Allocation) -> Result<(), Error> { - match self.reg_map.remove(&from) { - Some(mut alloc) => { - println!(" Reuse: ({:?}, {:?}) --> {:?}", from, to, alloc); - println!(" Append: {:?} --> {:?}", to, rest); - alloc.append(&mut rest); - self.register(to, alloc); - Ok(()) - } - None => Err(Error::MissingRegisterMap), - } - } - - fn find_allocation_by_reg(&self, chk: Register) -> Option { - for (lir, alloc) in &self.reg_map { - for &AllocInfo{ reg, .. } in alloc { - if reg == chk { - return Some(*lir) - } - } - } - - None - } - - // This is certainly the least efficient way to do so ... 
- fn move_group(&mut self, what: &[lir::Reg], into: &[Register]) -> Result<(), Error> { - let mut into_reg = into.iter(); - for lir in what.iter() { - println!(" Move {:?}: {:?}", lir, self.reg_map[lir]); - for alloc_id in 0..self.reg_map[lir].len() { - let dst = match into_reg.next() { - None => return Err(Error::NotEnoughRegistersForMovGrp), - Some(dst) => *dst - }; - let AllocInfo{ reg, sz, .. } = self.reg_map[lir][alloc_id].clone(); - let lir = *lir; - - if reg == dst { - continue - } else { - if !self.free_regs.contains(&dst) { - // The Register is already allocated. - // 1. Locate which allocation this is. - // 2. Move the data into a free register. - match self.find_allocation_by_reg(dst) { - None => { - println!(" Cannot find {:?}", dst); - return Err(Error::AllocatedButNotRegistered) - } - Some(collide) => { - let repl = match self.free_regs.last() { - None => return Err(Error::NotEnoughRegistersForMovGrp), - Some(r) => *r, - }; - let into : Vec<_> = - self.reg_map[&collide].iter().map(|x| { - let &AllocInfo{ reg, .. } = x; - if reg == dst { repl } - else { reg } - }).collect(); - let what = [collide]; - self.move_group(&what, &into)?; - } - } - assert!(self.free_regs.contains(&dst)); - } - - // Copy the content in the destination register. 
- println!(" {:?}.{:?}: {:?} -> {:?}", lir, alloc_id, reg, dst); - self.take(dst); - match sz { - 0 => {} - 1 => self.asm.movb_rr(reg, dst), - 2 => self.asm.movw_rr(reg, dst), - 4 => self.asm.movl_rr(reg, dst), - 8 => self.asm.movq_rr(reg, dst), - _ => return Err(Error::NYI("MoveGroup: move larger than 8")), - } - - // unregister(lir) - let mut allocs = self.reg_map.remove(&lir).unwrap(); - self.free(allocs[alloc_id].reg); - allocs[alloc_id].reg = dst; - // register(lir, alloc) - self.reg_map.insert(lir, allocs); - } - } - println!(" Moved {:?}: {:?}", lir, self.reg_map[lir]); - } - - Ok(()) - } - - unsafe fn get_static(&self, off: isize) -> &'static T { - let all : *const u8 = mem::transmute(self.statics); - let val_ptr = all.offset(off); - let val_ptr : *const T = mem::transmute(val_ptr); - val_ptr.as_ref::<'static>().unwrap() - } - - fn compile_inst(&mut self, inst: &lir::Inst, args_defs: &Vec) -> Result<(), Error> { - use lir::Inst::*; - match inst { - &SetFramePtr(fp, _sz, stack_size) => { - let stack_size = stack_size + (16 - stack_size % 16); - // TODO: Save non-volatile registres. - dynasm!(self.asm.backend - ; push rbp - ; mov rbp, rsp - ; sub rsp, stack_size as _ - ); - self.register(fp, vec![AllocInfo{ reg: Register::Rbp, off: 0, sz: 8 }]); - - // Generate code which copy the arguments from their argument register - // to the stack, this might involve copying larger structures as well - // from the pointer if not given by value. - for (&(off, sz, by_value), ar) in args_defs.iter().zip(self::asm::ABI_ARGS.iter()) { - let off = 0 - (off as isize) - (sz as isize); - let ar = *ar; - println!(" Copy {:?} to offset {:?} with size {:?}", ar, off, sz); - if by_value { - // ar is a value which needs to be copied to the stack. 
- match sz { - 1 => self.asm.movb_rm(ar, Register::Rbp, off as i32), - 2 | 3 => self.asm.movw_rm(ar, Register::Rbp, off as i32), - 4 | 5 | 6 | 7 => - self.asm.movl_rm(ar, Register::Rbp, off as i32), - _ => self.asm.movq_rm(ar, Register::Rbp, off as i32), - } - } else { - // ar is a pointer to the data which need to be copied - // to the stack. We generate sequences of load and store - // instructions. - let mut arg_off = 0; - while arg_off < sz { - match sz - arg_off { - 1 => { - let tmp = self.allocate(1)?; - self.asm.movb_mr(ar, arg_off as i32, tmp); - self.asm.movb_rm(tmp, Register::Rbp, (arg_off as isize + off) as i32); - arg_off += 1; - self.free(tmp); - } - 2 | 3 => { - let tmp = self.allocate(2)?; - self.asm.movw_mr(ar, arg_off as i32, tmp); - self.asm.movw_rm(tmp, Register::Rbp, (arg_off as isize + off) as i32); - arg_off += 2; - self.free(tmp); - } - 4 | 5 | 6 | 7 => { - let tmp = self.allocate(4)?; - self.asm.movl_mr(ar, arg_off as i32, tmp); - self.asm.movl_rm(tmp, Register::Rbp, (arg_off as isize + off) as i32); - arg_off += 4; - self.free(tmp); - } - _ => { - let tmp = self.allocate(4)?; - self.asm.movq_mr(ar, arg_off as i32, tmp); - self.asm.movq_rm(tmp, Register::Rbp, (arg_off as isize + off) as i32); - arg_off += 8; - self.free(tmp); - } - } - } - } - self.free(ar); - } - }, - &Static(out, static_offset, static_size) => { - // Load the content from the static pointer, and convert it - // into immediate constants baked in the code of the - // generated program. 
- let static_size = static_size as isize; - let mut off = 0; - let mut alloc = vec![]; - while off < static_size { - match static_size - off { - 1 => { - let reg = self.allocate(1)?; - let value = unsafe { *self.get_static::(static_offset + off) }; - self.asm.movb_ir(value , reg); - alloc.push(AllocInfo{ reg, sz: 1, off: off as usize }); - off += 1; - } - 2 | 3 => { - let reg = self.allocate(2)?; - let value = unsafe { *self.get_static::(static_offset + off) }; - self.asm.movw_ir(value, reg); - alloc.push(AllocInfo{ reg, sz: 2, off: off as usize }); - off += 2; - } - 4 | 5 | 6 | 7 => { - let reg = self.allocate(4)?; - let value = unsafe { *self.get_static::(static_offset + off) }; - self.asm.movl_ir(value, reg); - alloc.push(AllocInfo{ reg, sz: 4, off: off as usize }); - off += 4; - } - _ => { - let reg = self.allocate(8)?; - let value = unsafe { *self.get_static::(static_offset + off) }; - self.asm.movq_ir(value, reg); - alloc.push(AllocInfo{ reg, sz: 8, off: off as usize }); - off += 8; - } - } - } - self.register(out, alloc); - }, - &CopyImm(out, value, sz) => { - // TODO: move the value to the given registers. 
- let mut off = 0; - let mut alloc = vec![]; - while off < sz { - match sz - off { - 1 => { - let reg = self.allocate(1)?; - self.asm.movb_ir(value as i8 , reg); - alloc.push(AllocInfo{ reg, sz: 1, off }); - off += 1; - } - 2 | 3 => { - let reg = self.allocate(2)?; - self.asm.movw_ir(value as i16, reg); - alloc.push(AllocInfo{ reg, sz: 2, off }); - off += 2; - } - 4 | 5 | 6 | 7 => { - let reg = self.allocate(4)?; - self.asm.movl_ir(value as i32, reg); - alloc.push(AllocInfo{ reg, sz: 4, off }); - off += 4; - } - _ => { - let reg = self.allocate(8)?; - self.asm.movq_ir(value as i64, reg); - alloc.push(AllocInfo{ reg, sz: 8, off }); - off += 8; - } - } - } - self.register(out, alloc); - } - &Resize(out, input, sz) => { - let input_alloc = self.reg_map[&input].clone(); - assert_eq!(input_alloc.len(), 1); - let AllocInfo{ reg: ir, off: io, sz: iz } = input_alloc[0]; - let reg = self.allocate(sz)?; - - // Copy the value. - self.asm.movq_rr(ir, reg); - // Shift it, such that the highest bit becomes the sign bit. - self.asm.shlq_ir((64 - (iz + io) * 8) as u8, reg); - // Shift it back, to sign-extend the sign bit. - self.asm.sarq_ir((64 - iz * 8) as u8, reg); - - self.register(out, vec![AllocInfo{ reg, sz, off: 0 }]); - }, - &Add(out, lhs, rhs) => { - let lhs_alloc = self.reg_map[&lhs].clone(); - let rhs_alloc = self.reg_map[&rhs].clone(); - assert_eq!(lhs_alloc.len(), 1); - assert_eq!(rhs_alloc.len(), 1); - let AllocInfo{ reg: lr, sz: ls, .. } = lhs_alloc[0]; - let AllocInfo{ reg: rr, sz: rs, .. } = rhs_alloc[0]; - assert_eq!(ls, rs); - // Dirty: Assumes that operands are always consumed, except - // for the frame pointer. 
- let (lr, rr, reg) = if lr == Register::Rbp { - (lr, rr, rhs) - } else { - (rr, lr, lhs) - }; - match ls { - 1 => self.asm.addb_rr(lr, rr), - 2 => self.asm.addw_rr(lr, rr), - 4 => self.asm.addl_rr(lr, rr), - 8 => self.asm.addq_rr(lr, rr), - _ => return Err(Error::NYI("Unexpected Add size")), - } - self.reuse(reg, out)?; - }, - &Sub(out, lhs, rhs) => { - let lhs_alloc = self.reg_map[&lhs].clone(); - let rhs_alloc = self.reg_map[&rhs].clone(); - assert_eq!(lhs_alloc.len(), 1); - assert_eq!(rhs_alloc.len(), 1); - let AllocInfo{ reg: lr, sz: ls, .. } = lhs_alloc[0]; - let AllocInfo{ reg: rr, sz: rs, .. } = rhs_alloc[0]; - assert_eq!(ls, rs); - // Dirty: Assumes that operands are always consumed, except - // for the frame pointer. - let (lr, rr, reg) = if lr == Register::Rbp { - (lr, rr, rhs) - } else { - (rr, lr, lhs) - }; - match ls { - 1 => self.asm.subb_rr(lr, rr), - 2 => self.asm.subw_rr(lr, rr), - 4 => self.asm.subl_rr(lr, rr), - 8 => self.asm.subq_rr(lr, rr), - _ => return Err(Error::NYI("Unexpected Add size")), - } - self.reuse(reg, out)?; - }, - &Mul(out, lhs, rhs) => { - let lhs_alloc = self.reg_map[&lhs].clone(); - let rhs_alloc = self.reg_map[&rhs].clone(); - assert_eq!(lhs_alloc.len(), 1); - assert_eq!(rhs_alloc.len(), 1); - let AllocInfo{ reg: lr, .. } = lhs_alloc[0]; - let AllocInfo{ reg: rr, .. } = rhs_alloc[0]; - // Dirty: Assumes that operands are always consumed, except - // for the frame pointer. 
- if lr != Register::Rbp { - self.asm.imulq_rr(lr, rr); - self.reuse(rhs, out)?; - } else { - self.asm.imulq_rr(rr, lr); - self.reuse(lhs, out)?; - } - }, - &Div(_out, _lhs, _rhs) => { return Err(Error::NYI("Div")) }, - &Rem(_out, _lhs, _rhs) => { return Err(Error::NYI("Rem")) }, - &BitXor(_out, _lhs, _rhs) => { return Err(Error::NYI("BitXor")) }, - &BitAnd(_out, _lhs, _rhs) => { return Err(Error::NYI("BitAnd")) }, - &BitOr(_out, _lhs, _rhs) => { return Err(Error::NYI("BitOr")) }, - &Shl(_out, _lhs, _rhs) => { return Err(Error::NYI("Shl")) }, - &Shr(_out, _lhs, _rhs) => { return Err(Error::NYI("Shr")) }, - &Eq(out, lhs, rhs) => { - let lhs_alloc = self.reg_map[&lhs].clone(); - let rhs_alloc = self.reg_map[&rhs].clone(); - assert_eq!(lhs_alloc.len(), 1); - assert_eq!(rhs_alloc.len(), 1); - let AllocInfo{ reg: lr, sz: ls, .. } = lhs_alloc[0]; - let AllocInfo{ reg: rr, sz: rs, .. } = rhs_alloc[0]; - assert_eq!(ls, rs); - let reg = self.allocate(1)?; - match ls { - 1 => self.asm.cmpb_rr(lr, rr), - 2 => self.asm.cmpw_rr(lr, rr), - 4 => self.asm.cmpl_rr(lr, rr), - 8 => self.asm.cmpq_rr(lr, rr), - _ => return Err(Error::NYI("Eq larger than 8")), - } - self.asm.setcc_r(Condition::E, reg); - self.register(out, vec![AllocInfo{ reg, off:0, sz: 1 }]); - }, - &Lt(out, lhs, rhs) => { - let lhs_alloc = self.reg_map[&lhs].clone(); - let rhs_alloc = self.reg_map[&rhs].clone(); - assert_eq!(lhs_alloc.len(), 1); - assert_eq!(rhs_alloc.len(), 1); - let AllocInfo{ reg: lr, sz: ls, .. } = lhs_alloc[0]; - let AllocInfo{ reg: rr, sz: rs, .. 
} = rhs_alloc[0]; - assert_eq!(ls, rs); - let reg = self.allocate(1)?; - match ls { - 1 => self.asm.cmpb_rr(lr, rr), - 2 => self.asm.cmpw_rr(lr, rr), - 4 => self.asm.cmpl_rr(lr, rr), - 8 => self.asm.cmpq_rr(lr, rr), - _ => return Err(Error::NYI("Lt larger than 8")), - } - self.asm.setcc_r(Condition::L, reg); - self.register(out, vec![AllocInfo{ reg, off:0, sz: 1 }]); - }, - &Le(out, lhs, rhs) => { - let lhs_alloc = self.reg_map[&lhs].clone(); - let rhs_alloc = self.reg_map[&rhs].clone(); - assert_eq!(lhs_alloc.len(), 1); - assert_eq!(rhs_alloc.len(), 1); - let AllocInfo{ reg: lr, sz: ls, .. } = lhs_alloc[0]; - let AllocInfo{ reg: rr, sz: rs, .. } = rhs_alloc[0]; - assert_eq!(ls, rs); - let reg = self.allocate(1)?; - match ls { - 1 => self.asm.cmpb_rr(lr, rr), - 2 => self.asm.cmpw_rr(lr, rr), - 4 => self.asm.cmpl_rr(lr, rr), - 8 => self.asm.cmpq_rr(lr, rr), - _ => return Err(Error::NYI("Le larger than 8")), - } - self.asm.setcc_r(Condition::LE, reg); - self.register(out, vec![AllocInfo{ reg, off:0, sz: 1 }]); - }, - &Ne(out, lhs, rhs) => { - let lhs_alloc = self.reg_map[&lhs].clone(); - let rhs_alloc = self.reg_map[&rhs].clone(); - assert_eq!(lhs_alloc.len(), 1); - assert_eq!(rhs_alloc.len(), 1); - let AllocInfo{ reg: lr, sz: ls, .. } = lhs_alloc[0]; - let AllocInfo{ reg: rr, sz: rs, .. } = rhs_alloc[0]; - assert_eq!(ls, rs); - let reg = self.allocate(1)?; - match ls { - 1 => self.asm.cmpb_rr(lr, rr), - 2 => self.asm.cmpw_rr(lr, rr), - 4 => self.asm.cmpl_rr(lr, rr), - 8 => self.asm.cmpq_rr(lr, rr), - _ => return Err(Error::NYI("Ne larger than 8")), - } - self.asm.setcc_r(Condition::NE, reg); - self.register(out, vec![AllocInfo{ reg, off:0, sz: 1 }]); - }, - &Gt(out, lhs, rhs) => { - let lhs_alloc = self.reg_map[&lhs].clone(); - let rhs_alloc = self.reg_map[&rhs].clone(); - assert_eq!(lhs_alloc.len(), 1); - assert_eq!(rhs_alloc.len(), 1); - let AllocInfo{ reg: lr, sz: ls, .. } = lhs_alloc[0]; - let AllocInfo{ reg: rr, sz: rs, .. 
} = rhs_alloc[0]; - assert_eq!(ls, rs); - let reg = self.allocate(1)?; - match ls { - 1 => self.asm.cmpb_rr(lr, rr), - 2 => self.asm.cmpw_rr(lr, rr), - 4 => self.asm.cmpl_rr(lr, rr), - 8 => self.asm.cmpq_rr(lr, rr), - _ => return Err(Error::NYI("Gt larger than 8")), - } - self.asm.setcc_r(Condition::G, reg); - self.register(out, vec![AllocInfo{ reg, off:0, sz: 1 }]); - }, - &Ge(out, lhs, rhs) => { - let lhs_alloc = self.reg_map[&lhs].clone(); - let rhs_alloc = self.reg_map[&rhs].clone(); - assert_eq!(lhs_alloc.len(), 1); - assert_eq!(rhs_alloc.len(), 1); - let AllocInfo{ reg: lr, sz: ls, .. } = lhs_alloc[0]; - let AllocInfo{ reg: rr, sz: rs, .. } = rhs_alloc[0]; - assert_eq!(ls, rs); - let reg = self.allocate(1)?; - match ls { - 1 => self.asm.cmpb_rr(lr, rr), - 2 => self.asm.cmpw_rr(lr, rr), - 4 => self.asm.cmpl_rr(lr, rr), - 8 => self.asm.cmpq_rr(lr, rr), - _ => return Err(Error::NYI("Ge larger than 8")), - } - self.asm.setcc_r(Condition::GE, reg); - self.register(out, vec![AllocInfo{ reg, off:0, sz: 1 }]); - }, - - &Chk(out_flags, out) => { - // Warning: This instructions assumes that no other - // intruction got added in-between. - let out_alloc = self.reg_map[&out].clone(); - assert_eq!(out_alloc.len(), 1); - let reg = self.allocate(1)?; - self.asm.setcc_r(Condition::O, reg); - self.reuse_append(out, out_flags, vec![AllocInfo{ reg, sz: 1, off: out_alloc[0].sz }])?; - }, - &Store(addr, value, _sz) => { - let addr_alloc = self.reg_map[&addr].clone(); - let value_alloc = self.reg_map[&value].clone(); - assert_eq!(addr_alloc.len(), 1); - let AllocInfo{ reg: ar, .. 
} = addr_alloc[0]; - for &AllocInfo{ reg: vr, off: offset, sz: size } in value_alloc.iter() { - match size { - 0 => {} - 1 => self.asm.movb_rm(vr, ar, offset as i32), - 2 => self.asm.movw_rm(vr, ar, offset as i32), - 4 => self.asm.movl_rm(vr, ar, offset as i32), - 8 => self.asm.movq_rm(vr, ar, offset as i32), - _ => return Err(Error::NYI("Store larger than 8")), - } - } - }, - &Load(out, addr, sz) => { - let addr_alloc = self.reg_map[&addr].clone(); - assert_eq!(addr_alloc.len(), 1); - let AllocInfo{ reg: ar, .. } = addr_alloc[0]; - let mut off = 0; - let mut alloc = vec![]; - while off < sz { - match sz - off { - 0 => {} - 1 => { - let reg = self.allocate(1)?; - self.asm.movb_mr(ar, off as i32, reg); - alloc.push(AllocInfo{ reg, sz: 1, off }); - off += 1; - } - 2 | 3 => { - let reg = self.allocate(2)?; - self.asm.movw_mr(ar, off as i32, reg); - alloc.push(AllocInfo{ reg, sz: 2, off }); - off += 2; - } - 4 | 5 | 6 | 7 => { - let reg = self.allocate(4)?; - self.asm.movl_mr(ar, off as i32, reg); - alloc.push(AllocInfo{ reg, sz: 4, off }); - off += 4; - } - _ => { - let reg = self.allocate(8)?; - self.asm.movq_mr(ar, off as i32, reg); - alloc.push(AllocInfo{ reg, sz: 8, off }); - off += 8; - } - } - } - self.register(out, alloc); - }, - - &StoreInto(into, value, offset, sz) => { - if sz == 0 { - return Ok(()) - } - let val_alloc = self.reg_map[&value].clone(); - let into_alloc = self.reg_map[&into].clone(); - let mut val_iter = val_alloc.iter(); - let mut into_iter = into_alloc.iter(); - let mut val_last = val_iter.next(); - let mut into_last = into_iter.next(); - loop { - match (val_last, into_last) { - (None, _) => break, - (_, None) => return Err(Error::StoreIntoMissTarget(0)), - (Some(&AllocInfo{ reg: vr, off: vo, sz: vs }), - Some(&AllocInfo{ reg: ir, off: io, sz: is })) => { - assert!(vo + vs <= sz); - let vo = vo + offset; - if io == vo && is == vs { - // The stored value matches exactly the - // component of the destination. 
- match is { - 0 => {} - 1 => self.asm.movb_rr(vr, ir), - 2 => self.asm.movw_rr(vr, ir), - 4 => self.asm.movl_rr(vr, ir), - 8 => self.asm.movq_rr(vr, ir), - _ => return Err(Error::NYI("StoreInto: allocation larger than 8")), - } - - val_last = val_iter.next(); - into_last = into_iter.next(); - } else if io + is <= vo { - into_last = into_iter.next(); - } else if vo + vs <= io { - return Err(Error::StoreIntoMissTarget(1)); - } else if io <= vo && vo + vs <= io + is { - // The value is a subset of the - // destination. Use masks to filter the low - // and high part of the destinations, and - // copy the value into it. - - // Allocate a temporary. - let tmp_vr = self.allocate(8)?; - - // Compute the masks used to filter out the - // target value. - let low_sz = vo - io; - let data_off = low_sz; - let data_sz = vs; - let high_off = low_sz + data_sz; - assert!(high_off <= is); - let high_sz = is - high_off; - - let mut mask : u64 = 0; - if high_sz > 0 { - let mut high_mask : u64 = 0xffff_ffff_ffff_ffff; - high_mask = high_mask >> (64 - high_sz * 8); - high_mask = high_mask << (high_off * 8); - mask = high_mask; - } - - if low_sz > 0 { - let mut low_mask : u64 = 0xffff_ffff_ffff_ffff; - low_mask = low_mask >> (64 - low_sz * 8); - mask = mask | low_mask; - } - - // Filter the low & high part. - self.asm.movq_ir(mask as i64, tmp_vr); - self.asm.andq_rr(tmp_vr, ir); - - // Copy the data. - let mask : u64 = 0xffff_ffff_ffff_ffff_u64 >> (64 - data_sz * 8); - self.asm.movq_ir(mask as i64, tmp_vr); - self.asm.andq_rr(vr, tmp_vr); - self.asm.shrq_ir((data_off * 8) as u8, tmp_vr); - self.asm.orq_rr(tmp_vr, ir); - - self.free(tmp_vr); - - val_last = val_iter.next(); - } else if io < vo + sz && vo < io + is { - // The stored value span multiple allocations. - return Err(Error::NYI("StoreInto: miss-aligned content")) - } else { - // The stored value span multiple allocations. 
- return Err(Error::NYI("StoreInto: ???")) - } - } - } - } - }, - &LoadFrom(out, from, offset, sz) => { - let from_alloc = self.reg_map[&from].clone(); - let mut dst_off = 0; - let mut alloc = vec![]; - for &AllocInfo{ reg: vr, off, sz: size } in from_alloc.iter() { - if off == offset && size == sz { - // The loaded value matches exactly the content - // which is being loaded. - let reg = self.allocate(size)?; - match size { - 0 => {} - 1 => self.asm.movb_rr(vr, reg), - 2 => self.asm.movw_rr(vr, reg), - 4 => self.asm.movl_rr(vr, reg), - 8 => self.asm.movq_rr(vr, reg), - _ => return Err(Error::NYI("LoadFrom: allocation larger than 8")), - } - - alloc.push(AllocInfo{ reg, sz: size, off: dst_off }); - dst_off += size; - } else if off <= offset && offset + sz <= off + size { - return Err(Error::NYI("LoadFrom: subset content")) - } else if off < offset + sz && offset < off + size { - // The loaded value is spread across multiple - // allocations. - return Err(Error::NYI("LoadFrom: miss-aligned content")) - } - } - self.register(out, alloc); - }, - - &Live(_) => {}, - &Dead(reg) => { - // Note, the register might have already been freed by one - // of call to reuse or reuse_append functions. - self.unregister(reg); - }, - }; - - Ok(()) - } - - fn compile_terminator(&mut self, term: &lir::Terminator) -> Result<(), Error> { - use lir::Terminator::*; - println!(" Compile Terminator {:?}:", term); - match term { - &Return { value } => { - if let Some(value) = value { - let regs = [value]; - self.move_group(®s, &self::asm::ABI_RET)?; - self.unregister(value); - } - - // TODO: Pop non-volatile registers. - let stack_size = self.stack_size; - let stack_size = stack_size + (16 - stack_size % 16); - dynasm!(self.asm.backend - ; add rsp, stack_size as _ - ; pop rbp - ; ret - ); - Ok(()) - } - &Unwind => { - // TODO: We should be calling the _Unwind_Resume@PLT - // function, but at the moment I have not looked from where - // we can get it from. 
- dynasm!(self.asm.backend - ; int3 - ); - Ok(()) - } - &Unreachable => { - // Not part of Rust, but whatever ... - dynasm!(self.asm.backend - ; int3 - ); - Ok(()) - } - &Goto { target } => { - let label = self.bb_labels[target]; - dynasm!(self.asm.backend - ; jmp => label - ); - Ok(()) - } - &SwitchInt { value, /*range,*/ ref targets, otherwise, .. } => { - // TODO: Optimize for boolean values. - let val_alloc = self.reg_map[&value].clone(); - let AllocInfo{ reg: vr, sz: vz, .. } = val_alloc[0]; - assert_eq!(val_alloc.len(), 1); - let ir = self.allocate(1)?; - - for &(val, dest) in targets.iter() { - // Load the value in the given register. - match vz { - 1 => { - self.asm.movb_ir(val as i8 , ir); - self.asm.cmpb_rr(vr, ir); - } - 2 => { - self.asm.movw_ir(val as i16, ir); - self.asm.cmpw_rr(vr, ir); - } - 4 => { - self.asm.movl_ir(val as i32, ir); - self.asm.cmpl_rr(vr, ir); - } - 8 => { - self.asm.movq_ir(val as i64, ir); - self.asm.cmpq_rr(vr, ir); - } - _ => return Err(Error::NYI("SwitchInt comparison is larger than 8")), - } - // Compare the value - // Jump to the destination if equal. - let label = self.bb_labels[dest]; - dynasm!(self.asm.backend - ; je => label - ); - } - - if let Some(other) = otherwise { - // Unconditionally jump to the otherwise place if none - // of the value matched. - let label = self.bb_labels[other]; - dynasm!(self.asm.backend - ; jmp => label - ); - } - - // Trap if none of the branches are taken. - dynasm!(self.asm.backend - ; int3 - ); - - self.free(ir); - self.unregister(value); - Ok(()) - } - &Call { function, ref args, return_target, unwind_target } => { - if args.len() >= self::asm::ABI_ARGS.len() { - return Err(Error::NYI("Writting arguments to the stack.")); - }; - - // bind the arguments to specific registers. - self.move_group(&args, &self::asm::ABI_ARGS)?; - - // Make the call. - let fun_alloc = self.reg_map[&function].clone(); - assert_eq!(fun_alloc.len(), 1); - let AllocInfo{ reg: fr, sz: fz, .. 
} = fun_alloc[0]; - assert_eq!(fz, 8); - self.asm.call_r(fr); - - // Free the function pointer. - self.unregister(function); - - // Free the argument registers. - for arg in args.iter() { - self.unregister(*arg) - } - - if let Some((_, ret_id)) = return_target { - // Note: The return value register is being set by the - // compile function. - let label = self.bb_labels[ret_id]; - dynasm!(self.asm.backend - ; jmp => label - ); - } - if let Some(uwd_id) = unwind_target { - let label = self.bb_labels[uwd_id]; - dynasm!(self.asm.backend - ; jmp => label - ); - } - - // TODO: Handle unwinding. - Ok(()) - } - } - } - - fn finalize(self) -> Result { - let res = match self.asm.backend.finalize() { - // TODO: transmute and return a class which implements Fn types. - Ok(buf) => Ok(JitCode { code: buf, start: self.start }), - Err(_) => Err(Error::Finalize), - }; - println!("Compilation finished"); - res - } -} diff --git a/lib/src/context.rs b/lib/src/context.rs index 105b0b3..095c38a 100644 --- a/lib/src/context.rs +++ b/lib/src/context.rs @@ -2,13 +2,18 @@ /// part. It provides an interface for tuning the JIT compiler parameters /// as well as the heuristics for enterring the JIT. -use compile::JitCode; +use bincode; +use lir; +use codegen::{CodeGenerator, JitCode}; use std::rc::Rc; +use std::mem; /// Opaque structure which is used to store the function mapping, and tune /// the JIT parameters. pub struct JitContext { - code: Option> + // TODO: Storage space for all compiled functions. At the moment only hold a + // single compiled function. + code: Option>, } impl JitContext { @@ -20,7 +25,20 @@ impl JitContext { } &None => { println!("Did not found JIT Code in the context.\nStart compiling ..."); - match JitCode::compile(bytes, defs) { + // TODO: Move this as part of the JitContext. In the mean time + // deserialize the LIR context everytime we start a compilation. 
+ let (mut ctx, unit) : (lir::context::Context, lir::unit::Unit) = + match bincode::deserialize(bytes) { + Ok(res) => res, + Err(err) => { + println!("bincode::ErrorKind = {}", err); + return None + } + }; + let defs : &'static () = unsafe { mem::transmute(defs) }; + ctx.set_static_refs(defs); + let mut codegen = CodeGenerator::new(); + match codegen.compile(&ctx, &unit) { Ok(jit) => { let jit = Rc::new(jit); // TODO: Store the Jit code on the context. @@ -28,7 +46,7 @@ impl JitContext { Some(jit) } Err(e) => { - println!("JIT Compiler Error: {:?}", e); + println!("JIT Codegen Error: {:?}", e); None } } @@ -39,6 +57,8 @@ impl JitContext { impl Default for JitContext { fn default() -> JitContext { - JitContext{ code: None } + JitContext{ + code: None, + } } } diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 1fb10eb..58558dd 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -26,12 +26,12 @@ extern crate serde_derive; extern crate serde; extern crate bincode; -pub mod lir; +extern crate holyjit_lir as lir; +extern crate holyjit_codegen as codegen; mod context; -mod compile; pub use context::JitContext; -pub use compile::JitCode; +pub use codegen::JitCode; /// This trait should be implemented by every function that we want to be able to Jit. 
This trait /// implements the Fn trait to make this function callable, and to make it a potential entry point @@ -58,7 +58,7 @@ impl FnOnce<()> for Curry0 { Curry0::Native(fun) => fun(), Curry0::Jit(jit) => { let fun : fn() -> Output = unsafe { - std::mem::transmute(jit.get_fn()) + std::mem::transmute(jit.as_ptr()) }; fun() } @@ -71,7 +71,7 @@ impl FnMut<()> for Curry0 { &mut Curry0::Native(ref mut fun) => fun(), &mut Curry0::Jit(ref mut jit) => { let fun : fn() -> Output = unsafe { - std::mem::transmute(jit.get_fn()) + std::mem::transmute(jit.as_ptr()) }; fun() } @@ -84,7 +84,7 @@ impl Fn<()> for Curry0 { &Curry0::Native(ref fun) => fun(), &Curry0::Jit(ref jit) => { let fun : fn() -> Output = unsafe { - std::mem::transmute(jit.get_fn()) + std::mem::transmute(jit.as_ptr()) }; fun() } @@ -119,7 +119,7 @@ macro_rules! curry_decl { $name::Native(fun) => curry_call!{ fun => args: ($($arg),*) }, $name::Jit(jit) => { let fun : fn($($arg),*) -> $ret = unsafe { - std::mem::transmute(jit.get_fn()) + std::mem::transmute(jit.as_ptr()) }; curry_call!{ fun => args: ($($arg),*) } } @@ -132,7 +132,7 @@ macro_rules! curry_decl { &mut $name::Native(ref mut fun) => curry_call!{ fun => args: ($($arg),*) }, &mut $name::Jit(ref mut jit) => { let fun : fn($($arg),*) -> $ret = unsafe { - std::mem::transmute(jit.get_fn()) + std::mem::transmute(jit.as_ptr()) }; curry_call!{ fun => args: ($($arg),*) } } @@ -145,7 +145,7 @@ macro_rules! curry_decl { &$name::Native(ref fun) => curry_call!{ fun => args: ($($arg),*) }, &$name::Jit(ref jit) => { let fun : fn($($arg),*) -> $ret = unsafe { - std::mem::transmute(jit.get_fn()) + std::mem::transmute(jit.as_ptr()) }; curry_call!{ fun => args: ($($arg),*) } } diff --git a/lib/src/lir.rs b/lib/src/lir.rs deleted file mode 100644 index c687b74..0000000 --- a/lib/src/lir.rs +++ /dev/null @@ -1,189 +0,0 @@ -//! Defines the LIR structures and how to represent a graph and its -//! instructions. 
-use std::fmt; - -/// Prototype of the Mir graph of a single function. This representation is -/// not optimized for graph optimizations, but optimized only for the ease -/// of convertion from the MIR and the ease of naive compilation. -#[derive(Serialize, Deserialize, Debug)] -pub struct CompilationUnit { - /// Size of all local variable of the Mir. - pub stack_size: usize, - - /// Ordered list of arguments, with their associated registers. - pub args_defs: Vec, - - /// List of basic blocks of a given function. - pub blocks: Vec, -} - -/// (Prototype) Set of instruction within a block. -pub type BasicBlock = usize; - -/// Basic block which contains a list of instructions. -#[derive(Serialize, Deserialize, Debug)] -pub struct BasicBlockData { - /// Ordered list of registers available in this basic block. - // - // Note: We should probably look at encoding these lists of registers as - // spagheti stacks, such that we do not over-consume memory, by storing - // this information. - pub input_regs: Vec, - /// Ordered list of registers available after this basic block. - pub output_regs: Vec, - /// Ordered list of instructions. - pub insts: Vec, - /// How the basic block ends. - pub end: Terminator, -} - -/// Basic block terminator instruction, which resume the execution in -/// another basic block. -#[derive(Serialize, Deserialize, Debug)] -pub enum Terminator { - /// Exit successfully the current function. - Return { - value: Option, - }, - - /// Unwind the current function. - Unwind, - - /// Trap. - Unreachable, - - /// Jump unconditionally to the next basic block. - Goto { - target: BasicBlock, - }, - - /// Conditional branches, implemented as a switch case to handle all - /// forms of conditionals. - SwitchInt { - value: Reg, - range: RangeInclusive, - targets: Vec<(Imm, BasicBlock)>, - otherwise: Option, - }, - - /// Call a function. (any function, or an assertion, or a drop function) - Call { - /// Pointer to a given function. 
- function: Reg, - - // TODO: Add a reference of the ABI we are compiling this call with. - - /// Set of argument to be used for calling the function. - args: Vec, - - /// If the function returns, then the following register would be - /// defined in the block, and listed in the output_regs of the - /// current block, and the inputs-regs of the listed block. - return_target: Option<(Option, BasicBlock)>, - - /// If the function unwinds then the unwinding resumes in the - /// following block., - unwind_target: Option - } -} - -pub type Sz = usize; -pub type Reg = usize; -pub type Imm = isize; -pub type ByValue = bool; -pub type ArgDef = (Imm, Sz, ByValue); -pub type RegDef = (Reg, Sz); -pub type RangeInclusive = (Imm, Imm); - -/// (Prototype) Minimal set of instructions to support the MIR graph of -/// Rust for the examples directory. -#[derive(Serialize, Deserialize, Debug)] -pub enum Inst { - // Initialize the frame pointer. - SetFramePtr(Reg, Sz, Sz), - - // Copy the address of a static value in a register. - Static(Reg, Imm, Sz), - - // Copy a constant into a register. - CopyImm(Reg, Imm, Sz), - - // Cast operator for dummies. - Resize(Reg, Reg, Sz), - - // Logical & Math operations. - Add(Reg, Reg, Reg), - Sub(Reg, Reg, Reg), - Mul(Reg, Reg, Reg), - Div(Reg, Reg, Reg), - Rem(Reg, Reg, Reg), - BitXor(Reg, Reg, Reg), - BitAnd(Reg, Reg, Reg), - BitOr(Reg, Reg, Reg), - Shl(Reg, Reg, Reg), - Shr(Reg, Reg, Reg), - Eq(Reg, Reg, Reg), - Lt(Reg, Reg, Reg), - Le(Reg, Reg, Reg), - Ne(Reg, Reg, Reg), - Gt(Reg, Reg, Reg), - Ge(Reg, Reg, Reg), - - // Convert an operation result into a checked operation result, i-e add - // a boolean size value to the register size. - Chk(Reg, Reg), - - // Store at the address, the register value. - Store(Reg, Reg, Sz), - - // Load into the register, from the address. - Load(Reg, Reg, Sz), - - // Note: These 2 instructions are based on the infinite size registers, - // and are made to represent structures which are held in - // registers. 
This is a useful trick for generating code from Rust's - // Mir, but also a useful trick to represent owned memory. - - // Initialize the content of a register with the the content of a - // smaller one at an offset of the first. - StoreInto(Reg, Reg, /* offset */ Sz, /* size */ Sz), - - // Load from a register at a given offset, and a given size within the - // register the value to be output in the first register. - LoadFrom(Reg, Reg, /* offset */ Sz, /* size */ Sz), - - // Reserve, or kill a register allocation. - // Note: Live is useless in case of SSA forms. - Live(Reg), Dead(Reg), -} - -/// Display a Compilation unit with some hard-coded indentation level. -impl fmt::Display for CompilationUnit { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "CompilationUnit {{\n")?; - write!(f, " stack_size: {:?},\n", self.stack_size)?; - write!(f, " args_defs: {:?},\n", self.args_defs)?; - write!(f, " blocks: [\n")?; - for (x, b) in self.blocks.iter().enumerate() { - write!(f, " [{}] = {},\n", x, b)?; - } - write!(f, " ],\n")?; - write!(f, "}}") - } -} - -/// Display a BasicBlockData unit with some hard-coded indentation level. -impl fmt::Display for BasicBlockData { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "BasicBlockData {{\n")?; - write!(f, " input_regs: {:?},\n", self.input_regs)?; - write!(f, " insts: [\n")?; - for ins in &self.insts { - write!(f, " {:?},\n", ins)?; - } - write!(f, " ],\n")?; - write!(f, " end: {:?},\n", self.end)?; - write!(f, " output_regs: {:?},\n", self.output_regs)?; - write!(f, " }}") - } -} diff --git a/lir/src/builder.rs b/lir/src/builder.rs index b995bda..edcda8c 100644 --- a/lir/src/builder.rs +++ b/lir/src/builder.rs @@ -27,11 +27,6 @@ pub struct ContextBuilder { #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] pub struct Variable(u32); -/// This structure is used to capture variable states, and used to insert Phi -/// instructions if needed. 
-pub struct SSABuilder { -} - pub struct UnitBuilder<'a> { /// Identifier of the constructed unit. unit: Unit, diff --git a/lir/src/context.rs b/lir/src/context.rs index 22e2cec..411081a 100644 --- a/lir/src/context.rs +++ b/lir/src/context.rs @@ -8,6 +8,7 @@ type StaticStorage = *const (); /// Information stored in the context for each StackAddress instruction in the /// data flow. This contains the ComplexTypeId, the size and its alignment. +#[derive(Serialize, Deserialize, Debug)] pub struct StackAddressInfo { /// Type to be stored in the space reserved for the given stack address. pub ty: ComplexTypeId, @@ -22,6 +23,7 @@ pub struct StackAddressInfo { /// A context is a structure which centralize all the data necessary for the /// execution of any Unit. It holds the collection of complex types, and any /// counter related to having unique identifiers. +#[derive(Serialize, Deserialize, Debug)] pub struct Context { /// This counter is used for both Rehash instructions and Newhash /// instructions. It holds the next value to be allocated if any of these @@ -41,10 +43,11 @@ pub struct Context { /// If any, this is the pointer to the memory which contains static /// information filled by the static compiler with all the symbol references /// or values. This fields should be set with the function - /// `set_statics_refs` on the constructed or deserialized Context. Once set, + /// `set_statics_refs` on the constructed or deserialized `Context`. Once set, /// it is not allowed to change. Attempting to build any unit without - /// setting this value will cause a compilation error if the Unit uses an - /// StaticAddress-es. + /// setting this value will cause a compilation error if the `Unit` uses an + /// `StaticAddress`-es. 
+ #[serde(skip, default="ptr::null")] refs_ptr: StaticStorage, /// When a Context is created with the ContextBuilder, this field is mutated diff --git a/lir/src/types.rs b/lir/src/types.rs index 6dc16b0..0d4f3e5 100644 --- a/lir/src/types.rs +++ b/lir/src/types.rs @@ -21,7 +21,7 @@ pub struct CanUnwind(pub bool); /// A complex type is either a function signature, an structure, an union, a /// pointer, a scalar or a vector of scalar. All these types should be aggregaed /// globally, such that that can be used across multiple Units. -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Clone)] pub enum ComplexType { /// Functions are used to express the signature of Unit and external /// functions. At the moment, all functions are assumed to follow the same From 098440f3ca45f636e592a5c5c6d5a61b4ad9d1cf Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Wed, 8 May 2019 11:50:27 +0200 Subject: [PATCH 29/32] Change ComplexType to split the memory representation from the data stored in it. 
--- codegen/src/lower.rs | 27 ++- codegen/tests/add.rs | 52 +++--- codegen/tests/call.rs | 18 +- codegen/tests/compare.rs | 312 ++++++++++++++++---------------- codegen/tests/control_flow.rs | 8 +- codegen/tests/load-store.rs | 96 +++++----- codegen/tests/mul.rs | 24 +-- codegen/tests/static-address.rs | 10 +- codegen/tests/sub.rs | 48 ++--- lir/src/builder.rs | 6 +- lir/src/types.rs | 62 ++++++- 11 files changed, 355 insertions(+), 308 deletions(-) diff --git a/codegen/src/lower.rs b/codegen/src/lower.rs index 881d434..33dd3a9 100644 --- a/codegen/src/lower.rs +++ b/codegen/src/lower.rs @@ -16,7 +16,7 @@ use codegen::isa::TargetIsa; use lir::unit::{Unit, UnitId}; use lir::context::Context; -use lir::types::{ComplexTypeId, ComplexType}; +use lir::types::{ComplexTypeId, DataRepr}; use lir::number::{NumberType, SignedType, OrderedType, NumberValue}; use lir::control_flow::{Sequence, SequenceIndex, SuccessorIndex}; use lir::data_flow::{Opcode, Instruction, ValueType, Value}; @@ -132,19 +132,18 @@ impl<'a> ConvertCtx<'a> { } fn cltype(&self, ty: ComplexTypeId) -> LowerResult { - use self::ComplexType::*; + use self::DataRepr::*; use self::NumberType::*; let ty = self.ctx.get_type(ty); - match ty { - &Pointer => Ok(self.isa.pointer_type()), - &Scalar(B1) => Ok(types::B1), - &Scalar(U8) | &Scalar(I8) => Ok(types::I8), - &Scalar(U16) | &Scalar(I16) => Ok(types::I16), - &Scalar(U32) | &Scalar(I32) => Ok(types::I32), - &Scalar(U64) | &Scalar(I64) => Ok(types::I64), - &Scalar(F32) => Ok(types::F32), - &Scalar(F64) => Ok(types::F64), - &Vector(_, _) => unimplemented!(), + match ty.data { + Pointer => Ok(self.isa.pointer_type()), + Number(B1) => Ok(types::B1), + Number(U8) | Number(I8) => Ok(types::I8), + Number(U16) | Number(I16) => Ok(types::I16), + Number(U32) | Number(I32) => Ok(types::I32), + Number(U64) | Number(I64) => Ok(types::I64), + Number(F32) => Ok(types::F32), + Number(F64) => Ok(types::F64), _ => Err(LowerError::ComplexTypeNotLowered), } } @@ -159,8 +158,8 @@ 
impl<'a> ConvertCtx<'a> { /// list of inputs and outputs which are corresponding to this signature. fn signature_io(&self, sig: ComplexTypeId) -> LowerResult<(&'a Vec, &'a Vec)> { let ty = self.ctx.get_type(sig); - match ty { - &ComplexType::Function(ref ins, ref outs, ref _unwind) => Ok((ins, outs)), + match ty.data { + DataRepr::Function(ref ins, ref outs, ref _unwind) => Ok((ins, outs)), _ => Err(LowerError::UnitIsNotAFunction), } } diff --git a/codegen/tests/add.rs b/codegen/tests/add.rs index ac0584b..79cbdb8 100644 --- a/codegen/tests/add.rs +++ b/codegen/tests/add.rs @@ -19,8 +19,8 @@ fn add1_test() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32], vec![t_i32], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -50,9 +50,9 @@ fn add_overflow_i32_test() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -87,9 +87,9 @@ fn add_overflow_u32_test() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -124,9 +124,9 @@ fn add_overflow_i64_test() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -161,9 +161,9 @@ fn add_overflow_u64_test() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -198,9 +198,9 @@ fn add_carry_i32_test() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -235,9 +235,9 @@ fn add_carry_u32_test() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -272,9 +272,9 @@ fn add_carry_i64_test() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -309,9 +309,9 @@ fn add_carry_u64_test() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { diff --git a/codegen/tests/call.rs b/codegen/tests/call.rs index 201caa8..1a83b4f 100644 --- a/codegen/tests/call.rs +++ b/codegen/tests/call.rs @@ -33,11 +33,11 @@ fn call_ret_tuple() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u8], vec![t_u8], CanUnwind(true))); + let t_u8 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U8)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u8], vec![t_u8], CanUnwind(true))); // TODO: Unwinding is not supported without DWARF at the moment and // eh_frame describing where to resume the execution when unwinding. - let t_ret_tuple = bld.ctx().add_type(ComplexType::Function(vec![t_u8], vec![t_u8, t_u8], CanUnwind(false))); + let t_ret_tuple = bld.ctx().add_type(ComplexType::new_fn(vec![t_u8], vec![t_u8, t_u8], CanUnwind(false))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); let s1 = bld.create_sequence(); @@ -78,11 +78,11 @@ fn call_sub_add() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u8, t_u8], vec![t_u8], CanUnwind(true))); + let t_u8 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U8)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u8, t_u8], vec![t_u8], CanUnwind(true))); // TODO: Unwinding is not supported without DWARF at the moment and // eh_frame describing where to resume the execution when unwinding. - let t_fun = bld.ctx().add_type(ComplexType::Function(vec![t_u8, t_u8], vec![t_u8, t_u8], CanUnwind(false))); + let t_fun = bld.ctx().add_type(ComplexType::new_fn(vec![t_u8, t_u8], vec![t_u8, t_u8], CanUnwind(false))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); let s1 = bld.create_sequence(); @@ -128,11 +128,11 @@ fn call_diff_sub_add() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u8, t_u8], vec![t_u8], CanUnwind(true))); + let t_u8 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U8)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u8, t_u8], vec![t_u8], CanUnwind(true))); // TODO: Unwinding is not supported without DWARF at the moment and // eh_frame describing where to resume the execution when unwinding. - let t_fun = bld.ctx().add_type(ComplexType::Function(vec![t_u8, t_u8], vec![t_u8], CanUnwind(false))); + let t_fun = bld.ctx().add_type(ComplexType::new_fn(vec![t_u8, t_u8], vec![t_u8], CanUnwind(false))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); let s1 = bld.create_sequence(); diff --git a/codegen/tests/compare.rs b/codegen/tests/compare.rs index 834ac69..407bb64 100644 --- a/codegen/tests/compare.rs +++ b/codegen/tests/compare.rs @@ -21,9 +21,9 @@ fn eq_u32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -56,9 +56,9 @@ fn ne_u32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -91,9 +91,9 @@ fn lt_u32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -126,9 +126,9 @@ fn le_u32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -161,9 +161,9 @@ fn gt_u32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -196,9 +196,9 @@ fn ge_u32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -231,9 +231,9 @@ fn eq_i32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -266,9 +266,9 @@ fn ne_i32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -301,9 +301,9 @@ fn lt_i32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -336,9 +336,9 @@ fn le_i32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -371,9 +371,9 @@ fn gt_i32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -406,9 +406,9 @@ fn ge_i32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -441,9 +441,9 @@ fn eq_u64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -476,9 +476,9 @@ fn ne_u64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -511,9 +511,9 @@ fn lt_u64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -546,9 +546,9 @@ fn le_u64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -581,9 +581,9 @@ fn gt_u64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -616,9 +616,9 @@ fn ge_u64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -651,9 +651,9 @@ fn eq_i64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -686,9 +686,9 @@ fn ne_i64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -721,9 +721,9 @@ fn lt_i64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -756,9 +756,9 @@ fn le_i64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -791,9 +791,9 @@ fn gt_i64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -826,9 +826,9 @@ fn ge_i64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -861,9 +861,9 @@ fn ord_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -899,9 +899,9 @@ fn oeq_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -937,9 +937,9 @@ fn one_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -975,9 +975,9 @@ fn olt_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1013,9 +1013,9 @@ fn ole_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1051,9 +1051,9 @@ fn ogt_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1089,9 +1089,9 @@ fn oge_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1127,9 +1127,9 @@ fn unord_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1165,9 +1165,9 @@ fn ueq_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1203,9 +1203,9 @@ fn une_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1241,9 +1241,9 @@ fn ult_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1279,9 +1279,9 @@ fn ule_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1317,9 +1317,9 @@ fn ugt_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1355,9 +1355,9 @@ fn uge_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1393,9 +1393,9 @@ fn ord_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1431,9 +1431,9 @@ fn oeq_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1469,9 +1469,9 @@ fn one_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1507,9 +1507,9 @@ fn olt_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1545,9 +1545,9 @@ fn ole_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1583,9 +1583,9 @@ fn ogt_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1621,9 +1621,9 @@ fn oge_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1659,9 +1659,9 @@ fn unord_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1697,9 +1697,9 @@ fn ueq_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1735,9 +1735,9 @@ fn une_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1773,9 +1773,9 @@ fn ult_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1811,9 +1811,9 @@ fn ule_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1849,9 +1849,9 @@ fn ugt_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -1887,9 +1887,9 @@ fn uge_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { diff --git a/codegen/tests/control_flow.rs b/codegen/tests/control_flow.rs index 6092d8e..29c3445 100644 --- a/codegen/tests/control_flow.rs +++ b/codegen/tests/control_flow.rs @@ -22,8 +22,8 @@ fn round_odd_up_test() { let round_odd_up_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32], vec![t_i32], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32], vec![t_i32], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); // test x % 2 == 0 let s1 = bld.create_sequence(); // x += 1 @@ -74,8 +74,8 @@ fn sum_loop_test() { let sum_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64], vec![t_u64], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64], vec![t_u64], CanUnwind(true))); let i = bld.new_var(); let accu = bld.new_var(); diff --git a/codegen/tests/load-store.rs b/codegen/tests/load-store.rs index 84e6fb4..bba9d9d 100644 --- a/codegen/tests/load-store.rs +++ b/codegen/tests/load-store.rs @@ -19,9 +19,9 @@ fn load_u8() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_u8], CanUnwind(true))); + let t_u8 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U8)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr], vec![t_u8], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -50,9 +50,9 @@ fn load_u16() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u16 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U16)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_u16], CanUnwind(true))); + let t_u16 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U16)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr], vec![t_u16], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -81,9 +81,9 @@ fn load_u32() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_u32], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr], vec![t_u32], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -112,9 +112,9 @@ fn load_u64() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_u64], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr], vec![t_u64], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -143,9 +143,9 @@ fn load_i8() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I8)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_i8], CanUnwind(true))); + let t_i8 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I8)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr], vec![t_i8], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -174,9 +174,9 @@ fn load_i16() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i16 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I16)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_i16], CanUnwind(true))); + let t_i16 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I16)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr], vec![t_i16], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -205,9 +205,9 @@ fn load_i32() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_i32], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr], vec![t_i32], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -236,9 +236,9 @@ fn load_i64() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr], vec![t_i64], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr], vec![t_i64], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -267,9 +267,9 @@ fn store_u8() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_u8], vec![], CanUnwind(true))); + let t_u8 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U8)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr, t_u8], vec![], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -302,9 +302,9 @@ fn store_u16() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u16 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U16)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_u16], vec![], CanUnwind(true))); + let t_u16 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U16)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr, t_u16], vec![], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -337,9 +337,9 @@ fn store_u32() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_u32], vec![], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr, t_u32], vec![], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -372,9 +372,9 @@ fn store_u64() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_u64], vec![], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr, t_u64], vec![], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -407,9 +407,9 @@ fn store_i8() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I8)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_i8], vec![], CanUnwind(true))); + let t_i8 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I8)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr, t_i8], vec![], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -442,9 +442,9 @@ fn store_i16() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i16 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I16)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_i16], vec![], CanUnwind(true))); + let t_i16 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I16)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr, t_i16], vec![], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -477,9 +477,9 @@ fn store_i32() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_i32], vec![], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr, t_i32], vec![], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -512,9 +512,9 @@ fn store_i64() { let add1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_ptr, t_i64], vec![], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_ptr, t_i64], vec![], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { diff --git a/codegen/tests/mul.rs b/codegen/tests/mul.rs index 47e04f0..8621239 100644 --- a/codegen/tests/mul.rs +++ b/codegen/tests/mul.rs @@ -19,8 +19,8 @@ fn mul_u32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_u32], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_u32], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -53,8 +53,8 @@ fn mul_i32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_i32], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_i32], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -87,8 +87,8 @@ fn mul_u64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_u64], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_u64], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -121,8 +121,8 @@ fn mul_i64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_i64], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_i64], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -155,8 +155,8 @@ fn mul_f32() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_f32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F32)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f32, t_f32], vec![t_f32], CanUnwind(true))); + let t_f32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F32)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f32, t_f32], vec![t_f32], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -191,8 +191,8 @@ fn mul_f64() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_f64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::F64)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_f64, t_f64], vec![t_f64], CanUnwind(true))); + let t_f64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::F64)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_f64, t_f64], vec![t_f64], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { diff --git a/codegen/tests/static-address.rs b/codegen/tests/static-address.rs index 29e65b3..717012f 100644 --- a/codegen/tests/static-address.rs +++ b/codegen/tests/static-address.rs @@ -24,8 +24,8 @@ fn load_first_static_field() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. - let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![], vec![t_u8], CanUnwind(true))); + let t_u8 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U8)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![], vec![t_u8], CanUnwind(true))); let ref0 = bld.ctx().add_typed_ref::(); let _ref1 = bld.ctx().add_typed_ref::<&u8>(); bld.set_signature(t_sig); @@ -58,9 +58,9 @@ fn load_second_static_field() { let unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Add the function signature. 
- let t_u8 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U8)); - let t_ptr = bld.ctx().add_type(ComplexType::Scalar(addr_type())); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![], vec![t_u8], CanUnwind(true))); + let t_u8 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U8)); + let t_ptr = bld.ctx().add_type(ComplexType::new_scalar(addr_type())); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![], vec![t_u8], CanUnwind(true))); let _ref0 = bld.ctx().add_typed_ref::(); let ref1 = bld.ctx().add_typed_ref::<&u8>(); bld.set_signature(t_sig); diff --git a/codegen/tests/sub.rs b/codegen/tests/sub.rs index 983232a..e1bdfb4 100644 --- a/codegen/tests/sub.rs +++ b/codegen/tests/sub.rs @@ -19,9 +19,9 @@ fn sub_overflow_i32_test() { let sub1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Sub the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -56,9 +56,9 @@ fn sub_overflow_u32_test() { let sub1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Sub the function signature. 
- let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -93,9 +93,9 @@ fn sub_overflow_i64_test() { let sub1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Sub the function signature. - let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -130,9 +130,9 @@ fn sub_overflow_u64_test() { let sub1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Sub the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -167,9 +167,9 @@ fn sub_carry_i32_test() { let sub1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Sub the function signature. - let t_i32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); + let t_i32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i32, t_i32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -204,9 +204,9 @@ fn sub_carry_u32_test() { let sub1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Sub the function signature. 
- let t_u32 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U32)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); + let t_u32 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U32)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u32, t_u32], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -241,9 +241,9 @@ fn sub_carry_i64_test() { let sub1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Sub the function signature. - let t_i64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::I64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); + let t_i64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::I64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_i64, t_i64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { @@ -282,9 +282,9 @@ fn sub_carry_u64_test() { let sub1_unit = { let mut bld = UnitBuilder::new(UnitId::Function(0), &mut ctx_bld); // Sub the function signature. 
- let t_u64 = bld.ctx().add_type(ComplexType::Scalar(NumberType::U64)); - let t_bool = bld.ctx().add_type(ComplexType::Scalar(NumberType::B1)); - let t_sig = bld.ctx().add_type(ComplexType::Function(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); + let t_u64 = bld.ctx().add_type(ComplexType::new_scalar(NumberType::U64)); + let t_bool = bld.ctx().add_type(ComplexType::new_scalar(NumberType::B1)); + let t_sig = bld.ctx().add_type(ComplexType::new_fn(vec![t_u64, t_u64], vec![t_bool], CanUnwind(true))); bld.set_signature(t_sig); let s0 = bld.create_sequence(); { diff --git a/lir/src/builder.rs b/lir/src/builder.rs index edcda8c..df28d5d 100644 --- a/lir/src/builder.rs +++ b/lir/src/builder.rs @@ -7,7 +7,7 @@ use std::mem::{align_of, size_of}; use unit::{Unit, UnitId}; use data_flow::{Instruction, Opcode, Value}; use control_flow::{Sequence, SequenceIndex, SuccessorIndex}; -use types::{ComplexType, ComplexTypeId}; +use types::{DataRepr, ComplexType, ComplexTypeId}; use context; use bitset::BitSet; @@ -156,8 +156,8 @@ impl<'a> UnitBuilder<'a> { pub fn set_signature(&mut self, signature: ComplexTypeId) { self.unit.sig = signature; let ty = self.ctx.get_type(signature); - let (ins, _outs) = match ty { - &ComplexType::Function(ref ins, ref outs, _) => (ins, outs), + let (ins, _outs) = match ty.data { + DataRepr::Function(ref ins, ref outs, _) => (ins, outs), _ => panic!("Unit signatures are expected to be a Function.") }; self.unit.inputs = ins.iter().map(|_| Value::dummy()).collect(); diff --git a/lir/src/types.rs b/lir/src/types.rs index 0d4f3e5..a240504 100644 --- a/lir/src/types.rs +++ b/lir/src/types.rs @@ -18,16 +18,26 @@ pub struct Offset(pub usize); #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Clone, Copy)] pub struct CanUnwind(pub bool); -/// A complex type is either a function signature, an structure, an union, a -/// pointer, a scalar or a vector of scalar. 
All these types should be aggregaed -/// globally, such that that can be used across multiple Units. +/// Define the preferred memory representation for one value. This is mainly +/// used to determine the way to transfer function's arguments. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Clone)] -pub enum ComplexType { +pub enum MemoryRepr { + None, + Register, + RegisterPair, + Vector{ bytes: usize }, + PointerTo{ bytes: usize }, +} + +/// Define how a type is composed. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Clone)] +pub enum DataRepr { /// Functions are used to express the signature of Unit and external /// functions. At the moment, all functions are assumed to follow the same /// calling convention as rust functions. Function(Vec, Vec, CanUnwind), /// Structures are used to map each offsets with its corresponding type. + /// Offsets are expected to be sorted by increasing order. Structure(Vec<(Offset, ComplexTypeId)>), /// Unions are used to select between multiple structures. Union(Vec), @@ -35,7 +45,45 @@ pub enum ComplexType { /// This simplify the problem by not having to handle recursive types. Pointer, /// A Scalar represents a number. - Scalar(number::NumberType), - /// A Vector represents an aggregation of Scalar. - Vector(number::NumberType, usize), + Number(number::NumberType), +} + +/// A complex type is defined as a way to represent a value in memory as well as +/// the meaning of its compoenents. Types are recorded on the Context which is +/// shared across multiple Units, such that one Unit can call another and use +/// the same ComplexTypeId. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Clone)] +pub struct ComplexType { + /// Defines the memory representation of this type and how it is to be + /// stored and transfered. + pub mem: MemoryRepr, + /// Defines the data representation of this type and how it is to be + /// accessed as well as the meaning associated with it. 
+ pub data: DataRepr, +} + +impl ComplexType { + /// Simple new function to create a number stored in a register. + pub fn new_scalar(num: number::NumberType) -> ComplexType { + ComplexType { + mem: MemoryRepr::Register, + data: DataRepr::Number(num), + } + } + + /// Simple new function to create a number stored in a register. + pub fn new_ptr() -> ComplexType { + ComplexType { + mem: MemoryRepr::Register, + data: DataRepr::Pointer, + } + } + + /// Simple new function to create a function pointer. + pub fn new_fn(ins: Vec, outs: Vec, unwind: CanUnwind) -> ComplexType { + ComplexType { + mem: MemoryRepr::Register, + data: DataRepr::Function(ins, outs, unwind), + } + } } From ae023124f3e87844ddfac75bb35d515df96f2cc6 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sun, 12 May 2019 15:56:40 +0200 Subject: [PATCH 30/32] Add a readable Display implementation of the LIR format --- lir/src/data_flow.rs | 40 ++++++++++++++++++++++++++++++++++++++++ lir/src/unit.rs | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+) diff --git a/lir/src/data_flow.rs b/lir/src/data_flow.rs index 8c051d3..d7f2e20 100644 --- a/lir/src/data_flow.rs +++ b/lir/src/data_flow.rs @@ -7,6 +7,7 @@ /// Automatically derive a hashing function for each type, to make sure that we /// can apply patches to a subset of instructions. use std::hash::{Hash, Hasher}; +use std::fmt; use number; use unit; @@ -417,6 +418,45 @@ impl Value { } } +impl fmt::Display for Value { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.is_dummy() { + write!(f, "#(dummy)") + } else { + write!(f, "#{:x}", self.hash) + } + } +} + +/// Wrapper type for a Vec type such that we can implement the Display +/// trait. 
+pub struct VecValue<'a>(pub &'a Vec); +impl<'a> fmt::Display for VecValue<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "[")?; + let mut iter = self.0.iter(); + if let Some(v) = iter.next() { + write!(f, " {}", v)?; + while let Some(v) = iter.next() { + write!(f,", {}", v)?; + } + write!(f, " ")?; + } + write!(f, "]") + } +} + +impl fmt::Display for Instruction { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let hash : u64 = self.into(); + write!(f, "#{:x} = [{:?}](args = {}, deps = {})", hash, self.opcode, VecValue(&self.operands), VecValue(&self.dependencies))?; + if let Some(v) = self.replaced_by { + write!(f, "{{ replaced_by: {} }}", v)?; + } + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/lir/src/unit.rs b/lir/src/unit.rs index 1f6e401..8125b8d 100644 --- a/lir/src/unit.rs +++ b/lir/src/unit.rs @@ -1,6 +1,7 @@ use data_flow::{DataFlow, Value}; use control_flow::ControlFlow; use types::ComplexTypeId; +use std::fmt; /// A LIR Unit is a connected set of basic blocks with an entry and exit blocks. 
/// This might correspond to a Rust function, a subset of a Rust function which @@ -67,3 +68,43 @@ impl Unit { } } } + +impl fmt::Display for Unit { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use data_flow::VecValue; + use control_flow::SequenceIndex; + let width = f.width().unwrap_or(0); + write!(f, "{e:i$}unit {:?} : {} -> {} {{\n", self.id, VecValue(&self.inputs), VecValue(&self.outputs), e = "", i = width)?; + for (index, ref seq) in self.cfg.sequences.iter().enumerate() { + write!(f, "{e:i$}Seq {}:\n", index, e = "", i = width * 2)?; + for v in seq.sequence.iter() { + if v.is_dummy() { + write!(f, "{e:i$}(dummy instruction)\n", e = "", i = width * 3)?; + } else { + let ins = &self.dfg.instructions[v.index()]; + write!(f, "{e:i$}{}\n", ins, e = "", i = width * 3)?; + } + } + if seq.control.is_dummy() { + write!(f, "{e:i$}(dummy control instruction)\n\n", e = "", i = width * 3)?; + } else { + let ins = &self.dfg.instructions[seq.control.index()]; + write!(f, "{e:i$}{}\n\n", ins, e = "", i = width * 3)?; + } + for &(val, succ) in seq.targets.iter() { + let SequenceIndex(seq) = seq.successors[succ.0]; + write!(f, "{e:i$}{} -> Seq {}\n", val, seq, e = "", i = width * 3)?; + } + if let Some(succ) = seq.default { + let SequenceIndex(seq) = seq.successors[succ.0]; + write!(f, "{e:i$}default -> Seq {}\n", seq, e = "", i = width * 3)?; + } + if let Some(succ) = seq.unwind { + let SequenceIndex(seq) = seq.successors[succ.0]; + write!(f, "{e:i$}unwind -> Seq {}\n", seq, e = "", i = width * 3)?; + } + write!(f, "\n")?; + } + write!(f, "{e:i$}}}", e = "", i = width) + } +} From 2c3e7d435b0b4881834d01e88751672d183465a1 Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sun, 12 May 2019 15:57:46 +0200 Subject: [PATCH 31/32] Add an unsafe interface to register statics. The Library macro create an empty tuple pointer storage to reference the space which contains the value of all the statics value sused during the compilation. 
This implies that the library cannot use the untyped tuple type to derive the size, using mem::size_of, to assert its size when registering the memory of statics. --- lib/src/context.rs | 4 +--- lir/src/context.rs | 27 +++++++++++++++++---------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/lib/src/context.rs b/lib/src/context.rs index 095c38a..a767acf 100644 --- a/lib/src/context.rs +++ b/lib/src/context.rs @@ -6,7 +6,6 @@ use bincode; use lir; use codegen::{CodeGenerator, JitCode}; use std::rc::Rc; -use std::mem; /// Opaque structure which is used to store the function mapping, and tune /// the JIT parameters. @@ -35,8 +34,7 @@ impl JitContext { return None } }; - let defs : &'static () = unsafe { mem::transmute(defs) }; - ctx.set_static_refs(defs); + unsafe { ctx.set_static_refs_unchecked(defs) }; let mut codegen = CodeGenerator::new(); match codegen.compile(&ctx, &unit) { Ok(jit) => { diff --git a/lir/src/context.rs b/lir/src/context.rs index 411081a..066d7fb 100644 --- a/lir/src/context.rs +++ b/lir/src/context.rs @@ -116,16 +116,23 @@ impl Context { /// function to panic. pub fn set_static_refs(&mut self, refs: &'static T) { let refs = refs as *const _ as *const(); - if self.refs_ptr != ptr::null() { - // TODO: Panic with a documented error code, or an explicit message - // explaining how to fix this issue. - panic!("set_static_refs can only be called once per context.") - } - if self.expected_refs_size != mem::size_of::() { - // TODO: Panic with a documented error code, or an explicit message - // explaining how to fix this issue. - panic!("set_static_refs called with a tuple of unexpected size.") - } + // TODO: Panic with a documented error code, or an explicit message + // explaining how to fix this issue. + assert_eq!(self.refs_ptr, ptr::null(), + "static refs can only be set once per context."); + // TODO: Panic with a documented error code, or an explicit message + // explaining how to fix this issue. 
+ assert_eq!(self.expected_refs_size, mem::size_of::(), + "set_static_refs called with a tuple of unexpected size {}, expected {}.", + mem::size_of::(), self.expected_refs_size); + self.refs_ptr = refs; + } + + pub unsafe fn set_static_refs_unchecked(&mut self, refs: *const ()) { + // TODO: Panic with a documented error code, or an explicit message + // explaining how to fix this issue. + assert_eq!(self.refs_ptr, ptr::null(), + "static refs can only be set once per context."); self.refs_ptr = refs; } From b7c30f6baee451e3d4e3a927b11eb135a17e89cb Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Sun, 12 May 2019 16:03:16 +0200 Subject: [PATCH 32/32] Disable debug mode to reduce HolyJIT compilations. The debug mode is enabling `-Z dump-mir=all` flag which attemps to spew the content of the `&[u8]` array containing the serialized content. This arrau serialization is time consuming and should be removed in the future by replacing it by a `&str`. --- rustc.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rustc.sh b/rustc.sh index 3a696f5..4e7d192 100755 --- a/rustc.sh +++ b/rustc.sh @@ -1,6 +1,6 @@ #!/bin/sh -debug=true +debug=false # CLI: rustc --crate name path/name.rs wrap=false;