Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
46 commits
Select commit Hold shift + click to select a range
93b5fc7
fixed for loops creation
lfrenot Jun 18, 2025
abaa915
Merge remote-tracking branch 'origin/master' into feature/tensor
lfrenot Jun 18, 2025
5572a10
debugging attempts
lfrenot Jun 18, 2025
5654772
tensor.get, tensor.set working, and normalize_map_reduce rework
lfrenot Jun 18, 2025
3c78639
lots of debugging
lfrenot Jun 23, 2025
8ea06f8
test updates
lfrenot Jun 24, 2025
75cdd0f
affine/lower_for.cpp: add debug output
Jun 24, 2025
0786cd2
working dot_general
lfrenot Jun 25, 2025
4eaa100
prod_2d test case
lfrenot Jun 27, 2025
a98f901
bug fix in rewrite
Jun 30, 2025
749e95e
prod_2d update
lfrenot Jun 30, 2025
c42a7a0
doxygen fix
lfrenot Jul 1, 2025
4463e14
Merge remote-tracking branch 'origin/master' into feature/tensor
lfrenot Jul 3, 2025
57efffa
lower_fox fix
lfrenot Jul 3, 2025
d973537
starting to define transpose and broadcast
lfrenot Jul 7, 2025
a47b97b
bug example in get
lfrenot Jul 7, 2025
fa5b923
Fixes and comments
lfrenot Jul 16, 2025
64d6189
Merge branch 'master' into feature/tensor
Jul 16, 2025
07de13a
compile fixes
Jul 16, 2025
3de888e
polish
Jul 16, 2025
0451559
entangle Ast::bootstrap from emit
Jul 16, 2025
06baa62
bootstrapping now working
Jul 16, 2025
9417b91
Merge branch 'feature/tensor_partially_working' into feature/tensor
lfrenot Jul 17, 2025
007d1e3
updates towards hlo.mim
lfrenot Jul 17, 2025
7b92ca0
other get_set issue
lfrenot Jul 18, 2025
cf5d643
gett fix
lfrenot Jul 18, 2025
a45d85f
bug fix: incorrect rewrite with packs
Jul 19, 2025
d3ea1b1
Merge branch 'master' into feature/tensor
Jul 19, 2025
3c109c8
full hlo.mim
lfrenot Jul 21, 2025
3cf9582
fixed assert
Jul 21, 2025
80cdcca
Cleanup
lfrenot Jul 24, 2025
36ad39a
added stub for tensor::Lower Phase
Jul 26, 2025
3322c42
add reductions for nats
p3-c4 Jul 21, 2025
a50dfd6
doc
p3-c4 Jul 21, 2025
4cbd266
simplify
Jul 23, 2025
4832c43
fine-tune commute
Jul 24, 2025
c785434
make Def::arity more consistent
Aug 6, 2025
1797131
polish
Aug 6, 2025
d5354df
check: fix some performance bugs
Aug 6, 2025
2b991ec
disabled tensor_lower for now
Aug 6, 2025
64c05e9
merge master
Aug 8, 2025
be4a911
bug fix: Def::proj check first for 0_1 case
Aug 8, 2025
916d164
Merge branch 'master' into feature/tensor
Aug 9, 2025
1cf3d98
Merge branch 'master' into feature/tensor
Aug 9, 2025
e72241b
Merge branch 'master' into feature/tensor
Aug 10, 2025
543127c
Merge branch 'master' into feature/tensor
Aug 11, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME)
option(BUILD_TESTING "Build lit and unit tests." OFF) # needs to be BEFORE "include(CTest)"
include(CTest)
cmake_dependent_option(MIM_LIT_WITH_VALGRIND "If ON, the Mim CLI in the lit tests will be run under valgrind." OFF "BUILD_TESTING" OFF)
set(MIM_LIT_TIMEOUT 20 CACHE STRING "Timout for lit tests.")
set(MIM_LIT_TIMEOUT 30 CACHE STRING "Timeout for lit tests.")
if(BUILD_TESTING)
include(GoogleTest)
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
Expand Down
19 changes: 19 additions & 0 deletions include/mim/plug/tensor/phase/lower.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
#pragma once

#include <mim/phase/phase.h>

#include "mim/plug/tensor/tensor.h"

namespace mim::plug::tensor {

/// Phase that lowers high-level `%tensor` operations.
/// Registered as a rewriting phase under the name "tensor_lower".
/// NOTE(review): appears to be a stub — `rewrite_imm` presumably dispatches
/// `%tensor.map_reduce` applications to `lower_map_reduce`; confirm once the
/// phase is re-enabled in the pipeline (it is currently disabled).
class Lower : public RWPhase {
public:
Lower(World& world)
: RWPhase(world, "tensor_lower") {}

private:
/// RWPhase hook: rewrites one immutable Def.
const Def* rewrite_imm(const Def*) override;
/// Lowers a single `%tensor.map_reduce` application.
const Def* lower_map_reduce(const App*);
};

} // namespace mim::plug::tensor
21 changes: 21 additions & 0 deletions include/mim/plug/tensor/tensor.h
Original file line number Diff line number Diff line change
@@ -1,3 +1,24 @@
#pragma once

#include <mim/world.h>

#include "mim/plug/tensor/autogen.h"

namespace mim::plug::tensor {

/// Builds a fully applied `%tensor.get`: `get (T, r, s) (arr, index)`.
/// Mirrors the construction style of op_set below (single curried-app chain
/// instead of repeatedly reassigning a temporary).
/// @param T     element type of the tensor
/// @param r     rank (number of dimensions)
/// @param s     shape — an `r`-tuple of dimension sizes
/// @param arr   the tensor to read from
/// @param index index tuple to read at
/// @return the (normalized) application Def
inline const Def* op_get(const Def* T, const Def* r, const Def* s, const Def* arr, const Def* index) {
    auto& w = arr->world();
    auto f  = w.app(w.annex<tensor::get>(), {T, r, s});
    return w.app(f, {arr, index});
}

/// Builds a fully applied `%tensor.set`: `set (T, r, s) (arr, index, x)`.
/// First applies the type-level arguments (T, r, s), then the value-level
/// arguments (arr, index, x).
inline const Def* op_set(const Def* T, const Def* r, const Def* s, const Def* arr, const Def* index, const Def* x) {
    auto& world  = arr->world();
    auto curried = world.annex<tensor::set>();
    curried      = world.app(curried, {T, r, s});
    return world.app(curried, {arr, index, x});
}

} // namespace mim::plug::tensor
136 changes: 136 additions & 0 deletions lit/tensor/hlo.mim
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
// RUN: %mim %s -O0 -p direct -p affine
// Lit test: an HLO-derived training step expressed with %tensor ops.
// NOTE(review): the shapes suggest a 2-hidden-layer MLP
// (784 -> 1024 -> 1024 -> 10, batch 128) with tanh activations, a
// numerically-stabilized softmax-style reduction, and an SGD update with
// learning rate 1e-3 — confirm against the original HLO module.
plugin tensor;
plugin refly;
plugin core;
plugin direct;
plugin affine;
plugin math;

let f32 = %math.f32;
let F32 = %math.F32;

let R: Ring = (F32, 0: F32, %math.arith.add @f32 0, %math.arith.mul @f32 0);

// Element-wise wrappers over %tensor.unary / %tensor.binary for F32 tensors
// of arbitrary rank r and shape s.
lam F32_add {r: Nat, s: «r; Nat»} [is: [«s; F32», «s; F32»]] : «s; F32» = %tensor.binary (%math.arith.add @f32 0) is;
lam F32_sub {r: Nat, s: «r; Nat»} [is: [«s; F32», «s; F32»]] : «s; F32» = %tensor.binary (%math.arith.sub @f32 0) is;
lam F32_mul {r: Nat, s: «r; Nat»} [is: [«s; F32», «s; F32»]] : «s; F32» = %tensor.binary (%math.arith.mul @f32 0) is;
lam F32_div {r: Nat, s: «r; Nat»} [is: [«s; F32», «s; F32»]] : «s; F32» = %tensor.binary (%math.arith.div @f32 0) is;
lam F32_tanh {r: Nat, s: «r; Nat»} [i: «s; F32»] : «s; F32» = %tensor.unary (%math.tri.tanh @f32 0) i;
lam F32_maximum {r: Nat, s: «r; Nat»} [is: [«s; F32», «s; F32»]] : «s; F32» = %tensor.binary (%math.extrema.fmax @f32 0) is;
lam F32_is_finite {r: Nat, s: «r; Nat»} [i: «s; F32»] : «s; Bool» = %tensor.unary (%math.is_finite @f32 0) i;
lam F32_exp {r: Nat, s: «r; Nat»} [i: «s; F32»] : «s; F32» = %tensor.unary (%math.exp.exp @f32 0) i;
lam F32_abs {r: Nat, s: «r; Nat»} [i: «s; F32»] : «s; F32» = %tensor.unary (%math.abs @f32 0) i;
lam F32_GE {r: Nat, s: «r; Nat»} [is: [«s; F32», «s; F32»]] : «s; Bool» = %tensor.binary (%math.cmp.ge @f32 0) is;
lam F32_minus {r: Nat, s: «r; Nat»} [i: «s; F32»] : «s; F32» = %tensor.unary (%math.minus @f32 0) i;

// args: (W0, b0, W1, b1, W2, b2, input batch, output-side batch);
// returns the six updated parameters.
fun extern main_hlo [arg0: «784; «1024; F32»», arg1: «1024; F32», arg2: «1024; «1024; F32»», arg3: «1024; F32», arg4: «1024; «10; F32»», arg5: «10; F32», arg6: «128; «784; F32»», arg7: «128; «10; F32»»]
: [«784; «1024; F32»», «1024; F32», «1024; «1024; F32»», «1024; F32», «1024; «10; F32»», «10; F32»]
// Forward pass: layer k is tanh(x · Wk + bk).
= let v_0 = %tensor.product_2d R (arg6, arg0);
/// let v_1 : «1, 1024; F32» = arg1;
let v_2 = %tensor.broadcast ((1, 1024), (128, 1024), arg1);
let v_3 = F32_add @(2, (128, 1024)) (v_0, v_2);
let v_4 = F32_tanh @(2, (128, 1024)) v_3;
let v_cst = 1: F32;
let v_5 = ‹128, 1024; v_cst›;
let v_6 = F32_sub @(2, (128, 1024)) (v_5, v_4);
let v_7 = %tensor.product_2d R (v_4, arg2);
/// let v_8 : «1, 1024; F32» = arg3;
let v_9 = %tensor.broadcast ((1, 1024), (128, 1024), arg3);
let v_10 = F32_add @(2, (128, 1024)) (v_7, v_9);
let v_11 = F32_tanh @(2, (128, 1024)) v_10;
let v_12 = ‹128; ‹1024; v_cst››;
let v_13 = F32_sub @(2, (128, 1024)) (v_12, v_11);
let v_14 = %tensor.product_2d R (v_11, arg4);
/// let v_15 : «1, 10; F32» = arg5;
let v_16 = %tensor.broadcast ((1, 10), (128, 10), arg5);
let v_17 = F32_add @(2, (128, 10)) (v_14, v_16);
// 0xFF800000 is the IEEE-754 bit pattern of -inf: the row-wise fmax reduction
// below computes a per-row maximum (presumably for a stable softmax).
let v_cst_0 = 0xFF800000 : F32;
let v_18 = %tensor.map_reduce_pure @1 (128) @(F32, 2, (128, 10)) ((%math.extrema.fmax @f32 0), v_cst_0) (0, 1) v_17;
let v_cst_1 = 0xFF800000 : F32;
let v_19 = ‹128; v_cst_1›;
let v_20 = F32_maximum (v_19, v_18);
/// let v_21 : «128, 1; F32» = v_20;
let v_22 = F32_is_finite v_20;
let v_cst_2 = 0: F32;
let v_23 = ‹128; v_cst_2›;
let v_24 = %tensor.select (v_22, v_20, v_23);
let v_25 = %tensor.broadcast ((128, 1), (128, 10), v_24);
let v_26 = F32_sub @(2, (128, 10)) (v_17, v_25);
let v_27 = F32_exp @(2, (128, 10)) v_26;
let v_cst_3 = 0: F32;
let v_28 = %tensor.map_reduce_pure @1 (128) @(F32, 2, (128, 10)) ((%math.arith.add @f32 0), v_cst_3) (0, 1) v_27;
/// let v_29 : «128, 1; F32» = v_28;
let v_30 = F32_abs v_28;
let v_31 = ‹128, 1; v_cst_2›;
let v_32 = F32_GE (v_28, v_31);
let v_33 = %math.minus 0 v_cst;
let v_cst_4 = 1.280000e+02: F32;
let v_34 = %math.arith.div 0 (v_33, v_cst_4);
/// let v_35 = ‹128; v_34›;
let v_36 = %tensor.broadcast ((1, 1), (128, 10), v_34);
let v_37 = F32_mul @(2, (128, 10)) (v_36, arg7);
let v_38 = F32_minus @(2, (128, 10)) v_37;
let v_cst_5 = 0: F32;
let v_39 = %tensor.map_reduce_pure @1 (128) @(F32, 2, (128, 10)) ((%math.arith.add @f32 0), v_cst_5) (0, 1) v_38;
/// let v_40 : «128, 1; F32» = v_39;
let v_41 = F32_div (v_39, v_30);
let v_42 = ‹128, 1; v_cst_2›;
let v_43 = %tensor.select (v_32, v_42, v_41);
let v_44 = %tensor.select (v_32, v_41, v_42);
let v_45 = F32_minus v_43;
let v_46 = F32_add (v_44, v_45);
let v_cst_6 = 0: F32;
/// let v_47 = %tensor.map_reduce_pure @1 (128) @(F32, 2, (128, 1)) ((%math.arith.add @f32 0), v_cst_6) (0, 1) v_46;
let v_48 = %tensor.broadcast ((128, 1), (128, 10), v_46);
let v_49 = F32_mul @(2, (128, 10)) (v_48, v_27);
let v_50 = F32_add @(2, (128, 10)) (v_37, v_49);
let v_cst_7 = 0: F32;
let v_51 = %tensor.map_reduce_pure @1 (10) @(F32, 2, (128, 10)) ((%math.arith.add @f32 0), v_cst_7) (1, 0) v_50;
/// let v_52 : «1, 10; F32» = v_51;
let v_cst_8 = 0: F32;
/// let v_53 = %tensor.map_reduce_pure @1 (10) @(F32, 2, (1, 10)) ((%math.arith.add @f32 0), v_cst_8) (1, 0) v_52;
// Backward pass: weight/bias gradients via dot_product contractions
// (contracting over the batch dimension resp. the feature dimension).
let v_54 = %tensor.dot_product R @(2, 2) (0_2, 0_2, (), ()) (v_50, v_11);
let v_55 = %tensor.transpose_2d v_54;
let v_56 = %tensor.dot_product R @(2, 2) (1_2, 1_2, (), ()) (v_50, arg4);
let v_57 = F32_mul @(2, (128, 1024)) (v_56, v_13);
let v_58 = F32_mul @(2, (128, 1024)) (v_56, v_11);
let v_59 = F32_add @(2, (128, 1024)) (v_57, v_58);
let v_cst_9 = 0: F32;
let v_60 = %tensor.map_reduce_pure @1 (1024) @(F32, 2, (128, 1024)) ((%math.arith.add @f32 0), v_cst_9) (1, 0) v_59;
/// let v_61 : «1, 1024; F32» = v_60;
let v_cst_10 = 0: F32;
/// let v_62 = %tensor.map_reduce_pure @1 (1024) @(F32, 2, (1, 1024)) ((%math.arith.add @f32 0), v_cst_10) (1, 0) v_61;
let v_63 = %tensor.dot_product R @(2, 2) (0_2, 0_2, (), ()) (v_59, v_4);
let v_64 = %tensor.transpose_2d v_63;
let v_65 = %tensor.dot_product R @(2, 2) (1_2, 1_2, (), ()) (v_59, arg2);
let v_66 = F32_mul @(2, (128, 1024)) (v_65, v_6);
let v_67 = F32_mul @(2, (128, 1024)) (v_66, v_4);
let v_68 = F32_add @(2, (128, 1024)) (v_66, v_67);
let v_cst_11 = 0: F32;
let v_69 = %tensor.map_reduce_pure @1 (1024) @(F32, 2, (128, 1024)) ((%math.arith.add @f32 0), v_cst_11) (1, 0) v_68;
/// let v_70 : «1, 1024; F32» = v_69;
let v_cst_12 = 0: F32;
/// let v_71 = %tensor.map_reduce_pure @1 (1024) @(F32, 2, (1, 1024)) ((%math.arith.add @f32 0), v_cst_12) (1, 0) v_70;
let v_72 = %tensor.dot_product R @(2, 2) (0_2, 0_2, (), ()) (v_68, arg6);
let v_73 = %tensor.transpose_2d v_72;
// Parameter update: p <- p - 1e-3 * grad, for each of the six parameters.
let v_cst_13 = 1.000000e-03: F32;
let v_74 = ‹784, 1024; v_cst_13›;
let v_75 = F32_mul @(2, (784, 1024)) (v_74, v_73);
let v_76 = F32_sub @(2, (784, 1024)) (arg0, v_75);
let v_77 = ‹1024; v_cst_13›;
let v_78 = F32_mul (v_77, v_69);
let v_79 = F32_sub (arg1, v_78);
let v_80 = ‹1024, 1024; v_cst_13›;
let v_81 = F32_mul @(2, (1024, 1024)) (v_80, v_64);
let v_82 = F32_sub @(2, (1024, 1024)) (arg2, v_81);
let v_83 = ‹1024; v_cst_13›;
let v_84 = F32_mul (v_83, v_60);
let v_85 = F32_sub (arg3, v_84);
let v_86 = ‹1024, 10; v_cst_13›;
let v_87 = F32_mul @(2, (1024, 10)) (v_86, v_55);
let v_88 = F32_sub @(2, (1024, 10)) (arg4, v_87);
let v_89 = ‹10; v_cst_13›;
let v_90 = F32_mul (v_89, v_51);
let v_91 = F32_sub (arg5, v_90);
return (v_76, v_79, v_82, v_85, v_88, v_91);
/// return (v_76, arg1, arg2, arg3, arg4, arg5);
21 changes: 21 additions & 0 deletions lit/tensor/map_reduce.mim
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// RUN: %mim %s
// Lit test: a hand-written %tensor.map_reduce contraction must be
// structurally equal to the %tensor.dot_product and %tensor.product_2d
// shortcuts on the same 2x2 matrix.
plugin tensor;
plugin refly;
plugin core;
plugin direct;
plugin affine;

// 2x2 test matrix.
let m0 = ((1, 2), (3, 4));

// Ring over Nat: (carrier, zero, add, mul) — the semiring used for the contraction.
let R: Ring = (Nat, 0, %core.nat.add, %core.nat.mul);

let f = dot_general_fun R;

// The matrix product m0 * m0, expressed three ways.
let dot = %tensor.map_reduce ‹2; 2› @(‹2; Nat›, ‹2; 2›, ‹2; ‹2; 2››) (f, 0) ((0, 2), (2, 1)) (m0, m0);
let dotd = %tensor.dot_product R @(2, 2) (1_2, 0_2, (), ()) (m0, m0);
let dot2d = %tensor.product_2d R (m0, m0);

// struc_eq checks structural equality of the two defs (the test fails otherwise).
let _ = %refly.equiv.struc_eq (dot, dotd);
let _ = %refly.equiv.struc_eq (dot2d, dotd);

fun extern main() = return (dot, dotd, dot2d);
103 changes: 103 additions & 0 deletions lit/tensor/normalize.mim
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
// RUN: %mim %s
// Lit test: normalization of %tensor.get / %tensor.set, plus a step-by-step
// reconstruction of the shape/index bookkeeping behind a dot_general-style
// %tensor.map_reduce contraction.
plugin tensor;
plugin refly;
plugin core;
plugin direct;
plugin affine;

let m0 = ((1, 2), (3, 4));
let m1 = ((1, 2, 3), (4, 5, 6));

// Reads at static indices normalize to the selected element.
let g1 = %tensor.get @(Nat, 1, 1) (3, 0_1);
let g2 = %tensor.get @(Nat, 2, (2, 2)) (m0, (1_2, 0_2));
let g3 = %tensor.get @(Nat, 2, (2, 3)) (m1, (1_2, 2_3));

let _ = %refly.equiv.struc_eq (g1, 3);
let _ = %refly.equiv.struc_eq (g2, 3);
let _ = %refly.equiv.struc_eq (g3, 6);

// Writes at static indices normalize to an updated literal tensor.
let s1 = %tensor.set @(Nat, 1, 1) (3, 0_1, 5);
let s2 = %tensor.set @(Nat, 2, (2, 2)) (m0, (1_2, 0_2), 12);
let s3 = %tensor.set @(Nat, 2, (2, 3)) (m1, (1_2, 2_3), 24);

let _ = %refly.equiv.struc_eq (s1, 5);
let _ = %refly.equiv.struc_eq (s2, ((1, 2), (12, 4)));
let _ = %refly.equiv.struc_eq (s3, ((1, 2, 3), (4, 5, 24)));

// f1 is not referenced below — presumably kept as a parse/elaboration check.
fun f1 (n m: Nat): Nat = return m;

lam f(): Ring = (Nat, 0, %core.nat.add, %core.nat.mul);
let R = f ();

// dot_general setup: rank r, number of contracted (nc) and batched (nb)
// dimensions, contraction axes c1/c2, empty batch axes b.
let r = 2;
let nc = 1;
let nb = 0;
let s = (2, 2);
let c1 = 1_2;
let c2 = 0_2;
let b = ();

let m = 2;
let k = 2;
let l = 2;
let t1 = m0;
let t2 = m0;

// NB: shadows the %tensor.set results s1/s2 above; from here on s1/s2 are shapes.
let s1 = (m, k);
let s2 = (k, l);

let bs = ‹i: nb; s1#(b#i)›;
let _ = %refly.equiv.struc_eq (bs, ());

let bc_1 = %tuple.cat_uniform (b, c1);
let _ = %refly.equiv.struc_eq (bc_1, 1_2);

let s1_res = %vec.diff (s1, bc_1);
let _ = %refly.equiv.struc_eq (s1_res, m);

let n_s1_res = %vec.len s1_res;
let _ = %refly.equiv.struc_eq (n_s1_res, 1);

let bc_2 = %tuple.cat_uniform (b, c2);
let _ = %refly.equiv.struc_eq (bc_2, 0_2);

let s2_res = %vec.diff (s2, bc_2);
let _ = %refly.equiv.struc_eq (s2_res, l);

// Fixed copy-paste slip: this measured s1_res before; it must measure s2_res.
// (Both happen to have length 1 here, so the assert below passed either way.)
let n_s2_res = %vec.len s2_res;
let _ = %refly.equiv.struc_eq (n_s2_res, 1);

let s12_res = %tuple.cat_uniform (s1_res, s2_res);
let _ = %refly.equiv.struc_eq (s12_res, (m, l));

let s_out = %tuple.cat_uniform (bs, s12_res);
let _ = %refly.equiv.struc_eq (s_out, (m, l));

let f = dot_general_fun R;
let r_out = %vec.len s_out;

// Non-contracted axes of each operand.
let a1 = %vec.diff (‹i: r; i›, bc_1);
let a2 = %vec.diff (‹i: r; i›, bc_2);
let _ = %refly.equiv.struc_eq (a1, 0_2);
let _ = %refly.equiv.struc_eq (a2, 1_2);

// Einsum index maps for the two operands.
let ein_1 = ‹i: r; dot_general_pick (a1, b, c1) (0, r_out) i›;
let ein_2 = ‹i: r; dot_general_pick (a2, b, c2) (1, r_out) i›;

let (ab, ai) = %vec.first (%core.icmp.e @2) (a2, 1_2);
let (bb, bi) = %vec.first (%core.icmp.e @2) (b, 1_2);
let (cb, ci) = %vec.first (%core.icmp.e @2) (c2, 1_2);
let bi_nat = %core.bitcast Nat bi;
let ci_nat = %core.bitcast Nat ci;
let ci_out = %core.nat.add (2, ci_nat);
let ai_nat = %core.bitcast Nat ai;
let ai_out = %core.nat.add (1, ai_nat);
// b is empty and c2 does not contain 1_2, so both searches fail (ff).
let _ = %refly.equiv.struc_eq (bb, ff);
let _ = %refly.equiv.struc_eq (cb, ff);
let _ = %refly.equiv.struc_eq (ci_out, 2);

let _ = %refly.equiv.struc_eq (ein_2, (2, 1));

let dot = %tensor.map_reduce @2 @(Nat, 2) s_out @(‹2; Nat›, ‹2; 2›, ((m, k), (k, l))) (f, 0) (ein_1, ein_2) (t1, t2);

fun extern main() = return dot;
17 changes: 17 additions & 0 deletions lit/tensor/transpose.mim
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
// RUN: %mim %s
// Lit test for %tensor.transpose, %tensor.broadcast and %tensor.broadcast_in_dim.
plugin tensor;
plugin refly;
plugin core;
plugin direct;
plugin affine;

let m0 = ((1, 2), (3, 4));

// Rank-2 transpose; the rank-4 variants use explicit index permutations.
let m1 = %tensor.transpose @(Nat, 2, (2, 2)) (m0, (tt, ff));
let m2 = %tensor.transpose @(Nat, 4, (1, 1, 2, 2)) (m0, (2_4, 1_4, 0_4, 3_4));
let m21 = %tensor.transpose @(Nat, 4, (2, 2, 1, 1)) (m0, (0_4, 3_4, 1_4, 2_4));
// Broadcast (2, 1, 2) -> (2, 50, 2): each row is repeated 50 times along the
// middle axis, as pinned by the struc_eq below.
let m3 = %tensor.broadcast ((2, 1, 2), (2, 50, 2), m0);
let m4 = %tensor.broadcast_in_dim ((2, 1, 2), (2, 50, 2), m0, (2_3, 1_3, 0_3));
let _ = %refly.equiv.struc_eq (m3, (‹50; (1, 2)›, ‹50; (3, 4)›));

fun extern main() = return (m1, m2, m21, m3, m4);
2 changes: 2 additions & 0 deletions lit/vec/normalize.mim
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,10 @@ let _ = %refly.equiv.struc_eq (d2, (23, 23, 23, 23));

let g1 = %vec.first %core.ncmp.e ((8, 1, 3, 6, 9, 0), 1);
let g2 = %vec.last %core.ncmp.e ((8, 1, 3, 6, 9, 0), 2);
let g3 = %vec.first (%core.icmp.e @10) ((8_10, 1_10, 3_10, 6_10, 9_10, 0_10), 1_10 );
let _ = %refly.equiv.struc_eq (g1, (tt, 1_6));
let _ = %refly.equiv.struc_eq (g2, (ff, 0_6));
let _ = %refly.equiv.struc_eq (g3, (tt, 1_6));

let r1 = ‹i: 3; i›;
let r2 = ‹i: 3; %core.wrap.add 0 (i, 1_3)›;
Expand Down
3 changes: 2 additions & 1 deletion src/mim/plug/compile/compile.mim
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ axm %compile.direct_plugin: %compile.Plugin;
axm %compile.refly_plugin: %compile.Plugin;
axm %compile.regex_plugin: %compile.Plugin;
axm %compile.matrix_plugin: %compile.Plugin;
axm %compile.tensor_plugin: %compile.Plugin;
///
/// ### %%opt.is_loaded
///
Expand Down Expand Up @@ -146,7 +147,7 @@ lam extern _fallback_compile(): %compile.Pipeline = default_core_pipeline;
///
/// ### Dependent Passes and Phases
///
let empty_pass = %compile.nullptr_pass;
let empty_pass = %compile.nullptr_pass;
let empty_phase = %compile.passes_to_phase ();

axm %compile.plugin_select: [T: *] → %compile.Plugin → T → T → T;
Expand Down
6 changes: 6 additions & 0 deletions src/mim/plug/math/math.mim
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,12 @@ axm %math.cmp(ugle = f, uglE = e, ugLe = l, ugLE = le,
Ugle = u, UglE = ue, UgLe = ul, UgLE = ule,
UGle = ug, UGlE = uge, UGLe = une, UGLE = t):
{pe: «2; Nat»} → Nat → «2; %math.F pe» → Bool, normalize_cmp;
///
/// ### %%math.is_finite
///
/// Returns true if the float is finite, and false otherwise (i.e. if it is +Inf, -Inf, or NaN).
///
axm %math.is_finite: {pe: «2; Nat»} → Nat → %math.F pe → Bool;
///
/// ### %%math.conv
///
Expand Down
Loading