aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--amd64/isel.c26
-rw-r--r--test/isel2.ssa14
2 files changed, 30 insertions, 10 deletions
diff --git a/amd64/isel.c b/amd64/isel.c
index 07e6142..607c176 100644
--- a/amd64/isel.c
+++ b/amd64/isel.c
@@ -344,6 +344,26 @@ Emit:
if (isload(i.op))
goto case_Oload;
if (iscmp(i.op, &kc, &x)) {
+ switch (x) {
+ case NCmpI+Cfeq:
+ /* zf is set for both equal and
+ * unordered operands, so we may
+ * have to check pf to rule out
+ * a NaN comparison */
+ r0 = newtmp("isel", Kw, fn);
+ r1 = newtmp("isel", Kw, fn);
+ emit(Oand, Kw, i.to, r0, r1);
+ emit(Oflagfo, k, r1, R, R);
+ i.to = r0;
+ break;
+ case NCmpI+Cfne:
+ r0 = newtmp("isel", Kw, fn);
+ r1 = newtmp("isel", Kw, fn);
+ emit(Oor, Kw, i.to, r0, r1);
+ emit(Oflagfuo, k, r1, R, R);
+ i.to = r0;
+ break;
+ }
swap = cmpswap(i.arg, x);
if (swap)
x = cmpop(x);
@@ -388,7 +408,7 @@ seljmp(Blk *b, Fn *fn)
r = b->jmp.arg;
t = &fn->tmp[r.val];
b->jmp.arg = R;
- assert(!req(r, R) && rtype(r) != RCon);
+ assert(rtype(r) == RTmp);
if (b->s1 == b->s2) {
chuse(r, -1, fn);
b->jmp.type = Jjmp;
@@ -400,7 +420,9 @@ seljmp(Blk *b, Fn *fn)
selcmp((Ref[2]){r, CON_Z}, Kw, 0, fn); /* todo, long jnz */
b->jmp.type = Jjf + Cine;
}
- else if (iscmp(fi->op, &k, &c)) {
+ else if (iscmp(fi->op, &k, &c)
+ && c != NCmpI+Cfeq /* see sel() */
+ && c != NCmpI+Cfne) {
swap = cmpswap(fi->arg, c);
if (swap)
c = cmpop(c);
diff --git a/test/isel2.ssa b/test/isel2.ssa
index 280ceb2..8ca4a24 100644
--- a/test/isel2.ssa
+++ b/test/isel2.ssa
@@ -1,7 +1,5 @@
# tests that NaN is handled properly by
# floating point comparisons
-#
-# TODO: fix eq[123](NAN, NAN) on amd64
export function w $lt(d %x, d %y) {
@start
@@ -97,12 +95,12 @@ export function w $ne3(d %x, d %y) {
# + !le(0, 1) + !le(0, 0) + le(1, 0) + le(NAN, NAN)
# + gt(0, 1) + gt(0, 0) + !gt(1, 0) + gt(NAN, NAN)
# + ge(0, 1) + !ge(0, 0) + !ge(1, 0) + ge(NAN, NAN)
-# + eq1(0, 1) + !eq1(0, 0) + eq1(1, 0) /*+ eq1(NAN, NAN)*/
-# + eq2(0, 1) + !eq2(0, 0) + eq2(1, 0) /*+ eq2(NAN, NAN)*/
-# + eq3(0, 1) + !eq3(0, 0) + eq3(1, 0) /*+ eq3(NAN, NAN)*/
-# + !ne1(0, 1) + ne1(0, 0) + !ne1(1, 0) /*+ !ne1(NAN, NAN)*/
-# + !ne2(0, 1) + ne2(0, 0) + !ne2(1, 0) /*+ !ne2(NAN, NAN)*/
-# + !ne3(0, 1) + ne3(0, 0) + !ne3(1, 0) /*+ !ne3(NAN, NAN)*/
+# + eq1(0, 1) + !eq1(0, 0) + eq1(1, 0) + eq1(NAN, NAN)
+# + eq2(0, 1) + !eq2(0, 0) + eq2(1, 0) + eq2(NAN, NAN)
+# + eq3(0, 1) + !eq3(0, 0) + eq3(1, 0) + eq3(NAN, NAN)
+# + !ne1(0, 1) + ne1(0, 0) + !ne1(1, 0) + !ne1(NAN, NAN)
+# + !ne2(0, 1) + ne2(0, 0) + !ne2(1, 0) + !ne2(NAN, NAN)
+# + !ne3(0, 1) + ne3(0, 0) + !ne3(1, 0) + !ne3(NAN, NAN)
# ;
# }
# <<<