target/alpha: Use TCG_COND_TSTNE for gen_fold_mzero

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Richard Henderson
2023-10-24 23:23:24 -07:00
parent c47341f1d4
commit 630ee069c6

target/alpha/translate.c

@@ -490,56 +490,53 @@ static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
 
 /* Fold -0.0 for comparison with COND. */
 
-static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
+static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src)
 {
-    uint64_t mzero = 1ull << 63;
+    TCGv_i64 tmp;
 
-    switch (cond) {
+    *pimm = 0;
+    switch (*pcond) {
     case TCG_COND_LE:
     case TCG_COND_GT:
         /* For <= or >, the -0.0 value directly compares the way we want. */
-        tcg_gen_mov_i64(dest, src);
-        break;
+        return src;
 
     case TCG_COND_EQ:
     case TCG_COND_NE:
-        /* For == or !=, we can simply mask off the sign bit and compare. */
-        tcg_gen_andi_i64(dest, src, mzero - 1);
-        break;
+        /* For == or !=, we can compare without the sign bit. */
+        *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE;
+        *pimm = INT64_MAX;
+        return src;
 
     case TCG_COND_GE:
     case TCG_COND_LT:
         /* For >= or <, map -0.0 to +0.0. */
-        tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero),
-                            src, tcg_constant_i64(0));
-        break;
+        tmp = tcg_temp_new_i64();
+        tcg_gen_movcond_i64(TCG_COND_EQ, tmp,
+                            src, tcg_constant_i64(INT64_MIN),
+                            tcg_constant_i64(0), src);
+        return tmp;
 
     default:
-        abort();
+        g_assert_not_reached();
     }
 }
 
 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                 int32_t disp)
 {
-    TCGv cmp_tmp = tcg_temp_new();
-    DisasJumpType ret;
-
-    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
-    ret = gen_bcond_internal(ctx, cond, cmp_tmp, 0, disp);
-    return ret;
+    uint64_t imm;
+    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
+    return gen_bcond_internal(ctx, cond, tmp, imm, disp);
 }
 
 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
 {
-    TCGv_i64 va, vb, z;
-
-    z = load_zero(ctx);
-    vb = load_fpr(ctx, rb);
-    va = tcg_temp_new();
-    gen_fold_mzero(cond, va, load_fpr(ctx, ra));
+    uint64_t imm;
+    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
 
-    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
+    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc),
+                        tmp, tcg_constant_i64(imm),
+                        load_fpr(ctx, rb), load_fpr(ctx, rc));
 }
 
 #define QUAL_RM_N 0x080 /* Round mode nearest even */
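
Background sketch (not part of the commit): TCG_COND_TSTEQ and TCG_COND_TSTNE are test-under-mask conditions, i.e. they compare (a & b) against zero. The patch relies on the fact that -0.0 is the sign bit alone (1ull << 63, INT64_MIN as a signed value), so "equal or unequal to +/-0.0" only has to look at the low 63 bits, which a single TSTEQ/TSTNE against INT64_MAX expresses without a separate AND op. The standalone program below demonstrates the identity; the helper names fpr_bits, old_eq_zero and new_eq_zero are invented for illustration and do not exist in QEMU.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Reinterpret a double as its IEEE-754 bit pattern, the way a guest FPR holds it. */
static uint64_t fpr_bits(double d)
{
    uint64_t u;
    memcpy(&u, &d, sizeof(u));
    return u;
}

/* Old scheme: AND away the sign bit, then compare the result with zero. */
static bool old_eq_zero(uint64_t x)
{
    uint64_t mzero = 1ull << 63;
    return (x & (mzero - 1)) == 0;
}

/* New scheme: one test-under-mask compare; mzero - 1 == INT64_MAX,
   so this is exactly TCG_COND_TSTEQ with immediate INT64_MAX. */
static bool new_eq_zero(uint64_t x)
{
    return (x & INT64_MAX) == 0;
}

int main(void)
{
    const double samples[] = { 0.0, -0.0, 1.0, -1.0, 2.5, -2.5 };

    /* -0.0 is exactly the sign bit, i.e. INT64_MIN as a signed value. */
    assert(fpr_bits(-0.0) == 1ull << 63);

    /* Both predicates agree on every sample bit pattern. */
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        assert(old_eq_zero(fpr_bits(samples[i])) == new_eq_zero(fpr_bits(samples[i])));
    }
    return 0;
}

The >=/< case needs no TST condition: the movcond merely changes from the TCG_COND_NE form to the equivalent TCG_COND_EQ form with the true/false operands swapped, so the -0.0 pattern (INT64_MIN) is still mapped to +0.0 before the comparison.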