diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 71bdedc9bb..37425e0e50 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -149,6 +149,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
+    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
@@ -1500,6 +1501,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
+    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
 
     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index be39870691..2040b90da0 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -71,6 +71,7 @@ struct RISCVCPUConfig {
     bool ext_zihintntl;
     bool ext_zihintpause;
     bool ext_zihpm;
+    bool ext_ztso;
     bool ext_smstateen;
     bool ext_sstc;
     bool ext_svadu;
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index 267930e5bc..4a9e4591d1 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -40,7 +40,11 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
     }
     tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
-    if (a->aq) {
+    /*
+     * TSO defines AMOs as acquire+release-RCsc, but does not define LR/SC as
+     * AMOs. Instead treat them like loads.
+     */
+    if (a->aq || ctx->ztso) {
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
     }
 
@@ -76,9 +80,10 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
     gen_set_label(l1);
     /*
      * Address comparison failure. However, we still need to
-     * provide the memory barrier implied by AQ/RL.
+     * provide the memory barrier implied by AQ/RL/TSO.
      */
-    tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL);
+    TCGBar bar_strl = (ctx->ztso || a->rl) ? TCG_BAR_STRL : 0;
+    tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + bar_strl);
     gen_set_gpr(ctx, a->rd, tcg_constant_tl(1));
 
     gen_set_label(l2);
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index faf6d65064..ad40d3e87f 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -266,12 +266,20 @@ static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
 
 static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
 {
+    bool out;
+
     decode_save_opc(ctx);
     if (get_xl(ctx) == MXL_RV128) {
-        return gen_load_i128(ctx, a, memop);
+        out = gen_load_i128(ctx, a, memop);
     } else {
-        return gen_load_tl(ctx, a, memop);
+        out = gen_load_tl(ctx, a, memop);
     }
+
+    if (ctx->ztso) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+    }
+
+    return out;
 }
 
 static bool trans_lb(DisasContext *ctx, arg_lb *a)
@@ -328,6 +336,10 @@ static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
     TCGv addr = get_address(ctx, a->rs1, a->imm);
     TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
 
+    if (ctx->ztso) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    }
+
     tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
     return true;
 }
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 9e101ab434..742008f58b 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -636,8 +636,28 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
     tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
     tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
 
+    /*
+     * According to the specification
+     *
+     *   Additionally, if the Ztso extension is implemented, then vector memory
+     *   instructions in the V extension and Zve family of extensions follow
+     *   RVTSO at the instruction level. The Ztso extension does not
+     *   strengthen the ordering of intra-instruction element accesses.
+     *
+     * as a result neither ordered nor unordered accesses from the V
+     * instructions need ordering within the loop but we do still need barriers
+     * around the loop.
+     */
+    if (is_store && s->ztso) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    }
+
     fn(dest, mask, base, tcg_env, desc);
 
+    if (!is_store && s->ztso) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+    }
+
     if (!is_store) {
         mark_vs_dirty(s);
     }
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 177418b2b9..ea5d52b2ef 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -109,6 +109,8 @@ typedef struct DisasContext {
     /* PointerMasking extension */
     bool pm_mask_enabled;
    bool pm_base_enabled;
+    /* Ztso */
+    bool ztso;
     /* Use icount trigger for native debug */
     bool itrigger;
     /* FRM is known to contain a valid value. */
@@ -1196,6 +1198,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->cs = cs;
     ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
     ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
+    ctx->ztso = cpu->cfg.ext_ztso;
     ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
     ctx->zero = tcg_constant_tl(0);
     ctx->virt_inst_excp = false;
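Usage note (not part of the patch): since the change registers "ztso" through MULTI_EXT_CFG_BOOL like the other multi-letter extensions, it should be selectable as an ordinary boolean CPU property once the patch is applied. A minimal invocation sketch; the machine and CPU model here are only examples, not something the patch prescribes:

    qemu-system-riscv64 -machine virt -cpu rv64,ztso=true -nographic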