Revision | 40d78c85f6f321c00588230a400477250a85c2e7 (tree) |
---|---|
Zeit | 2022-01-21 14:52:56 |
Autor | Frank Chang <frank.chang@sifive.com> |
Committer | Alistair Francis |
target/riscv: rvv-1.0: Add Zve64f support for scalar fp insns
The Zve64f extension requires the scalar processor to implement the F
extension and to implement all vector floating-point instructions for
floating-point operands with EEW=32 (i.e., no widening floating-point
operations).
Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-id: 20220118014522.13613-7-frank.chang@sifive.com
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
@@ -66,6 +66,17 @@ static bool require_scale_rvf(DisasContext *s) | ||
66 | 66 | } |
67 | 67 | } |
68 | 68 | |
69 | +static bool require_zve64f(DisasContext *s) | |
70 | +{ | |
71 | + /* RVV + Zve64f = RVV. */ | |
72 | + if (has_ext(s, RVV)) { | |
73 | + return true; | |
74 | + } | |
75 | + | |
76 | + /* Zve64f doesn't support FP64. (Section 18.2) */ | |
77 | + return s->ext_zve64f ? s->sew <= MO_32 : true; | |
78 | +} | |
79 | + | |
69 | 80 | /* Destination vector register group cannot overlap source mask register. */ |
70 | 81 | static bool require_vm(int vm, int vd) |
71 | 82 | { |
@@ -2206,7 +2217,8 @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a) | ||
2206 | 2217 | return require_rvv(s) && |
2207 | 2218 | require_rvf(s) && |
2208 | 2219 | vext_check_isa_ill(s) && |
2209 | - vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm); | |
2220 | + vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) && | |
2221 | + require_zve64f(s); | |
2210 | 2222 | } |
2211 | 2223 | |
2212 | 2224 | /* OPFVV without GVEC IR */ |
@@ -2286,7 +2298,8 @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a) | ||
2286 | 2298 | return require_rvv(s) && |
2287 | 2299 | require_rvf(s) && |
2288 | 2300 | vext_check_isa_ill(s) && |
2289 | - vext_check_ss(s, a->rd, a->rs2, a->vm); | |
2301 | + vext_check_ss(s, a->rd, a->rs2, a->vm) && | |
2302 | + require_zve64f(s); | |
2290 | 2303 | } |
2291 | 2304 | |
2292 | 2305 | /* OPFVF without GVEC IR */ |
@@ -2503,7 +2516,8 @@ static bool opfv_check(DisasContext *s, arg_rmr *a) | ||
2503 | 2516 | require_rvf(s) && |
2504 | 2517 | vext_check_isa_ill(s) && |
2505 | 2518 | /* OPFV instructions ignore vs1 check */ |
2506 | - vext_check_ss(s, a->rd, a->rs2, a->vm); | |
2519 | + vext_check_ss(s, a->rd, a->rs2, a->vm) && | |
2520 | + require_zve64f(s); | |
2507 | 2521 | } |
2508 | 2522 | |
2509 | 2523 | static bool do_opfv(DisasContext *s, arg_rmr *a, |
@@ -2568,7 +2582,8 @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a) | ||
2568 | 2582 | return require_rvv(s) && |
2569 | 2583 | require_rvf(s) && |
2570 | 2584 | vext_check_isa_ill(s) && |
2571 | - vext_check_mss(s, a->rd, a->rs1, a->rs2); | |
2585 | + vext_check_mss(s, a->rd, a->rs1, a->rs2) && | |
2586 | + require_zve64f(s); | |
2572 | 2587 | } |
2573 | 2588 | |
2574 | 2589 | GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check) |
@@ -2581,7 +2596,8 @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a) | ||
2581 | 2596 | return require_rvv(s) && |
2582 | 2597 | require_rvf(s) && |
2583 | 2598 | vext_check_isa_ill(s) && |
2584 | - vext_check_ms(s, a->rd, a->rs2); | |
2599 | + vext_check_ms(s, a->rd, a->rs2) && | |
2600 | + require_zve64f(s); | |
2585 | 2601 | } |
2586 | 2602 | |
2587 | 2603 | GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check) |
@@ -2602,7 +2618,8 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) | ||
2602 | 2618 | if (require_rvv(s) && |
2603 | 2619 | require_rvf(s) && |
2604 | 2620 | vext_check_isa_ill(s) && |
2605 | - require_align(a->rd, s->lmul)) { | |
2621 | + require_align(a->rd, s->lmul) && | |
2622 | + require_zve64f(s)) { | |
2606 | 2623 | gen_set_rm(s, RISCV_FRM_DYN); |
2607 | 2624 | |
2608 | 2625 | TCGv_i64 t1; |
@@ -3328,7 +3345,8 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a) | ||
3328 | 3345 | { |
3329 | 3346 | if (require_rvv(s) && |
3330 | 3347 | require_rvf(s) && |
3331 | - vext_check_isa_ill(s)) { | |
3348 | + vext_check_isa_ill(s) && | |
3349 | + require_zve64f(s)) { | |
3332 | 3350 | gen_set_rm(s, RISCV_FRM_DYN); |
3333 | 3351 | |
3334 | 3352 | unsigned int ofs = (8 << s->sew); |
@@ -3354,7 +3372,8 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) | ||
3354 | 3372 | { |
3355 | 3373 | if (require_rvv(s) && |
3356 | 3374 | require_rvf(s) && |
3357 | - vext_check_isa_ill(s)) { | |
3375 | + vext_check_isa_ill(s) && | |
3376 | + require_zve64f(s)) { | |
3358 | 3377 | gen_set_rm(s, RISCV_FRM_DYN); |
3359 | 3378 | |
3360 | 3379 | /* The instructions ignore LMUL and vector register group. */ |
@@ -3405,13 +3424,15 @@ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check) | ||
3405 | 3424 | static bool fslideup_check(DisasContext *s, arg_rmrr *a) |
3406 | 3425 | { |
3407 | 3426 | return slideup_check(s, a) && |
3408 | - require_rvf(s); | |
3427 | + require_rvf(s) && | |
3428 | + require_zve64f(s); | |
3409 | 3429 | } |
3410 | 3430 | |
3411 | 3431 | static bool fslidedown_check(DisasContext *s, arg_rmrr *a) |
3412 | 3432 | { |
3413 | 3433 | return slidedown_check(s, a) && |
3414 | - require_rvf(s); | |
3434 | + require_rvf(s) && | |
3435 | + require_zve64f(s); | |
3415 | 3436 | } |
3416 | 3437 | |
3417 | 3438 | GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check) |