diff --git a/gcc/config/riscv/arcv-rhx100.md b/gcc/config/riscv/arcv-rhx100.md
new file mode 100644
index 000000000000..398f13131606
--- /dev/null
+++ b/gcc/config/riscv/arcv-rhx100.md
@@ -0,0 +1,113 @@
+;; DFA scheduling description of the Synopsys RHX-100 cpu
+;; for GNU C compiler
+;; Copyright (C) 2023 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "arcv_rhx100")
+
+(define_cpu_unit "arcv_rhx100_ALU_A_fuse0_early" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_ALU_A_fuse1_early" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_ALU_B_fuse0_early" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_ALU_B_fuse1_early" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_MPY32" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_DIV" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_DMP_fuse0" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_DMP_fuse1" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_fdivsqrt" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_issueA_fuse0" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_issueA_fuse1" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_issueB_fuse0" "arcv_rhx100")
+(define_cpu_unit "arcv_rhx100_issueB_fuse1" "arcv_rhx100")
+
+;; Instruction reservation for arithmetic instructions (pipe A, pipe B).
+(define_insn_reservation "arcv_rhx100_alu_early_arith" 1
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "unknown,move,const,arith,shift,slt,multi,auipc,nop,logical,\
+ bitmanip,min,max,minu,maxu,clz,ctz,atomic,\
+ condmove,mvpair,zicond,cpop,clmul"))
+ "((arcv_rhx100_issueA_fuse0 + arcv_rhx100_ALU_A_fuse0_early) | (arcv_rhx100_issueA_fuse1 + arcv_rhx100_ALU_A_fuse1_early)) | ((arcv_rhx100_issueB_fuse0 + arcv_rhx100_ALU_B_fuse0_early) | (arcv_rhx100_issueB_fuse1 + arcv_rhx100_ALU_B_fuse1_early))")
+
+(define_insn_reservation "arcv_rhx100_imul_fused" 4
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "imul_fused"))
+ "(arcv_rhx100_issueA_fuse0 + arcv_rhx100_issueA_fuse1 + arcv_rhx100_ALU_A_fuse0_early + arcv_rhx100_ALU_A_fuse1_early + arcv_rhx100_MPY32), nothing*3")
+
+(define_insn_reservation "arcv_rhx100_alu_fused" 1
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "alu_fused"))
+ "(arcv_rhx100_issueA_fuse0 + arcv_rhx100_issueA_fuse1 + arcv_rhx100_ALU_A_fuse0_early + arcv_rhx100_ALU_A_fuse1_early) | (arcv_rhx100_issueB_fuse0 + arcv_rhx100_issueB_fuse1 + arcv_rhx100_ALU_B_fuse0_early + arcv_rhx100_ALU_B_fuse1_early)")
+
+(define_insn_reservation "arcv_rhx100_jmp_insn" 1
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "branch,jump,call,jalr,ret,trap"))
+ "arcv_rhx100_issueA_fuse0 | arcv_rhx100_issueA_fuse1")
+
+(define_insn_reservation "arcv_rhx100_div_insn" 12
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "idiv"))
+ "arcv_rhx100_issueA_fuse0 + arcv_rhx100_DIV, nothing*11")
+
+(define_insn_reservation "arcv_rhx100_mpy32_insn" 4
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "imul"))
+ "arcv_rhx100_issueA_fuse0 + arcv_rhx100_MPY32, nothing*3")
+
+(define_insn_reservation "arcv_rhx100_load_insn" 3
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "load,fpload"))
+ "(arcv_rhx100_issueB_fuse0 + arcv_rhx100_DMP_fuse0) | (arcv_rhx100_issueB_fuse1 + arcv_rhx100_DMP_fuse1)")
+
+(define_insn_reservation "arcv_rhx100_store_insn" 1
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "store,fpstore"))
+ "(arcv_rhx100_issueB_fuse0 + arcv_rhx100_DMP_fuse0) | (arcv_rhx100_issueB_fuse1 + arcv_rhx100_DMP_fuse1)")
+
+;; (soft) floating points
+(define_insn_reservation "arcv_rhx100_xfer" 3
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "mfc,mtc,fcvt,fcvt_i2f,fcvt_f2i,fmove,fcmp"))
+ "(arcv_rhx100_ALU_A_fuse0_early | arcv_rhx100_ALU_B_fuse0_early), nothing*2")
+
+(define_insn_reservation "arcv_rhx100_fmul" 5
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "fadd,fmul,fmadd"))
+ "(arcv_rhx100_ALU_A_fuse0_early | arcv_rhx100_ALU_B_fuse0_early)")
+
+(define_insn_reservation "arcv_rhx100_fdiv" 20
+ (and (eq_attr "tune" "arcv_rhx100")
+ (eq_attr "type" "fdiv,fsqrt"))
+ "arcv_rhx100_fdivsqrt*20")
+
+;(final_presence_set "arcv_rhx100_issueA_fuse1" "arcv_rhx100_issueA_fuse0")
+;(final_presence_set "arcv_rhx100_issueB_fuse1" "arcv_rhx100_issueB_fuse0")
+;(final_presence_set "arcv_rhx100_ALU_A_fuse1_early" "arcv_rhx100_ALU_A_fuse0_early")
+;(final_presence_set "arcv_rhx100_ALU_B_fuse1_early" "arcv_rhx100_ALU_B_fuse0_early")
+
+;; Bypasses
+;(define_bypass 0 "arcv_rhx100_alu_early_arith" "arcv_rhx100_store_insn" "riscv_store_data_bypass_p")
+(define_bypass 1 "arcv_rhx100_alu_early_arith" "arcv_rhx100_store_insn" "riscv_store_data_bypass_p")
+
+;(define_bypass 0 "arcv_rhx100_load_insn" "arcv_rhx100_store_insn" "riscv_store_data_bypass_p")
+(define_bypass 1 "arcv_rhx100_load_insn" "arcv_rhx100_store_insn" "riscv_store_data_bypass_p")
+(define_bypass 1 "arcv_rhx100_load_insn" "arcv_rhx100_alu_early_arith")
+(define_bypass 1 "arcv_rhx100_load_insn" "arcv_rhx100_mpy*_insn")
+(define_bypass 2 "arcv_rhx100_load_insn" "arcv_rhx100_load_insn")
+(define_bypass 1 "arcv_rhx100_load_insn" "arcv_rhx100_div_insn")
+
+(define_bypass 3 "arcv_rhx100_mpy32_insn" "arcv_rhx100_mpy*_insn")
+(define_bypass 3 "arcv_rhx100_mpy32_insn" "arcv_rhx100_div_insn")
diff --git a/gcc/config/riscv/iterators.md b/gcc/config/riscv/iterators.md
index 35de17f76cd9..df979031cd88 100644
--- a/gcc/config/riscv/iterators.md
+++ b/gcc/config/riscv/iterators.md
@@ -218,6 +218,8 @@
(zero_extract "srliw")])
(define_code_attr extract_shift [(sign_extract "ashiftrt")
(zero_extract "lshiftrt")])
+(define_code_attr is_zero_extract [(sign_extract "false")
+ (zero_extract "true")])
;; This code iterator allows the two right shift instructions to be
;; generated from the same template.
diff --git a/gcc/config/riscv/riscv-c.cc b/gcc/config/riscv/riscv-c.cc
index d497326e0611..52d240ceb89f 100644
--- a/gcc/config/riscv/riscv-c.cc
+++ b/gcc/config/riscv/riscv-c.cc
@@ -149,6 +149,9 @@ riscv_cpu_cpp_builtins (cpp_reader *pfile)
builtin_define_with_int_value ("__riscv_th_v_intrinsic",
riscv_ext_version_value (0, 11));
+ if (riscv_is_micro_arch (arcv_rhx100))
+ builtin_define ("__riscv_rhx");
+
/* Define architecture extension test macros. */
builtin_define_with_int_value ("__riscv_arch_test", 1);
diff --git a/gcc/config/riscv/riscv-cores.def b/gcc/config/riscv/riscv-cores.def
index d1708f3785b6..3b5da61d0bc9 100644
--- a/gcc/config/riscv/riscv-cores.def
+++ b/gcc/config/riscv/riscv-cores.def
@@ -51,6 +51,7 @@ RISCV_TUNE("xt-c920v2", generic, generic_ooo_tune_info)
RISCV_TUNE("xiangshan-nanhu", xiangshan, xiangshan_nanhu_tune_info)
RISCV_TUNE("xiangshan-kunminghu", xiangshan, generic_ooo_tune_info)
RISCV_TUNE("arc-v-rmx-100-series", arcv_rmx100, arcv_rmx100_tune_info)
+RISCV_TUNE("arc-v-rhx-100-series", arcv_rhx100, arcv_rhx100_tune_info)
RISCV_TUNE("generic-ooo", generic_ooo, generic_ooo_tune_info)
RISCV_TUNE("size", generic, optimize_size_tune_info)
RISCV_TUNE("mips-p8700", mips_p8700, mips_p8700_tune_info)
diff --git a/gcc/config/riscv/riscv-opts.h b/gcc/config/riscv/riscv-opts.h
index 7be10413b4d9..632d426503be 100644
--- a/gcc/config/riscv/riscv-opts.h
+++ b/gcc/config/riscv/riscv-opts.h
@@ -62,6 +62,7 @@ enum riscv_microarchitecture_type {
mips_p8700,
tt_ascalon_d8,
arcv_rmx100,
+ arcv_rhx100,
};
extern enum riscv_microarchitecture_type riscv_microarchitecture;
diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h
index 5881cb9529ce..be047d7b7692 100644
--- a/gcc/config/riscv/riscv-protos.h
+++ b/gcc/config/riscv/riscv-protos.h
@@ -826,6 +826,10 @@ extern unsigned int th_int_get_mask (unsigned int);
extern unsigned int th_int_get_save_adjustment (void);
extern rtx th_int_adjust_cfi_prologue (unsigned int);
extern const char *th_asm_output_opcode (FILE *asm_out_file, const char *p);
+
+extern bool riscv_macro_fusion_p ();
+extern bool riscv_is_micro_arch (enum riscv_microarchitecture_type);
+
#ifdef RTX_CODE
extern const char*
th_mempair_output_move (rtx[4], bool, machine_mode, RTX_CODE);
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index 8719c2942b55..60ac19ea6726 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -291,6 +291,7 @@ enum riscv_fusion_pairs
RISCV_FUSE_BFEXT = (1 << 11),
RISCV_FUSE_EXPANDED_LD = (1 << 12),
RISCV_FUSE_B_ALUI = (1 << 13),
+ RISCV_FUSE_ARCV = (1 << 14),
};
/* Costs of various operations on the different architectures. */
@@ -339,6 +340,12 @@ unsigned riscv_stack_boundary;
/* Whether in riscv_output_mi_thunk. */
static bool riscv_in_thunk_func = false;
+static int alu_pipe_scheduled_p;
+static int pipeB_scheduled_p;
+
+static rtx_insn *last_scheduled_insn;
+static short cached_can_issue_more;
+
/* If non-zero, this is an offset to be added to SP to redefine the CFA
when restoring the FP register from the stack. Only valid when generating
the epilogue. */
@@ -709,6 +716,30 @@ static const struct riscv_tune_param arcv_rmx100_tune_info = {
NULL, /* loop_align */
};
+/* Costs to use when optimizing for Synopsys RHX-100. */
+static const struct riscv_tune_param arcv_rhx100_tune_info = {
+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_add */
+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_mul */
+ {COSTS_N_INSNS (20), COSTS_N_INSNS (20)}, /* fp_div */
+ {COSTS_N_INSNS (4), COSTS_N_INSNS (4)}, /* int_mul */
+ {COSTS_N_INSNS (27), COSTS_N_INSNS (43)}, /* int_div */
+ 4, /* issue_rate */
+ 9, /* branch_cost */
+ 2, /* memory_cost */
+ 8, /* fmv_cost */
+ false, /* slow_unaligned_access */
+ false, /* vector_unaligned_access */
+ false, /* use_divmod_expansion */
+ false, /* overlap_op_by_pieces */
+ true, /* use_zero_stride_load */
+ false, /* speculative_sched_vsetvl */
+ RISCV_FUSE_ARCV, /* fusible_ops */
+ NULL, /* vector cost */
+ NULL, /* function_align */
+ NULL, /* jump_align */
+ NULL, /* loop_align */
+};
+
/* Costs to use when optimizing for size. */
static const struct riscv_tune_param optimize_size_tune_info = {
{COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
@@ -878,6 +909,12 @@ typedef enum
typedef insn_code (*code_for_push_pop_t) (machine_mode);
+bool
+riscv_is_micro_arch (enum riscv_microarchitecture_type arch)
+{
+ return (riscv_microarchitecture == arch);
+}
+
void riscv_frame_info::reset(void)
{
total_size = 0;
@@ -4306,7 +4343,8 @@ riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UN
}
gcc_fallthrough ();
case SIGN_EXTRACT:
- if (TARGET_XTHEADBB && outer_code == SET
+ if ((riscv_is_micro_arch (arcv_rhx100) || TARGET_XTHEADBB)
+ && outer_code == SET
&& CONST_INT_P (XEXP (x, 1))
&& CONST_INT_P (XEXP (x, 2)))
{
@@ -10815,6 +10853,21 @@ riscv_sched_init (FILE *, int, int)
static int
riscv_sched_variable_issue (FILE *, int, rtx_insn *insn, int more)
{
+ /* Beginning of cycle - reset variables. */
+ if (more == tune_param->issue_rate)
+ {
+ alu_pipe_scheduled_p = 0;
+ pipeB_scheduled_p = 0;
+ }
+
+  /* Only constrain issue for the RHX-100 dual-pipe model; the flags are
+     also set by the generic SCHED_GROUP_P path below, so without this
+     guard other fused tunes would be throttled as well.  */
+  if (riscv_is_micro_arch (arcv_rhx100)
+      && alu_pipe_scheduled_p && pipeB_scheduled_p)
+    {
+      cached_can_issue_more = 0;
+      return 0;
+    }
+
+ cached_can_issue_more = more;
+
if (DEBUG_INSN_P (insn))
return more;
@@ -10860,6 +10913,28 @@ riscv_sched_variable_issue (FILE *, int, rtx_insn *insn, int more)
}
}
+ if (next_insn (insn) && INSN_P (next_insn (insn))
+ && SCHED_GROUP_P (next_insn (insn)))
+ {
+ if (get_attr_type (insn) == TYPE_LOAD
+ || get_attr_type (insn) == TYPE_STORE
+ || get_attr_type (next_insn (insn)) == TYPE_LOAD
+ || get_attr_type (next_insn (insn)) == TYPE_STORE)
+ pipeB_scheduled_p = 1;
+ else
+ alu_pipe_scheduled_p = 1;
+ }
+
+ if (get_attr_type (insn) == TYPE_ALU_FUSED
+ || get_attr_type (insn) == TYPE_IMUL_FUSED)
+ {
+ alu_pipe_scheduled_p = 1;
+ more -= 1;
+ }
+
+ last_scheduled_insn = insn;
+ cached_can_issue_more = more - 1;
+
return more - 1;
}
@@ -10920,7 +10995,7 @@ riscv_sched_reorder (FILE *, int, rtx_insn **ready, int *nreadyp, int)
/* Implement TARGET_SCHED_MACRO_FUSION_P. Return true if target supports
instruction fusion of some sort. */
-static bool
+bool
riscv_macro_fusion_p (void)
{
return tune_param->fusible_ops != RISCV_FUSE_NOTHING;
@@ -11009,6 +11084,292 @@ riscv_set_is_shNadduw (rtx set)
&& REG_P (SET_DEST (set)));
}
+/* Return TRUE if the target microarchitecture supports macro-op
+ fusion for two memory operations of mode MODE (the direction
+ of transfer is determined by the IS_LOAD parameter). */
+
+static bool
+pair_fusion_mode_allowed_p (machine_mode mode, bool is_load)
+{
+ if (!riscv_is_micro_arch (arcv_rhx100))
+ return true;
+
+ return ((is_load && (mode == SImode
+ || mode == HImode
+ || mode == QImode))
+ || (!is_load && mode == SImode));
+}
+
+/* Return TRUE if two addresses can be fused. */
+
+static bool
+arcv_fused_addr_p (rtx addr0, rtx addr1, bool is_load)
+{
+ rtx base0, base1, tmp;
+ HOST_WIDE_INT off0 = 0, off1 = 0;
+
+ if (GET_CODE (addr0) == SIGN_EXTEND || GET_CODE (addr0) == ZERO_EXTEND)
+ addr0 = XEXP (addr0, 0);
+
+ if (GET_CODE (addr1) == SIGN_EXTEND || GET_CODE (addr1) == ZERO_EXTEND)
+ addr1 = XEXP (addr1, 0);
+
+ if (!MEM_P (addr0) || !MEM_P (addr1))
+ return false;
+
+ /* Require the accesses to have the same mode. */
+ if (GET_MODE (addr0) != GET_MODE (addr1))
+ return false;
+
+ /* Check if the mode is allowed. */
+ if (!pair_fusion_mode_allowed_p (GET_MODE (addr0), is_load))
+ return false;
+
+ rtx reg0 = XEXP (addr0, 0);
+ rtx reg1 = XEXP (addr1, 0);
+
+ if (GET_CODE (reg0) == PLUS)
+ {
+ base0 = XEXP (reg0, 0);
+ tmp = XEXP (reg0, 1);
+ if (!CONST_INT_P (tmp))
+ return false;
+ off0 = INTVAL (tmp);
+ }
+ else if (REG_P (reg0))
+ base0 = reg0;
+ else
+ return false;
+
+ if (GET_CODE (reg1) == PLUS)
+ {
+ base1 = XEXP (reg1, 0);
+ tmp = XEXP (reg1, 1);
+ if (!CONST_INT_P (tmp))
+ return false;
+ off1 = INTVAL (tmp);
+ }
+ else if (REG_P (reg1))
+ base1 = reg1;
+ else
+ return false;
+
+ /* Check if we have the same base. */
+ gcc_assert (REG_P (base0) && REG_P (base1));
+ if (REGNO (base0) != REGNO (base1))
+ return false;
+
+ /* Fuse adjacent aligned addresses. */
+ if ((off0 % GET_MODE_SIZE (GET_MODE (addr0)).to_constant () == 0)
+ && (abs (off1 - off0) == GET_MODE_SIZE (GET_MODE (addr0)).to_constant ()))
+ return true;
+
+ return false;
+}
+
+/* Return true if PREV and CURR constitute an ordered load/store + op/opimm
+ pair, for the purposes of ARCV-specific macro-op fusion. */
+static bool
+arcv_memop_arith_pair_p (rtx_insn *prev, rtx_insn *curr)
+{
+ rtx prev_set = single_set (prev);
+ rtx curr_set = single_set (curr);
+
+ gcc_assert (prev_set);
+ gcc_assert (curr_set);
+
+ /* Fuse load/store + register post-{inc,dec}rement:
+ * prev (ld) == (set (reg:X rd1) (mem:X (plus:X (reg:X rs1) (const_int))))
+ * or
+ * prev (st) == (set (mem:X (plus:X (reg:X rs1) (const_int))) (reg:X rs2))
+ * ...
+ */
+ if ((get_attr_type (curr) == TYPE_ARITH
+ || get_attr_type (curr) == TYPE_LOGICAL
+ || get_attr_type (curr) == TYPE_SHIFT
+ || get_attr_type (curr) == TYPE_SLT
+ || get_attr_type (curr) == TYPE_BITMANIP
+ || get_attr_type (curr) == TYPE_MIN
+ || get_attr_type (curr) == TYPE_MAX
+ || get_attr_type (curr) == TYPE_MINU
+ || get_attr_type (curr) == TYPE_MAXU
+ || get_attr_type (curr) == TYPE_CLZ
+ || get_attr_type (curr) == TYPE_CTZ)
+ && (CONST_INT_P (SET_SRC (curr_set))
+ || REG_P (XEXP (SET_SRC (curr_set), 0)))
+ && ((get_attr_type (prev) == TYPE_LOAD
+ && REG_P (XEXP (SET_SRC (prev_set), 0))
+ && REGNO (XEXP (SET_SRC (prev_set), 0))
+ == REGNO (XEXP (SET_SRC (curr_set), 0))
+ && REGNO (XEXP (SET_SRC (prev_set), 0))
+ != REGNO (SET_DEST (prev_set))
+ && REGNO (SET_DEST (prev_set)) != REGNO (SET_DEST (curr_set))
+ && (/* (set (reg:X rd1) (not (reg:X rs1))) */
+ GET_RTX_LENGTH (GET_CODE (SET_SRC (curr_set))) == 1
+ /* (op-imm) == (set (reg:X rd2) (plus/minus (reg:X rs1) (const_int))) */
+ || CONST_INT_P (XEXP (SET_SRC (curr_set), 1))
+ /* (op) == (set (reg:X rd2) (plus/minus (reg:X rs1) (reg:X rs2))) */
+ || REGNO (SET_DEST (prev_set))
+ != REGNO (XEXP (SET_SRC (curr_set), 1))))
+ || (get_attr_type (prev) == TYPE_STORE
+ && REG_P (XEXP (SET_DEST (prev_set), 0))
+ && REGNO (XEXP (SET_DEST (prev_set), 0))
+ == REGNO (XEXP (SET_SRC (curr_set), 0))
+ && (/* (set (reg:X rd1) (not (reg:X rs1))) */
+ GET_RTX_LENGTH (GET_CODE (SET_SRC (curr_set))) == 1
+ /* (op-imm) == (set (reg:X rd2) (plus/minus (reg:X rs1) (const_int))) */
+ || CONST_INT_P (XEXP (SET_SRC (curr_set), 1))
+ /* (op) == (set (reg:X rd2) (plus/minus (reg:X rs1) (reg:X rs2))) */
+ || REGNO (XEXP (SET_DEST (prev_set), 0))
+ == REGNO (XEXP (SET_SRC (curr_set), 1))))))
+ return true;
+
+ return false;
+}
+
+/* Return true if PREV and CURR constitute an ordered load/store + lui pair, for
+ the purposes of ARCV-specific macro-op fusion. */
+static bool
+arcv_memop_lui_pair_p (rtx_insn *prev, rtx_insn *curr)
+{
+ rtx prev_set = single_set (prev);
+ rtx curr_set = single_set (curr);
+
+ gcc_assert (prev_set);
+ gcc_assert (curr_set);
+
+ /* Fuse load/store with lui:
+ * prev (ld) == (set (reg:X rd1) (mem:X (plus:X (reg:X) (const_int))))
+ * or
+ * prev (st) == (set (mem:X (plus:X (reg:X) (const_int))) (reg:X rD))
+ *
+ * curr (lui) == (set (reg:X rd2) (const_int UPPER_IMM_20))
+ */
+  if (REG_P (SET_DEST (curr_set))
+ && ((get_attr_type (curr) == TYPE_MOVE
+ && GET_CODE (SET_SRC (curr_set)) == HIGH)
+ || (CONST_INT_P (SET_SRC (curr_set))
+ && LUI_OPERAND (INTVAL (SET_SRC (curr_set)))))
+ && ((get_attr_type (prev) == TYPE_LOAD
+ && REGNO (SET_DEST (prev_set)) != REGNO (SET_DEST (curr_set)))
+ || get_attr_type (prev) == TYPE_STORE))
+ return true;
+
+ return false;
+}
+
+/* Return true if PREV and CURR should be kept together during scheduling. */
+
+static bool
+arcv_macro_fusion_pair_p (rtx_insn *prev, rtx_insn *curr)
+{
+ /* Never create sched groups with more than 2 members. */
+ if (SCHED_GROUP_P (prev))
+ return false;
+
+ rtx prev_set = single_set (prev);
+ rtx curr_set = single_set (curr);
+
+ /* Fuse multiply-add pair. */
+ if (prev_set && curr_set && GET_CODE (SET_SRC (prev_set)) == MULT
+ && GET_CODE (SET_SRC (curr_set)) == PLUS
+      && ((REG_P (XEXP (SET_SRC (curr_set), 0))
+	   && REGNO (SET_DEST (prev_set))
+	      == REGNO (XEXP (SET_SRC (curr_set), 0)))
+	  || (REG_P (XEXP (SET_SRC (curr_set), 1))
+	      && REGNO (SET_DEST (prev_set))
+		 == REGNO (XEXP (SET_SRC (curr_set), 1)))))
+ return true;
+
+ /* Fuse logical shift left with logical shift right (bit-extract pattern). */
+ if (prev_set && curr_set && GET_CODE (SET_SRC (prev_set)) == ASHIFT
+ && GET_CODE (SET_SRC (curr_set)) == LSHIFTRT
+ && REGNO (SET_DEST (prev_set)) == REGNO (SET_DEST (curr_set))
+ && REGNO (SET_DEST (prev_set)) == REGNO (XEXP (SET_SRC (curr_set), 0)))
+ return true;
+
+ /* Fuse load-immediate with a dependent conditional branch. */
+ if (get_attr_type (prev) == TYPE_MOVE
+ && get_attr_move_type (prev) == MOVE_TYPE_CONST
+ && any_condjump_p (curr))
+ {
+ rtx comp = XEXP (SET_SRC (curr_set), 0);
+
+ return (REG_P (XEXP (comp, 0)) && XEXP (comp, 0) == SET_DEST (prev_set))
+ || (REG_P (XEXP (comp, 1)) && XEXP (comp, 1) == SET_DEST (prev_set));
+ }
+
+ /* Do not fuse loads/stores before sched2. */
+ if (!reload_completed || sched_fusion)
+ return false;
+
+ /* prev and curr are simple SET insns i.e. no flag setting or branching. */
+ bool simple_sets_p = prev_set && curr_set && !any_condjump_p (curr);
+
+ /* Don't handle anything with a jump past this point. */
+ if (!simple_sets_p)
+ return false;
+
+ /* Fuse adjacent loads and stores. */
+ if (get_attr_type (prev) == TYPE_LOAD
+ && get_attr_type (curr) == TYPE_LOAD)
+ {
+ if (arcv_fused_addr_p (SET_SRC (prev_set), SET_SRC (curr_set), true))
+ return true;
+ }
+
+ if (get_attr_type (prev) == TYPE_STORE
+ && get_attr_type (curr) == TYPE_STORE)
+ {
+ if (arcv_fused_addr_p (SET_DEST (prev_set), SET_DEST (curr_set), false))
+ return true;
+ }
+
+ /* Look ahead 1 insn to make sure double loads/stores are always
+ fused together, even in the presence of other opportunities. */
+ if (next_insn (curr) && single_set (next_insn (curr))
+ && get_attr_type (curr) == TYPE_LOAD
+ && get_attr_type (next_insn (curr)) == TYPE_LOAD)
+ {
+ if (arcv_fused_addr_p (SET_SRC (curr_set),
+ SET_SRC (single_set (next_insn (curr))),
+ true))
+ return false;
+ }
+
+ if (next_insn (curr) && single_set (next_insn (curr))
+ && get_attr_type (curr) == TYPE_STORE
+ && get_attr_type (next_insn (curr)) == TYPE_STORE)
+ {
+ if (arcv_fused_addr_p (SET_DEST (curr_set),
+ SET_DEST (single_set (next_insn (curr))),
+ false))
+ return false;
+ }
+
+ /* Fuse a pre- or post-update memory operation. */
+ if (arcv_memop_arith_pair_p (prev, curr)
+ || arcv_memop_arith_pair_p (curr, prev))
+ return true;
+
+ /* Fuse a memory operation preceded or followed by a lui. */
+ if (arcv_memop_lui_pair_p (prev, curr)
+ || arcv_memop_lui_pair_p (curr, prev))
+ return true;
+
+ /* Fuse load-immediate with a store of the destination register. */
+ if (get_attr_type (prev) == TYPE_MOVE
+ && get_attr_move_type (prev) == MOVE_TYPE_CONST
+ && get_attr_type (curr) == TYPE_STORE
+ && ((REG_P (SET_SRC (curr_set))
+ && SET_DEST (prev_set) == SET_SRC (curr_set))
+ || (SUBREG_P (SET_SRC (curr_set))
+ && SET_DEST (prev_set) == SUBREG_REG (SET_SRC (curr_set)))))
+ return true;
+
+ return false;
+}
+
/* Implement TARGET_SCHED_MACRO_FUSION_PAIR_P. Return true if PREV and CURR
should be kept together during scheduling. */
@@ -11641,9 +12002,101 @@ riscv_macro_fusion_pair_p (rtx_insn *prev, rtx_insn *curr)
}
}
+ if (riscv_fusion_enabled_p (RISCV_FUSE_ARCV))
+ return arcv_macro_fusion_pair_p (prev, curr);
+
return false;
}
+/* If INSN is a load or store of address in the form of [base+offset],
+ extract the two parts and set to BASE and OFFSET. IS_LOAD is set
+ to TRUE if it's a load. Return TRUE if INSN is such an instruction,
+ otherwise return FALSE. */
+
+static bool
+fusion_load_store (rtx_insn *insn, rtx *base, rtx *offset, machine_mode *mode,
+ bool *is_load)
+{
+ rtx x, dest, src;
+
+ gcc_assert (INSN_P (insn));
+ x = PATTERN (insn);
+ if (GET_CODE (x) != SET)
+ return false;
+
+ src = SET_SRC (x);
+ dest = SET_DEST (x);
+
+ if ((GET_CODE (src) == SIGN_EXTEND || GET_CODE (src) == ZERO_EXTEND)
+ && MEM_P (XEXP (src, 0)))
+ src = XEXP (src, 0);
+
+ if (REG_P (src) && MEM_P (dest))
+ {
+ *is_load = false;
+ if (extract_base_offset_in_addr (dest, base, offset))
+ *mode = GET_MODE (dest);
+ }
+ else if (MEM_P (src) && REG_P (dest))
+ {
+ *is_load = true;
+ if (extract_base_offset_in_addr (src, base, offset))
+ *mode = GET_MODE (src);
+ }
+ else
+ return false;
+
+ return (*base != NULL_RTX && *offset != NULL_RTX);
+}
+
+static void
+riscv_sched_fusion_priority (rtx_insn *insn, int max_pri, int *fusion_pri,
+ int *pri)
+{
+ int tmp, off_val;
+ bool is_load;
+ rtx base, offset;
+ machine_mode mode = SImode;
+
+ gcc_assert (INSN_P (insn));
+
+ tmp = max_pri - 1;
+ if (!fusion_load_store (insn, &base, &offset, &mode, &is_load)
+ || !pair_fusion_mode_allowed_p (mode, is_load))
+ {
+ *pri = tmp;
+ *fusion_pri = tmp;
+ return;
+ }
+
+ tmp /= 2;
+
+ if (mode == HImode)
+ tmp /= 2;
+ else if (mode == QImode)
+ tmp /= 4;
+
+ /* INSN with smaller base register goes first. */
+ tmp -= ((REGNO (base) & 0xff) << 20);
+
+ /* INSN with smaller offset goes first. */
+ off_val = (int)(INTVAL (offset));
+
+ /* Put loads/stores operating on adjacent words into the same
+ * scheduling group. */
+ *fusion_pri = tmp
+ - ((off_val / (GET_MODE_SIZE (mode).to_constant () * 2)) << 1)
+ + is_load;
+
+ if (off_val >= 0)
+ tmp -= (off_val & 0xfffff);
+ else
+ tmp += ((- off_val) & 0xfffff);
+
+ *pri = tmp;
+ return;
+}
+
/* Adjust the cost/latency of instructions for scheduling.
For now this is just used to change the latency of vector instructions
according to their LMUL. We assume that an insn with LMUL == 8 requires
@@ -11652,17 +12105,21 @@ riscv_macro_fusion_pair_p (rtx_insn *prev, rtx_insn *curr)
we currently only perform the adjustment when -madjust-lmul-cost is given.
*/
static int
-riscv_sched_adjust_cost (rtx_insn *, int, rtx_insn *insn, int cost,
- unsigned int)
+riscv_sched_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
+ int cost, unsigned int)
{
+ if (riscv_is_micro_arch (arcv_rhx100) && dep_type == REG_DEP_ANTI
+ && !SCHED_GROUP_P (insn))
+ return cost + 1;
+
/* Only do adjustments for the generic out-of-order scheduling model. */
if (!TARGET_VECTOR || riscv_microarchitecture != generic_ooo)
return cost;
- if (recog_memoized (insn) < 0)
+ if (recog_memoized (dep_insn) < 0)
return cost;
- enum attr_type type = get_attr_type (insn);
+ enum attr_type type = get_attr_type (dep_insn);
if (type == TYPE_VFREDO || type == TYPE_VFWREDO)
{
@@ -11713,6 +12170,7 @@ riscv_sched_adjust_cost (rtx_insn *, int, rtx_insn *insn, int cost,
return new_cost;
}
+
/* Implement TARGET_SCHED_CAN_SPECULATE_INSN hook. Return true if insn can
can be scheduled for speculative execution. Reject vsetvl instructions to
prevent the scheduler from hoisting them out of basic blocks without
@@ -11734,6 +12192,174 @@ riscv_sched_can_speculate_insn (rtx_insn *insn)
}
}
+static int
+riscv_sched_adjust_priority (rtx_insn *insn, int priority)
+{
+ if (!riscv_is_micro_arch (arcv_rhx100))
+ return priority;
+
+ if (DEBUG_INSN_P (insn) || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return priority;
+
+ /* Bump the priority of fused load-store pairs for easier
+ scheduling of the memory pipe. The specific increase
+ value is determined empirically. */
+ if (next_insn (insn) && INSN_P (next_insn (insn))
+ && SCHED_GROUP_P (next_insn (insn))
+ && ((get_attr_type (insn) == TYPE_STORE
+ && get_attr_type (next_insn (insn)) == TYPE_STORE)
+ || (get_attr_type (insn) == TYPE_LOAD
+ && get_attr_type (next_insn (insn)) == TYPE_LOAD)))
+ return priority + 1;
+
+ return priority;
+}
+
+
+/* NOTE (review): riscv_sched_init is already defined earlier in riscv.cc
+   and is already installed as TARGET_SCHED_INIT (see below), so defining
+   a second riscv_sched_init here is a C redefinition error.  Reset
+   last_scheduled_insn (to NULL) from the existing riscv_sched_init
+   instead of duplicating the function.  */
+
+static int
+riscv_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED,
+ rtx_insn **ready,
+ int *n_readyp,
+ int clock ATTRIBUTE_UNUSED)
+{
+ if (sched_fusion)
+ return cached_can_issue_more;
+
+ if (!cached_can_issue_more)
+ return 0;
+
+ /* Fuse double load/store instances missed by sched_fusion. */
+ if (!pipeB_scheduled_p && last_scheduled_insn && ready && *n_readyp > 0
+ && !SCHED_GROUP_P (last_scheduled_insn)
+ && (get_attr_type (last_scheduled_insn) == TYPE_LOAD
+ || get_attr_type (last_scheduled_insn) == TYPE_STORE))
+ {
+ for (int i = 1; i <= *n_readyp; i++)
+ {
+ if (NONDEBUG_INSN_P (ready[*n_readyp - i])
+ && !SCHED_GROUP_P (ready[*n_readyp - i])
+ && (!next_insn (ready[*n_readyp - i])
+ || !NONDEBUG_INSN_P (next_insn (ready[*n_readyp - i]))
+ || !SCHED_GROUP_P (next_insn (ready[*n_readyp - i])))
+ && arcv_macro_fusion_pair_p (last_scheduled_insn, ready[*n_readyp - i]))
+ {
+ std::swap (ready[*n_readyp - 1], ready[*n_readyp - i]);
+ SCHED_GROUP_P (ready[*n_readyp - 1]) = 1;
+ pipeB_scheduled_p = 1;
+ return cached_can_issue_more;
+ }
+ }
+ pipeB_scheduled_p = 1;
+ }
+
+ /* Try to fuse a non-memory last_scheduled_insn. */
+ if ((!alu_pipe_scheduled_p || !pipeB_scheduled_p)
+ && last_scheduled_insn && ready && *n_readyp > 0
+ && !SCHED_GROUP_P (last_scheduled_insn)
+ && (get_attr_type (last_scheduled_insn) != TYPE_LOAD
+ && get_attr_type (last_scheduled_insn) != TYPE_STORE))
+ {
+ for (int i = 1; i <= *n_readyp; i++)
+ {
+ if (NONDEBUG_INSN_P (ready[*n_readyp - i])
+ && !SCHED_GROUP_P (ready[*n_readyp - i])
+ && (!next_insn (ready[*n_readyp - i])
+ || !NONDEBUG_INSN_P (next_insn (ready[*n_readyp - i]))
+ || !SCHED_GROUP_P (next_insn (ready[*n_readyp - i])))
+ && arcv_macro_fusion_pair_p (last_scheduled_insn, ready[*n_readyp - i]))
+ {
+	      if (get_attr_type (ready[*n_readyp - i]) == TYPE_LOAD
+		  || get_attr_type (ready[*n_readyp - i]) == TYPE_STORE)
+		{
+		  /* Memory ops need pipe B; skip this candidate if it is
+		     already taken.  */
+		  if (pipeB_scheduled_p)
+		    continue;
+		  pipeB_scheduled_p = 1;
+		}
+	      else if (!alu_pipe_scheduled_p)
+		alu_pipe_scheduled_p = 1;
+	      else
+		pipeB_scheduled_p = 1;
+
+ std::swap (ready[*n_readyp - 1], ready[*n_readyp - i]);
+ SCHED_GROUP_P (ready[*n_readyp - 1]) = 1;
+ return cached_can_issue_more;
+ }
+ }
+ alu_pipe_scheduled_p = 1;
+ }
+
+ /* When pipe B is scheduled, we can have no more memops this cycle. */
+ if (pipeB_scheduled_p && *n_readyp > 0
+ && NONDEBUG_INSN_P (ready[*n_readyp - 1])
+ && recog_memoized (ready[*n_readyp - 1]) >= 0
+ && !SCHED_GROUP_P (ready[*n_readyp - 1])
+ && (get_attr_type (ready[*n_readyp - 1]) == TYPE_LOAD
+ || get_attr_type (ready[*n_readyp - 1]) == TYPE_STORE))
+ {
+ if (alu_pipe_scheduled_p)
+ return 0;
+
+ for (int i = 2; i <= *n_readyp; i++)
+ {
+ if ((NONDEBUG_INSN_P (ready[*n_readyp - i])
+ && recog_memoized (ready[*n_readyp - i]) >= 0
+ && get_attr_type (ready[*n_readyp - i]) != TYPE_LOAD
+ && get_attr_type (ready[*n_readyp - i]) != TYPE_STORE
+ && !SCHED_GROUP_P (ready[*n_readyp - i])
+ && ((!next_insn (ready[*n_readyp - i])
+ || !NONDEBUG_INSN_P (next_insn (ready[*n_readyp - i]))
+ || !SCHED_GROUP_P (next_insn (ready[*n_readyp - i])))))
+ || ((next_insn (ready[*n_readyp - i])
+ && NONDEBUG_INSN_P (next_insn (ready[*n_readyp - i]))
+ && recog_memoized (next_insn (ready[*n_readyp - i])) >= 0
+ && get_attr_type (next_insn (ready[*n_readyp - i])) != TYPE_LOAD
+ && get_attr_type (next_insn (ready[*n_readyp - i])) != TYPE_STORE)))
+ {
+ std::swap (ready[*n_readyp - 1], ready[*n_readyp - i]);
+ alu_pipe_scheduled_p = 1;
+ cached_can_issue_more = 1;
+ return 1;
+ }
+ }
+ return 0;
+ }
+
+  /* If all else fails, schedule a single instruction.  A load/store may
+     still issue if pipe B is free; anything else goes to the ALU pipe.
+     (The previous form excluded loads/stores in the outer condition,
+     which made the pipe-B branch dead code and the else-if condition a
+     tautology.)  */
+  if (ready && *n_readyp > 0
+      && NONDEBUG_INSN_P (ready[*n_readyp - 1])
+      && recog_memoized (ready[*n_readyp - 1]) >= 0)
+    {
+      if (get_attr_type (ready[*n_readyp - 1]) == TYPE_LOAD
+	  || get_attr_type (ready[*n_readyp - 1]) == TYPE_STORE)
+	{
+	  if (!pipeB_scheduled_p)
+	    {
+	      alu_pipe_scheduled_p = pipeB_scheduled_p = 1;
+	      cached_can_issue_more = 1;
+	      return 1;
+	    }
+	}
+      else
+	{
+	  alu_pipe_scheduled_p = pipeB_scheduled_p = 1;
+	  cached_can_issue_more = 1;
+	  return 1;
+	}
+    }
+
+ return cached_can_issue_more;
+}
+
/* Auxiliary function to emit RISC-V ELF attribute. */
static void
riscv_emit_attribute ()
@@ -16328,6 +16954,9 @@ riscv_prefetch_offset_address_p (rtx x, machine_mode mode)
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT riscv_sched_init
+#undef TARGET_SCHED_FUSION_PRIORITY
+#define TARGET_SCHED_FUSION_PRIORITY riscv_sched_fusion_priority
+
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE riscv_sched_variable_issue
@@ -16340,6 +16969,15 @@ riscv_prefetch_offset_address_p (rtx x, machine_mode mode)
#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN riscv_sched_can_speculate_insn
+#undef TARGET_SCHED_ADJUST_PRIORITY
+#define TARGET_SCHED_ADJUST_PRIORITY riscv_sched_adjust_priority
+
+#undef TARGET_SCHED_REORDER2
+#define TARGET_SCHED_REORDER2 riscv_sched_reorder2
+
+/* TARGET_SCHED_INIT is already defined above; no second definition needed.  */
+
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index 823f8dda8a30..67e8e76d725b 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -518,7 +518,7 @@
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll,
vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
- vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16,
+ vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16,imul_fused,alu_fused,
sf_vc,sf_vc_se"
(cond [(eq_attr "got" "load") (const_string "load")
@@ -672,7 +672,7 @@
;; Microarchitectures we know how to tune for.
;; Keep this in sync with enum riscv_microarchitecture.
(define_attr "tune"
- "generic,sifive_7,sifive_p400,sifive_p600,xiangshan,generic_ooo,mips_p8700,tt_ascalon_d8,arcv_rmx100"
+ "generic,sifive_7,sifive_p400,sifive_p600,xiangshan,generic_ooo,mips_p8700,tt_ascalon_d8,arcv_rmx100,arcv_rhx100"
(const (symbol_ref "((enum attr_tune) riscv_microarchitecture)")))
;; Describe a user's asm statement.
@@ -3100,6 +3100,7 @@
;; * Single-bit extraction (SFB)
;; * Extraction instruction th.ext(u) (XTheadBb)
;; * lshrsi3_extend_2 (see above)
+;; * Zero extraction fusion (ARC-V)
(define_insn_and_split "*3"
[(set (match_operand:GPR 0 "register_operand" "=r")
(any_extract:GPR
@@ -3112,6 +3113,8 @@
&& (INTVAL (operands[2]) == 1))
&& !TARGET_XTHEADBB
&& !TARGET_XANDESPERF
+   && !(riscv_is_micro_arch (arcv_rhx100)
+	&& !TARGET_64BIT && (INTVAL (operands[2]) > 1 || !TARGET_ZBS))
&& !(TARGET_64BIT
&& (INTVAL (operands[3]) > 0)
&& (INTVAL (operands[2]) + INTVAL (operands[3]) == 32))"
@@ -4501,7 +4504,63 @@
(mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
(sign_extend:SI (match_operand:HI 2 "register_operand")))
(match_operand:SI 3 "register_operand")))]
- "TARGET_XTHEADMAC"
+ "TARGET_XTHEADMAC || (riscv_is_micro_arch (arcv_rhx100)
+ && !TARGET_64BIT && (TARGET_ZMMUL || TARGET_MUL))"
+ {
+ if (riscv_is_micro_arch (arcv_rhx100))
+ {
+ rtx tmp0 = gen_reg_rtx (SImode), tmp1 = gen_reg_rtx (SImode);
+ emit_insn (gen_extendhisi2 (tmp0, operands[1]));
+ emit_insn (gen_extendhisi2 (tmp1, operands[2]));
+
+ if (TARGET_64BIT)
+ {
+ rtx op0 = gen_reg_rtx (DImode);
+ emit_insn (gen_madd_split_fused_extended (op0, tmp0, tmp1, operands[3]));
+ op0 = gen_lowpart (SImode, op0);
+ SUBREG_PROMOTED_VAR_P (op0) = 1;
+ SUBREG_PROMOTED_SET (op0, SRP_SIGNED);
+ emit_move_insn (operands[0], op0);
+ }
+ else
+ {
+ emit_insn (gen_madd_split_fused (operands[0], tmp0, tmp1, operands[3]));
+ }
+
+ DONE;
+ }
+ }
+)
+
+(define_expand "umaddhisi4"
+  [(set (match_operand:SI 0 "register_operand")
+	(plus:SI
+	  (mult:SI (zero_extend:SI (match_operand:HI 1 "register_operand"))
+		   (zero_extend:SI (match_operand:HI 2 "register_operand")))
+	  (match_operand:SI 3 "register_operand")))]
+  "riscv_is_micro_arch (arcv_rhx100)
+   && !TARGET_64BIT && (TARGET_ZMMUL || TARGET_MUL)"
+  {
+    /* Widen both HImode inputs, then emit the fused mul/add pair.
+       The expander condition guarantees !TARGET_64BIT, so the former
+       TARGET_64BIT branch (which called gen_madd_split_fused_extended)
+       was unreachable dead code and has been removed.  */
+    rtx tmp0 = gen_reg_rtx (SImode), tmp1 = gen_reg_rtx (SImode);
+    emit_insn (gen_zero_extendhisi2 (tmp0, operands[1]));
+    emit_insn (gen_zero_extendhisi2 (tmp1, operands[2]));
+
+    emit_insn (gen_madd_split_fused (operands[0], tmp0, tmp1, operands[3]));
+    DONE;
+  }
+)
(define_expand "msubhisi4"
@@ -4513,6 +4572,68 @@
"TARGET_XTHEADMAC"
)
+(define_insn "madd_split_fused"
+  [(set (match_operand:SI 0 "register_operand" "=&r,r")
+	(plus:SI
+	  (mult:SI (match_operand:SI 1 "register_operand" "r,r")
+		   (match_operand:SI 2 "register_operand" "r,r"))
+	  (match_operand:SI 3 "register_operand" "r,?0")))
+   (clobber (match_scratch:SI 4 "=&r,&r"))]
+  "riscv_is_micro_arch (arcv_rhx100)
+   && !TARGET_64BIT && (TARGET_ZMMUL || TARGET_MUL)"
+  {
+    /* When the accumulator is tied to the destination (alternative 1),
+       the product must go through the scratch register first.  */
+    if (REGNO (operands[0]) == REGNO (operands[3]))
+      return "mul\t%4,%1,%2\n\tadd\t%4,%3,%4\n\tmv\t%0,%4";
+    else
+      return "mul\t%0,%1,%2\n\tadd\t%0,%0,%3";
+  }
+  [(set_attr "type" "imul_fused")
+   ;; Two insns when op0 is early-clobbered away from op3 (alt 0),
+   ;; three when op3 is tied to op0 (alt 1).  Without an explicit
+   ;; length, branch shortening would assume 4 bytes.
+   (set_attr "length" "8,12")]
+)
+
+(define_insn "madd_split_fused_extended"
+  [(set (match_operand:DI 0 "register_operand" "=&r,r")
+	(sign_extend:DI
+	  (plus:SI
+	    (mult:SI (match_operand:SI 1 "register_operand" "r,r")
+		     (match_operand:SI 2 "register_operand" "r,r"))
+	    (match_operand:SI 3 "register_operand" "r,?0"))))
+   (clobber (match_scratch:SI 4 "=&r,&r"))]
+  "riscv_is_micro_arch (arcv_rhx100) && TARGET_64BIT
+   && (TARGET_ZMMUL || TARGET_MUL)"
+  {
+    /* mulw/addw sign-extend their 32-bit results, matching the
+       sign_extend:DI wrapper.  These opcodes only exist on RV64, so
+       the insn condition now requires TARGET_64BIT (previously combine
+       could in principle match this on RV32 and emit invalid code).  */
+    if (REGNO (operands[0]) == REGNO (operands[3]))
+      return "mulw\t%4,%1,%2\n\taddw\t%4,%3,%4\n\tmv\t%0,%4";
+    else
+      return "mulw\t%0,%1,%2\n\taddw\t%0,%0,%3";
+  }
+  [(set_attr "type" "imul_fused")
+   ;; Two insns in alternative 0, three in the tied alternative 1.
+   (set_attr "length" "8,12")]
+)
+
+(define_insn "*zero_extract_fused"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(zero_extract:SI (match_operand:SI 1 "register_operand" "r")
+			 (match_operand 2 "const_int_operand")
+			 (match_operand 3 "const_int_operand")))]
+  "riscv_is_micro_arch (arcv_rhx100) && !TARGET_64BIT
+   && (INTVAL (operands[2]) > 1 || !TARGET_ZBS)"
+  {
+    /* Extract bits [pos+width-1:pos] with a fusible slli/srli pair:
+       shift left to discard the bits above the field, then shift
+       right to discard the bits below it.  Single-bit extracts are
+       left to Zbs bexti when available (see the insn condition).  */
+    int width = INTVAL (operands[2]);
+    int end = INTVAL (operands[3]) + width;
+    operands[2] = GEN_INT (BITS_PER_WORD - end);
+    operands[3] = GEN_INT (BITS_PER_WORD - width);
+    return "slli\t%0,%1,%2\n\tsrli\t%0,%0,%3";
+  }
+  [(set_attr "type" "alu_fused")
+   ;; Always a two-instruction sequence; the default length of 4
+   ;; would mislead branch shortening.
+   (set_attr "length" "8")]
+)
+
;; String compare with length insn.
;; Argument 0 is the target (result)
;; Argument 1 is the source1
@@ -4967,3 +5088,4 @@
(include "generic-ooo.md")
(include "tt-ascalon-d8.md")
(include "arcv-rmx100.md")
+(include "arcv-rhx100.md")
diff --git a/gcc/doc/riscv-mtune.texi b/gcc/doc/riscv-mtune.texi
index 63a01db67726..8ffb3db906fe 100644
--- a/gcc/doc/riscv-mtune.texi
+++ b/gcc/doc/riscv-mtune.texi
@@ -52,6 +52,8 @@ particular CPU name. Permissible values for this option are:
@samp{arc-v-rmx-100-series},
+@samp{arc-v-rhx-100-series},
+
@samp{generic-ooo},
@samp{size},
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-fusion-limm-condbr.c b/gcc/testsuite/gcc.target/riscv/arcv-fusion-limm-condbr.c
new file mode 100644
index 000000000000..cc2a56a2e086
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/arcv-fusion-limm-condbr.c
@@ -0,0 +1,13 @@
+/* Check that a load-immediate feeding a conditional branch stays as an
+   adjacent li/ble pair for RHX-100 macro-op fusion.  */
+/* { dg-do compile } */
+/* { dg-options "-O2 -mtune=arc-v-rhx-100-series" } */
+
+int
+f (int x)
+{
+ begin:
+  if (x <= 3)
+    goto begin;
+  return x;
+}
+
+/* { dg-final { scan-assembler "\\sli\\sa5,3\n\\sble\\sa0,a5,.L\[0-9\]+\n" } } */
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-fusion-madd.c b/gcc/testsuite/gcc.target/riscv/arcv-fusion-madd.c
new file mode 100644
index 000000000000..eb8665f576c4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/arcv-fusion-madd.c
@@ -0,0 +1,14 @@
+/* Check that 32-bit multiply-accumulate chains expand to back-to-back
+   mul/add pairs suitable for RHX-100 macro-op fusion.  */
+/* { dg-do compile } */
+/* { dg-require-effective-target rv32 } */
+/* { dg-skip-if "" { *-*-* } { "-g" "-flto" "-O0" } } */
+/* { dg-options "-mtune=arc-v-rhx-100-series -march=rv32im -mabi=ilp32" } */
+
+int
+f (int x, int y, int z, int v, int w)
+{
+  return x + y * z + v * w;
+}
+
+/* { dg-final { scan-assembler {\smul\s([ast][0-9]+),a1,a2\n\sadd\s\1,\1,a0\n\smul\sa0,a3,a4\n\sadd\sa0,a0,\1\n} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-fusion-xbfu.c b/gcc/testsuite/gcc.target/riscv/arcv-fusion-xbfu.c
new file mode 100644
index 000000000000..7abf54ec1448
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/arcv-fusion-xbfu.c
@@ -0,0 +1,16 @@
+/* Check that a multi-bit field extract uses the fused slli/srli pair
+   while a single-bit extract still uses the Zbs bexti instruction.  */
+/* { dg-do compile } */
+/* { dg-require-effective-target rv32 } */
+/* { dg-skip-if "" { *-*-* } { "-g" "-flto" "-O0" "-Oz" "-Os" } } */
+/* { dg-options "-mtune=arc-v-rhx-100-series -march=rv32im_zbs -mabi=ilp32 -dp" } */
+
+#define bit_extract(x,start,amt) (((x)>>(start)) & (~(0xffffffff << (amt))))
+
+int
+f (int x)
+{
+  return bit_extract(x,10,14) + bit_extract(x,1,1);
+}
+
+/* { dg-final { scan-assembler {\sslli\s([ast][0-9]+),a0,8.*zero_extract_fused\n\ssrli\s([ast][0-9]+),\1,18\n\sbexti\sa0,a0,1.*\n\sadd\sa0,\2,a0.*\n} } } */