From 3d29fb2ccf303942ecbd104ed8639983cb3d7a60 Mon Sep 17 00:00:00 2001 From: Hao Sun Date: Sun, 5 Dec 2021 11:37:39 +0000 Subject: [PATCH 1/4] 8278267: ARM32: several vector test failures for ASHR In ARM32, "VSHL (register)" instruction [1] is shared by vector left shift and vector right shift, and the condition to distinguish them is whether the shift count value is positive or negative. Hence, a negation operation is needed before conducting vector right shift. For vector right shift, the shift count can be a RShiftCntV or a normal vector node. Take test case Byte64VectorTests.java [2][3] as an example. Note that RShiftCntV is already negated via rules "vsrcntD" and "vsrcntX" whereas the normal vector node is NOT, since we don't know whether a normal vector node is used as a vector shift count or not. This is the root cause for these vector test failures. The fix is simple: moving the negation from "vsrcntD|X" to the corresponding vector right shift rules. Affected rules are vsrlBB_reg and vsraBB_reg. Note that vector shift related rules are in the form of "vsAABB_CC", where 1) AA can be l (left shift), rl (logical right shift) and ra (arithmetic right shift). 2) BB can be 8B/16B (byte type), 4S/8S (short type), 2I/4I (int type) and 2L (long type). 3) CC can be reg (register case) and immI (immediate case). Minor updates: 1) Merge "vslcntD" and "vsrcntD" into rule "vscntD", as these two rules conduct the same duplication operation now. 2) Update the "match" primitive for vsraBB_immI rules. 3) Style issue: remove the surrounding space for "ins_pipe" primitive. Tests: We ran tier 1~3 tests on ARM32 platform. With this patch, previously failed vector test cases can pass now without introducing test regression. 
[1] https://developer.arm.com/documentation/ddi0406/c/Application-Level-Architecture/Instruction-Details/Alphabetical-list-of-instructions/VSHL--register-?lang=en [2] https://github.com/openjdk/jdk/blame/master/test/jdk/jdk/incubator/vector/Byte64VectorTests.java#L2237 [3] https://github.com/openjdk/jdk/blame/master/test/jdk/jdk/incubator/vector/Byte64VectorTests.java#L2425 --- src/hotspot/cpu/arm/arm.ad | 681 +++++++++++++++++-------------------- 1 file changed, 311 insertions(+), 370 deletions(-) diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad index 42d6d4151b2..5749e0cec6e 100644 --- a/src/hotspot/cpu/arm/arm.ad +++ b/src/hotspot/cpu/arm/arm.ad @@ -10593,9 +10593,12 @@ instruct vneg16B_reg(vecX dst, vecX src) %{ // ------------------------------ Shift --------------------------------------- -instruct vslcntD(vecD dst, iRegI cnt) %{ +// Low bits of vector "shift" elements are used, so it +// doesn't matter if we treat it as ints or bytes here. +instruct vscntD(vecD dst, iRegI cnt) %{ predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd()); match(Set dst (LShiftCntV cnt)); + match(Set dst (RShiftCntV cnt)); size(4); ins_cost(DEFAULT_COST); // FIXME expand %{ @@ -10603,9 +10606,10 @@ instruct vslcntD(vecD dst, iRegI cnt) %{ %} %} -instruct vslcntX(vecX dst, iRegI cnt) %{ +instruct vscntX(vecX dst, iRegI cnt) %{ predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd()); match(Set dst (LShiftCntV cnt)); + match(Set dst (RShiftCntV cnt)); size(4); ins_cost(DEFAULT_COST); // FIXME expand %{ @@ -10613,180 +10617,39 @@ instruct vslcntX(vecX dst, iRegI cnt) %{ %} %} -// Low bits of vector "shift" elements are used, so it -// doesn't matter if we treat it as ints or bytes here. 
-instruct vsrcntD(vecD dst, iRegI cnt) %{ - predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd()); - match(Set dst (RShiftCntV cnt)); - size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME - - format %{ "VDUP.8 $dst.D,$cnt\n\t" - "VNEG.S8 $dst.D,$dst.D\t! neg packed8B" %} - ins_encode %{ - bool quad = false; - __ vdupI($dst$$FloatRegister, $cnt$$Register, - MacroAssembler::VELEM_SIZE_8, quad); - __ vnegI($dst$$FloatRegister, $dst$$FloatRegister, - MacroAssembler::VELEM_SIZE_8, quad); - %} - ins_pipe( ialu_reg_reg ); // FIXME -%} - -instruct vsrcntX(vecX dst, iRegI cnt) %{ - predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd()); - match(Set dst (RShiftCntV cnt)); - size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME - format %{ "VDUP.8 $dst.Q,$cnt\n\t" - "VNEG.S8 $dst.Q,$dst.Q\t! neg packed16B" %} - ins_encode %{ - bool quad = true; - __ vdupI($dst$$FloatRegister, $cnt$$Register, - MacroAssembler::VELEM_SIZE_8, quad); - __ vnegI($dst$$FloatRegister, $dst$$FloatRegister, - MacroAssembler::VELEM_SIZE_8, quad); - %} - ins_pipe( ialu_reg_reg ); // FIXME -%} +// ------------------------------ LeftShift ----------------------------------- -// Byte vector logical left/right shift based on sign -instruct vsh8B_reg(vecD dst, vecD src, vecD shift) %{ +// Byte vector logical left shift +instruct vsl8B_reg(vecD dst, vecD src, vecD shift) %{ predicate(n->as_Vector()->length() == 8); - effect(DEF dst, USE src, USE shift); + match(Set dst (LShiftVB src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHL.U8 $dst.D,$src.D,$shift.D\t! logical left/right shift packed8B" + "VSHL.U8 $dst.D,$src.D,$shift.D\t! 
logical left shift packed8B" %} ins_encode %{ bool quad = false; __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_8, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsh16B_reg(vecX dst, vecX src, vecX shift) %{ +instruct vsl16B_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 16); - effect(DEF dst, USE src, USE shift); + match(Set dst (LShiftVB src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHL.U8 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed16B" + "VSHL.U8 $dst.Q,$src.Q,$shift.Q\t! logical left shift packed16B" %} ins_encode %{ bool quad = true; __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_8, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME -%} - -// Shorts/Char vector logical left/right shift based on sign -instruct vsh4S_reg(vecD dst, vecD src, vecD shift) %{ - predicate(n->as_Vector()->length() == 4); - effect(DEF dst, USE src, USE shift); - size(4); - ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U16 $dst.D,$src.D,$shift.D\t! logical left/right shift packed4S" - %} - ins_encode %{ - bool quad = false; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_16, quad); - %} - ins_pipe( ialu_reg_reg ); // FIXME -%} - -instruct vsh8S_reg(vecX dst, vecX src, vecX shift) %{ - predicate(n->as_Vector()->length() == 8); - effect(DEF dst, USE src, USE shift); - size(4); - ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U16 $dst.Q,$src.Q,$shift.Q\t! 
logical left/right shift packed8S" - %} - ins_encode %{ - bool quad = true; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_16, quad); - %} - ins_pipe( ialu_reg_reg ); // FIXME -%} - -// Integers vector logical left/right shift based on sign -instruct vsh2I_reg(vecD dst, vecD src, vecD shift) %{ - predicate(n->as_Vector()->length() == 2); - effect(DEF dst, USE src, USE shift); - size(4); - ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U32 $dst.D,$src.D,$shift.D\t! logical left/right shift packed2I" - %} - ins_encode %{ - bool quad = false; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_32, quad); - %} - ins_pipe( ialu_reg_reg ); // FIXME -%} - -instruct vsh4I_reg(vecX dst, vecX src, vecX shift) %{ - predicate(n->as_Vector()->length() == 4); - effect(DEF dst, USE src, USE shift); - size(4); - ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U32 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed4I" - %} - ins_encode %{ - bool quad = true; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_32, quad); - %} - ins_pipe( ialu_reg_reg ); // FIXME -%} - -// Longs vector logical left/right shift based on sign -instruct vsh2L_reg(vecX dst, vecX src, vecX shift) %{ - predicate(n->as_Vector()->length() == 2); - effect(DEF dst, USE src, USE shift); - size(4); - ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U64 $dst.Q,$src.Q,$shift.Q\t! 
logical left/right shift packed2L" - %} - ins_encode %{ - bool quad = true; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_64, quad); - %} - ins_pipe( ialu_reg_reg ); // FIXME -%} - -// ------------------------------ LeftShift ----------------------------------- - -// Byte vector left shift -instruct vsl8B_reg(vecD dst, vecD src, vecD shift) %{ - predicate(n->as_Vector()->length() == 8); - match(Set dst (LShiftVB src shift)); - size(4*1); - ins_cost(DEFAULT_COST*1); // FIXME - expand %{ - vsh8B_reg(dst, src, shift); - %} -%} - -instruct vsl16B_reg(vecX dst, vecX src, vecX shift) %{ - predicate(n->as_Vector()->length() == 16); - match(Set dst (LShiftVB src shift)); - size(4*1); - ins_cost(DEFAULT_COST*1); // FIXME - expand %{ - vsh16B_reg(dst, src, shift); - %} + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{ @@ -10802,7 +10665,7 @@ instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl16B_immI(vecX dst, vecX src, immI shift) %{ @@ -10818,30 +10681,40 @@ instruct vsl16B_immI(vecX dst, vecX src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// Shorts/Chars vector logical left/right shift +// Shorts/Chars vector logical left shift instruct vsl4S_reg(vecD dst, vecD src, vecD shift) %{ predicate(n->as_Vector()->length() == 4); match(Set dst (LShiftVS src shift)); - match(Set dst (URShiftVS src shift)); - size(4*1); - ins_cost(DEFAULT_COST*1); // FIXME - expand %{ - vsh4S_reg(dst, src, shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U16 $dst.D,$src.D,$shift.D\t! 
logical left shift packed4S" %} + ins_encode %{ + bool quad = false; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl8S_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 8); match(Set dst (LShiftVS src shift)); - match(Set dst (URShiftVS src shift)); - size(4*1); - ins_cost(DEFAULT_COST*1); // FIXME - expand %{ - vsh8S_reg(dst, src, shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U16 $dst.Q,$src.Q,$shift.Q\t! logical left shift packed8S" + %} + ins_encode %{ + bool quad = true; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); %} + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{ @@ -10857,7 +10730,7 @@ instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl8S_immI(vecX dst, vecX src, immI shift) %{ @@ -10873,30 +10746,40 @@ instruct vsl8S_immI(vecX dst, vecX src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// Integers vector logical left/right shift +// Integers vector logical left shift instruct vsl2I_reg(vecD dst, vecD src, vecD shift) %{ predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd()); match(Set dst (LShiftVI src shift)); - match(Set dst (URShiftVI src shift)); - size(4*1); - ins_cost(DEFAULT_COST*1); // FIXME - expand %{ - vsh2I_reg(dst, src, shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U32 $dst.D,$src.D,$shift.D\t! 
logical left shift packed2I" + %} + ins_encode %{ + bool quad = false; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); %} + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl4I_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd()); match(Set dst (LShiftVI src shift)); - match(Set dst (URShiftVI src shift)); - size(4*1); - ins_cost(DEFAULT_COST*1); // FIXME - expand %{ - vsh4I_reg(dst, src, shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U32 $dst.Q,$src.Q,$shift.Q\t! logical left shift packed4I" + %} + ins_encode %{ + bool quad = true; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); %} + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl2I_immI(vecD dst, vecD src, immI shift) %{ @@ -10912,7 +10795,7 @@ instruct vsl2I_immI(vecD dst, vecD src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl4I_immI(vecX dst, vecX src, immI shift) %{ @@ -10928,19 +10811,24 @@ instruct vsl4I_immI(vecX dst, vecX src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// Longs vector logical left/right shift +// Longs vector logical left shift instruct vsl2L_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 2); match(Set dst (LShiftVL src shift)); - match(Set dst (URShiftVL src shift)); - size(4*1); - ins_cost(DEFAULT_COST*1); // FIXME - expand %{ - vsh2L_reg(dst, src, shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U64 $dst.Q,$src.Q,$shift.Q\t! 
logical left shift packed2L" %} + ins_encode %{ + bool quad = true; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_64, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{ @@ -10956,7 +10844,7 @@ instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} // ----------------------- LogicalRightShift ----------------------------------- @@ -10966,391 +10854,444 @@ instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{ // sign extension before a shift. // Chars vector logical right shift -instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{ +instruct vsrl4S_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ predicate(n->as_Vector()->length() == 4); - match(Set dst (URShiftVS src (RShiftCntV shift))); - size(4); - ins_cost(DEFAULT_COST); // FIXME + match(Set dst (URShiftVS src shift)); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME format %{ - "VSHR.U16 $dst.D,$src.D,$shift\t! logical right shift packed4S" + "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" + "VSHL.U16 $dst.D,$src.D,$tmp.D\t! 
logical right shift packed4S" %} ins_encode %{ bool quad = false; - __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, - quad); + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{ +instruct vsrl8S_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ predicate(n->as_Vector()->length() == 8); - match(Set dst (URShiftVS src (RShiftCntV shift))); - size(4); - ins_cost(DEFAULT_COST); // FIXME + match(Set dst (URShiftVS src shift)); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME format %{ - "VSHR.U16 $dst.Q,$src.Q,$shift\t! logical right shift packed8S" + "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" + "VSHL.U16 $dst.Q,$src.Q,$tmp.Q\t! logical right shift packed8S" %} ins_encode %{ bool quad = true; - __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, - quad); + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// Integers vector logical right shift -instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd()); - match(Set dst (URShiftVI src (RShiftCntV shift))); +instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{ + predicate(n->as_Vector()->length() == 4); + match(Set dst (URShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHR.U32 $dst.D,$src.D,$shift\t! logical right shift packed2I" + "VSHR.U16 $dst.D,$src.D,$shift\t! 
logical right shift packed4S" %} ins_encode %{ bool quad = false; - __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, - quad); - %} - ins_pipe( ialu_reg_reg ); // FIXME -%} - -instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd()); - match(Set dst (URShiftVI src (RShiftCntV shift))); - size(4); - ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHR.U32 $dst.Q,$src.Q,$shift\t! logical right shift packed4I" - %} - ins_encode %{ - bool quad = true; - __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, - quad); + __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// Longs vector logical right shift -instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (URShiftVL src (RShiftCntV shift))); +instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{ + predicate(n->as_Vector()->length() == 8); + match(Set dst (URShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHR.U64 $dst.Q,$src.Q,$shift\t! logical right shift packed2L" + "VSHR.U16 $dst.Q,$src.Q,$shift\t! 
logical right shift packed8S" %} ins_encode %{ bool quad = true; - __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant, - quad); + __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// ------------------- ArithmeticRightShift ----------------------------------- - -// Bytes vector arithmetic left/right shift based on sign -instruct vsha8B_reg(vecD dst, vecD src, vecD shift) %{ - predicate(n->as_Vector()->length() == 8); - effect(DEF dst, USE src, USE shift); - size(4); - ins_cost(DEFAULT_COST); // FIXME +// Integers vector logical right shift +instruct vsrl2I_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ + predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd()); + match(Set dst (URShiftVI src shift)); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME format %{ - "VSHL.S8 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed8B" + "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" + "VSHL.U32 $dst.D,$src.D,$tmp.D\t! 
logical right shift packed2I" %} ins_encode %{ bool quad = false; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_8, quad); + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsha16B_reg(vecX dst, vecX src, vecX shift) %{ - predicate(n->as_Vector()->length() == 16); - effect(DEF dst, USE src, USE shift); - size(4); - ins_cost(DEFAULT_COST); // FIXME +instruct vsrl4I_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ + predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd()); + match(Set dst (URShiftVI src shift)); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME format %{ - "VSHL.S8 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed16B" + "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" + "VSHL.U32 $dst.Q,$src.Q,$tmp.Q\t! 
logical right shift packed4I" %} ins_encode %{ bool quad = true; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_8, quad); + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// Shorts vector arithmetic left/right shift based on sign -instruct vsha4S_reg(vecD dst, vecD src, vecD shift) %{ - predicate(n->as_Vector()->length() == 4); - effect(DEF dst, USE src, USE shift); +instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{ + predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd()); + match(Set dst (URShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHL.S16 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed4S" + "VSHR.U32 $dst.D,$src.D,$shift\t! logical right shift packed2I" %} ins_encode %{ bool quad = false; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_16, quad); + __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsha8S_reg(vecX dst, vecX src, vecX shift) %{ - predicate(n->as_Vector()->length() == 8); - effect(DEF dst, USE src, USE shift); +instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{ + predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd()); + match(Set dst (URShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHL.S16 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed8S" + "VSHR.U32 $dst.Q,$src.Q,$shift\t! 
logical right shift packed4I" %} ins_encode %{ bool quad = true; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_16, quad); + __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// Integers vector arithmetic left/right shift based on sign -instruct vsha2I_reg(vecD dst, vecD src, vecD shift) %{ +// Longs vector logical right shift +instruct vsrl2L_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ predicate(n->as_Vector()->length() == 2); - effect(DEF dst, USE src, USE shift); - size(4); - ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.S32 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed2I" - %} - ins_encode %{ - bool quad = false; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_32, quad); - %} - ins_pipe( ialu_reg_reg ); // FIXME -%} - -instruct vsha4I_reg(vecX dst, vecX src, vecX shift) %{ - predicate(n->as_Vector()->length() == 4); - effect(DEF dst, USE src, USE shift); - size(4); - ins_cost(DEFAULT_COST); // FIXME + match(Set dst (URShiftVL src shift)); + effect(TEMP tmp, DEF dst, USE src, USE shift); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME format %{ - "VSHL.S32 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed4I" + "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" + "VSHL.U64 $dst.Q,$src.Q,$tmp.Q\t! 
logical right shift packed2L" %} ins_encode %{ bool quad = true; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_32, quad); + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_64, quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// Longs vector arithmetic left/right shift based on sign -instruct vsha2L_reg(vecX dst, vecX src, vecX shift) %{ +instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{ predicate(n->as_Vector()->length() == 2); - effect(DEF dst, USE src, USE shift); + match(Set dst (URShiftVL src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHL.S64 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed2L" + "VSHR.U64 $dst.Q,$src.Q,$shift\t! logical right shift packed2L" %} ins_encode %{ bool quad = true; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_64, quad); + __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant, + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -// Byte vector arithmetic right shift +// ------------------- ArithmeticRightShift ----------------------------------- -instruct vsra8B_reg(vecD dst, vecD src, vecD shift) %{ +// Byte vector arithmetic right shift +instruct vsra8B_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ predicate(n->as_Vector()->length() == 8); match(Set dst (RShiftVB src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME - expand %{ - vsha8B_reg(dst, src, shift); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME + format %{ + "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" + "VSHL.S8 $dst.D,$src.D,$tmp.D\t! 
arithmetic right shift packed8B" + %} + ins_encode %{ + bool quad = false; + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); %} + ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsrl16B_reg(vecX dst, vecX src, vecX shift) %{ +instruct vsra16B_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ predicate(n->as_Vector()->length() == 16); match(Set dst (RShiftVB src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME - expand %{ - vsha16B_reg(dst, src, shift); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME + format %{ + "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" + "VSHL.S8 $dst.Q,$src.Q,$tmp.Q\t! arithmetic right shift packed16B" %} + ins_encode %{ + bool quad = true; + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsrl8B_immI(vecD dst, vecD src, immI shift) %{ +instruct vsra8B_immI(vecD dst, vecD src, immI shift) %{ predicate(n->as_Vector()->length() == 8); - match(Set dst (RShiftVB src shift)); + match(Set dst (RShiftVB src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHR.S8 $dst.D,$src.D,$shift\t! logical right shift packed8B" + "VSHR.S8 $dst.D,$src.D,$shift\t! 
arithmetic right shift packed8B" %} ins_encode %{ bool quad = false; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant, - quad); + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsrl16B_immI(vecX dst, vecX src, immI shift) %{ +instruct vsra16B_immI(vecX dst, vecX src, immI shift) %{ predicate(n->as_Vector()->length() == 16); - match(Set dst (RShiftVB src shift)); + match(Set dst (RShiftVB src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHR.S8 $dst.Q,$src.Q,$shift\t! logical right shift packed16B" + "VSHR.S8 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed16B" %} ins_encode %{ bool quad = true; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant, - quad); + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} // Shorts vector arithmetic right shift -instruct vsra4S_reg(vecD dst, vecD src, vecD shift) %{ +instruct vsra4S_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ predicate(n->as_Vector()->length() == 4); match(Set dst (RShiftVS src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME - expand %{ - vsha4S_reg(dst, src, shift); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME + format %{ + "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" + "VSHL.S16 $dst.D,$src.D,$tmp.D\t! 
arithmetic right shift packed4S" %} + ins_encode %{ + bool quad = false; + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsra8S_reg(vecX dst, vecX src, vecX shift) %{ +instruct vsra8S_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ predicate(n->as_Vector()->length() == 8); match(Set dst (RShiftVS src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME - expand %{ - vsha8S_reg(dst, src, shift); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME + format %{ + "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" + "VSHL.S16 $dst.Q,$src.Q,$tmp.Q\t! arithmetic right shift packed8S" + %} + ins_encode %{ + bool quad = true; + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); %} + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra4S_immI(vecD dst, vecD src, immI shift) %{ predicate(n->as_Vector()->length() == 4); - match(Set dst (RShiftVS src shift)); + match(Set dst (RShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHR.S16 $dst.D,$src.D,$shift\t! logical right shift packed4S" + "VSHR.S16 $dst.D,$src.D,$shift\t! arithmetic right shift packed4S" %} ins_encode %{ bool quad = false; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, - quad); + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra8S_immI(vecX dst, vecX src, immI shift) %{ predicate(n->as_Vector()->length() == 8); - match(Set dst (RShiftVS src shift)); + match(Set dst (RShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHR.S16 $dst.Q,$src.Q,$shift\t! 
logical right shift packed8S" + "VSHR.S16 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed8S" %} ins_encode %{ bool quad = true; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, - quad); + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} // Integers vector arithmetic right shift -instruct vsra2I_reg(vecD dst, vecD src, vecD shift) %{ +instruct vsra2I_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ predicate(n->as_Vector()->length() == 2); match(Set dst (RShiftVI src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME - expand %{ - vsha2I_reg(dst, src, shift); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME + format %{ + "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" + "VSHL.S32 $dst.D,$src.D,$tmp.D\t! arithmetic right shift packed2I" %} + ins_encode %{ + bool quad = false; + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsra4I_reg(vecX dst, vecX src, vecX shift) %{ +instruct vsra4I_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ predicate(n->as_Vector()->length() == 4); match(Set dst (RShiftVI src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME - expand %{ - vsha4I_reg(dst, src, shift); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME + format %{ + "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" + "VSHL.S32 $dst.Q,$src.Q,$tmp.Q\t! 
arithmetic right shift packed4I" %} + ins_encode %{ + bool quad = true; + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra2I_immI(vecD dst, vecD src, immI shift) %{ predicate(n->as_Vector()->length() == 2); - match(Set dst (RShiftVI src shift)); + match(Set dst (RShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHR.S32 $dst.D,$src.D,$shift\t! logical right shift packed2I" + "VSHR.S32 $dst.D,$src.D,$shift\t! arithmetic right shift packed2I" %} ins_encode %{ bool quad = false; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, - quad); + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra4I_immI(vecX dst, vecX src, immI shift) %{ predicate(n->as_Vector()->length() == 4); - match(Set dst (RShiftVI src shift)); + match(Set dst (RShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHR.S32 $dst.Q,$src.Q,$shift\t! logical right shift packed4I" + "VSHR.S32 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed4I" %} ins_encode %{ bool quad = true; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, - quad); + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} // Longs vector arithmetic right shift -instruct vsra2L_reg(vecX dst, vecX src, vecX shift) %{ +instruct vsra2L_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ predicate(n->as_Vector()->length() == 2); match(Set dst (RShiftVL src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME - expand %{ - vsha2L_reg(dst, src, shift); + effect(TEMP tmp); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME + format %{ + "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" + "VSHL.S64 $dst.Q,$src.Q,$tmp.Q\t! 
arithmetic right shift packed2L" + %} + ins_encode %{ + bool quad = true; + __ vnegI($tmp$$FloatRegister, $shift$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_64, quad); %} + ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra2L_immI(vecX dst, vecX src, immI shift) %{ predicate(n->as_Vector()->length() == 2); - match(Set dst (RShiftVL src shift)); + match(Set dst (RShiftVL src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHR.S64 $dst.Q,$src.Q,$shift\t! logical right shift packed2L" + "VSHR.S64 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed2L" %} ins_encode %{ bool quad = true; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant, - quad); + quad); %} - ins_pipe( ialu_reg_reg ); // FIXME + ins_pipe(ialu_reg_reg); // FIXME %} // --------------------------------- AND -------------------------------------- From 05dfae3ae54118d9a43c230f3f20e915bb51c19c Mon Sep 17 00:00:00 2001 From: Hao Sun Date: Fri, 24 Dec 2021 01:27:21 +0000 Subject: [PATCH 2/4] Use is_var_shift() to determine the location of negation use for right shifts Method is_var_shift() denotes that vector shift count is a variable shift: 1) for this case, vector shift count should be negated before conducting right shifts. E.g., vsrl4S_reg_var rule. 2) for the opposite case, vector shift count is generated via RShiftCntV rules and is already negated there. Hence, no negation is needed. E.g., vsrl4S_reg rule. Besides, it's safe to add "hash()" and "cmp()" methods for ShiftV node.
--- src/hotspot/cpu/arm/arm.ad | 492 +++++++++++++++++++++----- src/hotspot/share/opto/vectornode.hpp | 9 +- 2 files changed, 401 insertions(+), 100 deletions(-) diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad index 5749e0cec6e..3061bf1a512 100644 --- a/src/hotspot/cpu/arm/arm.ad +++ b/src/hotspot/cpu/arm/arm.ad @@ -10591,14 +10591,11 @@ instruct vneg16B_reg(vecX dst, vecX src) %{ ins_pipe( ialu_reg_reg ); // FIXME %} -// ------------------------------ Shift --------------------------------------- +// ------------------------------ ShiftCount ---------------------------------- -// Low bits of vector "shift" elements are used, so it -// doesn't matter if we treat it as ints or bytes here. -instruct vscntD(vecD dst, iRegI cnt) %{ +instruct vslcntD(vecD dst, iRegI cnt) %{ predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd()); match(Set dst (LShiftCntV cnt)); - match(Set dst (RShiftCntV cnt)); size(4); ins_cost(DEFAULT_COST); // FIXME expand %{ @@ -10606,10 +10603,9 @@ instruct vscntD(vecD dst, iRegI cnt) %{ %} %} -instruct vscntX(vecX dst, iRegI cnt) %{ +instruct vslcntX(vecX dst, iRegI cnt) %{ predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd()); match(Set dst (LShiftCntV cnt)); - match(Set dst (RShiftCntV cnt)); size(4); ins_cost(DEFAULT_COST); // FIXME expand %{ @@ -10617,16 +10613,52 @@ instruct vscntX(vecX dst, iRegI cnt) %{ %} %} -// ------------------------------ LeftShift ----------------------------------- +// Low bits of vector "shift" elements are used, so it +// doesn't matter if we treat it as ints or bytes here. +instruct vsrcntD(vecD dst, iRegI cnt) %{ + predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd()); + match(Set dst (RShiftCntV cnt)); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME + format %{ "VDUP.8 $dst.D,$cnt\n\t" + "VNEG.S8 $dst.D,$dst.D\t! 
neg packed8B" %} + ins_encode %{ + bool quad = false; + __ vdupI($dst$$FloatRegister, $cnt$$Register, + MacroAssembler::VELEM_SIZE_8, quad); + __ vnegI($dst$$FloatRegister, $dst$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} -// Byte vector logical left shift -instruct vsl8B_reg(vecD dst, vecD src, vecD shift) %{ +instruct vsrcntX(vecX dst, iRegI cnt) %{ + predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd()); + match(Set dst (RShiftCntV cnt)); + size(4*2); + ins_cost(DEFAULT_COST*2); // FIXME + format %{ "VDUP.8 $dst.Q,$cnt\n\t" + "VNEG.S8 $dst.Q,$dst.Q\t! neg packed16B" %} + ins_encode %{ + bool quad = true; + __ vdupI($dst$$FloatRegister, $cnt$$Register, + MacroAssembler::VELEM_SIZE_8, quad); + __ vnegI($dst$$FloatRegister, $dst$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +// ------------------------------ LogicalShift -------------------------------- + +// Byte vector logical left/right shift based on sign +instruct vsh8B_reg(vecD dst, vecD src, vecD shift) %{ predicate(n->as_Vector()->length() == 8); - match(Set dst (LShiftVB src shift)); + effect(DEF dst, USE src, USE shift); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHL.U8 $dst.D,$src.D,$shift.D\t! logical left shift packed8B" + "VSHL.U8 $dst.D,$src.D,$shift.D\t! logical left/right shift packed8B" %} ins_encode %{ bool quad = false; @@ -10636,13 +10668,13 @@ instruct vsl8B_reg(vecD dst, vecD src, vecD shift) %{ ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsl16B_reg(vecX dst, vecX src, vecX shift) %{ +instruct vsh16B_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 16); - match(Set dst (LShiftVB src shift)); + effect(DEF dst, USE src, USE shift); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ - "VSHL.U8 $dst.Q,$src.Q,$shift.Q\t! logical left shift packed16B" + "VSHL.U8 $dst.Q,$src.Q,$shift.Q\t! 
logical left/right shift packed16B" %} ins_encode %{ bool quad = true; @@ -10652,8 +10684,114 @@ instruct vsl16B_reg(vecX dst, vecX src, vecX shift) %{ ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{ +// Shorts/Char vector logical left/right shift based on sign +instruct vsh4S_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 4); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U16 $dst.D,$src.D,$shift.D\t! logical left/right shift packed4S" + %} + ins_encode %{ + bool quad = false; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +instruct vsh8S_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 8); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U16 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed8S" + %} + ins_encode %{ + bool quad = true; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +// Integers vector logical left/right shift based on sign +instruct vsh2I_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 2); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U32 $dst.D,$src.D,$shift.D\t! 
logical left/right shift packed2I" + %} + ins_encode %{ + bool quad = false; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +instruct vsh4I_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 4); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U32 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed4I" + %} + ins_encode %{ + bool quad = true; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +// Longs vector logical left/right shift based on sign +instruct vsh2L_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 2); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.U64 $dst.Q,$src.Q,$shift.Q\t! 
logical left/right shift packed2L" + %} + ins_encode %{ + bool quad = true; + __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_64, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +// ------------------------------ LogicalLeftShift ---------------------------- + +// Byte vector logical left shift +instruct vsl8B_reg(vecD dst, vecD src, vecD shift) %{ predicate(n->as_Vector()->length() == 8); + match(Set dst (LShiftVB src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + expand %{ + vsh8B_reg(dst, src, shift); + %} +%} + +instruct vsl16B_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 16); + match(Set dst (LShiftVB src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + expand %{ + vsh16B_reg(dst, src, shift); + %} +%} + +instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{ + predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); match(Set dst (LShiftVB src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10669,7 +10807,7 @@ instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsl16B_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 16); + predicate(n->as_Vector()->length() == 16 && !n->as_ShiftV()->is_var_shift()); match(Set dst (LShiftVB src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10690,15 +10828,9 @@ instruct vsl4S_reg(vecD dst, vecD src, vecD shift) %{ match(Set dst (LShiftVS src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U16 $dst.D,$src.D,$shift.D\t! 
logical left shift packed4S" - %} - ins_encode %{ - bool quad = false; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_16, quad); + expand %{ + vsh4S_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl8S_reg(vecX dst, vecX src, vecX shift) %{ @@ -10706,19 +10838,13 @@ instruct vsl8S_reg(vecX dst, vecX src, vecX shift) %{ match(Set dst (LShiftVS src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U16 $dst.Q,$src.Q,$shift.Q\t! logical left shift packed8S" - %} - ins_encode %{ - bool quad = true; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_16, quad); + expand %{ + vsh8S_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 4); + predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); match(Set dst (LShiftVS src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10734,7 +10860,7 @@ instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsl8S_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 8); + predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); match(Set dst (LShiftVS src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10755,15 +10881,9 @@ instruct vsl2I_reg(vecD dst, vecD src, vecD shift) %{ match(Set dst (LShiftVI src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U32 $dst.D,$src.D,$shift.D\t! 
logical left shift packed2I" - %} - ins_encode %{ - bool quad = false; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_32, quad); + expand %{ + vsh2I_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl4I_reg(vecX dst, vecX src, vecX shift) %{ @@ -10771,19 +10891,15 @@ instruct vsl4I_reg(vecX dst, vecX src, vecX shift) %{ match(Set dst (LShiftVI src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U32 $dst.Q,$src.Q,$shift.Q\t! logical left shift packed4I" - %} - ins_encode %{ - bool quad = true; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_32, quad); + expand %{ + vsh4I_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl2I_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd()); + predicate(n->as_Vector()->length() == 2 && + VM_Version::has_simd() && + !n->as_ShiftV()->is_var_shift()); match(Set dst (LShiftVI src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10799,7 +10915,9 @@ instruct vsl2I_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsl4I_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd()); + predicate(n->as_Vector()->length() == 4 && + VM_Version::has_simd() && + !n->as_ShiftV()->is_var_shift()); match(Set dst (LShiftVI src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10820,19 +10938,13 @@ instruct vsl2L_reg(vecX dst, vecX src, vecX shift) %{ match(Set dst (LShiftVL src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.U64 $dst.Q,$src.Q,$shift.Q\t! 
logical left shift packed2L" - %} - ins_encode %{ - bool quad = true; - __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_64, quad); + expand %{ + vsh2L_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 2); + predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); match(Set dst (LShiftVL src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10853,9 +10965,29 @@ instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{ // for negative data because java code convert short value into int with // sign extension before a shift. +// Right shift with vector shift count on aarch32 SIMD is implemented as left +// shift by negative shift count value. +// +// Method is_var_shift() denotes that vector shift count is a variable shift: +// 1) for this case, vector shift count should be negated before conducting +// right shifts. E.g., vsrl4S_reg_var rule. +// 2) for the opposite case, vector shift count is generated via RShiftCntV +// rules and is already negated there. Hence, no negation is needed. +// E.g., vsrl4S_reg rule. 
+ // Chars vector logical right shift -instruct vsrl4S_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ - predicate(n->as_Vector()->length() == 4); +instruct vsrl4S_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (URShiftVS src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + expand %{ + vsh4S_reg(dst, src, shift); + %} +%} + +instruct vsrl4S_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ + predicate(n->as_Vector()->length() == 4 && n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVS src shift)); effect(TEMP tmp); size(4*2); @@ -10874,8 +11006,18 @@ instruct vsrl4S_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsrl8S_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ - predicate(n->as_Vector()->length() == 8); +instruct vsrl8S_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (URShiftVS src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + expand %{ + vsh8S_reg(dst, src, shift); + %} +%} + +instruct vsrl8S_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ + predicate(n->as_Vector()->length() == 8 && n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVS src shift)); effect(TEMP tmp); size(4*2); @@ -10895,7 +11037,7 @@ instruct vsrl8S_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 4); + predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10911,7 +11053,7 @@ instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 8); + predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); match(Set 
dst (URShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10927,8 +11069,22 @@ instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{ %} // Integers vector logical right shift -instruct vsrl2I_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ - predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd()); +instruct vsrl2I_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 2 && + VM_Version::has_simd() && + !n->as_ShiftV()->is_var_shift()); + match(Set dst (URShiftVI src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + expand %{ + vsh2I_reg(dst, src, shift); + %} +%} + +instruct vsrl2I_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ + predicate(n->as_Vector()->length() == 2 && + VM_Version::has_simd() && + n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVI src shift)); effect(TEMP tmp); size(4*2); @@ -10947,8 +11103,22 @@ instruct vsrl2I_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsrl4I_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ - predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd()); +instruct vsrl4I_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 4 && + VM_Version::has_simd() && + !n->as_ShiftV()->is_var_shift()); + match(Set dst (URShiftVI src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + expand %{ + vsh4I_reg(dst, src, shift); + %} +%} + +instruct vsrl4I_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ + predicate(n->as_Vector()->length() == 4 && + VM_Version::has_simd() && + n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVI src shift)); effect(TEMP tmp); size(4*2); @@ -10968,7 +11138,9 @@ instruct vsrl4I_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd()); + predicate(n->as_Vector()->length() == 2 && + VM_Version::has_simd() && + 
!n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10984,7 +11156,9 @@ instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd()); + predicate(n->as_Vector()->length() == 4 && + VM_Version::has_simd() && + !n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11000,8 +11174,18 @@ instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{ %} // Longs vector logical right shift -instruct vsrl2L_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ - predicate(n->as_Vector()->length() == 2); +instruct vsrl2L_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (URShiftVL src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + expand %{ + vsh2L_reg(dst, src, shift); + %} +%} + +instruct vsrl2L_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ + predicate(n->as_Vector()->length() == 2 && n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVL src shift)); effect(TEMP tmp, DEF dst, USE src, USE shift); size(4*2); @@ -11021,7 +11205,7 @@ instruct vsrl2L_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 2); + predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVL src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11039,8 +11223,24 @@ instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{ // ------------------- ArithmeticRightShift ----------------------------------- // Byte vector arithmetic right shift -instruct vsra8B_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ - predicate(n->as_Vector()->length() == 8); +instruct vsra8B_reg(vecD dst, vecD src, 
vecD shift) %{ + predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (RShiftVB src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S8 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed8B" + %} + ins_encode %{ + bool quad = false; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +instruct vsra8B_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ + predicate(n->as_Vector()->length() == 8 && n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVB src shift)); effect(TEMP tmp); size(4*2); @@ -11059,8 +11259,24 @@ instruct vsra8B_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsra16B_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ - predicate(n->as_Vector()->length() == 16); +instruct vsra16B_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 16 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (RShiftVB src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S8 $dst.Q,$src.Q,$shift.Q\t! 
arithmetic right shift packed16B" + %} + ins_encode %{ + bool quad = true; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +instruct vsra16B_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ + predicate(n->as_Vector()->length() == 16 && n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVB src shift)); effect(TEMP tmp); size(4*2); @@ -11080,7 +11296,7 @@ instruct vsra16B_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsra8B_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 8); + predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVB src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11096,7 +11312,7 @@ instruct vsra8B_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsra16B_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 16); + predicate(n->as_Vector()->length() == 16 && !n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVB src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11112,8 +11328,24 @@ instruct vsra16B_immI(vecX dst, vecX src, immI shift) %{ %} // Shorts vector arithmetic right shift -instruct vsra4S_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ - predicate(n->as_Vector()->length() == 4); +instruct vsra4S_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (RShiftVS src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S16 $dst.D,$src.D,$shift.D\t! 
arithmetic right shift packed4S" + %} + ins_encode %{ + bool quad = false; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +instruct vsra4S_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ + predicate(n->as_Vector()->length() == 4 && n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVS src shift)); effect(TEMP tmp); size(4*2); @@ -11132,8 +11364,24 @@ instruct vsra4S_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsra8S_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ - predicate(n->as_Vector()->length() == 8); +instruct vsra8S_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (RShiftVS src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S16 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed8S" + %} + ins_encode %{ + bool quad = true; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +instruct vsra8S_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ + predicate(n->as_Vector()->length() == 8 && n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVS src shift)); effect(TEMP tmp); size(4*2); @@ -11153,7 +11401,7 @@ instruct vsra8S_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsra4S_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 4); + predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11169,7 +11417,7 @@ instruct vsra4S_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsra8S_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 8); + predicate(n->as_Vector()->length() == 8 && 
!n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11185,8 +11433,24 @@ instruct vsra8S_immI(vecX dst, vecX src, immI shift) %{ %} // Integers vector arithmetic right shift -instruct vsra2I_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ - predicate(n->as_Vector()->length() == 2); +instruct vsra2I_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (RShiftVI src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S32 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed2I" + %} + ins_encode %{ + bool quad = false; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +instruct vsra2I_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ + predicate(n->as_Vector()->length() == 2 && n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVI src shift)); effect(TEMP tmp); size(4*2); @@ -11205,8 +11469,24 @@ instruct vsra2I_reg(vecD dst, vecD src, vecD shift, vecD tmp) %{ ins_pipe(ialu_reg_reg); // FIXME %} -instruct vsra4I_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ - predicate(n->as_Vector()->length() == 4); +instruct vsra4I_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (RShiftVI src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S32 $dst.Q,$src.Q,$shift.Q\t! 
arithmetic right shift packed4I" + %} + ins_encode %{ + bool quad = true; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +instruct vsra4I_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ + predicate(n->as_Vector()->length() == 4 && n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVI src shift)); effect(TEMP tmp); size(4*2); @@ -11226,7 +11506,7 @@ instruct vsra4I_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsra2I_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 2); + predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11242,7 +11522,7 @@ instruct vsra2I_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsra4I_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 4); + predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11258,8 +11538,24 @@ instruct vsra4I_immI(vecX dst, vecX src, immI shift) %{ %} // Longs vector arithmetic right shift -instruct vsra2L_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ - predicate(n->as_Vector()->length() == 2); +instruct vsra2L_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (RShiftVL src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S64 $dst.Q,$src.Q,$shift.Q\t! 
arithmetic right shift packed2L" + %} + ins_encode %{ + bool quad = true; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_64, quad); + %} + ins_pipe(ialu_reg_reg); // FIXME +%} + +instruct vsra2L_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ + predicate(n->as_Vector()->length() == 2 && n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVL src shift)); effect(TEMP tmp); size(4*2); @@ -11279,7 +11575,7 @@ instruct vsra2L_reg(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsra2L_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 2); + predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); match(Set dst (RShiftVL src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME diff --git a/src/hotspot/share/opto/vectornode.hpp b/src/hotspot/share/opto/vectornode.hpp index fc424f59a3e..483bfdf9bb4 100644 --- a/src/hotspot/share/opto/vectornode.hpp +++ b/src/hotspot/share/opto/vectornode.hpp @@ -531,7 +531,8 @@ class SqrtVDNode : public VectorNode { // Class ShiftV functionality. This covers the common behaviors for all kinds // of vector shifts. 
class ShiftVNode : public VectorNode { - bool _is_var_shift; + private: + bool _is_var_shift; public: ShiftVNode(Node* in1, Node* in2, const TypeVect* vt, bool is_var_shift) : VectorNode(in1,in2,vt), _is_var_shift(is_var_shift) { @@ -539,8 +540,12 @@ class ShiftVNode : public VectorNode { } virtual Node* Identity(PhaseGVN* phase); virtual int Opcode() const = 0; + virtual uint hash() const { return VectorNode::hash() + _is_var_shift; } + virtual bool cmp(const Node& n) const { + return VectorNode::cmp(n) && _is_var_shift == ((ShiftVNode&)n)._is_var_shift; + } bool is_var_shift() { return _is_var_shift;} - virtual uint size_of() const { return sizeof(ShiftVNode); } + virtual uint size_of() const { return sizeof(ShiftVNode); } }; //------------------------------LShiftVBNode----------------------------------- From 566efefe2096237f86d0643e4d288e6aa49d1578 Mon Sep 17 00:00:00 2001 From: Hao Sun Date: Mon, 27 Dec 2021 06:01:36 +0000 Subject: [PATCH 3/4] Make minimal updates to existing rules 1. logical left shift rules a). add is_var_shift check for vslAA_immI rules. b). for vslAA_reg rules, remove the matching for URShiftV cases as we have the separate logical right shift rules now. 2. logical right shift rules a). add vsrlAA_reg and vsrlAA_reg_var rules. b). add is_var_shift check for vsrlAA_immI rules. 3. arithmetic right shift rules a). add is_var_shift check for vsraAA_reg rules. b). add vsraAA_reg_var rules c). for vsraAA_immI rules, add is_var_shift check and update the match primitive. Code style issues (FIXME and the surrounding space in ins_pipe): 1. for modified rules, keep it as it was 2.
for newly added rules, update the style --- src/hotspot/cpu/arm/arm.ad | 347 ++++++++++++++++++++++--------------- 1 file changed, 211 insertions(+), 136 deletions(-) diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad index 3061bf1a512..51873b54134 100644 --- a/src/hotspot/cpu/arm/arm.ad +++ b/src/hotspot/cpu/arm/arm.ad @@ -10620,6 +10620,7 @@ instruct vsrcntD(vecD dst, iRegI cnt) %{ match(Set dst (RShiftCntV cnt)); size(4*2); ins_cost(DEFAULT_COST*2); // FIXME + format %{ "VDUP.8 $dst.D,$cnt\n\t" "VNEG.S8 $dst.D,$dst.D\t! neg packed8B" %} ins_encode %{ @@ -10629,7 +10630,7 @@ instruct vsrcntD(vecD dst, iRegI cnt) %{ __ vnegI($dst$$FloatRegister, $dst$$FloatRegister, MacroAssembler::VELEM_SIZE_8, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsrcntX(vecX dst, iRegI cnt) %{ @@ -10646,7 +10647,7 @@ instruct vsrcntX(vecX dst, iRegI cnt) %{ __ vnegI($dst$$FloatRegister, $dst$$FloatRegister, MacroAssembler::VELEM_SIZE_8, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // ------------------------------ LogicalShift -------------------------------- @@ -10665,7 +10666,7 @@ instruct vsh8B_reg(vecD dst, vecD src, vecD shift) %{ __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_8, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsh16B_reg(vecX dst, vecX src, vecX shift) %{ @@ -10681,7 +10682,7 @@ instruct vsh16B_reg(vecX dst, vecX src, vecX shift) %{ __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_8, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Shorts/Char vector logical left/right shift based on sign @@ -10698,7 +10699,7 @@ instruct vsh4S_reg(vecD dst, vecD src, vecD shift) %{ __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_16, 
quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsh8S_reg(vecX dst, vecX src, vecX shift) %{ @@ -10714,7 +10715,7 @@ instruct vsh8S_reg(vecX dst, vecX src, vecX shift) %{ __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_16, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Integers vector logical left/right shift based on sign @@ -10731,7 +10732,7 @@ instruct vsh2I_reg(vecD dst, vecD src, vecD shift) %{ __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_32, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsh4I_reg(vecX dst, vecX src, vecX shift) %{ @@ -10747,7 +10748,7 @@ instruct vsh4I_reg(vecX dst, vecX src, vecX shift) %{ __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_32, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Longs vector logical left/right shift based on sign @@ -10764,7 +10765,7 @@ instruct vsh2L_reg(vecX dst, vecX src, vecX shift) %{ __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_64, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // ------------------------------ LogicalLeftShift ---------------------------- @@ -10773,8 +10774,8 @@ instruct vsh2L_reg(vecX dst, vecX src, vecX shift) %{ instruct vsl8B_reg(vecD dst, vecD src, vecD shift) %{ predicate(n->as_Vector()->length() == 8); match(Set dst (LShiftVB src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME + size(4*1); + ins_cost(DEFAULT_COST*1); // FIXME expand %{ vsh8B_reg(dst, src, shift); %} @@ -10783,8 +10784,8 @@ instruct vsl8B_reg(vecD dst, vecD src, vecD shift) %{ instruct vsl16B_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 16); match(Set dst 
(LShiftVB src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME + size(4*1); + ins_cost(DEFAULT_COST*1); // FIXME expand %{ vsh16B_reg(dst, src, shift); %} @@ -10803,7 +10804,7 @@ instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsl16B_immI(vecX dst, vecX src, immI shift) %{ @@ -10819,15 +10820,15 @@ instruct vsl16B_immI(vecX dst, vecX src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Shorts/Chars vector logical left shift instruct vsl4S_reg(vecD dst, vecD src, vecD shift) %{ predicate(n->as_Vector()->length() == 4); match(Set dst (LShiftVS src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME + size(4*1); + ins_cost(DEFAULT_COST*1); // FIXME expand %{ vsh4S_reg(dst, src, shift); %} @@ -10836,8 +10837,8 @@ instruct vsl4S_reg(vecD dst, vecD src, vecD shift) %{ instruct vsl8S_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 8); match(Set dst (LShiftVS src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME + size(4*1); + ins_cost(DEFAULT_COST*1); // FIXME expand %{ vsh8S_reg(dst, src, shift); %} @@ -10856,7 +10857,7 @@ instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsl8S_immI(vecX dst, vecX src, immI shift) %{ @@ -10872,15 +10873,15 @@ instruct vsl8S_immI(vecX dst, vecX src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Integers vector logical left shift instruct vsl2I_reg(vecD dst, vecD src, vecD shift) %{ 
predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd()); match(Set dst (LShiftVI src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME + size(4*1); + ins_cost(DEFAULT_COST*1); // FIXME expand %{ vsh2I_reg(dst, src, shift); %} @@ -10889,8 +10890,8 @@ instruct vsl2I_reg(vecD dst, vecD src, vecD shift) %{ instruct vsl4I_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd()); match(Set dst (LShiftVI src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME + size(4*1); + ins_cost(DEFAULT_COST*1); // FIXME expand %{ vsh4I_reg(dst, src, shift); %} @@ -10911,7 +10912,7 @@ instruct vsl2I_immI(vecD dst, vecD src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsl4I_immI(vecX dst, vecX src, immI shift) %{ @@ -10929,15 +10930,15 @@ instruct vsl4I_immI(vecX dst, vecX src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Longs vector logical left shift instruct vsl2L_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 2); match(Set dst (LShiftVL src shift)); - size(4); - ins_cost(DEFAULT_COST); // FIXME + size(4*1); + ins_cost(DEFAULT_COST*1); // FIXME expand %{ vsh2L_reg(dst, src, shift); %} @@ -10956,7 +10957,7 @@ instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{ __ vshli($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // ----------------------- LogicalRightShift ----------------------------------- @@ -10980,7 +10981,7 @@ instruct vsrl4S_reg(vecD dst, vecD src, vecD shift) %{ predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVS src shift)); size(4); - ins_cost(DEFAULT_COST); 
// FIXME + ins_cost(DEFAULT_COST); expand %{ vsh4S_reg(dst, src, shift); %} @@ -10991,7 +10992,7 @@ instruct vsrl4S_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ match(Set dst (URShiftVS src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" "VSHL.U16 $dst.D,$src.D,$tmp.D\t! logical right shift packed4S" @@ -11003,14 +11004,14 @@ instruct vsrl4S_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_16, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsrl8S_reg(vecX dst, vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVS src shift)); size(4); - ins_cost(DEFAULT_COST); // FIXME + ins_cost(DEFAULT_COST); expand %{ vsh8S_reg(dst, src, shift); %} @@ -11021,7 +11022,7 @@ instruct vsrl8S_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ match(Set dst (URShiftVS src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" "VSHL.U16 $dst.Q,$src.Q,$tmp.Q\t! 
logical right shift packed8S" @@ -11033,7 +11034,7 @@ instruct vsrl8S_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_16, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{ @@ -11047,9 +11048,9 @@ instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{ ins_encode %{ bool quad = false; __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{ @@ -11063,9 +11064,9 @@ instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{ ins_encode %{ bool quad = true; __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Integers vector logical right shift @@ -11075,7 +11076,7 @@ instruct vsrl2I_reg(vecD dst, vecD src, vecD shift) %{ !n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVI src shift)); size(4); - ins_cost(DEFAULT_COST); // FIXME + ins_cost(DEFAULT_COST); expand %{ vsh2I_reg(dst, src, shift); %} @@ -11088,7 +11089,7 @@ instruct vsrl2I_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ match(Set dst (URShiftVI src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" "VSHL.U32 $dst.D,$src.D,$tmp.D\t! 
logical right shift packed2I" @@ -11100,7 +11101,7 @@ instruct vsrl2I_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_32, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsrl4I_reg(vecX dst, vecX src, vecX shift) %{ @@ -11109,7 +11110,7 @@ instruct vsrl4I_reg(vecX dst, vecX src, vecX shift) %{ !n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVI src shift)); size(4); - ins_cost(DEFAULT_COST); // FIXME + ins_cost(DEFAULT_COST); expand %{ vsh4I_reg(dst, src, shift); %} @@ -11122,7 +11123,7 @@ instruct vsrl4I_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ match(Set dst (URShiftVI src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" "VSHL.U32 $dst.Q,$src.Q,$tmp.Q\t! logical right shift packed4I" @@ -11134,7 +11135,7 @@ instruct vsrl4I_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_32, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{ @@ -11150,9 +11151,9 @@ instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{ ins_encode %{ bool quad = false; __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{ @@ -11168,9 +11169,9 @@ instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{ ins_encode %{ bool quad = true; __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Longs vector logical right shift @@ -11178,7 +11179,7 @@ instruct vsrl2L_reg(vecX dst, 
vecX src, vecX shift) %{ predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); match(Set dst (URShiftVL src shift)); size(4); - ins_cost(DEFAULT_COST); // FIXME + ins_cost(DEFAULT_COST); expand %{ vsh2L_reg(dst, src, shift); %} @@ -11189,7 +11190,7 @@ instruct vsrl2L_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ match(Set dst (URShiftVL src shift)); effect(TEMP tmp, DEF dst, USE src, USE shift); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" "VSHL.U64 $dst.Q,$src.Q,$tmp.Q\t! logical right shift packed2L" @@ -11201,7 +11202,7 @@ instruct vsrl2L_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ __ vshlUI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_64, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{ @@ -11215,17 +11216,17 @@ instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{ ins_encode %{ bool quad = true; __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // ------------------- ArithmeticRightShift ----------------------------------- -// Byte vector arithmetic right shift -instruct vsra8B_reg(vecD dst, vecD src, vecD shift) %{ - predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); - match(Set dst (RShiftVB src shift)); +// Bytes vector arithmetic left/right shift based on sign +instruct vsha8B_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 8); + effect(DEF dst, USE src, USE shift); size(4); ins_cost(DEFAULT_COST); // FIXME format %{ @@ -11236,7 +11237,117 @@ instruct vsra8B_reg(vecD dst, vecD src, vecD shift) %{ __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_8, quad); %} - ins_pipe(ialu_reg_reg); 
// FIXME + ins_pipe( ialu_reg_reg ); // FIXME +%} + +instruct vsha16B_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 16); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S8 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed16B" + %} + ins_encode %{ + bool quad = true; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_8, quad); + %} + ins_pipe( ialu_reg_reg ); // FIXME +%} + +// Shorts vector arithmetic left/right shift based on sign +instruct vsha4S_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 4); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S16 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed4S" + %} + ins_encode %{ + bool quad = false; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); + %} + ins_pipe( ialu_reg_reg ); // FIXME +%} + +instruct vsha8S_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 8); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S16 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed8S" + %} + ins_encode %{ + bool quad = true; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_16, quad); + %} + ins_pipe( ialu_reg_reg ); // FIXME +%} + +// Integers vector arithmetic left/right shift based on sign +instruct vsha2I_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 2); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S32 $dst.D,$src.D,$shift.D\t! 
arithmetic right shift packed2I" + %} + ins_encode %{ + bool quad = false; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); + %} + ins_pipe( ialu_reg_reg ); // FIXME +%} + +instruct vsha4I_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 4); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S32 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed4I" + %} + ins_encode %{ + bool quad = true; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_32, quad); + %} + ins_pipe( ialu_reg_reg ); // FIXME +%} + +// Longs vector arithmetic left/right shift based on sign +instruct vsha2L_reg(vecX dst, vecX src, vecX shift) %{ + predicate(n->as_Vector()->length() == 2); + effect(DEF dst, USE src, USE shift); + size(4); + ins_cost(DEFAULT_COST); // FIXME + format %{ + "VSHL.S64 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed2L" + %} + ins_encode %{ + bool quad = true; + __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, + MacroAssembler::VELEM_SIZE_64, quad); + %} + ins_pipe( ialu_reg_reg ); // FIXME +%} + +// Byte vector arithmetic right shift +instruct vsra8B_reg(vecD dst, vecD src, vecD shift) %{ + predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); + match(Set dst (RShiftVB src shift)); + size(4); + ins_cost(DEFAULT_COST); // FIXME + expand %{ + vsha8B_reg(dst, src, shift); + %} %} instruct vsra8B_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ @@ -11244,7 +11355,7 @@ instruct vsra8B_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ match(Set dst (RShiftVB src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" "VSHL.S8 $dst.D,$src.D,$tmp.D\t! 
arithmetic right shift packed8B" @@ -11256,7 +11367,7 @@ instruct vsra8B_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_8, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsra16B_reg(vecX dst, vecX src, vecX shift) %{ @@ -11264,15 +11375,9 @@ instruct vsra16B_reg(vecX dst, vecX src, vecX shift) %{ match(Set dst (RShiftVB src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.S8 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed16B" - %} - ins_encode %{ - bool quad = true; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_8, quad); + expand %{ + vsha16B_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra16B_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ @@ -11280,7 +11385,7 @@ instruct vsra16B_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ match(Set dst (RShiftVB src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" "VSHL.S8 $dst.Q,$src.Q,$tmp.Q\t! 
arithmetic right shift packed16B" @@ -11292,7 +11397,7 @@ instruct vsra16B_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_8, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsra8B_immI(vecD dst, vecD src, immI shift) %{ @@ -11306,9 +11411,9 @@ instruct vsra8B_immI(vecD dst, vecD src, immI shift) %{ ins_encode %{ bool quad = false; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsra16B_immI(vecX dst, vecX src, immI shift) %{ @@ -11322,9 +11427,9 @@ instruct vsra16B_immI(vecX dst, vecX src, immI shift) %{ ins_encode %{ bool quad = true; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Shorts vector arithmetic right shift @@ -11333,15 +11438,9 @@ instruct vsra4S_reg(vecD dst, vecD src, vecD shift) %{ match(Set dst (RShiftVS src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.S16 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed4S" - %} - ins_encode %{ - bool quad = false; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_16, quad); + expand %{ + vsha4S_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra4S_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ @@ -11349,7 +11448,7 @@ instruct vsra4S_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ match(Set dst (RShiftVS src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" "VSHL.S16 $dst.D,$src.D,$tmp.D\t! 
arithmetic right shift packed4S" @@ -11361,7 +11460,7 @@ instruct vsra4S_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_16, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsra8S_reg(vecX dst, vecX src, vecX shift) %{ @@ -11369,15 +11468,9 @@ instruct vsra8S_reg(vecX dst, vecX src, vecX shift) %{ match(Set dst (RShiftVS src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.S16 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed8S" - %} - ins_encode %{ - bool quad = true; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_16, quad); + expand %{ + vsha8S_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra8S_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ @@ -11385,7 +11478,7 @@ instruct vsra8S_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ match(Set dst (RShiftVS src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" "VSHL.S16 $dst.Q,$src.Q,$tmp.Q\t! 
arithmetic right shift packed8S" @@ -11397,7 +11490,7 @@ instruct vsra8S_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_16, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsra4S_immI(vecD dst, vecD src, immI shift) %{ @@ -11411,9 +11504,9 @@ instruct vsra4S_immI(vecD dst, vecD src, immI shift) %{ ins_encode %{ bool quad = false; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsra8S_immI(vecX dst, vecX src, immI shift) %{ @@ -11427,9 +11520,9 @@ instruct vsra8S_immI(vecX dst, vecX src, immI shift) %{ ins_encode %{ bool quad = true; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Integers vector arithmetic right shift @@ -11438,15 +11531,9 @@ instruct vsra2I_reg(vecD dst, vecD src, vecD shift) %{ match(Set dst (RShiftVI src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.S32 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed2I" - %} - ins_encode %{ - bool quad = false; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_32, quad); + expand %{ + vsha2I_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra2I_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ @@ -11454,7 +11541,7 @@ instruct vsra2I_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ match(Set dst (RShiftVI src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.D,$shift.D\n\t! neg packed8B" "VSHL.S32 $dst.D,$src.D,$tmp.D\t! 
arithmetic right shift packed2I" @@ -11466,7 +11553,7 @@ instruct vsra2I_reg_var(vecD dst, vecD src, vecD shift, vecD tmp) %{ __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_32, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsra4I_reg(vecX dst, vecX src, vecX shift) %{ @@ -11474,15 +11561,9 @@ instruct vsra4I_reg(vecX dst, vecX src, vecX shift) %{ match(Set dst (RShiftVI src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.S32 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed4I" - %} - ins_encode %{ - bool quad = true; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_32, quad); + expand %{ + vsha4I_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra4I_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ @@ -11490,7 +11571,7 @@ instruct vsra4I_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ match(Set dst (RShiftVI src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" "VSHL.S32 $dst.Q,$src.Q,$tmp.Q\t! 
arithmetic right shift packed4I" @@ -11502,7 +11583,7 @@ instruct vsra4I_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_32, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsra2I_immI(vecD dst, vecD src, immI shift) %{ @@ -11516,9 +11597,9 @@ instruct vsra2I_immI(vecD dst, vecD src, immI shift) %{ ins_encode %{ bool quad = false; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} instruct vsra4I_immI(vecX dst, vecX src, immI shift) %{ @@ -11532,9 +11613,9 @@ instruct vsra4I_immI(vecX dst, vecX src, immI shift) %{ ins_encode %{ bool quad = true; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // Longs vector arithmetic right shift @@ -11543,15 +11624,9 @@ instruct vsra2L_reg(vecX dst, vecX src, vecX shift) %{ match(Set dst (RShiftVL src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME - format %{ - "VSHL.S64 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed2L" - %} - ins_encode %{ - bool quad = true; - __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister, - MacroAssembler::VELEM_SIZE_64, quad); + expand %{ + vsha2L_reg(dst, src, shift); %} - ins_pipe(ialu_reg_reg); // FIXME %} instruct vsra2L_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ @@ -11559,7 +11634,7 @@ instruct vsra2L_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ match(Set dst (RShiftVL src shift)); effect(TEMP tmp); size(4*2); - ins_cost(DEFAULT_COST*2); // FIXME + ins_cost(DEFAULT_COST*2); format %{ "VNEG.S8 $tmp.Q,$shift.Q\n\t! neg packed16B" "VSHL.S64 $dst.Q,$src.Q,$tmp.Q\t! 
arithmetic right shift packed2L" @@ -11571,7 +11646,7 @@ instruct vsra2L_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ __ vshlSI($dst$$FloatRegister, $tmp$$FloatRegister, $src$$FloatRegister, MacroAssembler::VELEM_SIZE_64, quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe(ialu_reg_reg); %} instruct vsra2L_immI(vecX dst, vecX src, immI shift) %{ @@ -11585,9 +11660,9 @@ instruct vsra2L_immI(vecX dst, vecX src, immI shift) %{ ins_encode %{ bool quad = true; __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant, - quad); + quad); %} - ins_pipe(ialu_reg_reg); // FIXME + ins_pipe( ialu_reg_reg ); // FIXME %} // --------------------------------- AND -------------------------------------- From 6b09742ff3211faaec723d9cf561d2556879f3fb Mon Sep 17 00:00:00 2001 From: Hao Sun Date: Tue, 4 Jan 2022 07:30:49 +0000 Subject: [PATCH 4/4] Add "assert" for immI cases Define helper assert_not_var_shift() and use it for immI cases. Besides, update the copyright year to 2022. --- src/hotspot/cpu/arm/arm.ad | 49 ++++++++++++++++----------- src/hotspot/share/opto/vectornode.hpp | 2 +- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad index 51873b54134..f2ac7b58e73 100644 --- a/src/hotspot/cpu/arm/arm.ad +++ b/src/hotspot/cpu/arm/arm.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -123,9 +123,18 @@ public: }; }; +// Assert that the given node is not a var shift. +bool assert_not_var_shift(const Node *n); %} source %{ + +// Assert that the given node is not a var shift. +bool assert_not_var_shift(const Node *n) { + assert(!n->as_ShiftV()->is_var_shift(), "illegal var shift"); + return true; +} + #define __ _masm. 
static FloatRegister reg_to_FloatRegister_object(int register_encoding); @@ -10792,7 +10801,7 @@ instruct vsl16B_reg(vecX dst, vecX src, vecX shift) %{ %} instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 8 && assert_not_var_shift(n)); match(Set dst (LShiftVB src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10808,7 +10817,7 @@ instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsl16B_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 16 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 16 && assert_not_var_shift(n)); match(Set dst (LShiftVB src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10845,7 +10854,7 @@ instruct vsl8S_reg(vecX dst, vecX src, vecX shift) %{ %} instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 4 && assert_not_var_shift(n)); match(Set dst (LShiftVS src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10861,7 +10870,7 @@ instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsl8S_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 8 && assert_not_var_shift(n)); match(Set dst (LShiftVS src shift)); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10900,7 +10909,7 @@ instruct vsl4I_reg(vecX dst, vecX src, vecX shift) %{ instruct vsl2I_immI(vecD dst, vecD src, immI shift) %{ predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd() && - !n->as_ShiftV()->is_var_shift()); + assert_not_var_shift(n)); match(Set dst (LShiftVI src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10918,7 +10927,7 @@ instruct vsl2I_immI(vecD dst, vecD src, 
immI shift) %{ instruct vsl4I_immI(vecX dst, vecX src, immI shift) %{ predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd() && - !n->as_ShiftV()->is_var_shift()); + assert_not_var_shift(n)); match(Set dst (LShiftVI src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -10945,7 +10954,7 @@ instruct vsl2L_reg(vecX dst, vecX src, vecX shift) %{ %} instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 2 && assert_not_var_shift(n)); match(Set dst (LShiftVL src (LShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11038,7 +11047,7 @@ instruct vsrl8S_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 4 && assert_not_var_shift(n)); match(Set dst (URShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11054,7 +11063,7 @@ instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 8 && assert_not_var_shift(n)); match(Set dst (URShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11141,7 +11150,7 @@ instruct vsrl4I_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{ predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd() && - !n->as_ShiftV()->is_var_shift()); + assert_not_var_shift(n)); match(Set dst (URShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11159,7 +11168,7 @@ instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{ instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{ predicate(n->as_Vector()->length() == 
4 && VM_Version::has_simd() && - !n->as_ShiftV()->is_var_shift()); + assert_not_var_shift(n)); match(Set dst (URShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11206,7 +11215,7 @@ instruct vsrl2L_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 2 && assert_not_var_shift(n)); match(Set dst (URShiftVL src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11401,7 +11410,7 @@ instruct vsra16B_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsra8B_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 8 && assert_not_var_shift(n)); match(Set dst (RShiftVB src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11417,7 +11426,7 @@ instruct vsra8B_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsra16B_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 16 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 16 && assert_not_var_shift(n)); match(Set dst (RShiftVB src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11494,7 +11503,7 @@ instruct vsra8S_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsra4S_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 4 && assert_not_var_shift(n)); match(Set dst (RShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11510,7 +11519,7 @@ instruct vsra4S_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsra8S_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 8 && !n->as_ShiftV()->is_var_shift()); + 
predicate(n->as_Vector()->length() == 8 && assert_not_var_shift(n)); match(Set dst (RShiftVS src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11587,7 +11596,7 @@ instruct vsra4I_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsra2I_immI(vecD dst, vecD src, immI shift) %{ - predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 2 && assert_not_var_shift(n)); match(Set dst (RShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11603,7 +11612,7 @@ instruct vsra2I_immI(vecD dst, vecD src, immI shift) %{ %} instruct vsra4I_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 4 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 4 && assert_not_var_shift(n)); match(Set dst (RShiftVI src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME @@ -11650,7 +11659,7 @@ instruct vsra2L_reg_var(vecX dst, vecX src, vecX shift, vecX tmp) %{ %} instruct vsra2L_immI(vecX dst, vecX src, immI shift) %{ - predicate(n->as_Vector()->length() == 2 && !n->as_ShiftV()->is_var_shift()); + predicate(n->as_Vector()->length() == 2 && assert_not_var_shift(n)); match(Set dst (RShiftVL src (RShiftCntV shift))); size(4); ins_cost(DEFAULT_COST); // FIXME diff --git a/src/hotspot/share/opto/vectornode.hpp b/src/hotspot/share/opto/vectornode.hpp index 483bfdf9bb4..062ad947c1f 100644 --- a/src/hotspot/share/opto/vectornode.hpp +++ b/src/hotspot/share/opto/vectornode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it