@@ -943,11 +943,40 @@ bool VPointer::scaled_iv_plus_offset(Node* n) {
943943 }
944944 } else if (opc == Op_SubI || opc == Op_SubL) {
945945 if (offset_plus_k (n->in (2 ), true ) && scaled_iv_plus_offset (n->in (1 ))) {
946+ // (offset1 + invar1 + scale * iv) - (offset2 + invar2)
947+ // Subtraction handled via "negate" flag of "offset_plus_k".
946948 NOT_PRODUCT (_tracer.scaled_iv_plus_offset_6 (n);)
947949 return true ;
948950 }
949- if (offset_plus_k (n->in (1 )) && scaled_iv_plus_offset (n->in (2 ))) {
950- _scale *= -1 ;
951+ VPointer tmp (this );
952+ if (offset_plus_k (n->in (1 )) && tmp.scaled_iv_plus_offset (n->in (2 ))) {
953+ // (offset1 + invar1) - (offset2 + invar2 + scale * iv)
954+ // Subtraction handled explicitly below.
955+ assert (_scale == 0 , " shouldn't be set yet" );
956+ // _scale = -tmp._scale
957+ if (!try_MulI_no_overflow (-1 , tmp._scale , _scale)) {
958+ return false ; // mul overflow.
959+ }
960+ // _offset -= tmp._offset
961+ if (!try_SubI_no_overflow (_offset, tmp._offset , _offset)) {
962+ return false ; // sub overflow.
963+ }
964+ // _invar -= tmp._invar
965+ if (tmp._invar != nullptr ) {
966+ maybe_add_to_invar (tmp._invar , true );
967+ #ifdef ASSERT
968+ _debug_invar_scale = tmp._debug_invar_scale ;
969+ _debug_negate_invar = !tmp._debug_negate_invar ;
970+ #endif
971+ }
972+
973+ // Forward info about the int_index:
974+ assert (!_has_int_index_after_convI2L, " no previous int_index discovered" );
975+ _has_int_index_after_convI2L = tmp._has_int_index_after_convI2L ;
976+ _int_index_after_convI2L_offset = tmp._int_index_after_convI2L_offset ;
977+ _int_index_after_convI2L_invar = tmp._int_index_after_convI2L_invar ;
978+ _int_index_after_convI2L_scale = tmp._int_index_after_convI2L_scale ;
979+
951980 NOT_PRODUCT (_tracer.scaled_iv_plus_offset_7 (n);)
952981 return true ;
953982 }
@@ -989,7 +1018,9 @@ bool VPointer::scaled_iv(Node* n) {
9891018 }
9901019 } else if (opc == Op_LShiftI) {
9911020 if (n->in (1 ) == iv () && n->in (2 )->is_Con ()) {
992- _scale = 1 << n->in (2 )->get_int ();
1021+ if (!try_LShiftI_no_overflow (1 , n->in (2 )->get_int (), _scale)) {
1022+ return false ; // shift overflow.
1023+ }
9931024 NOT_PRODUCT (_tracer.scaled_iv_6 (n, _scale);)
9941025 return true ;
9951026 }
@@ -1012,15 +1043,24 @@ bool VPointer::scaled_iv(Node* n) {
10121043 if (tmp.scaled_iv_plus_offset (n->in (1 )) && tmp.has_iv ()) {
10131044 // We successfully matched an integer index, of the form:
10141045 // int_index = int_offset + int_invar + int_scale * iv
1046+ // Forward scale.
1047+ assert (_scale == 0 && tmp._scale != 0 , " iv only found just now" );
1048+ _scale = tmp._scale ;
1049+ // Accumulate offset.
1050+ if (!try_AddI_no_overflow (_offset, tmp._offset , _offset)) {
1051+ return false ; // add overflow.
1052+ }
1053+ // Accumulate invar.
1054+ if (tmp._invar != nullptr ) {
1055+ maybe_add_to_invar (tmp._invar , false );
1056+ }
1057+ // Set info about the int_index:
1058+ assert (!_has_int_index_after_convI2L, " no previous int_index discovered" );
10151059 _has_int_index_after_convI2L = true ;
10161060 _int_index_after_convI2L_offset = tmp._offset ;
10171061 _int_index_after_convI2L_invar = tmp._invar ;
10181062 _int_index_after_convI2L_scale = tmp._scale ;
1019- }
10201063
1021- // Now parse it again for the real VPointer. This makes sure that the int_offset, int_invar,
1022- // and int_scale are properly added to the final VPointer's offset, invar, and scale.
1023- if (scaled_iv_plus_offset (n->in (1 ))) {
10241064 NOT_PRODUCT (_tracer.scaled_iv_7 (n);)
10251065 return true ;
10261066 }
@@ -1039,12 +1079,14 @@ bool VPointer::scaled_iv(Node* n) {
10391079 NOT_PRODUCT (_tracer.scaled_iv_8 (n, &tmp);)
10401080
10411081 if (tmp.scaled_iv_plus_offset (n->in (1 ))) {
1042- int scale = n->in (2 )->get_int ();
1082+ int shift = n->in (2 )->get_int ();
10431083 // Accumulate scale.
1044- _scale = tmp._scale << scale;
1084+ if (!try_LShiftI_no_overflow (tmp._scale , shift, _scale)) {
1085+ return false ; // shift overflow.
1086+ }
10451087 // Accumulate offset.
10461088 int shifted_offset = 0 ;
1047- if (!try_LShiftI_no_overflow (tmp._offset , scale , shifted_offset)) {
1089+ if (!try_LShiftI_no_overflow (tmp._offset , shift , shifted_offset)) {
10481090 return false ; // shift overflow.
10491091 }
10501092 if (!try_AddI_no_overflow (_offset, shifted_offset, _offset)) {
@@ -1061,6 +1103,7 @@ bool VPointer::scaled_iv(Node* n) {
10611103 }
10621104
10631105 // Forward info about the int_index:
1106+ assert (!_has_int_index_after_convI2L, " no previous int_index discovered" );
10641107 _has_int_index_after_convI2L = tmp._has_int_index_after_convI2L ;
10651108 _int_index_after_convI2L_offset = tmp._int_index_after_convI2L_offset ;
10661109 _int_index_after_convI2L_invar = tmp._int_index_after_convI2L_invar ;
@@ -1255,6 +1298,9 @@ bool VPointer::try_AddSubI_no_overflow(int offset1, int offset2, bool is_sub, in
12551298}
12561299
12571300bool VPointer::try_LShiftI_no_overflow (int offset, int shift, int & result) {
1301+ if (shift < 0 || shift > 31 ) {
1302+ return false ;
1303+ }
12581304 jlong long_offset = java_shift_left ((jlong)(offset), shift);
12591305 jint int_offset = java_shift_left ( offset, shift);
12601306 if (long_offset != int_offset) {
@@ -1264,6 +1310,16 @@ bool VPointer::try_LShiftI_no_overflow(int offset, int shift, int& result) {
12641310 return true ;
12651311}
12661312
// Multiply two 32-bit ints into "result", reporting failure instead of overflowing.
// Returns true and writes the product to "result" on success; returns false (leaving
// "result" untouched) when the mathematical product does not fit in 32 bits.
// Overflow is detected by computing the exact 64-bit (jlong) product and comparing
// it with the 32-bit (jint) product — they differ exactly when 32-bit overflow occurred.
// NOTE(review): assumes java_multiply on jint has wrap-around (Java int) semantics,
// as with HotSpot's other java_* arithmetic helpers — confirm in globalDefinitions.
1313+ bool VPointer::try_MulI_no_overflow (int offset1, int offset2, int & result) {
1314+ jlong long_offset = java_multiply ((jlong)(offset1), (jlong)(offset2));
1315+ jint int_offset = java_multiply ( offset1, offset2);
1316+ if (long_offset != int_offset) {
// Wide and narrow products disagree: the jint multiplication overflowed.
1317+ return false ;
1318+ }
1319+ result = int_offset;
1320+ return true ;
1321+ }
1322+
12671323// We use two comparisons, because a subtraction could underflow.
12681324#define RETURN_CMP_VALUE_IF_NOT_EQUAL (a, b ) \
12691325 if (a < b) { return -1 ; } \
0 commit comments