diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index dd233e236e17f..02b4e9894d964 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1049,17 +1049,17 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
       // so we can only code-gen them with fpexcept.ignore.
       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f64, Custom);
       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f32, Custom);
+      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Custom);
+      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Custom);
 
       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
-      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
       setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
       setOperationAction(ISD::FROUND, MVT::f64, Legal);
       setOperationAction(ISD::FRINT, MVT::f64, Legal);
 
-      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
       setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
       setOperationAction(ISD::FROUND, MVT::f32, Legal);
diff --git a/llvm/test/CodeGen/PowerPC/vec_rounding.ll b/llvm/test/CodeGen/PowerPC/vec_rounding.ll
index 438c8ebdc099e..987b2f12b5a19 100644
--- a/llvm/test/CodeGen/PowerPC/vec_rounding.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_rounding.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
 ; RUN:   -mcpu=pwr6 -mattr=+altivec < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
+; RUN:   < %s | FileCheck %s --check-prefix=NO-ALTIVEC
 
 ; Check vector round to single-precision toward -infinity (vrfim)
 ; instruction generation using Altivec.
@@ -12,6 +14,33 @@ define <2 x double> @floor_v2f64(<2 x double> %p)
 ; CHECK-NEXT:    frim 1, 1
 ; CHECK-NEXT:    frim 2, 2
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: floor_v2f64:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    mflr 0
+; NO-ALTIVEC-NEXT:    stdu 1, -128(1)
+; NO-ALTIVEC-NEXT:    std 0, 144(1)
+; NO-ALTIVEC-NEXT:    .cfi_def_cfa_offset 128
+; NO-ALTIVEC-NEXT:    .cfi_offset lr, 16
+; NO-ALTIVEC-NEXT:    .cfi_offset f30, -16
+; NO-ALTIVEC-NEXT:    .cfi_offset f31, -8
+; NO-ALTIVEC-NEXT:    stfd 30, 112(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    stfd 31, 120(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 31, 2
+; NO-ALTIVEC-NEXT:    bl floor
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 30, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 31
+; NO-ALTIVEC-NEXT:    bl floor
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 2, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 30
+; NO-ALTIVEC-NEXT:    lfd 31, 120(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 30, 112(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    addi 1, 1, 128
+; NO-ALTIVEC-NEXT:    ld 0, 16(1)
+; NO-ALTIVEC-NEXT:    mtlr 0
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
   ret <2 x double> %t
@@ -26,6 +55,51 @@ define <4 x double> @floor_v4f64(<4 x double> %p)
 ; CHECK-NEXT:    frim 3, 3
 ; CHECK-NEXT:    frim 4, 4
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: floor_v4f64:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    mflr 0
+; NO-ALTIVEC-NEXT:    stdu 1, -144(1)
+; NO-ALTIVEC-NEXT:    std 0, 160(1)
+; NO-ALTIVEC-NEXT:    .cfi_def_cfa_offset 144
+; NO-ALTIVEC-NEXT:    .cfi_offset lr, 16
+; NO-ALTIVEC-NEXT:    .cfi_offset f28, -32
+; NO-ALTIVEC-NEXT:    .cfi_offset f29, -24
+; NO-ALTIVEC-NEXT:    .cfi_offset f30, -16
+; NO-ALTIVEC-NEXT:    .cfi_offset f31, -8
+; NO-ALTIVEC-NEXT:    stfd 28, 112(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    stfd 29, 120(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 29, 2
+; NO-ALTIVEC-NEXT:    stfd 30, 128(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 30, 3
+; NO-ALTIVEC-NEXT:    stfd 31, 136(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 31, 4
+; NO-ALTIVEC-NEXT:    bl floor
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 28, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 29
+; NO-ALTIVEC-NEXT:    bl floor
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 29, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 30
+; NO-ALTIVEC-NEXT:    bl floor
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 30, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 31
+; NO-ALTIVEC-NEXT:    bl floor
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 4, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 28
+; NO-ALTIVEC-NEXT:    lfd 31, 136(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 28, 112(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    fmr 2, 29
+; NO-ALTIVEC-NEXT:    fmr 3, 30
+; NO-ALTIVEC-NEXT:    lfd 30, 128(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 29, 120(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    addi 1, 1, 144
+; NO-ALTIVEC-NEXT:    ld 0, 16(1)
+; NO-ALTIVEC-NEXT:    mtlr 0
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
   ret <4 x double> %t
@@ -38,6 +112,33 @@ define <2 x double> @ceil_v2f64(<2 x double> %p)
 ; CHECK-NEXT:    frip 1, 1
 ; CHECK-NEXT:    frip 2, 2
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: ceil_v2f64:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    mflr 0
+; NO-ALTIVEC-NEXT:    stdu 1, -128(1)
+; NO-ALTIVEC-NEXT:    std 0, 144(1)
+; NO-ALTIVEC-NEXT:    .cfi_def_cfa_offset 128
+; NO-ALTIVEC-NEXT:    .cfi_offset lr, 16
+; NO-ALTIVEC-NEXT:    .cfi_offset f30, -16
+; NO-ALTIVEC-NEXT:    .cfi_offset f31, -8
+; NO-ALTIVEC-NEXT:    stfd 30, 112(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    stfd 31, 120(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 31, 2
+; NO-ALTIVEC-NEXT:    bl ceil
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 30, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 31
+; NO-ALTIVEC-NEXT:    bl ceil
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 2, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 30
+; NO-ALTIVEC-NEXT:    lfd 31, 120(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 30, 112(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    addi 1, 1, 128
+; NO-ALTIVEC-NEXT:    ld 0, 16(1)
+; NO-ALTIVEC-NEXT:    mtlr 0
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
   ret <2 x double> %t
@@ -52,6 +153,51 @@ define <4 x double> @ceil_v4f64(<4 x double> %p)
 ; CHECK-NEXT:    frip 3, 3
 ; CHECK-NEXT:    frip 4, 4
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: ceil_v4f64:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    mflr 0
+; NO-ALTIVEC-NEXT:    stdu 1, -144(1)
+; NO-ALTIVEC-NEXT:    std 0, 160(1)
+; NO-ALTIVEC-NEXT:    .cfi_def_cfa_offset 144
+; NO-ALTIVEC-NEXT:    .cfi_offset lr, 16
+; NO-ALTIVEC-NEXT:    .cfi_offset f28, -32
+; NO-ALTIVEC-NEXT:    .cfi_offset f29, -24
+; NO-ALTIVEC-NEXT:    .cfi_offset f30, -16
+; NO-ALTIVEC-NEXT:    .cfi_offset f31, -8
+; NO-ALTIVEC-NEXT:    stfd 28, 112(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    stfd 29, 120(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 29, 2
+; NO-ALTIVEC-NEXT:    stfd 30, 128(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 30, 3
+; NO-ALTIVEC-NEXT:    stfd 31, 136(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 31, 4
+; NO-ALTIVEC-NEXT:    bl ceil
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 28, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 29
+; NO-ALTIVEC-NEXT:    bl ceil
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 29, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 30
+; NO-ALTIVEC-NEXT:    bl ceil
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 30, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 31
+; NO-ALTIVEC-NEXT:    bl ceil
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 4, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 28
+; NO-ALTIVEC-NEXT:    lfd 31, 136(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 28, 112(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    fmr 2, 29
+; NO-ALTIVEC-NEXT:    fmr 3, 30
+; NO-ALTIVEC-NEXT:    lfd 30, 128(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 29, 120(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    addi 1, 1, 144
+; NO-ALTIVEC-NEXT:    ld 0, 16(1)
+; NO-ALTIVEC-NEXT:    mtlr 0
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
   ret <4 x double> %t
@@ -64,6 +210,33 @@ define <2 x double> @trunc_v2f64(<2 x double> %p)
 ; CHECK-NEXT:    friz 1, 1
 ; CHECK-NEXT:    friz 2, 2
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: trunc_v2f64:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    mflr 0
+; NO-ALTIVEC-NEXT:    stdu 1, -128(1)
+; NO-ALTIVEC-NEXT:    std 0, 144(1)
+; NO-ALTIVEC-NEXT:    .cfi_def_cfa_offset 128
+; NO-ALTIVEC-NEXT:    .cfi_offset lr, 16
+; NO-ALTIVEC-NEXT:    .cfi_offset f30, -16
+; NO-ALTIVEC-NEXT:    .cfi_offset f31, -8
+; NO-ALTIVEC-NEXT:    stfd 30, 112(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    stfd 31, 120(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 31, 2
+; NO-ALTIVEC-NEXT:    bl trunc
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 30, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 31
+; NO-ALTIVEC-NEXT:    bl trunc
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 2, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 30
+; NO-ALTIVEC-NEXT:    lfd 31, 120(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 30, 112(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    addi 1, 1, 128
+; NO-ALTIVEC-NEXT:    ld 0, 16(1)
+; NO-ALTIVEC-NEXT:    mtlr 0
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
   ret <2 x double> %t
@@ -78,6 +251,51 @@ define <4 x double> @trunc_v4f64(<4 x double> %p)
 ; CHECK-NEXT:    friz 3, 3
 ; CHECK-NEXT:    friz 4, 4
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: trunc_v4f64:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    mflr 0
+; NO-ALTIVEC-NEXT:    stdu 1, -144(1)
+; NO-ALTIVEC-NEXT:    std 0, 160(1)
+; NO-ALTIVEC-NEXT:    .cfi_def_cfa_offset 144
+; NO-ALTIVEC-NEXT:    .cfi_offset lr, 16
+; NO-ALTIVEC-NEXT:    .cfi_offset f28, -32
+; NO-ALTIVEC-NEXT:    .cfi_offset f29, -24
+; NO-ALTIVEC-NEXT:    .cfi_offset f30, -16
+; NO-ALTIVEC-NEXT:    .cfi_offset f31, -8
+; NO-ALTIVEC-NEXT:    stfd 28, 112(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    stfd 29, 120(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 29, 2
+; NO-ALTIVEC-NEXT:    stfd 30, 128(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 30, 3
+; NO-ALTIVEC-NEXT:    stfd 31, 136(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 31, 4
+; NO-ALTIVEC-NEXT:    bl trunc
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 28, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 29
+; NO-ALTIVEC-NEXT:    bl trunc
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 29, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 30
+; NO-ALTIVEC-NEXT:    bl trunc
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 30, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 31
+; NO-ALTIVEC-NEXT:    bl trunc
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 4, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 28
+; NO-ALTIVEC-NEXT:    lfd 31, 136(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 28, 112(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    fmr 2, 29
+; NO-ALTIVEC-NEXT:    fmr 3, 30
+; NO-ALTIVEC-NEXT:    lfd 30, 128(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 29, 120(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    addi 1, 1, 144
+; NO-ALTIVEC-NEXT:    ld 0, 16(1)
+; NO-ALTIVEC-NEXT:    mtlr 0
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
   ret <4 x double> %t
@@ -107,6 +325,29 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %p) nounwind
 ; CHECK-NEXT:    ld 0, 16(1)
 ; CHECK-NEXT:    mtlr 0
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: nearbyint_v2f64:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    mflr 0
+; NO-ALTIVEC-NEXT:    stdu 1, -128(1)
+; NO-ALTIVEC-NEXT:    std 0, 144(1)
+; NO-ALTIVEC-NEXT:    stfd 30, 112(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    stfd 31, 120(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 31, 2
+; NO-ALTIVEC-NEXT:    bl nearbyint
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 30, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 31
+; NO-ALTIVEC-NEXT:    bl nearbyint
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 2, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 30
+; NO-ALTIVEC-NEXT:    lfd 31, 120(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 30, 112(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    addi 1, 1, 128
+; NO-ALTIVEC-NEXT:    ld 0, 16(1)
+; NO-ALTIVEC-NEXT:    mtlr 0
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
   ret <2 x double> %t
@@ -152,6 +393,45 @@ define <4 x double> @nearbyint_v4f64(<4 x double> %p) nounwind
 ; CHECK-NEXT:    ld 0, 16(1)
 ; CHECK-NEXT:    mtlr 0
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: nearbyint_v4f64:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    mflr 0
+; NO-ALTIVEC-NEXT:    stdu 1, -144(1)
+; NO-ALTIVEC-NEXT:    std 0, 160(1)
+; NO-ALTIVEC-NEXT:    stfd 28, 112(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    stfd 29, 120(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 29, 2
+; NO-ALTIVEC-NEXT:    stfd 30, 128(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 30, 3
+; NO-ALTIVEC-NEXT:    stfd 31, 136(1) # 8-byte Folded Spill
+; NO-ALTIVEC-NEXT:    fmr 31, 4
+; NO-ALTIVEC-NEXT:    bl nearbyint
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 28, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 29
+; NO-ALTIVEC-NEXT:    bl nearbyint
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 29, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 30
+; NO-ALTIVEC-NEXT:    bl nearbyint
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 30, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 31
+; NO-ALTIVEC-NEXT:    bl nearbyint
+; NO-ALTIVEC-NEXT:    nop
+; NO-ALTIVEC-NEXT:    fmr 4, 1
+; NO-ALTIVEC-NEXT:    fmr 1, 28
+; NO-ALTIVEC-NEXT:    lfd 31, 136(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 28, 112(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    fmr 2, 29
+; NO-ALTIVEC-NEXT:    fmr 3, 30
+; NO-ALTIVEC-NEXT:    lfd 30, 128(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    lfd 29, 120(1) # 8-byte Folded Reload
+; NO-ALTIVEC-NEXT:    addi 1, 1, 144
+; NO-ALTIVEC-NEXT:    ld 0, 16(1)
+; NO-ALTIVEC-NEXT:    mtlr 0
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
   ret <4 x double> %t
@@ -164,6 +444,11 @@ define <4 x float> @floor_v4f32(<4 x float> %p)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrfim 2, 2
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: floor_v4f32:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    vrfim 2, 2
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
   ret <4 x float> %t
@@ -176,6 +461,12 @@ define <8 x float> @floor_v8f32(<8 x float> %p)
 ; CHECK-NEXT:    vrfim 2, 2
 ; CHECK-NEXT:    vrfim 3, 3
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: floor_v8f32:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    vrfim 2, 2
+; NO-ALTIVEC-NEXT:    vrfim 3, 3
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
   ret <8 x float> %t
@@ -187,6 +478,11 @@ define <4 x float> @ceil_v4f32(<4 x float> %p)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrfip 2, 2
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: ceil_v4f32:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    vrfip 2, 2
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
   ret <4 x float> %t
@@ -199,6 +495,12 @@ define <8 x float> @ceil_v8f32(<8 x float> %p)
 ; CHECK-NEXT:    vrfip 2, 2
 ; CHECK-NEXT:    vrfip 3, 3
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: ceil_v8f32:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    vrfip 2, 2
+; NO-ALTIVEC-NEXT:    vrfip 3, 3
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
   ret <8 x float> %t
@@ -210,6 +512,11 @@ define <4 x float> @trunc_v4f32(<4 x float> %p)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrfiz 2, 2
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: trunc_v4f32:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    vrfiz 2, 2
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
   ret <4 x float> %t
@@ -222,6 +529,12 @@ define <8 x float> @trunc_v8f32(<8 x float> %p)
 ; CHECK-NEXT:    vrfiz 2, 2
 ; CHECK-NEXT:    vrfiz 3, 3
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: trunc_v8f32:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    vrfiz 2, 2
+; NO-ALTIVEC-NEXT:    vrfiz 3, 3
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
   ret <8 x float> %t
@@ -233,6 +546,11 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %p)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrfin 2, 2
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: nearbyint_v4f32:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    vrfin 2, 2
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
   ret <4 x float> %t
@@ -245,6 +563,12 @@ define <8 x float> @nearbyint_v8f32(<8 x float> %p)
 ; CHECK-NEXT:    vrfin 2, 2
 ; CHECK-NEXT:    vrfin 3, 3
 ; CHECK-NEXT:    blr
+;
+; NO-ALTIVEC-LABEL: nearbyint_v8f32:
+; NO-ALTIVEC:       # %bb.0:
+; NO-ALTIVEC-NEXT:    vrfin 2, 2
+; NO-ALTIVEC-NEXT:    vrfin 3, 3
+; NO-ALTIVEC-NEXT:    blr
 {
   %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
   ret <8 x float> %t
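
For context: STRICT_FNEARBYINT is the SelectionDAG node produced by LLVM's constrained nearbyint intrinsic, and the truncated comment above the setOperationAction calls ("...so we can only code-gen them with fpexcept.ignore") suggests the Custom action exists to restrict lowering to that exception behavior; the handler itself is not part of this diff. Below is a minimal IR sketch of the vector strict form the new Custom actions now cover. The function name @nearbyint_strict_v2f64 is illustrative; the intrinsic, its metadata operands, and the strictfp attribute are LLVM's standard constrained floating-point API.

; Hypothetical input, assuming the standard constrained-FP intrinsics:
define <2 x double> @nearbyint_strict_v2f64(<2 x double> %p) #0 {
  ; Rounding mode is the dynamic (current) mode; FP exceptions are ignored,
  ; which per the comment in PPCISelLowering.cpp is the only behavior
  ; this lowering can code-gen.
  %t = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %p, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  ret <2 x double> %t
}

declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)

attributes #0 = { strictfp }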