diff --git a/base/arraymath.jl b/base/arraymath.jl
index 4d9b9a8f09ed3..5fbe638d37691 100644
--- a/base/arraymath.jl
+++ b/base/arraymath.jl
@@ -446,8 +446,14 @@ ctranspose{T<:Real}(A::AbstractVecOrMat{T}) = transpose(A)
 transpose(x::AbstractVector) = [ transpose(v) for i=of_indices(x, OneTo(1)), v in x ]
 ctranspose{T}(x::AbstractVector{T}) = T[ ctranspose(v) for i=of_indices(x, OneTo(1)), v in x ]
 
-_cumsum_type{T<:Number}(v::AbstractArray{T}) = typeof(+zero(T))
-_cumsum_type(v) = typeof(v[1]+v[1])
+# see discussion in #18364 ... we try not to widen type of the resulting array
+# from cumsum or cumprod, but in some cases (+, Bool) we may not have a choice.
+rcum_promote_type{T<:Number}(op, ::Type{T}) = promote_op(op, T)
+rcum_promote_type{T}(op, ::Type{T}) = T
+
+# handle sums of Vector{Bool} and similar. it would be nice to handle
+# any AbstractArray here, but it's not clear how that would be possible
+rcum_promote_type{T,N}(op, ::Type{Array{T,N}}) = Array{rcum_promote_type(op,T), N}
 
 for (f, f!, fp, op) = ((:cumsum, :cumsum!, :cumsum_pairwise!, :+),
                        (:cumprod, :cumprod!, :cumprod_pairwise!, :*) )
@@ -470,14 +476,18 @@ for (f, f!, fp, op) = ((:cumsum, :cumsum!, :cumsum_pairwise!, :+),
     end
 
     @eval function ($f!)(result::AbstractVector, v::AbstractVector)
-        n = length(v)
+        li = linearindices(v)
+        li != linearindices(result) && throw(DimensionMismatch("input and output array sizes and indices must match"))
+        n = length(li)
         if n == 0; return result; end
-        ($fp)(v, result, $(op==:+ ? :(zero(first(v))) : :(one(first(v)))), first(indices(v,1)), n)
+        i1 = first(li)
+        @inbounds result[i1] = v1 = v[i1]
+        n == 1 && return result
+        ($fp)(v, result, v1, i1+1, n-1)
         return result
     end
 
-    @eval function ($f)(v::AbstractVector)
-        c = $(op===:+ ? (:(similar(v,_cumsum_type(v)))) : (:(similar(v))))
-        return ($f!)(c, v)
+    @eval function ($f){T}(v::AbstractVector{T})
+        return ($f!)(similar(v, rcum_promote_type($op, T)), v)
     end
 end
diff --git a/base/multidimensional.jl b/base/multidimensional.jl
index 10a495213f639..591e0c46c89a9 100644
--- a/base/multidimensional.jl
+++ b/base/multidimensional.jl
@@ -527,7 +527,7 @@ julia> cumsum(a,2)
  4  9  15
 ```
 """
-cumsum(A::AbstractArray, axis::Integer=1) = cumsum!(similar(A, Base._cumsum_type(A)), A, axis)
+cumsum{T}(A::AbstractArray{T}, axis::Integer=1) = cumsum!(similar(A, Base.rcum_promote_type(+, T)), A, axis)
 cumsum!(B, A::AbstractArray) = cumsum!(B, A, 1)
 """
     cumprod(A, dim=1)
diff --git a/base/reduce.jl b/base/reduce.jl
index f437f38fe80c7..7e8d97d059f2a 100644
--- a/base/reduce.jl
+++ b/base/reduce.jl
@@ -15,24 +15,21 @@ end
 typealias CommonReduceResult Union{UInt64,UInt128,Int64,Int128,Float32,Float64}
 typealias WidenReduceResult Union{SmallSigned, SmallUnsigned, Float16}
 
-# r_promote: promote x to the type of reduce(op, [x])
-r_promote(op, x::WidenReduceResult) = widen(x)
-r_promote(op, x) = x
-r_promote(::typeof(+), x::WidenReduceResult) = widen(x)
-r_promote(::typeof(*), x::WidenReduceResult) = widen(x)
-r_promote(::typeof(+), x::Number) = oftype(x + zero(x), x)
-r_promote(::typeof(*), x::Number) = oftype(x * one(x), x)
-r_promote(::typeof(+), x) = x
-r_promote(::typeof(*), x) = x
-r_promote(::typeof(scalarmax), x::WidenReduceResult) = x
-r_promote(::typeof(scalarmin), x::WidenReduceResult) = x
-r_promote(::typeof(scalarmax), x) = x
-r_promote(::typeof(scalarmin), x) = x
-r_promote(::typeof(max), x::WidenReduceResult) = r_promote(scalarmax, x)
-r_promote(::typeof(min), x::WidenReduceResult) = r_promote(scalarmin, x)
-r_promote(::typeof(max), x) = r_promote(scalarmax, x)
-r_promote(::typeof(min), x) = r_promote(scalarmin, x)
+# r_promote_type: promote T to the type of reduce(op, ::Array{T})
+# (some "extra" methods are required here to avoid ambiguity warnings)
+r_promote_type{T}(op, ::Type{T}) = T
+r_promote_type{T<:WidenReduceResult}(op, ::Type{T}) = widen(T)
+r_promote_type{T<:WidenReduceResult}(::typeof(+), ::Type{T}) = widen(T)
+r_promote_type{T<:WidenReduceResult}(::typeof(*), ::Type{T}) = widen(T)
+r_promote_type{T<:Number}(::typeof(+), ::Type{T}) = typeof(zero(T)+zero(T))
+r_promote_type{T<:Number}(::typeof(*), ::Type{T}) = typeof(one(T)*one(T))
+r_promote_type{T<:WidenReduceResult}(::typeof(scalarmax), ::Type{T}) = T
+r_promote_type{T<:WidenReduceResult}(::typeof(scalarmin), ::Type{T}) = T
+r_promote_type{T<:WidenReduceResult}(::typeof(max), ::Type{T}) = T
+r_promote_type{T<:WidenReduceResult}(::typeof(min), ::Type{T}) = T
+# r_promote: promote x to the type of reduce(op, [x])
+r_promote{T}(op, x::T) = convert(r_promote_type(op, T), x)
 
 ## foldl && mapfoldl
diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl
index d150815d7a478..6d4f366f287e7 100644
--- a/base/sparse/sparsematrix.jl
+++ b/base/sparse/sparsematrix.jl
@@ -353,7 +353,7 @@ function sparse_IJ_sorted!{Ti<:Integer}(I::AbstractVector{Ti}, J::AbstractVector
         end
     end
 
-    colptr = cumsum(cols)
+    colptr = cumsum!(similar(cols), cols)
 
     # Allow up to 20% slack
     if ndups > 0.2*L
diff --git a/test/arrayops.jl b/test/arrayops.jl
index 05f23a06a76d5..74014aeb4e3ff 100644
--- a/test/arrayops.jl
+++ b/test/arrayops.jl
@@ -1689,6 +1689,18 @@ end
 @test cumsum([1 2; 3 4], 2) == [1 3; 3 7]
 @test cumsum([1 2; 3 4], 3) == [1 2; 3 4]
 
+# issue #18363
+@test_throws DimensionMismatch cumsum!([0,0], 1:4)
+@test cumsum(Any[])::Vector{Any} == Any[]
+@test cumsum(Any[1, 2.3])::Vector{Any} == [1, 3.3] == cumsum(Real[1, 2.3])::Vector{Real}
+@test cumsum([true,true,true]) == [1,2,3]
+@test cumsum(0x00:0xff)[end] === 0x80 # overflow
+@test cumsum([[true], [true], [false]])::Vector{Vector{Int}} == [[1], [2], [2]]
+
+#issue #18336
+@test cumsum([-0.0, -0.0])[1] === cumsum([-0.0, -0.0])[2] === -0.0
+@test cumprod(-0.0im + (0:0))[1] === Complex(0.0, -0.0)
+
 module TestNLoops15895
 
 using Base.Cartesian
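
Illustrative note (not part of the patch): the tests added above pin down what the new promotion rules are expected to do. The sketch below is a rough REPL illustration, assuming a Base built with this change; rcum_promote_type is an internal helper introduced here, so it is qualified with Base.

    Base.rcum_promote_type(+, Bool)         # Int: Bool addition widens, so cumsum([true,true,true]) == [1,2,3]
    Base.rcum_promote_type(+, UInt8)        # UInt8: no widening, hence cumsum(0x00:0xff)[end] === 0x80 (wraps around)
    Base.rcum_promote_type(+, Vector{Bool}) # Vector{Int}: the Array{T,N} method recurses into the element type
    Base.rcum_promote_type(+, Any)          # Any: non-Number eltypes are left alone, so cumsum(Any[1, 2.3]) stays Vector{Any}

Per the comment in arraymath.jl, the intent is to keep the result eltype equal to the input eltype wherever possible and to widen only when the operation itself forces it, as + does for Bool.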