From 1d0fa8a06f6d36745365e16a7e30a7f2ac843038 Mon Sep 17 00:00:00 2001
From: Martin Holters
Date: Fri, 8 Apr 2016 11:49:18 +0200
Subject: [PATCH 1/9] Move identity to operators.jl

---
 base/base.jl      | 2 --
 base/operators.jl | 2 ++
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/base/base.jl b/base/base.jl
index ead103bcb3954..b847a426d21d2 100644
--- a/base/base.jl
+++ b/base/base.jl
@@ -84,8 +84,6 @@ gc_enable(on::Bool) = ccall(:jl_gc_enable, Cint, (Cint,), on)!=0

 bytestring(str::ByteString) = str

-identity(x) = x
-
 # used by { } syntax
 function cell_1d(xs::ANY...)
     n = length(xs)
diff --git a/base/operators.jl b/base/operators.jl
index b2672ce4fc30b..f9c74b65dadb4 100644
--- a/base/operators.jl
+++ b/base/operators.jl
@@ -76,6 +76,8 @@ scalarmin(x::AbstractArray, y ) = throw(ArgumentError("ordering is

 ## definitions providing basic traits of arithmetic operators ##

+identity(x) = x
+
 +(x::Number) = x
 *(x::Number) = x
 (&)(x::Integer) = x

From 0a7295888ea59671d5148b2ed658314666996b89 Mon Sep 17 00:00:00 2001
From: Martin Holters
Date: Wed, 6 Apr 2016 10:01:21 +0200
Subject: [PATCH 2/9] Deprecate unary functors

The following functors are deprecated: IdFun, AbsFun, Abs2Fun, ExpFun, LogFun,
and ConjFun. Specialization for BitArray map! is done by dispatching on
function type instead of using helper type BitFunctorUnary. Also add a few
tests for those.
---
 base/bitarray.jl            |  9 +++---
 base/deprecated.jl          | 16 +++++++++++
 base/functors.jl            | 40 ---------------------------
 base/linalg/dense.jl        |  8 +++---
 base/reduce.jl              | 55 ++++++++++++++++++-------------------
 base/reducedim.jl           | 34 +++++++++++------------
 base/sparse.jl              |  2 +-
 base/sparse/sparsematrix.jl | 14 +++++-----
 base/sparse/sparsevector.jl |  4 +--
 test/bitarray.jl            |  3 ++
 test/functors.jl            | 15 ----------
 test/reducedim.jl           |  4 +--
 test/tuple.jl               | 12 ++++----
 13 files changed, 90 insertions(+), 126 deletions(-)

diff --git a/base/bitarray.jl b/base/bitarray.jl
index ed1bdc973a240..a0135bc5e38ea 100644
--- a/base/bitarray.jl
+++ b/base/bitarray.jl
@@ -1711,19 +1711,20 @@ maximum(B::BitArray) = isempty(B) ? throw(ArgumentError("argument must be non-em
 # arrays since there can be a 64x speedup by working at the level of Int64
 # instead of looping bit-by-bit.

-map(f::Function, A::BitArray) = map(specialized_bitwise_unary(f), A)
 map(f::Function, A::BitArray, B::BitArray) = map(specialized_bitwise_binary(f), A, B)
-map(f::BitFunctorUnary, A::BitArray) = map!(f, similar(A), A)
+map(f::Function, A::BitArray) = map!(f, similar(A), A)
 map(f::BitFunctorBinary, A::BitArray, B::BitArray) = map!(f, similar(A), A, B)

 map!(f, A::BitArray) = map!(f, A, A)
-map!(f::Function, dest::BitArray, A::BitArray) = map!(specialized_bitwise_unary(f), dest, A)
+map!(f::typeof(!), dest::BitArray, A::BitArray) = map!(~, dest, A)
+map!(f::typeof(zero), dest::BitArray, A::BitArray) = fill!(dest, false)
+map!(f::typeof(one), dest::BitArray, A::BitArray) = fill!(dest, true)
 map!(f::Function, dest::BitArray, A::BitArray, B::BitArray) = map!(specialized_bitwise_binary(f), dest, A, B)

 # If we were able to specialize the function to a known bitwise operation,
 # map across the chunks. Otherwise, fall-back to the AbstractArray method that
 # iterates bit-by-bit.
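# Illustrative sketch (not lines from the diff, assuming 0.5-dev semantics): with the
# functor types gone, the chunk-level kernels are selected by ordinary dispatch on the
# function's own singleton type, while unknown functions fall back to the generic
# bit-by-bit AbstractArray path.
B = trues(1000)
map(~, B)        # dispatches on typeof(~): negates whole UInt64 entries of B.chunks
map(x -> ~x, B)  # anonymous function: no chunk kernel, falls back to bit-by-bit map!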
-function map!(f::BitFunctorUnary, dest::BitArray, A::BitArray) +function map!(f::Union{typeof(identity), typeof(~)}, dest::BitArray, A::BitArray) size(A) == size(dest) || throw(DimensionMismatch("sizes of dest and A must match")) isempty(A) && return dest for i=1:length(A.chunks)-1 diff --git a/base/deprecated.jl b/base/deprecated.jl index dc463d55626cf..f9c301e1f7980 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -1020,6 +1020,22 @@ function pmap(f, c...; err_retry=nothing, err_stop=nothing, pids=nothing) return pmap(p, f, c...) end +# 15692 +for (Fun, func) in [(:IdFun, :identity), + (:AbsFun, :abs), + (:Abs2Fun, :abs2), + (:ExpFun, :exp), + (:LogFun, :log), + (:ConjFun, :conj), + ] + @eval begin + @deprecate_binding $(Fun) typeof($(func)) + (::Type{typeof($(func))})() = $(func) + end +end +@deprecate specialized_unary(f::Function) f +@deprecate specialized_bitwise_unary(f::Function) f + # During the 0.5 development cycle, do not add any deprecations below this line # To be deprecated in 0.6 diff --git a/base/functors.jl b/base/functors.jl index b8e847c3ee58b..f83efa50c3ca2 100644 --- a/base/functors.jl +++ b/base/functors.jl @@ -10,24 +10,6 @@ abstract Func{N} -immutable IdFun <: Func{1} end -(::IdFun)(x) = x - -immutable AbsFun <: Func{1} end -(::AbsFun)(x) = abs(x) - -immutable Abs2Fun <: Func{1} end -(::Abs2Fun)(x) = abs2(x) - -immutable ExpFun <: Func{1} end -(::ExpFun)(x) = exp(x) - -immutable LogFun <: Func{1} end -(::LogFun)(x) = log(x) - -immutable ConjFun <: Func{1} end -(::ConjFun)(x) = conj(x) - immutable AndFun <: Func{2} end (::AndFun)(x, y) = x & y @@ -106,7 +88,6 @@ immutable DotRSFun <: Func{2} end immutable UnspecializedFun{N} <: Func{N} f::Function end -(f::UnspecializedFun{1})(x) = f.f(x) (f::UnspecializedFun{2})(x, y) = f.f(x,y) # Special purpose functors @@ -148,12 +129,6 @@ promote_op{T<:Integer}(::PowFun, ::Type{Bool}, ::Type{T}) = Bool ############################################################################## -immutable BitFunctorUnary{T,F} <: Func{1} end -(::BitFunctorUnary{true, true})( p) = p | ~p # Must work for bits and ints -(::BitFunctorUnary{false, false})(p) = p & ~p # LLVM figures them out nicely -(::BitFunctorUnary{true, false})(p) = p -(::BitFunctorUnary{false, true})( p) = ~p - immutable BitFunctorBinary{TT,TF,FT,FF} <: Func{2} end (::BitFunctorBinary{true, true, true, true })(p, q) = p | ~p (::BitFunctorBinary{true, true, true, false})(p, q) = p | q @@ -175,14 +150,6 @@ immutable BitFunctorBinary{TT,TF,FT,FF} <: Func{2} end # Specializations by value -function specialized_unary(f::Function) - is(f, identity) ? IdFun() : - is(f, abs) ? AbsFun() : - is(f, abs2) ? Abs2Fun() : - is(f, exp) ? ExpFun() : - is(f, log) ? LogFun() : - UnspecializedFun{1}(f) -end function specialized_binary(f::Function) is(f, +) ? AddFun() : is(f, -) ? SubFun() : @@ -199,13 +166,6 @@ function specialized_binary(f::Function) UnspecializedFun{2}(f) end -function specialized_bitwise_unary(f::Function) - is(f, identity) ? BitFunctorUnary{true, false}() : - is(f, !) | is(f, ~) ? BitFunctorUnary{false, true }() : - is(f, one) ? BitFunctorUnary{true, true }() : - is(f, zero) ? BitFunctorUnary{false, false}() : - UnspecializedFun{1}(f) -end function specialized_bitwise_binary(f::Function) is(f, &) | is(f, *) | is(f, min) ? BitFunctorBinary{true, false, false, false}() : is(f, |) | is(f, max) ? 
BitFunctorBinary{true, true, true, false}() : diff --git a/base/linalg/dense.jl b/base/linalg/dense.jl index e6e7c3be05b19..f8ca21b5ef1f1 100644 --- a/base/linalg/dense.jl +++ b/base/linalg/dense.jl @@ -36,19 +36,19 @@ isposdef(x::Number) = imag(x)==0 && real(x) > 0 stride1(x::Array) = 1 stride1(x::StridedVector) = stride(x, 1)::Int -import Base: mapreduce_seq_impl, AbsFun, Abs2Fun, AddFun +import Base: mapreduce_seq_impl, AddFun -mapreduce_seq_impl{T<:BlasReal}(::AbsFun, ::AddFun, a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) = +mapreduce_seq_impl{T<:BlasReal}(::typeof(abs), ::AddFun, a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) = BLAS.asum(ilast-ifirst+1, pointer(a, ifirst), stride1(a)) -function mapreduce_seq_impl{T<:BlasReal}(::Abs2Fun, ::AddFun, a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) +function mapreduce_seq_impl{T<:BlasReal}(::typeof(abs2), ::AddFun, a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) n = ilast-ifirst+1 px = pointer(a, ifirst) incx = stride1(a) BLAS.dot(n, px, incx, px, incx) end -function mapreduce_seq_impl{T<:BlasComplex}(::Abs2Fun, ::AddFun, a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) +function mapreduce_seq_impl{T<:BlasComplex}(::typeof(abs2), ::AddFun, a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) n = ilast-ifirst+1 px = pointer(a, ifirst) incx = stride1(a) diff --git a/base/reduce.jl b/base/reduce.jl index 8e5238b7624ce..bafea5749de7e 100644 --- a/base/reduce.jl +++ b/base/reduce.jl @@ -62,8 +62,8 @@ function mapfoldl(f, op, itr) mapfoldl_impl(f, op, v0, itr, i) end -foldl(op, v0, itr) = mapfoldl(IdFun(), op, v0, itr) -foldl(op, itr) = mapfoldl(IdFun(), op, itr) +foldl(op, v0, itr) = mapfoldl(identity, op, v0, itr) +foldl(op, itr) = mapfoldl(identity, op, itr) ## foldr & mapfoldr @@ -86,8 +86,8 @@ end mapfoldr(f, op, v0, itr) = mapfoldr_impl(f, op, v0, itr, endof(itr)) mapfoldr(f, op, itr) = (i = endof(itr); mapfoldr_impl(f, op, f(itr[i]), itr, i-1)) -foldr(op, v0, itr) = mapfoldr(IdFun(), op, v0, itr) -foldr(op, itr) = mapfoldr(IdFun(), op, itr) +foldr(op, v0, itr) = mapfoldr(identity, op, v0, itr) +foldr(op, itr) = mapfoldr(identity, op, itr) ## reduce & mapreduce @@ -122,12 +122,12 @@ mapreduce_impl(f, op, A::AbstractArray, ifirst::Int, ilast::Int) = # handling empty arrays mr_empty(f, op, T) = throw(ArgumentError("reducing over an empty collection is not allowed")) # use zero(T)::T to improve type information when zero(T) is not defined -mr_empty(::IdFun, op::AddFun, T) = r_promote(op, zero(T)::T) -mr_empty(::AbsFun, op::AddFun, T) = r_promote(op, abs(zero(T)::T)) -mr_empty(::Abs2Fun, op::AddFun, T) = r_promote(op, abs2(zero(T)::T)) -mr_empty(::IdFun, op::MulFun, T) = r_promote(op, one(T)::T) -mr_empty(::AbsFun, op::MaxFun, T) = abs(zero(T)::T) -mr_empty(::Abs2Fun, op::MaxFun, T) = abs2(zero(T)::T) +mr_empty(::typeof(identity), op::AddFun, T) = r_promote(op, zero(T)::T) +mr_empty(::typeof(abs), op::AddFun, T) = r_promote(op, abs(zero(T)::T)) +mr_empty(::typeof(abs2), op::AddFun, T) = r_promote(op, abs2(zero(T)::T)) +mr_empty(::typeof(identity), op::MulFun, T) = r_promote(op, one(T)::T) +mr_empty(::typeof(abs), op::MaxFun, T) = abs(zero(T)::T) +mr_empty(::typeof(abs2), op::MaxFun, T) = abs2(zero(T)::T) mr_empty(f, op::AndFun, T) = true mr_empty(f, op::OrFun, T) = false @@ -161,8 +161,8 @@ mapreduce(f, op, a::Number) = f(a) mapreduce(f, op::Function, A::AbstractArray) = mapreduce(f, specialized_binary(op), A) -reduce(op, v0, itr) = mapreduce(IdFun(), op, v0, 
itr) -reduce(op, itr) = mapreduce(IdFun(), op, itr) +reduce(op, v0, itr) = mapreduce(identity, op, v0, itr) +reduce(op, itr) = mapreduce(identity, op, itr) reduce(op, a::Number) = a ### short-circuiting specializations of mapreduce @@ -205,11 +205,10 @@ end mapreduce_no_sc(f, op, itr::Any) = mapfoldl(f, op, itr) mapreduce_no_sc(f, op, itr::AbstractArray) = _mapreduce(f, op, itr) -mapreduce_sc(f::Function, op, itr) = mapreduce_sc(specialized_unary(f), op, itr) +mapreduce_sc(f::Function, op, itr) = mapreduce_no_sc(f, op, itr) mapreduce_sc(f::ReturnsBool, op, itr) = mapreduce_sc_impl(f, op, itr) -mapreduce_sc(f::Func{1}, op, itr) = mapreduce_no_sc(f, op, itr) -mapreduce_sc(f::IdFun, op, itr) = +mapreduce_sc(f::typeof(identity), op, itr) = eltype(itr) <: Bool ? mapreduce_sc_impl(f, op, itr) : mapreduce_no_sc(f, op, itr) @@ -237,16 +236,16 @@ end sum_pairwise_blocksize(f) = 1024 # This appears to show a benefit from a larger block size -sum_pairwise_blocksize(::Abs2Fun) = 4096 +sum_pairwise_blocksize(::typeof(abs2)) = 4096 mapreduce_impl(f, op::AddFun, A::AbstractArray, ifirst::Int, ilast::Int) = mapreduce_pairwise_impl(f, op, A, ifirst, ilast, sum_pairwise_blocksize(f)) sum(f::Union{Callable,Func{1}}, a) = mapreduce(f, AddFun(), a) -sum(a) = mapreduce(IdFun(), AddFun(), a) +sum(a) = mapreduce(identity, AddFun(), a) sum(a::AbstractArray{Bool}) = countnz(a) -sumabs(a) = mapreduce(AbsFun(), AddFun(), a) -sumabs2(a) = mapreduce(Abs2Fun(), AddFun(), a) +sumabs(a) = mapreduce(abs, AddFun(), a) +sumabs2(a) = mapreduce(abs2, AddFun(), a) # Kahan (compensated) summation: O(1) error growth, at the expense # of a considerable increase in computational expense. @@ -274,7 +273,7 @@ end ## prod prod(f::Union{Callable,Func{1}}, a) = mapreduce(f, MulFun(), a) -prod(a) = mapreduce(IdFun(), MulFun(), a) +prod(a) = mapreduce(identity, MulFun(), a) ## maximum & minimum @@ -321,11 +320,11 @@ end maximum(f::Union{Callable,Func{1}}, a) = mapreduce(f, MaxFun(), a) minimum(f::Union{Callable,Func{1}}, a) = mapreduce(f, MinFun(), a) -maximum(a) = mapreduce(IdFun(), MaxFun(), a) -minimum(a) = mapreduce(IdFun(), MinFun(), a) +maximum(a) = mapreduce(identity, MaxFun(), a) +minimum(a) = mapreduce(identity, MinFun(), a) -maxabs(a) = mapreduce(AbsFun(), MaxFun(), a) -minabs(a) = mapreduce(AbsFun(), MinFun(), a) +maxabs(a) = mapreduce(abs, MaxFun(), a) +minabs(a) = mapreduce(abs, MinFun(), a) ## extrema @@ -397,19 +396,19 @@ end ## all & any -any(itr) = any(IdFun(), itr) -all(itr) = all(IdFun(), itr) +any(itr) = any(identity, itr) +all(itr) = all(identity, itr) any(f::Any, itr) = any(Predicate(f), itr) any(f::Predicate, itr) = mapreduce_sc_impl(f, OrFun(), itr) -any(f::IdFun, itr) = +any(f::typeof(identity), itr) = eltype(itr) <: Bool ? mapreduce_sc_impl(f, OrFun(), itr) : reduce(or_bool_only, itr) all(f::Any, itr) = all(Predicate(f), itr) all(f::Predicate, itr) = mapreduce_sc_impl(f, AndFun(), itr) -all(f::IdFun, itr) = +all(f::typeof(identity), itr) = eltype(itr) <: Bool ? 
mapreduce_sc_impl(f, AndFun(), itr) : reduce(and_bool_only, itr) diff --git a/base/reducedim.jl b/base/reducedim.jl index c861173fd88e5..377094513c174 100644 --- a/base/reducedim.jl +++ b/base/reducedim.jl @@ -122,7 +122,7 @@ end reducedim_init{T}(f, op::MaxFun, A::AbstractArray{T}, region) = reducedim_initarray0(A, region, typemin(f(zero(T)))) reducedim_init{T}(f, op::MinFun, A::AbstractArray{T}, region) = reducedim_initarray0(A, region, typemax(f(zero(T)))) -reducedim_init{T}(f::Union{AbsFun,Abs2Fun}, op::MaxFun, A::AbstractArray{T}, region) = +reducedim_init{T}(f::Union{typeof(abs),typeof(abs2)}, op::MaxFun, A::AbstractArray{T}, region) = reducedim_initarray(A, region, zero(f(zero(T)))) reducedim_init(f, op::AndFun, A::AbstractArray, region) = reducedim_initarray(A, region, true) @@ -133,17 +133,17 @@ reducedim_init(f, op::OrFun, A::AbstractArray, region) = reducedim_initarray(A, for (IT, RT) in ((CommonReduceResult, :(eltype(A))), (SmallSigned, :Int), (SmallUnsigned, :UInt)) T = Union{[AbstractArray{t} for t in IT.types]..., [AbstractArray{Complex{t}} for t in IT.types]...} @eval begin - reducedim_init(f::IdFun, op::AddFun, A::$T, region) = + reducedim_init(f::typeof(identity), op::AddFun, A::$T, region) = reducedim_initarray(A, region, zero($RT)) - reducedim_init(f::IdFun, op::MulFun, A::$T, region) = + reducedim_init(f::typeof(identity), op::MulFun, A::$T, region) = reducedim_initarray(A, region, one($RT)) - reducedim_init(f::Union{AbsFun,Abs2Fun}, op::AddFun, A::$T, region) = + reducedim_init(f::Union{typeof(abs),typeof(abs2)}, op::AddFun, A::$T, region) = reducedim_initarray(A, region, real(zero($RT))) - reducedim_init(f::Union{AbsFun,Abs2Fun}, op::MulFun, A::$T, region) = + reducedim_init(f::Union{typeof(abs),typeof(abs2)}, op::MulFun, A::$T, region) = reducedim_initarray(A, region, real(one($RT))) end end -reducedim_init(f::Union{IdFun,AbsFun,Abs2Fun}, op::AddFun, A::AbstractArray{Bool}, region) = +reducedim_init(f::Union{typeof(identity),typeof(abs),typeof(abs2)}, op::AddFun, A::AbstractArray{Bool}, region) = reducedim_initarray(A, region, 0) @@ -234,15 +234,15 @@ mapreducedim!(f, op, R::AbstractArray, A::AbstractArray) = (_mapreducedim!(f, to_op(op), R, A); R) reducedim!{RT}(op, R::AbstractArray{RT}, A::AbstractArray) = - mapreducedim!(IdFun(), op, R, A, zero(RT)) + mapreducedim!(identity, op, R, A, zero(RT)) mapreducedim(f, op, A::AbstractArray, region, v0) = mapreducedim!(f, op, reducedim_initarray(A, region, v0), A) mapreducedim{T}(f, op, A::AbstractArray{T}, region) = mapreducedim!(f, op, reducedim_init(f, to_op(op), A, region), A) -reducedim(op, A::AbstractArray, region, v0) = mapreducedim(IdFun(), op, A, region, v0) -reducedim(op, A::AbstractArray, region) = mapreducedim(IdFun(), op, A, region) +reducedim(op, A::AbstractArray, region, v0) = mapreducedim(identity, op, A, region, v0) +reducedim(op, A::AbstractArray, region) = mapreducedim(identity, op, A, region) ##### Specific reduction functions ##### @@ -255,24 +255,24 @@ for (fname, Op) in [(:sum, :AddFun), (:prod, :MulFun), @eval begin $(fname!)(f::Union{Function,Func{1}}, r::AbstractArray, A::AbstractArray; init::Bool=true) = mapreducedim!(f, $(Op)(), initarray!(r, $(Op)(), init), A) - $(fname!)(r::AbstractArray, A::AbstractArray; init::Bool=true) = $(fname!)(IdFun(), r, A; init=init) + $(fname!)(r::AbstractArray, A::AbstractArray; init::Bool=true) = $(fname!)(identity, r, A; init=init) $(fname)(f::Union{Function,Func{1}}, A::AbstractArray, region) = mapreducedim(f, $(Op)(), A, region) - $(fname)(A::AbstractArray, 
region) = $(fname)(IdFun(), A, region) + $(fname)(A::AbstractArray, region) = $(fname)(identity, A, region) end end -for (fname, fbase, Fun) in [(:sumabs, :sum, :AbsFun), - (:sumabs2, :sum, :Abs2Fun), - (:maxabs, :maximum, :AbsFun), - (:minabs, :minimum, :AbsFun)] +for (fname, fbase, fun) in [(:sumabs, :sum, :abs), + (:sumabs2, :sum, :abs2), + (:maxabs, :maximum, :abs), + (:minabs, :minimum, :abs)] fname! = symbol(fname, '!') fbase! = symbol(fbase, '!') @eval begin $(fname!)(r::AbstractArray, A::AbstractArray; init::Bool=true) = - $(fbase!)($(Fun)(), r, A; init=init) - $(fname)(A::AbstractArray, region) = $(fbase)($(Fun)(), A, region) + $(fbase!)($(fun), r, A; init=init) + $(fname)(A::AbstractArray, region) = $(fbase)($(fun), A, region) end end diff --git a/base/sparse.jl b/base/sparse.jl index 6a4d684d1b943..db34080d384ab 100644 --- a/base/sparse.jl +++ b/base/sparse.jl @@ -2,7 +2,7 @@ module SparseArrays -using Base: Func, AddFun, OrFun, ConjFun, IdFun +using Base: Func, AddFun, OrFun using Base: ReshapedArray using Base.Sort: Forward using Base.LinAlg: AbstractTriangular, PosDefException diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl index b04abd2264cd8..0dd10d78062db 100644 --- a/base/sparse/sparsematrix.jl +++ b/base/sparse/sparsematrix.jl @@ -706,8 +706,8 @@ function qftranspose!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC{Tv,Ti C end -transpose!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC{Tv,Ti}) = qftranspose!(C, A, 1:A.n, Base.IdFun()) -ctranspose!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC{Tv,Ti}) = qftranspose!(C, A, 1:A.n, Base.ConjFun()) +transpose!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC{Tv,Ti}) = qftranspose!(C, A, 1:A.n, identity) +ctranspose!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC{Tv,Ti}) = qftranspose!(C, A, 1:A.n, conj) "See `qftranspose!`" ftranspose!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC{Tv,Ti}, f) = qftranspose!(C, A, 1:A.n, f) """ @@ -722,8 +722,8 @@ function qftranspose{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}, q::AbstractVector, f) Cnzval = Array{Tv}(Cnnz) qftranspose!(SparseMatrixCSC(Cm, Cn, Ccolptr, Crowval, Cnzval), A, q, f) end -transpose{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}) = qftranspose(A, 1:A.n, Base.IdFun()) -ctranspose{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}) = qftranspose(A, 1:A.n, Base.ConjFun()) +transpose{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}) = qftranspose(A, 1:A.n, identity) +ctranspose{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}) = qftranspose(A, 1:A.n, conj) "See `qftranspose`" ftranspose{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}, f) = qftranspose(A, 1:A.n, f) @@ -2932,11 +2932,11 @@ function blkdiag(X::SparseMatrixCSC...) 
end ## Structure query functions -issymmetric(A::SparseMatrixCSC) = is_hermsym(A, IdFun()) +issymmetric(A::SparseMatrixCSC) = is_hermsym(A, identity) -ishermitian(A::SparseMatrixCSC) = is_hermsym(A, ConjFun()) +ishermitian(A::SparseMatrixCSC) = is_hermsym(A, conj) -function is_hermsym(A::SparseMatrixCSC, check::Func) +function is_hermsym(A::SparseMatrixCSC, check::Function) m, n = size(A) if m != n; return false; end diff --git a/base/sparse/sparsevector.jl b/base/sparse/sparsevector.jl index 02303a8f987d4..cf59fedb6e072 100644 --- a/base/sparse/sparsevector.jl +++ b/base/sparse/sparsevector.jl @@ -1168,8 +1168,8 @@ vecnorm(x::AbstractSparseVector, p::Real=2) = vecnorm(nonzeros(x), p) # Transpose # (The only sparse matrix structure in base is CSC, so a one-row sparse matrix is worse than dense) -transpose(x::SparseVector) = _ct(IdFun(), x) -ctranspose(x::SparseVector) = _ct(ConjFun(), x) +transpose(x::SparseVector) = _ct(identity, x) +ctranspose(x::SparseVector) = _ct(conj, x) function _ct{T}(f, x::SparseVector{T}) isempty(x) && return Array(T, 1, 0) A = zeros(T, 1, length(x)) diff --git a/test/bitarray.jl b/test/bitarray.jl index 3429fe46c51ba..f7f1ac58286e8 100644 --- a/test/bitarray.jl +++ b/test/bitarray.jl @@ -1104,7 +1104,10 @@ q[[1,3]] = true # map! r = falses(4) @test map!(~, r, p) == map!(x->~x, r, p) == ~p == r +@test map!(!, r, p) == map!(x->!x, r, p) == ~p == r @test map!(identity, r, p) == map!(x->x, r, p) == p == r +@test map!(zero, r, p) == map!(x->false, r, p) == falses(4) == r +@test map!(one, r, p) == map!(x->true, r, p) == trues(4) == r @test map!(&, r, p, q) == map!((x,y)->x&y, r, p, q) == p & q == r @test map!(|, r, p, q) == map!((x,y)->x|y, r, p, q) == p | q == r diff --git a/test/functors.jl b/test/functors.jl index c722323f30ce2..7e7819861dc21 100644 --- a/test/functors.jl +++ b/test/functors.jl @@ -2,31 +2,16 @@ ## Testing functors and specialization-by-value -for op in (identity, abs, abs2, exp, log) - @test Base.specialized_unary(op)(3) == Base.specialized_unary(x->op(x))(3) == op(3) - @test Base.specialized_unary(op)(-5+im) == Base.specialized_unary(x->op(x))(-5+im) == op(-5+im) -end for op in (+, -, *, /, \, div, ^, &, |) @test Base.specialized_binary(op)(2,10) == Base.specialized_binary((x,y)->op(x,y))(2,10) == op(2,10) end -for op in (!, ~, identity) - @test Base.specialized_bitwise_unary(op)(true) == Base.specialized_bitwise_unary(x->op(x))(true) == op(true) - @test Base.specialized_bitwise_unary(op)(false) == Base.specialized_bitwise_unary(x->op(x))(false) == op(false) -end -@test Base.specialized_bitwise_unary(~)(0x123456789abcdef) == Base.specialized_bitwise_unary(x->~(x))(0x123456789abcdef) == ~(0x123456789abcdef) -@test Base.specialized_bitwise_unary(identity)(0x123456789abcdef) == Base.specialized_bitwise_unary(x->identity(x))(0x123456789abcdef) == (0x123456789abcdef) - for op in (&, *, min, |, max, $, !=, >=, ^, <=, ==, <, >) for p in (true, false), q in (true, false) @test Base.specialized_bitwise_binary(op)(p, q) == Base.specialized_bitwise_binary((x,y)->op(x,y))(p, q) == op(p, q) end end -for t in (true, false), f in (true, false) - functor = Base.BitFunctorUnary{t, f}() - @test (functor(0b10) & 0b11) == Int(t)<<1 | Int(f) -end for tt in (true, false), tf in (true, false), ft in (true, false), ff in (true, false) functor = Base.BitFunctorBinary{tt,tf,ft,ff}() @test (functor(0b1100, 0b1010) & 0b1111) == (Int(tt)<<3 | Int(tf)<<2 | Int(ft)<<1 | Int(ff)) diff --git a/test/reducedim.jl b/test/reducedim.jl index 1c74fa5b3959c..6775ed1de5028 100644 --- 
a/test/reducedim.jl +++ b/test/reducedim.jl @@ -96,8 +96,8 @@ A = reshape(1:6, 3, 2) @test typeof(@inferred(Base.sumabs([1.0+1.0im], 1))) == Vector{Float64} @test typeof(@inferred(Base.sumabs2([1.0+1.0im], 1))) == Vector{Float64} @test typeof(@inferred(prod([1.0+1.0im], 1))) == Vector{Complex128} -@test typeof(@inferred(Base.prod(Base.AbsFun(), [1.0+1.0im], 1))) == Vector{Float64} -@test typeof(@inferred(Base.prod(Base.Abs2Fun(), [1.0+1.0im], 1))) == Vector{Float64} +@test typeof(@inferred(Base.prod(abs, [1.0+1.0im], 1))) == Vector{Float64} +@test typeof(@inferred(Base.prod(abs2, [1.0+1.0im], 1))) == Vector{Float64} # Heterogeneously typed arrays @test sum(Union{Float32, Float64}[1.0], 1) == [1.0] diff --git a/test/tuple.jl b/test/tuple.jl index 7d6d8f318f1b1..bb9fa02c0e7d8 100644 --- a/test/tuple.jl +++ b/test/tuple.jl @@ -145,12 +145,12 @@ foo(x, y, z) = x + y + z @test any((true,true,false)) === true @test any((true,true,true)) === true -@test @inferred(ntuple(Base.Abs2Fun(), Val{0})) == () -@test @inferred(ntuple(Base.Abs2Fun(), Val{2})) == (1, 4) -@test @inferred(ntuple(Base.Abs2Fun(), Val{3})) == (1, 4, 9) -@test @inferred(ntuple(Base.Abs2Fun(), Val{4})) == (1, 4, 9, 16) -@test @inferred(ntuple(Base.Abs2Fun(), Val{5})) == (1, 4, 9, 16, 25) -@test @inferred(ntuple(Base.Abs2Fun(), Val{6})) == (1, 4, 9, 16, 25, 36) +@test @inferred(ntuple(abs2, Val{0})) == () +@test @inferred(ntuple(abs2, Val{2})) == (1, 4) +@test @inferred(ntuple(abs2, Val{3})) == (1, 4, 9) +@test @inferred(ntuple(abs2, Val{4})) == (1, 4, 9, 16) +@test @inferred(ntuple(abs2, Val{5})) == (1, 4, 9, 16, 25) +@test @inferred(ntuple(abs2, Val{6})) == (1, 4, 9, 16, 25, 36) # issue #12854 @test_throws TypeError ntuple(identity, Val{1:2}) From 812e2d6085a90daea972f9b1ad4a63b6b88f1336 Mon Sep 17 00:00:00 2001 From: Martin Holters Date: Wed, 6 Apr 2016 14:38:14 +0200 Subject: [PATCH 3/9] Deprecate binary functors Deprecate AndFun, OrFun, XorFun, AddFun, DotAddFun, SubFun, DotSubFun, MulFun, DotMulFun, RDivFun, DotRDivFun, LDivFun, IDivFun, DotIDivFun, ModFun, RemFun, DotRemFun, PowFun, MaxFun, MinFun, LessFun, MoreFun, DotLSFun, and DotRSFun. Rewrite specialization of BitArray map! to use function types and a helper type BitChunkFunctor for operations without a named function. 
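For orientation, a rough sketch of the migration this series enables (illustrative
example, not part of the commit): callers pass plain functions where they used to
construct functor instances, and specializations dispatch on the functions'
singleton types, e.g.

    mapreduce(abs2, +, A)             # was mapreduce(Abs2Fun(), AddFun(), A)
    Base.promote_op(*, Float64, Int)  # was promote_op(MulFun(), Float64, Int)

The old names keep working through deprecations of the form
@deprecate_binding AddFun typeof(+) plus (::Type{typeof(+)})() = +, so AddFun()
still evaluates to the function +.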
--- base/arraymath.jl | 34 ++------ base/bitarray.jl | 38 +++++---- base/broadcast.jl | 13 ++- base/char.jl | 6 +- base/dates/arithmetic.jl | 24 ++---- base/deprecated.jl | 26 ++++++ base/functors.jl | 153 +----------------------------------- base/linalg.jl | 2 +- base/linalg/dense.jl | 8 +- base/linalg/diagonal.jl | 4 +- base/linalg/matmul.jl | 32 ++++---- base/operators.jl | 4 +- base/promotion.jl | 4 +- base/reduce.jl | 96 +++++++++++----------- base/reducedim.jl | 58 ++++++-------- base/sparse.jl | 2 +- base/sparse/sparsematrix.jl | 26 +++--- base/sparse/sparsevector.jl | 30 +++---- base/statistics.jl | 4 +- test/arrayops.jl | 6 +- test/core.jl | 4 +- test/functors.jl | 15 ---- test/linalg/matmul.jl | 2 +- 23 files changed, 211 insertions(+), 380 deletions(-) diff --git a/base/arraymath.jl b/base/arraymath.jl index 7427b73c18636..799079cbb7178 100644 --- a/base/arraymath.jl +++ b/base/arraymath.jl @@ -56,37 +56,31 @@ promote_array_type(F, ::Type{Bool}, ::Type{Bool}) = promote_op(F, Bool, Bool) .^(X::AbstractArray, y::Number ) = reshape([ x ^ y for x in X ], size(X)) -for (f,F) in ((:+, AddFun()), - (:-, SubFun()), - (:div, IDivFun()), - (:mod, ModFun()), - (:&, AndFun()), - (:|, OrFun()), - (:$, XorFun())) +for f in (:+, :-, :div, :mod, :&, :|, :$) @eval begin function ($f){S,T}(A::Range{S}, B::Range{T}) - F = similar(A, promote_op($F,S,T), promote_shape(size(A),size(B))) + F = similar(A, promote_op($f,S,T), promote_shape(size(A),size(B))) for (iF, iA, iB) in zip(eachindex(F), eachindex(A), eachindex(B)) @inbounds F[iF] = ($f)(A[iA], B[iB]) end return F end function ($f){S,T}(A::AbstractArray{S}, B::Range{T}) - F = similar(A, promote_op($F,S,T), promote_shape(size(A),size(B))) + F = similar(A, promote_op($f,S,T), promote_shape(size(A),size(B))) for (iF, iA, iB) in zip(eachindex(F), eachindex(A), eachindex(B)) @inbounds F[iF] = ($f)(A[iA], B[iB]) end return F end function ($f){S,T}(A::Range{S}, B::AbstractArray{T}) - F = similar(B, promote_op($F,S,T), promote_shape(size(A),size(B))) + F = similar(B, promote_op($f,S,T), promote_shape(size(A),size(B))) for (iF, iA, iB) in zip(eachindex(F), eachindex(A), eachindex(B)) @inbounds F[iF] = ($f)(A[iA], B[iB]) end return F end function ($f){S,T}(A::AbstractArray{S}, B::AbstractArray{T}) - F = similar(A, promote_op($F,S,T), promote_shape(size(A),size(B))) + F = similar(A, promote_op($f,S,T), promote_shape(size(A),size(B))) for (iF, iA, iB) in zip(eachindex(F), eachindex(A), eachindex(B)) @inbounds F[iF] = ($f)(A[iA], B[iB]) end @@ -94,29 +88,17 @@ for (f,F) in ((:+, AddFun()), end end end -for (f,F) in ((:.+, DotAddFun()), - (:.-, DotSubFun()), - (:.*, DotMulFun()), - (:.÷, DotIDivFun()), - (:.%, DotRemFun()), - (:.<<, DotLSFun()), - (:.>>, DotRSFun()), - (:div, IDivFun()), - (:mod, ModFun()), - (:rem, RemFun()), - (:&, AndFun()), - (:|, OrFun()), - (:$, XorFun())) +for f in (:.+, :.-, :.*, :.÷, :.%, :.<<, :.>>, :div, :mod, :rem, :&, :|, :$) @eval begin function ($f){T}(A::Number, B::AbstractArray{T}) - F = similar(B, promote_array_type($F,typeof(A),T)) + F = similar(B, promote_array_type($f,typeof(A),T)) for (iF, iB) in zip(eachindex(F), eachindex(B)) @inbounds F[iF] = ($f)(A, B[iB]) end return F end function ($f){T}(A::AbstractArray{T}, B::Number) - F = similar(A, promote_array_type($F,typeof(B),T)) + F = similar(A, promote_array_type($f,typeof(B),T)) for (iF, iA) in zip(eachindex(F), eachindex(A)) @inbounds F[iF] = ($f)(A[iA], B) end diff --git a/base/bitarray.jl b/base/bitarray.jl index a0135bc5e38ea..25775041244b9 100644 --- 
a/base/bitarray.jl +++ b/base/bitarray.jl @@ -1047,12 +1047,11 @@ for f in (:+, :-) return r end end -for (f,F) in ((:.+, DotAddFun()), - (:.-, DotSubFun())) +for (f) in (:.+, :.-) for (arg1, arg2, T, fargs) in ((:(B::BitArray), :(x::Bool) , Int , :(b, x)), - (:(B::BitArray), :(x::Number) , :(promote_array_type($F, typeof(x), Bool)), :(b, x)), + (:(B::BitArray), :(x::Number) , :(promote_array_type($f, typeof(x), Bool)), :(b, x)), (:(x::Bool) , :(B::BitArray), Int , :(x, b)), - (:(x::Number) , :(B::BitArray), :(promote_array_type($F, typeof(x), Bool)), :(x, b))) + (:(x::Number) , :(B::BitArray), :(promote_array_type($f, typeof(x), Bool)), :(x, b))) @eval function ($f)($arg1, $arg2) r = Array($T, size(B)) bi = start(B) @@ -1091,7 +1090,7 @@ function div(x::Bool, B::BitArray) end function div(x::Number, B::BitArray) all(B) || throw(DivideError()) - pt = promote_array_type(IDivFun(), typeof(x), Bool) + pt = promote_array_type(div, typeof(x), Bool) y = div(x, true) reshape(pt[ y for i = 1:length(B) ], size(B)) end @@ -1112,16 +1111,15 @@ function mod(x::Bool, B::BitArray) end function mod(x::Number, B::BitArray) all(B) || throw(DivideError()) - pt = promote_array_type(ModFun(), typeof(x), Bool) + pt = promote_array_type(mod, typeof(x), Bool) y = mod(x, true) reshape(pt[ y for i = 1:length(B) ], size(B)) end -for (f,F) in ((:div, IDivFun()), - (:mod, ModFun())) +for f in (:div, :mod) @eval begin function ($f)(B::BitArray, x::Number) - F = Array(promote_array_type($F, typeof(x), Bool), size(B)) + F = Array(promote_array_type($f, typeof(x), Bool), size(B)) for i = 1:length(F) F[i] = ($f)(B[i], x) end @@ -1676,7 +1674,7 @@ end ## Reductions ## -sum(A::BitArray, region) = reducedim(AddFun(), A, region) +sum(A::BitArray, region) = reducedim(+, A, region) sum(B::BitArray) = countnz(B) function all(B::BitArray) @@ -1711,15 +1709,27 @@ maximum(B::BitArray) = isempty(B) ? throw(ArgumentError("argument must be non-em # arrays since there can be a 64x speedup by working at the level of Int64 # instead of looping bit-by-bit. 
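# Illustrative sketch (not lines from the diff) of the helper introduced a few lines
# below: BitChunkFunctor wraps a closure whose chunk-level form has no named Base
# function. The closure is applied to whole UInt64 chunks, so it must be a pure
# bitwise formula that agrees with the Bool operation on every bit, e.g. for >=:
ge_chunks = Base.BitChunkFunctor((p, q) -> p | ~q)  # truth table TTFT, like >= on Bool
ge_chunks(0b1100, 0b1010) & 0b1111                  # == 0b1101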
-map(f::Function, A::BitArray, B::BitArray) = map(specialized_bitwise_binary(f), A, B) map(f::Function, A::BitArray) = map!(f, similar(A), A) -map(f::BitFunctorBinary, A::BitArray, B::BitArray) = map!(f, similar(A), A, B) +map(f::Function, A::BitArray, B::BitArray) = map!(f, similar(A), A, B) map!(f, A::BitArray) = map!(f, A, A) map!(f::typeof(!), dest::BitArray, A::BitArray) = map!(~, dest, A) map!(f::typeof(zero), dest::BitArray, A::BitArray) = fill!(dest, false) map!(f::typeof(one), dest::BitArray, A::BitArray) = fill!(dest, true) -map!(f::Function, dest::BitArray, A::BitArray, B::BitArray) = map!(specialized_bitwise_binary(f), dest, A, B) + +immutable BitChunkFunctor{F<:Function} + f::F +end +(f::BitChunkFunctor)(x, y) = f.f(x,y) + +map!(f::Union{typeof(*), typeof(min)}, dest::BitArray, A::BitArray, B::BitArray) = map!(&, dest, A, B) +map!(f::typeof(max), dest::BitArray, A::BitArray, B::BitArray) = map!(|, dest, A, B) +map!(f::typeof(!=), dest::BitArray, A::BitArray, B::BitArray) = map!($, dest, A, B) +map!(f::Union{typeof(>=), typeof(^)}, dest::BitArray, A::BitArray, B::BitArray) = map!(BitChunkFunctor((p, q) -> p | ~q), dest, A, B) +map!(f::typeof(<=), dest::BitArray, A::BitArray, B::BitArray) = map!(BitChunkFunctor((p, q) -> ~p | q), dest, A, B) +map!(f::typeof(==), dest::BitArray, A::BitArray, B::BitArray) = map!(BitChunkFunctor((p, q) -> ~(p $ q)), dest, A, B) +map!(f::typeof(<), dest::BitArray, A::BitArray, B::BitArray) = map!(BitChunkFunctor((p, q) -> ~p & q), dest, A, B) +map!(f::typeof(>), dest::BitArray, A::BitArray, B::BitArray) = map!(BitChunkFunctor((p, q) -> p & ~q), dest, A, B) # If we were able to specialize the function to a known bitwise operation, # map across the chunks. Otherwise, fall-back to the AbstractArray method that @@ -1733,7 +1743,7 @@ function map!(f::Union{typeof(identity), typeof(~)}, dest::BitArray, A::BitArray dest.chunks[end] = f(A.chunks[end]) & _msk_end(A) dest end -function map!(f::BitFunctorBinary, dest::BitArray, A::BitArray, B::BitArray) +function map!(f::Union{BitChunkFunctor, typeof(&), typeof(|), typeof($)}, dest::BitArray, A::BitArray, B::BitArray) size(A) == size(B) == size(dest) || throw(DimensionMismatch("sizes of dest, A, and B must all match")) isempty(A) && return dest for i=1:length(A.chunks)-1 diff --git a/base/broadcast.jl b/base/broadcast.jl index 1d34020a124d5..0c1a4c4fc2025 100644 --- a/base/broadcast.jl +++ b/base/broadcast.jl @@ -4,7 +4,6 @@ module Broadcast using ..Cartesian using Base: promote_op, promote_eltype, promote_eltype_op, @get!, _msk_end, unsafe_bitgetindex -using Base: AddFun, SubFun, MulFun, LDivFun, RDivFun, PowFun import Base: .+, .-, .*, ./, .\, .//, .==, .<, .!=, .<=, .÷, .%, .<<, .>>, .^ export broadcast, broadcast!, broadcast_function, broadcast!_function, bitbroadcast export broadcast_getindex, broadcast_setindex! @@ -277,24 +276,24 @@ end .<<(A::AbstractArray, B::AbstractArray) = broadcast(<<, A, B) .>>(A::AbstractArray, B::AbstractArray) = broadcast(>>, A, B) -eltype_plus(As::AbstractArray...) = promote_eltype_op(AddFun(), As...) +eltype_plus(As::AbstractArray...) = promote_eltype_op(+, As...) .+(As::AbstractArray...) = broadcast!(+, Array(eltype_plus(As...), broadcast_shape(As...)), As...) function .-(A::AbstractArray, B::AbstractArray) - broadcast!(-, Array(promote_op(SubFun(), eltype(A), eltype(B)), broadcast_shape(A,B)), A, B) + broadcast!(-, Array(promote_op(-, eltype(A), eltype(B)), broadcast_shape(A,B)), A, B) end -eltype_mul(As::AbstractArray...) = promote_eltype_op(MulFun(), As...) 
+eltype_mul(As::AbstractArray...) = promote_eltype_op(*, As...) .*(As::AbstractArray...) = broadcast!(*, Array(eltype_mul(As...), broadcast_shape(As...)), As...) function ./(A::AbstractArray, B::AbstractArray) - broadcast!(/, Array(promote_op(RDivFun(), eltype(A), eltype(B)), broadcast_shape(A, B)), A, B) + broadcast!(/, Array(promote_op(/, eltype(A), eltype(B)), broadcast_shape(A, B)), A, B) end function .\(A::AbstractArray, B::AbstractArray) - broadcast!(\, Array(promote_op(LDivFun(), eltype(A), eltype(B)), broadcast_shape(A, B)), A, B) + broadcast!(\, Array(promote_op(\, eltype(A), eltype(B)), broadcast_shape(A, B)), A, B) end typealias RatIntT{T<:Integer} Union{Type{Rational{T}},Type{T}} @@ -308,7 +307,7 @@ function .//(A::AbstractArray, B::AbstractArray) end function .^(A::AbstractArray, B::AbstractArray) - broadcast!(^, Array(promote_op(PowFun(), eltype(A), eltype(B)), broadcast_shape(A, B)), A, B) + broadcast!(^, Array(promote_op(^, eltype(A), eltype(B)), broadcast_shape(A, B)), A, B) end ## element-wise comparison operators returning BitArray ## diff --git a/base/char.jl b/base/char.jl index c99498582b349..22196dbfdeee4 100644 --- a/base/char.jl +++ b/base/char.jl @@ -42,9 +42,9 @@ isless(x::Integer, y::Char) = isless(x, UInt32(y)) +(x::Char, y::Integer) = Char(Int32(x) + Int32(y)) +(x::Integer, y::Char) = y + x -Base.promote_op{I<:Integer}(::Base.SubFun, ::Type{Char}, ::Type{I}) = Char -Base.promote_op{I<:Integer}(::Base.AddFun, ::Type{Char}, ::Type{I}) = Char -Base.promote_op{I<:Integer}(::Base.AddFun, ::Type{I}, ::Type{Char}) = Char +Base.promote_op{I<:Integer}(::typeof(-), ::Type{Char}, ::Type{I}) = Char +Base.promote_op{I<:Integer}(::typeof(+), ::Type{Char}, ::Type{I}) = Char +Base.promote_op{I<:Integer}(::typeof(+), ::Type{I}, ::Type{Char}) = Char bswap(x::Char) = Char(bswap(UInt32(x))) diff --git a/base/dates/arithmetic.jl b/base/dates/arithmetic.jl index be55e618396d8..947b0eef4f3ea 100644 --- a/base/dates/arithmetic.jl +++ b/base/dates/arithmetic.jl @@ -97,26 +97,18 @@ end # promotion rules -for (op,F) in ((:+, Base.AddFun), - (:-, Base.SubFun), - (:.+, Base.DotAddFun), - (:.-, Base.DotSubFun)) +for op in (:+, :-, :.+, :.-) @eval begin - Base.promote_op{P<:Period}(::$F, ::Type{P}, ::Type{P}) = P - Base.promote_op{P1<:Period,P2<:Period}(::$F, ::Type{P1}, ::Type{P2}) = CompoundPeriod - Base.promote_op{D<:Date}(::$F, ::Type{D}, ::Type{D}) = Day - Base.promote_op{D<:DateTime}(::$F, ::Type{D}, ::Type{D}) = Millisecond + Base.promote_op{P<:Period}(::typeof($op), ::Type{P}, ::Type{P}) = P + Base.promote_op{P1<:Period,P2<:Period}(::typeof($op), ::Type{P1}, ::Type{P2}) = CompoundPeriod + Base.promote_op{D<:Date}(::typeof($op), ::Type{D}, ::Type{D}) = Day + Base.promote_op{D<:DateTime}(::typeof($op), ::Type{D}, ::Type{D}) = Millisecond end end -for (op,F) in ((:/, Base.RDivFun), - (:%, Base.RemFun), - (:div, Base.IDivFun), - (:mod, Base.ModFun), - (:./, Base.DotRDivFun), - (:.%, Base.DotRemFun)) +for op in (:/, :%, :div, :mod, :./, :.%) @eval begin - Base.promote_op{P<:Period}(::$F, ::Type{P}, ::Type{P}) = typeof($op(1,1)) - Base.promote_op{P<:Period,R<:Real}(::$F, ::Type{P}, ::Type{R}) = P + Base.promote_op{P<:Period}(::typeof($op), ::Type{P}, ::Type{P}) = typeof($op(1,1)) + Base.promote_op{P<:Period,R<:Real}(::typeof($op), ::Type{P}, ::Type{R}) = P end end diff --git a/base/deprecated.jl b/base/deprecated.jl index f9c301e1f7980..250e739084479 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -1027,6 +1027,30 @@ for (Fun, func) in [(:IdFun, :identity), (:ExpFun, :exp), 
(:LogFun, :log), (:ConjFun, :conj), + (:AndFun, :&), + (:OrFun, :|), + (:XorFun, :$), + (:AddFun, :+), + (:DotAddFun, :.+), + (:SubFun, :-), + (:DotSubFun, :.-), + (:MulFun, :*), + (:DotMulFun, :.*), + (:RDivFun, :/), + (:DotRDivFun, :./), + (:LDivFun, :\), + (:IDivFun, :div), + (:DotIDivFun, :.÷), + (:ModFun, :mod), + (:RemFun, :rem), + (:DotRemFun, :.%), + (:PowFun, :^), + (:MaxFun, :scalarmax), + (:MinFun, :scalarmin), + (:LessFun, :<), + (:MoreFun, :>), + (:DotLSFun, :.<<), + (:DotRSFun, :.>>), ] @eval begin @deprecate_binding $(Fun) typeof($(func)) @@ -1034,7 +1058,9 @@ for (Fun, func) in [(:IdFun, :identity), end end @deprecate specialized_unary(f::Function) f +@deprecate specialized_binary(f::Function) f @deprecate specialized_bitwise_unary(f::Function) f +@deprecate specialized_bitwise_binary(f::Function) f # During the 0.5 development cycle, do not add any deprecations below this line diff --git a/base/functors.jl b/base/functors.jl index f83efa50c3ca2..1ccc1b3c8d37a 100644 --- a/base/functors.jl +++ b/base/functors.jl @@ -10,86 +10,6 @@ abstract Func{N} -immutable AndFun <: Func{2} end -(::AndFun)(x, y) = x & y - -immutable OrFun <: Func{2} end -(::OrFun)(x, y) = x | y - -immutable XorFun <: Func{2} end -(::XorFun)(x, y) = x $ y - -immutable AddFun <: Func{2} end -(::AddFun)(x, y) = x + y - -immutable DotAddFun <: Func{2} end -(::DotAddFun)(x, y) = x .+ y - -immutable SubFun <: Func{2} end -(::SubFun)(x, y) = x - y - -immutable DotSubFun <: Func{2} end -(::DotSubFun)(x, y) = x .- y - -immutable MulFun <: Func{2} end -(::MulFun)(x, y) = x * y - -immutable DotMulFun <: Func{2} end -(::DotMulFun)(x, y) = x .* y - -immutable RDivFun <: Func{2} end -(::RDivFun)(x, y) = x / y - -immutable DotRDivFun <: Func{2} end -(::DotRDivFun)(x, y) = x ./ y - -immutable LDivFun <: Func{2} end -(::LDivFun)(x, y) = x \ y - -immutable IDivFun <: Func{2} end -(::IDivFun)(x, y) = div(x, y) - -immutable DotIDivFun <: Func{2} end -(::DotIDivFun)(x, y) = x .÷ y - -immutable ModFun <: Func{2} end -(::ModFun)(x, y) = mod(x, y) - -immutable RemFun <: Func{2} end -(::RemFun)(x, y) = rem(x, y) - -immutable DotRemFun <: Func{2} end -(::DotRemFun)(x, y) = x .% y - -immutable PowFun <: Func{2} end -(::PowFun)(x, y) = x ^ y - -immutable MaxFun <: Func{2} end -(::MaxFun)(x, y) = scalarmax(x,y) - -immutable MinFun <: Func{2} end -(::MinFun)(x, y) = scalarmin(x, y) - -immutable LessFun <: Func{2} end -(::LessFun)(x, y) = x < y - -immutable MoreFun <: Func{2} end -(::MoreFun)(x, y) = x > y - -immutable DotLSFun <: Func{2} end -(::DotLSFun)(x, y) = x .<< y - -immutable DotRSFun <: Func{2} end -(::DotRSFun)(x, y) = x .>> y - -# a fallback unspecialized function object that allows code using -# function objects to not care whether they were able to specialize on -# the function value or not -immutable UnspecializedFun{N} <: Func{N} - f::Function -end -(f::UnspecializedFun{2})(x, y) = f.f(x,y) - # Special purpose functors immutable Predicate{F} <: Func{1} @@ -105,75 +25,4 @@ end # More promote_op rules -promote_op{T<:Integer}(::PowFun, ::Type{Bool}, ::Type{T}) = Bool - -#### Bitwise operators #### - -# BitFunctors are functions that behave in the same bit-wise manner when applied -# to individual bits as well as integers, allowing them to be used in BitArrays - -# Note that there are 16 possible pure two-argument logical functions, -# of which eight don't exist as a single function in Base (but six of those are trivial): -############################################################################## -## p = TTFF ## p = 
TTFF ## -## q = TFTF function bit-op ## q = TFTF function bit-op ## -## -------------------------------- ## --------------------------------- ## -## TTTT (true) p | ~p ## FFFF (false) p & ~p ## -## TTTF |, max p | q ## FFFT ??? ~(p | q) ## -## TTFT >=, ^ p | ~q ## FFTF < ~p & q ## -## TTFF (p) p ## FFTT (~p) ~p ## -## TFTT <= ~p | q ## FTFF > p & ~q ## -## TFTF (q) q ## FTFT (~q) ~q ## -## TFFT == ~(p $ q) ## FTTF $, != p $ q ## -## TFFF &, *, min p & q ## FTTT ??? ~(p & q) ## -############################################################################## - - -immutable BitFunctorBinary{TT,TF,FT,FF} <: Func{2} end -(::BitFunctorBinary{true, true, true, true })(p, q) = p | ~p -(::BitFunctorBinary{true, true, true, false})(p, q) = p | q -(::BitFunctorBinary{true, true, false, true })(p, q) = p | ~q -(::BitFunctorBinary{true, true, false, false})(p, q) = p -(::BitFunctorBinary{true, false, true, true })(p, q) = ~p | q -(::BitFunctorBinary{true, false, true, false})(p, q) = q -(::BitFunctorBinary{true, false, false, true })(p, q) = ~(p $ q) -(::BitFunctorBinary{true, false, false, false})(p, q) = p & q - -(::BitFunctorBinary{false, false, false, false})(p, q) = p & ~p -(::BitFunctorBinary{false, false, false, true })(p, q) = ~(p | q) -(::BitFunctorBinary{false, false, true, false})(p, q) = ~p & q -(::BitFunctorBinary{false, false, true, true })(p, q) = ~p -(::BitFunctorBinary{false, true, false, false})(p, q) = p & ~q -(::BitFunctorBinary{false, true, false, true })(p, q) = ~q -(::BitFunctorBinary{false, true, true, false})(p, q) = p $ q -(::BitFunctorBinary{false, true, true, true })(p, q) = ~(p & q) - -# Specializations by value - -function specialized_binary(f::Function) - is(f, +) ? AddFun() : - is(f, -) ? SubFun() : - is(f, *) ? MulFun() : - is(f, /) ? RDivFun() : - is(f, \) ? LDivFun() : - is(f, ^) ? PowFun() : - is(f, &) ? AndFun() : - is(f, |) ? OrFun() : - is(f, %) ? RemFun() : - is(f, rem) ? RemFun() : - is(f, ÷) ? IDivFun() : - is(f, div) ? IDivFun() : - UnspecializedFun{2}(f) -end - -function specialized_bitwise_binary(f::Function) - is(f, &) | is(f, *) | is(f, min) ? BitFunctorBinary{true, false, false, false}() : - is(f, |) | is(f, max) ? BitFunctorBinary{true, true, true, false}() : - is(f, $) | is(f, !=) ? BitFunctorBinary{false, true, true, false}() : - is(f, >=) | is(f, ^) ? BitFunctorBinary{true, true, false, true }() : - is(f, <=) ? BitFunctorBinary{true, false, true, true }() : - is(f, ==) ? BitFunctorBinary{true, false, false, true }() : - is(f, <) ? BitFunctorBinary{false, false, true, false}() : - is(f, >) ? 
BitFunctorBinary{false, true, false, false}() : - UnspecializedFun{2}(f) -end +promote_op{T<:Integer}(::typeof(^), ::Type{Bool}, ::Type{T}) = Bool diff --git a/base/linalg.jl b/base/linalg.jl index 631722bfde676..33e7368fd10ca 100644 --- a/base/linalg.jl +++ b/base/linalg.jl @@ -10,7 +10,7 @@ import Base: USE_BLAS64, abs, big, ceil, conj, convert, copy, copy!, copy_transp imag, inv, isapprox, kron, ndims, parent, power_by_squaring, print_matrix, promote_rule, real, round, setindex!, show, similar, size, transpose, transpose!, trunc -using Base: promote_op, MulFun +using Base: promote_op export # Modules diff --git a/base/linalg/dense.jl b/base/linalg/dense.jl index f8ca21b5ef1f1..2dc3d593f056b 100644 --- a/base/linalg/dense.jl +++ b/base/linalg/dense.jl @@ -36,19 +36,19 @@ isposdef(x::Number) = imag(x)==0 && real(x) > 0 stride1(x::Array) = 1 stride1(x::StridedVector) = stride(x, 1)::Int -import Base: mapreduce_seq_impl, AddFun +import Base: mapreduce_seq_impl -mapreduce_seq_impl{T<:BlasReal}(::typeof(abs), ::AddFun, a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) = +mapreduce_seq_impl{T<:BlasReal}(::typeof(abs), ::typeof(+), a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) = BLAS.asum(ilast-ifirst+1, pointer(a, ifirst), stride1(a)) -function mapreduce_seq_impl{T<:BlasReal}(::typeof(abs2), ::AddFun, a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) +function mapreduce_seq_impl{T<:BlasReal}(::typeof(abs2), ::typeof(+), a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) n = ilast-ifirst+1 px = pointer(a, ifirst) incx = stride1(a) BLAS.dot(n, px, incx, px, incx) end -function mapreduce_seq_impl{T<:BlasComplex}(::typeof(abs2), ::AddFun, a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) +function mapreduce_seq_impl{T<:BlasComplex}(::typeof(abs2), ::typeof(+), a::Union{Array{T},StridedVector{T}}, ifirst::Int, ilast::Int) n = ilast-ifirst+1 px = pointer(a, ifirst) incx = stride1(a) diff --git a/base/linalg/diagonal.jl b/base/linalg/diagonal.jl index fe537cf305517..d08e716dfb253 100644 --- a/base/linalg/diagonal.jl +++ b/base/linalg/diagonal.jl @@ -107,9 +107,9 @@ end *(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag .* Db.diag) *(D::Diagonal, V::AbstractVector) = D.diag .* V *(A::AbstractMatrix, D::Diagonal) = - scale!(similar(A, promote_op(MulFun(), eltype(A), eltype(D.diag))), A, D.diag) + scale!(similar(A, promote_op(*, eltype(A), eltype(D.diag))), A, D.diag) *(D::Diagonal, A::AbstractMatrix) = - scale!(similar(A, promote_op(MulFun(), eltype(A), eltype(D.diag))), D.diag, A) + scale!(similar(A, promote_op(*, eltype(A), eltype(D.diag))), D.diag, A) A_mul_B!(A::Diagonal,B::AbstractMatrix) = scale!(A.diag,B) At_mul_B!(A::Diagonal,B::AbstractMatrix)= scale!(A.diag,B) diff --git a/base/linalg/matmul.jl b/base/linalg/matmul.jl index 4423cacaa4152..74fb7ca424dd2 100644 --- a/base/linalg/matmul.jl +++ b/base/linalg/matmul.jl @@ -76,11 +76,11 @@ At_mul_B{T<:BlasComplex}(x::StridedVector{T}, y::StridedVector{T}) = [BLAS.dotu( # Matrix-vector multiplication function (*){T<:BlasFloat,S}(A::StridedMatrix{T}, x::StridedVector{S}) - TS = promote_op(MulFun(),arithtype(T),arithtype(S)) + TS = promote_op(*,arithtype(T),arithtype(S)) A_mul_B!(similar(x, TS, size(A,1)), A, convert(AbstractVector{TS}, x)) end function (*){T,S}(A::AbstractMatrix{T}, x::AbstractVector{S}) - TS = promote_op(MulFun(),arithtype(T),arithtype(S)) + TS = promote_op(*,arithtype(T),arithtype(S)) A_mul_B!(similar(x,TS,size(A,1)),A,x) end (*)(A::AbstractVector, B::AbstractMatrix) = 
reshape(A,length(A),1)*B @@ -99,22 +99,22 @@ end A_mul_B!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector) = generic_matvecmul!(y, 'N', A, x) function At_mul_B{T<:BlasFloat,S}(A::StridedMatrix{T}, x::StridedVector{S}) - TS = promote_op(MulFun(),arithtype(T),arithtype(S)) + TS = promote_op(*,arithtype(T),arithtype(S)) At_mul_B!(similar(x,TS,size(A,2)), A, convert(AbstractVector{TS}, x)) end function At_mul_B{T,S}(A::AbstractMatrix{T}, x::AbstractVector{S}) - TS = promote_op(MulFun(),arithtype(T),arithtype(S)) + TS = promote_op(*,arithtype(T),arithtype(S)) At_mul_B!(similar(x,TS,size(A,2)), A, x) end At_mul_B!{T<:BlasFloat}(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}) = gemv!(y, 'T', A, x) At_mul_B!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector) = generic_matvecmul!(y, 'T', A, x) function Ac_mul_B{T<:BlasFloat,S}(A::StridedMatrix{T}, x::StridedVector{S}) - TS = promote_op(MulFun(),arithtype(T),arithtype(S)) + TS = promote_op(*,arithtype(T),arithtype(S)) Ac_mul_B!(similar(x,TS,size(A,2)),A,convert(AbstractVector{TS},x)) end function Ac_mul_B{T,S}(A::AbstractMatrix{T}, x::AbstractVector{S}) - TS = promote_op(MulFun(),arithtype(T),arithtype(S)) + TS = promote_op(*,arithtype(T),arithtype(S)) Ac_mul_B!(similar(x,TS,size(A,2)), A, x) end @@ -125,7 +125,7 @@ Ac_mul_B!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector) = generic_m # Matrix-matrix multiplication function (*){T,S}(A::AbstractMatrix{T}, B::AbstractMatrix{S}) - TS = promote_op(MulFun(), arithtype(T), arithtype(S)) + TS = promote_op(*, arithtype(T), arithtype(S)) A_mul_B!(similar(B, TS, (size(A,1), size(B,2))), A, B) end A_mul_B!{T<:BlasFloat}(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) = gemm_wrapper!(C, 'N', 'N', A, B) @@ -142,14 +142,14 @@ end A_mul_B!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) = generic_matmatmul!(C, 'N', 'N', A, B) function At_mul_B{T,S}(A::AbstractMatrix{T}, B::AbstractMatrix{S}) - TS = promote_op(MulFun(),arithtype(T), arithtype(S)) + TS = promote_op(*,arithtype(T), arithtype(S)) At_mul_B!(similar(B, TS, (size(A,2), size(B,2))), A, B) end At_mul_B!{T<:BlasFloat}(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) = is(A,B) ? syrk_wrapper!(C, 'T', A) : gemm_wrapper!(C, 'T', 'N', A, B) At_mul_B!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) = generic_matmatmul!(C, 'T', 'N', A, B) function A_mul_Bt{T,S}(A::AbstractMatrix{T}, B::AbstractMatrix{S}) - TS = promote_op(MulFun(),arithtype(T), arithtype(S)) + TS = promote_op(*,arithtype(T), arithtype(S)) A_mul_Bt!(similar(B, TS, (size(A,1), size(B,1))), A, B) end A_mul_Bt!{T<:BlasFloat}(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) = is(A,B) ? 
syrk_wrapper!(C, 'N', A) : gemm_wrapper!(C, 'N', 'T', A, B) @@ -166,7 +166,7 @@ end A_mul_Bt!(C::AbstractVecOrMat, A::AbstractVecOrMat, B::AbstractVecOrMat) = generic_matmatmul!(C, 'N', 'T', A, B) function At_mul_Bt{T,S}(A::AbstractMatrix{T}, B::AbstractVecOrMat{S}) - TS = promote_op(MulFun(),arithtype(T), arithtype(S)) + TS = promote_op(*,arithtype(T), arithtype(S)) At_mul_Bt!(similar(B, TS, (size(A,2), size(B,1))), A, B) end At_mul_Bt!{T<:BlasFloat}(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) = gemm_wrapper!(C, 'T', 'T', A, B) @@ -175,7 +175,7 @@ At_mul_Bt!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) = generi Ac_mul_B{T<:BlasReal}(A::StridedMatrix{T}, B::StridedMatrix{T}) = At_mul_B(A, B) Ac_mul_B!{T<:BlasReal}(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) = At_mul_B!(C, A, B) function Ac_mul_B{T,S}(A::AbstractMatrix{T}, B::AbstractMatrix{S}) - TS = promote_op(MulFun(),arithtype(T), arithtype(S)) + TS = promote_op(*,arithtype(T), arithtype(S)) Ac_mul_B!(similar(B, TS, (size(A,2), size(B,2))), A, B) end Ac_mul_B!{T<:BlasComplex}(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) = is(A,B) ? herk_wrapper!(C,'C',A) : gemm_wrapper!(C,'C', 'N', A, B) @@ -184,13 +184,13 @@ Ac_mul_B!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) = generic A_mul_Bc{T<:BlasFloat,S<:BlasReal}(A::StridedMatrix{T}, B::StridedMatrix{S}) = A_mul_Bt(A, B) A_mul_Bc!{T<:BlasFloat,S<:BlasReal}(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{S}) = A_mul_Bt!(C, A, B) function A_mul_Bc{T,S}(A::AbstractMatrix{T}, B::AbstractMatrix{S}) - TS = promote_op(MulFun(),arithtype(T),arithtype(S)) + TS = promote_op(*,arithtype(T),arithtype(S)) A_mul_Bc!(similar(B,TS,(size(A,1),size(B,1))),A,B) end A_mul_Bc!{T<:BlasComplex}(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) = is(A,B) ? 
herk_wrapper!(C, 'N', A) : gemm_wrapper!(C, 'N', 'C', A, B) A_mul_Bc!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) = generic_matmatmul!(C, 'N', 'C', A, B) -Ac_mul_Bc{T,S}(A::AbstractMatrix{T}, B::AbstractMatrix{S}) = Ac_mul_Bc!(similar(B, promote_op(MulFun(),arithtype(T), arithtype(S)), (size(A,2), size(B,1))), A, B) +Ac_mul_Bc{T,S}(A::AbstractMatrix{T}, B::AbstractMatrix{S}) = Ac_mul_Bc!(similar(B, promote_op(*,arithtype(T), arithtype(S)), (size(A,2), size(B,1))), A, B) Ac_mul_Bc!{T<:BlasFloat}(C::StridedMatrix{T}, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}) = gemm_wrapper!(C, 'C', 'C', A, B) Ac_mul_Bc!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) = generic_matmatmul!(C, 'C', 'C', A, B) Ac_mul_Bt!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat) = generic_matmatmul!(C, 'C', 'T', A, B) @@ -423,7 +423,7 @@ end function generic_matmatmul{T,S}(tA, tB, A::AbstractVecOrMat{T}, B::AbstractMatrix{S}) mA, nA = lapack_size(tA, A) mB, nB = lapack_size(tB, B) - C = similar(B, promote_op(MulFun(),arithtype(T),arithtype(S)), mA, nB) + C = similar(B, promote_op(*,arithtype(T),arithtype(S)), mA, nB) generic_matmatmul!(C, tA, tB, A, B) end @@ -640,7 +640,7 @@ end # multiply 2x2 matrices function matmul2x2{T,S}(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) - matmul2x2!(similar(B, promote_op(MulFun(),T,S), 2, 2), tA, tB, A, B) + matmul2x2!(similar(B, promote_op(*,T,S), 2, 2), tA, tB, A, B) end function matmul2x2!{T,S,R}(C::AbstractMatrix{R}, tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) @@ -669,7 +669,7 @@ end # Multiply 3x3 matrices function matmul3x3{T,S}(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) - matmul3x3!(similar(B, promote_op(MulFun(),T,S), 3, 3), tA, tB, A, B) + matmul3x3!(similar(B, promote_op(*,T,S), 3, 3), tA, tB, A, B) end function matmul3x3!{T,S,R}(C::AbstractMatrix{R}, tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) diff --git a/base/operators.jl b/base/operators.jl index f9c74b65dadb4..2e5826fd583f9 100644 --- a/base/operators.jl +++ b/base/operators.jl @@ -102,8 +102,8 @@ immutable ElementwiseMaxFun end immutable ElementwiseMinFun end (::ElementwiseMinFun)(x, y) = min(x, y) -for (op,F) in ((:+,:(AddFun())), (:*,:(MulFun())), (:&,:(AndFun())), (:|,:(OrFun())), - (:$,:(XorFun())), (:min,:(ElementwiseMinFun())), (:max,:(ElementwiseMaxFun())), (:kron,:kron)) +for (op,F) in ((:+,:+), (:*,:*), (:&,:&), (:|,:|), + (:$,:$), (:min,:(ElementwiseMinFun())), (:max,:(ElementwiseMaxFun())), (:kron,:kron)) @eval begin # note: these definitions must not cause a dispatch loop when +(a,b) is # not defined, and must only try to call 2-argument definitions, so diff --git a/base/promotion.jl b/base/promotion.jl index b71ed7c4d5191..af38a41118235 100644 --- a/base/promotion.jl +++ b/base/promotion.jl @@ -200,10 +200,10 @@ max(x::Real, y::Real) = max(promote(x,y)...) min(x::Real, y::Real) = min(promote(x,y)...) minmax(x::Real, y::Real) = minmax(promote(x, y)...) -# "Promotion" that takes a Functor into account. You can override this +# "Promotion" that takes a function into account. You can override this # as needed. 
For example, if you need to provide a custom result type # for the multiplication of two types, -# promote_op{R<:MyType,S<:MyType}(::MulFun, ::Type{R}, ::Type{S}) = MyType{multype(R,S)} +# promote_op{R<:MyType,S<:MyType}(::typeof(*), ::Type{R}, ::Type{S}) = MyType{multype(R,S)} promote_op(::Any) = (@_pure_meta; Bottom) promote_op(::Any, T) = (@_pure_meta; T) promote_op{R,S}(::Any, ::Type{R}, ::Type{S}) = (@_pure_meta; promote_type(R, S)) diff --git a/base/reduce.jl b/base/reduce.jl index bafea5749de7e..99a26dd9a8cbc 100644 --- a/base/reduce.jl +++ b/base/reduce.jl @@ -18,16 +18,16 @@ typealias WidenReduceResult Union{SmallSigned, SmallUnsigned, Float16} # r_promote: promote x to the type of reduce(op, [x]) r_promote(op, x::WidenReduceResult) = widen(x) r_promote(op, x) = x -r_promote(::AddFun, x::WidenReduceResult) = widen(x) -r_promote(::MulFun, x::WidenReduceResult) = widen(x) -r_promote(::AddFun, x::Number) = oftype(x + zero(x), x) -r_promote(::MulFun, x::Number) = oftype(x * one(x), x) -r_promote(::AddFun, x) = x -r_promote(::MulFun, x) = x -r_promote(::MaxFun, x::WidenReduceResult) = x -r_promote(::MinFun, x::WidenReduceResult) = x -r_promote(::MaxFun, x) = x -r_promote(::MinFun, x) = x +r_promote(::typeof(+), x::WidenReduceResult) = widen(x) +r_promote(::typeof(*), x::WidenReduceResult) = widen(x) +r_promote(::typeof(+), x::Number) = oftype(x + zero(x), x) +r_promote(::typeof(*), x::Number) = oftype(x * one(x), x) +r_promote(::typeof(+), x) = x +r_promote(::typeof(*), x) = x +r_promote(::typeof(scalarmax), x::WidenReduceResult) = x +r_promote(::typeof(scalarmin), x::WidenReduceResult) = x +r_promote(::typeof(scalarmax), x) = x +r_promote(::typeof(scalarmin), x) = x ## foldl && mapfoldl @@ -50,8 +50,6 @@ end mapfoldl(f, op, v0, itr) = mapfoldl_impl(f, op, v0, itr, start(itr)) -mapfoldl(f, op::Function, v0, itr) = mapfoldl_impl(f, specialized_binary(op), v0, itr, start(itr)) - function mapfoldl(f, op, itr) i = start(itr) if done(itr, i) @@ -122,14 +120,14 @@ mapreduce_impl(f, op, A::AbstractArray, ifirst::Int, ilast::Int) = # handling empty arrays mr_empty(f, op, T) = throw(ArgumentError("reducing over an empty collection is not allowed")) # use zero(T)::T to improve type information when zero(T) is not defined -mr_empty(::typeof(identity), op::AddFun, T) = r_promote(op, zero(T)::T) -mr_empty(::typeof(abs), op::AddFun, T) = r_promote(op, abs(zero(T)::T)) -mr_empty(::typeof(abs2), op::AddFun, T) = r_promote(op, abs2(zero(T)::T)) -mr_empty(::typeof(identity), op::MulFun, T) = r_promote(op, one(T)::T) -mr_empty(::typeof(abs), op::MaxFun, T) = abs(zero(T)::T) -mr_empty(::typeof(abs2), op::MaxFun, T) = abs2(zero(T)::T) -mr_empty(f, op::AndFun, T) = true -mr_empty(f, op::OrFun, T) = false +mr_empty(::typeof(identity), op::typeof(+), T) = r_promote(op, zero(T)::T) +mr_empty(::typeof(abs), op::typeof(+), T) = r_promote(op, abs(zero(T)::T)) +mr_empty(::typeof(abs2), op::typeof(+), T) = r_promote(op, abs2(zero(T)::T)) +mr_empty(::typeof(identity), op::typeof(*), T) = r_promote(op, one(T)::T) +mr_empty(::typeof(abs), op::typeof(scalarmax), T) = abs(zero(T)::T) +mr_empty(::typeof(abs2), op::typeof(scalarmax), T) = abs2(zero(T)::T) +mr_empty(f, op::typeof(&), T) = true +mr_empty(f, op::typeof(|), T) = false _mapreduce(f, op, A::AbstractArray) = _mapreduce(f, op, linearindexing(A), A) @@ -159,8 +157,6 @@ _mapreduce{T}(f, op, ::LinearSlow, A::AbstractArray{T}) = mapfoldl(f, op, A) mapreduce(f, op, A::AbstractArray) = _mapreduce(f, op, linearindexing(A), A) mapreduce(f, op, a::Number) = f(a) 
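# Illustrative sketch (not part of the diff): since each function has its own type in
# 0.5-dev, passing an operator straight through already specializes the reduction on
# typeof(op), so the specialized_binary round-trip removed below is no longer needed.
reduce(+, 1:10)                # 55, via mapreduce(identity, +, 1:10)
reduce((x, y) -> x + y, 1:10)  # same result, specialized on the closure's own type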
-mapreduce(f, op::Function, A::AbstractArray) = mapreduce(f, specialized_binary(op), A) - reduce(op, v0, itr) = mapreduce(identity, op, v0, itr) reduce(op, itr) = mapreduce(identity, op, itr) reduce(op, a::Number) = a @@ -169,17 +165,17 @@ reduce(op, a::Number) = a ## conditions and results of short-circuiting -const ShortCircuiting = Union{AndFun, OrFun} +const ShortCircuiting = Union{typeof(&), typeof(|)} const ReturnsBool = Union{EqX, Predicate} -shortcircuits(::AndFun, x::Bool) = !x -shortcircuits(::OrFun, x::Bool) = x +shortcircuits(::typeof(&), x::Bool) = !x +shortcircuits(::typeof(|), x::Bool) = x -shorted(::AndFun) = false -shorted(::OrFun) = true +shorted(::typeof(&)) = false +shorted(::typeof(|)) = true -sc_finish(::AndFun) = true -sc_finish(::OrFun) = false +sc_finish(::typeof(&)) = true +sc_finish(::typeof(|)) = false ## short-circuiting (sc) mapreduce definitions @@ -222,7 +218,7 @@ mapreduce(f, op::ShortCircuiting, itr::Any) = mapreduce_sc(f,op,itr) ## sum -function mapreduce_seq_impl(f, op::AddFun, a::AbstractArray, ifirst::Int, ilast::Int) +function mapreduce_seq_impl(f, op::typeof(+), a::AbstractArray, ifirst::Int, ilast::Int) s = r_promote(op, f(a[ifirst])) + f(a[ifirst+1]) @simd for i = ifirst+2:ilast @inbounds ai = a[i] @@ -238,20 +234,20 @@ sum_pairwise_blocksize(f) = 1024 # This appears to show a benefit from a larger block size sum_pairwise_blocksize(::typeof(abs2)) = 4096 -mapreduce_impl(f, op::AddFun, A::AbstractArray, ifirst::Int, ilast::Int) = +mapreduce_impl(f, op::typeof(+), A::AbstractArray, ifirst::Int, ilast::Int) = mapreduce_pairwise_impl(f, op, A, ifirst, ilast, sum_pairwise_blocksize(f)) -sum(f::Union{Callable,Func{1}}, a) = mapreduce(f, AddFun(), a) -sum(a) = mapreduce(identity, AddFun(), a) +sum(f::Union{Callable,Func{1}}, a) = mapreduce(f, +, a) +sum(a) = mapreduce(identity, +, a) sum(a::AbstractArray{Bool}) = countnz(a) -sumabs(a) = mapreduce(abs, AddFun(), a) -sumabs2(a) = mapreduce(abs2, AddFun(), a) +sumabs(a) = mapreduce(abs, +, a) +sumabs2(a) = mapreduce(abs2, +, a) # Kahan (compensated) summation: O(1) error growth, at the expense # of a considerable increase in computational expense. 
function sum_kbn{T<:AbstractFloat}(A::AbstractArray{T}) n = length(A) - c = r_promote(AddFun(), zero(T)::T) + c = r_promote(+, zero(T)::T) if n == 0 return c end @@ -272,12 +268,12 @@ end ## prod -prod(f::Union{Callable,Func{1}}, a) = mapreduce(f, MulFun(), a) -prod(a) = mapreduce(identity, MulFun(), a) +prod(f::Union{Callable,Func{1}}, a) = mapreduce(f, *, a) +prod(a) = mapreduce(identity, *, a) ## maximum & minimum -function mapreduce_impl(f, op::MaxFun, A::AbstractArray, first::Int, last::Int) +function mapreduce_impl(f, op::typeof(scalarmax), A::AbstractArray, first::Int, last::Int) # locate the first non NaN number v = f(A[first]) i = first + 1 @@ -297,7 +293,7 @@ function mapreduce_impl(f, op::MaxFun, A::AbstractArray, first::Int, last::Int) v end -function mapreduce_impl(f, op::MinFun, A::AbstractArray, first::Int, last::Int) +function mapreduce_impl(f, op::typeof(scalarmin), A::AbstractArray, first::Int, last::Int) # locate the first non NaN number v = f(A[first]) i = first + 1 @@ -317,14 +313,14 @@ function mapreduce_impl(f, op::MinFun, A::AbstractArray, first::Int, last::Int) v end -maximum(f::Union{Callable,Func{1}}, a) = mapreduce(f, MaxFun(), a) -minimum(f::Union{Callable,Func{1}}, a) = mapreduce(f, MinFun(), a) +maximum(f::Union{Callable,Func{1}}, a) = mapreduce(f, scalarmax, a) +minimum(f::Union{Callable,Func{1}}, a) = mapreduce(f, scalarmin, a) -maximum(a) = mapreduce(identity, MaxFun(), a) -minimum(a) = mapreduce(identity, MinFun(), a) +maximum(a) = mapreduce(identity, scalarmax, a) +minimum(a) = mapreduce(identity, scalarmin, a) -maxabs(a) = mapreduce(abs, MaxFun(), a) -minabs(a) = mapreduce(abs, MinFun(), a) +maxabs(a) = mapreduce(abs, scalarmax, a) +minabs(a) = mapreduce(abs, scalarmin, a) ## extrema @@ -400,17 +396,17 @@ any(itr) = any(identity, itr) all(itr) = all(identity, itr) any(f::Any, itr) = any(Predicate(f), itr) -any(f::Predicate, itr) = mapreduce_sc_impl(f, OrFun(), itr) +any(f::Predicate, itr) = mapreduce_sc_impl(f, |, itr) any(f::typeof(identity), itr) = eltype(itr) <: Bool ? - mapreduce_sc_impl(f, OrFun(), itr) : + mapreduce_sc_impl(f, |, itr) : reduce(or_bool_only, itr) all(f::Any, itr) = all(Predicate(f), itr) -all(f::Predicate, itr) = mapreduce_sc_impl(f, AndFun(), itr) +all(f::Predicate, itr) = mapreduce_sc_impl(f, &, itr) all(f::typeof(identity), itr) = eltype(itr) <: Bool ? - mapreduce_sc_impl(f, AndFun(), itr) : + mapreduce_sc_impl(f, &, itr) : reduce(and_bool_only, itr) ## in & contains diff --git a/base/reducedim.jl b/base/reducedim.jl index 377094513c174..05c3c6068ab87 100644 --- a/base/reducedim.jl +++ b/base/reducedim.jl @@ -74,11 +74,11 @@ end ## initialization -for (Op, initfun) in ((:AddFun, :zero), (:MulFun, :one), (:MaxFun, :typemin), (:MinFun, :typemax)) +for (Op, initfun) in ((:(typeof(+)), :zero), (:(typeof(*)), :one), (:(typeof(scalarmax)), :typemin), (:(typeof(scalarmin)), :typemax)) @eval initarray!{T}(a::AbstractArray{T}, ::$(Op), init::Bool) = (init && fill!(a, $(initfun)(T)); a) end -for (Op, initval) in ((:AndFun, true), (:OrFun, false)) +for (Op, initval) in ((:(typeof(&)), true), (:(typeof(|)), false)) @eval initarray!(a::AbstractArray, ::$(Op), init::Bool) = (init && fill!(a, $initval); a) end @@ -94,7 +94,7 @@ reducedim_initarray0{T}(A::AbstractArray, region, v0::T) = reducedim_initarray0( # promote_union(T::Union) = promote_type(T.types...) 
promote_union(T) = T -function reducedim_init{S}(f, op::AddFun, A::AbstractArray{S}, region) +function reducedim_init{S}(f, op::typeof(+), A::AbstractArray{S}, region) T = promote_union(S) if method_exists(zero, Tuple{Type{T}}) x = f(zero(T)) @@ -107,7 +107,7 @@ function reducedim_init{S}(f, op::AddFun, A::AbstractArray{S}, region) return reducedim_initarray(A, region, z, Tr) end -function reducedim_init{S}(f, op::MulFun, A::AbstractArray{S}, region) +function reducedim_init{S}(f, op::typeof(*), A::AbstractArray{S}, region) T = promote_union(S) if method_exists(zero, Tuple{Type{T}}) x = f(zero(T)) @@ -120,30 +120,30 @@ function reducedim_init{S}(f, op::MulFun, A::AbstractArray{S}, region) return reducedim_initarray(A, region, z, Tr) end -reducedim_init{T}(f, op::MaxFun, A::AbstractArray{T}, region) = reducedim_initarray0(A, region, typemin(f(zero(T)))) -reducedim_init{T}(f, op::MinFun, A::AbstractArray{T}, region) = reducedim_initarray0(A, region, typemax(f(zero(T)))) -reducedim_init{T}(f::Union{typeof(abs),typeof(abs2)}, op::MaxFun, A::AbstractArray{T}, region) = +reducedim_init{T}(f, op::typeof(scalarmax), A::AbstractArray{T}, region) = reducedim_initarray0(A, region, typemin(f(zero(T)))) +reducedim_init{T}(f, op::typeof(scalarmin), A::AbstractArray{T}, region) = reducedim_initarray0(A, region, typemax(f(zero(T)))) +reducedim_init{T}(f::Union{typeof(abs),typeof(abs2)}, op::typeof(scalarmax), A::AbstractArray{T}, region) = reducedim_initarray(A, region, zero(f(zero(T)))) -reducedim_init(f, op::AndFun, A::AbstractArray, region) = reducedim_initarray(A, region, true) -reducedim_init(f, op::OrFun, A::AbstractArray, region) = reducedim_initarray(A, region, false) +reducedim_init(f, op::typeof(&), A::AbstractArray, region) = reducedim_initarray(A, region, true) +reducedim_init(f, op::typeof(|), A::AbstractArray, region) = reducedim_initarray(A, region, false) # specialize to make initialization more efficient for common cases for (IT, RT) in ((CommonReduceResult, :(eltype(A))), (SmallSigned, :Int), (SmallUnsigned, :UInt)) T = Union{[AbstractArray{t} for t in IT.types]..., [AbstractArray{Complex{t}} for t in IT.types]...} @eval begin - reducedim_init(f::typeof(identity), op::AddFun, A::$T, region) = + reducedim_init(f::typeof(identity), op::typeof(+), A::$T, region) = reducedim_initarray(A, region, zero($RT)) - reducedim_init(f::typeof(identity), op::MulFun, A::$T, region) = + reducedim_init(f::typeof(identity), op::typeof(*), A::$T, region) = reducedim_initarray(A, region, one($RT)) - reducedim_init(f::Union{typeof(abs),typeof(abs2)}, op::AddFun, A::$T, region) = + reducedim_init(f::Union{typeof(abs),typeof(abs2)}, op::typeof(+), A::$T, region) = reducedim_initarray(A, region, real(zero($RT))) - reducedim_init(f::Union{typeof(abs),typeof(abs2)}, op::MulFun, A::$T, region) = + reducedim_init(f::Union{typeof(abs),typeof(abs2)}, op::typeof(*), A::$T, region) = reducedim_initarray(A, region, real(one($RT))) end end -reducedim_init(f::Union{typeof(identity),typeof(abs),typeof(abs2)}, op::AddFun, A::AbstractArray{Bool}, region) = +reducedim_init(f::Union{typeof(identity),typeof(abs),typeof(abs2)}, op::typeof(+), A::AbstractArray{Bool}, region) = reducedim_initarray(A, region, 0) @@ -222,16 +222,8 @@ function _mapreducedim!{T,N}(f, op, R::AbstractArray, A::AbstractArray{T,N}) return R end -to_op(op) = op -function to_op(op::Function) - is(op, +) ? AddFun() : - is(op, *) ? MulFun() : - is(op, &) ? AndFun() : - is(op, |) ? 
OrFun() : op -end - mapreducedim!(f, op, R::AbstractArray, A::AbstractArray) = - (_mapreducedim!(f, to_op(op), R, A); R) + (_mapreducedim!(f, op, R, A); R) reducedim!{RT}(op, R::AbstractArray{RT}, A::AbstractArray) = mapreducedim!(identity, op, R, A, zero(RT)) @@ -239,7 +231,7 @@ reducedim!{RT}(op, R::AbstractArray{RT}, A::AbstractArray) = mapreducedim(f, op, A::AbstractArray, region, v0) = mapreducedim!(f, op, reducedim_initarray(A, region, v0), A) mapreducedim{T}(f, op, A::AbstractArray{T}, region) = - mapreducedim!(f, op, reducedim_init(f, to_op(op), A, region), A) + mapreducedim!(f, op, reducedim_init(f, op, A, region), A) reducedim(op, A::AbstractArray, region, v0) = mapreducedim(identity, op, A, region, v0) reducedim(op, A::AbstractArray, region) = mapreducedim(identity, op, A, region) @@ -247,18 +239,18 @@ reducedim(op, A::AbstractArray, region) = mapreducedim(identity, op, A, region) ##### Specific reduction functions ##### -for (fname, Op) in [(:sum, :AddFun), (:prod, :MulFun), - (:maximum, :MaxFun), (:minimum, :MinFun), - (:all, :AndFun), (:any, :OrFun)] +for (fname, op) in [(:sum, :+), (:prod, :*), + (:maximum, :scalarmax), (:minimum, :scalarmin), + (:all, :&), (:any, :|)] fname! = symbol(fname, '!') @eval begin $(fname!)(f::Union{Function,Func{1}}, r::AbstractArray, A::AbstractArray; init::Bool=true) = - mapreducedim!(f, $(Op)(), initarray!(r, $(Op)(), init), A) + mapreducedim!(f, $(op), initarray!(r, $(op), init), A) $(fname!)(r::AbstractArray, A::AbstractArray; init::Bool=true) = $(fname!)(identity, r, A; init=init) $(fname)(f::Union{Function,Func{1}}, A::AbstractArray, region) = - mapreducedim(f, $(Op)(), A, region) + mapreducedim(f, $(op), A, region) $(fname)(A::AbstractArray, region) = $(fname)(identity, A, region) end end @@ -334,7 +326,7 @@ function findmin!{R}(rval::AbstractArray{R}, rind::AbstractArray, A::AbstractArray; init::Bool=true) - findminmax!(LessFun(), initarray!(rval, MinFun(), init), rind, A) + findminmax!(<, initarray!(rval, scalarmin, init), rind, A) end function findmin{T}(A::AbstractArray{T}, region) @@ -342,7 +334,7 @@ function findmin{T}(A::AbstractArray{T}, region) return (similar(A, reduced_dims0(A, region)), zeros(Int, reduced_dims0(A, region))) end - return findminmax!(LessFun(), reducedim_initarray0(A, region, typemax(T)), + return findminmax!(<, reducedim_initarray0(A, region, typemax(T)), zeros(Int, reduced_dims0(A, region)), A) end @@ -356,7 +348,7 @@ function findmax!{R}(rval::AbstractArray{R}, rind::AbstractArray, A::AbstractArray; init::Bool=true) - findminmax!(MoreFun(), initarray!(rval, MaxFun(), init), rind, A) + findminmax!(>, initarray!(rval, scalarmax, init), rind, A) end function findmax{T}(A::AbstractArray{T}, region) @@ -364,7 +356,7 @@ function findmax{T}(A::AbstractArray{T}, region) return (similar(A, reduced_dims0(A,region)), zeros(Int, reduced_dims0(A,region))) end - return findminmax!(MoreFun(), reducedim_initarray0(A, region, typemin(T)), + return findminmax!(>, reducedim_initarray0(A, region, typemin(T)), zeros(Int, reduced_dims0(A, region)), A) end diff --git a/base/sparse.jl b/base/sparse.jl index db34080d384ab..b148fb394707f 100644 --- a/base/sparse.jl +++ b/base/sparse.jl @@ -2,7 +2,7 @@ module SparseArrays -using Base: Func, AddFun, OrFun +using Base: Func using Base: ReshapedArray using Base.Sort: Forward using Base.LinAlg: AbstractTriangular, PosDefException diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl index 0dd10d78062db..6f39094145e8d 100644 --- a/base/sparse/sparsematrix.jl +++ 
b/base/sparse/sparsematrix.jl @@ -314,9 +314,9 @@ sparse{Tv}(A::AbstractMatrix{Tv}) = convert(SparseMatrixCSC{Tv,Int}, A) sparse(S::SparseMatrixCSC) = copy(S) -sparse_IJ_sorted!(I,J,V,m,n) = sparse_IJ_sorted!(I,J,V,m,n,AddFun()) +sparse_IJ_sorted!(I,J,V,m,n) = sparse_IJ_sorted!(I,J,V,m,n,+) -sparse_IJ_sorted!(I,J,V::AbstractVector{Bool},m,n) = sparse_IJ_sorted!(I,J,V,m,n,OrFun()) +sparse_IJ_sorted!(I,J,V::AbstractVector{Bool},m,n) = sparse_IJ_sorted!(I,J,V,m,n,|) function sparse_IJ_sorted!{Ti<:Integer}(I::AbstractVector{Ti}, J::AbstractVector{Ti}, V::AbstractVector, @@ -592,9 +592,9 @@ sparse(I,J,V::AbstractVector) = sparse(I, J, V, dimlub(I), dimlub(J)) sparse(I,J,v::Number,m,n) = sparse(I, J, fill(v,length(I)), Int(m), Int(n)) -sparse(I,J,V::AbstractVector,m,n) = sparse(I, J, V, Int(m), Int(n), AddFun()) +sparse(I,J,V::AbstractVector,m,n) = sparse(I, J, V, Int(m), Int(n), +) -sparse(I,J,V::AbstractVector{Bool},m,n) = sparse(I, J, V, Int(m), Int(n), OrFun()) +sparse(I,J,V::AbstractVector{Bool},m,n) = sparse(I, J, V, Int(m), Int(n), |) sparse(I,J,v::Number,m,n,combine::Union{Function,Func}) = sparse(I, J, fill(v,length(I)), Int(m), Int(n), combine) @@ -926,7 +926,7 @@ function sprand{T}(r::AbstractRNG, m::Integer, n::Integer, density::AbstractFloa N == 1 && return rand(r) <= density ? sparse(rfn(r,1)) : spzeros(T,1,1) I,J = sprand_IJ(r, m, n, density) - sparse_IJ_sorted!(I, J, rfn(r,length(I)), m, n, AddFun()) # it will never need to combine + sparse_IJ_sorted!(I, J, rfn(r,length(I)), m, n, +) # it will never need to combine end function sprand{T}(m::Integer, n::Integer, density::AbstractFloat, @@ -936,7 +936,7 @@ function sprand{T}(m::Integer, n::Integer, density::AbstractFloat, N == 1 && return rand() <= density ? sparse(rfn(1)) : spzeros(T,1,1) I,J = sprand_IJ(GLOBAL_RNG, m, n, density) - sparse_IJ_sorted!(I, J, rfn(length(I)), m, n, AddFun()) # it will never need to combine + sparse_IJ_sorted!(I, J, rfn(length(I)), m, n, +) # it will never need to combine end sprand(r::AbstractRNG, m::Integer, n::Integer, density::AbstractFloat) = sprand(r,m,n,density,rand,Float64) @@ -1441,7 +1441,7 @@ end # macro (.-)(A::Number, B::SparseMatrixCSC) = A .- full(B) ( -)(A::Array , B::SparseMatrixCSC) = A - full(B) -(.*)(A::AbstractArray, B::AbstractArray) = broadcast_zpreserving(MulFun(), A, B) +(.*)(A::AbstractArray, B::AbstractArray) = broadcast_zpreserving(*, A, B) (.*)(A::SparseMatrixCSC, B::Number) = SparseMatrixCSC(A.m, A.n, copy(A.colptr), copy(A.rowval), A.nzval .* B) (.*)(A::Number, B::SparseMatrixCSC) = SparseMatrixCSC(B.m, B.n, copy(B.colptr), copy(B.rowval), A .* B.nzval) @@ -1557,13 +1557,13 @@ function Base._mapreduce{T}(f, op, ::Base.LinearSlow, A::SparseMatrixCSC{T}) end end -# Specialized mapreduce for AddFun/MulFun -_mapreducezeros(f, ::Base.AddFun, T::Type, nzeros::Int, v0) = +# Specialized mapreduce for +/* +_mapreducezeros(f, ::typeof(+), T::Type, nzeros::Int, v0) = nzeros == 0 ? v0 : f(zero(T))*nzeros + v0 -_mapreducezeros(f, ::Base.MulFun, T::Type, nzeros::Int, v0) = +_mapreducezeros(f, ::typeof(*), T::Type, nzeros::Int, v0) = nzeros == 0 ? 
v0 : f(zero(T))^nzeros * v0 -function Base._mapreduce{T}(f, op::Base.MulFun, A::SparseMatrixCSC{T}) +function Base._mapreduce{T}(f, op::typeof(*), A::SparseMatrixCSC{T}) nzeros = length(A)-nnz(A) if nzeros == 0 # No zeros, so don't compute f(0) since it might throw @@ -1658,9 +1658,9 @@ function Base._mapreducedim!{T}(f, op, R::AbstractArray, A::SparseMatrixCSC{T}) R end -# Specialized mapreducedim for AddFun cols to avoid allocating a +# Specialized mapreducedim for + cols to avoid allocating a # temporary array when f(0) == 0 -function _mapreducecols!{Tv,Ti}(f, op::Base.AddFun, R::AbstractArray, A::SparseMatrixCSC{Tv,Ti}) +function _mapreducecols!{Tv,Ti}(f, op::typeof(+), R::AbstractArray, A::SparseMatrixCSC{Tv,Ti}) nzval = A.nzval m, n = size(A) if length(nzval) == m*n diff --git a/base/sparse/sparsevector.jl b/base/sparse/sparsevector.jl index cf59fedb6e072..fa90504b0b167 100644 --- a/base/sparse/sparsevector.jl +++ b/base/sparse/sparsevector.jl @@ -2,7 +2,7 @@ ### Common definitions -import Base: Func, AddFun, MulFun, MaxFun, MinFun, SubFun, sort +import Base: Func, scalarmax, scalarmin, sort immutable ComplexFun <: Func{2} end (::ComplexFun)(x::Real, y::Real) = complex(x, y) @@ -129,18 +129,18 @@ function sparsevec{Tv,Ti<:Integer}(I::AbstractVector{Ti}, V::AbstractVector{Tv}, end sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, V::Union{Number, AbstractVector}) = - sparsevec(I, V, AddFun()) + sparsevec(I, V, +) sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, V::Union{Number, AbstractVector}, len::Integer) = - sparsevec(I, V, len, AddFun()) + sparsevec(I, V, len, +) sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, V::Union{Bool, AbstractVector{Bool}}) = - sparsevec(I, V, OrFun()) + sparsevec(I, V, |) sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, V::Union{Bool, AbstractVector{Bool}}, len::Integer) = - sparsevec(I, V, len, OrFun()) + sparsevec(I, V, len, |) sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, v::Number, combine::BinaryOp) = sparsevec(I, fill(v, length(I)), combine) @@ -1094,13 +1094,13 @@ end ### Binary arithmetics: +, -, * -for (vop, fun, mode) in [(:_vadd, :AddFun, 1), - (:_vsub, :SubFun, 1), - (:_vmul, :MulFun, 0)] +for (vop, fun, mode) in [(:_vadd, :+, 1), + (:_vsub, :-, 1), + (:_vmul, :*, 0)] @eval begin - $(vop)(x::AbstractSparseVector, y::AbstractSparseVector) = _binarymap($(fun)(), x, y, $mode) - $(vop)(x::StridedVector, y::AbstractSparseVector) = _binarymap($(fun)(), x, y, $mode) - $(vop)(x::AbstractSparseVector, y::StridedVector) = _binarymap($(fun)(), x, y, $mode) + $(vop)(x::AbstractSparseVector, y::AbstractSparseVector) = _binarymap($(fun), x, y, $mode) + $(vop)(x::StridedVector, y::AbstractSparseVector) = _binarymap($(fun), x, y, $mode) + $(vop)(x::AbstractSparseVector, y::StridedVector) = _binarymap($(fun), x, y, $mode) end end @@ -1122,8 +1122,8 @@ end # definition of other binary functions -for (op, fun, TF, mode) in [(:max, :MaxFun, :Real, 2), - (:min, :MinFun, :Real, 2), +for (op, fun, TF, mode) in [(:max, :(typeof(scalarmax)), :Real, 2), + (:min, :(typeof(scalarmin)), :Real, 2), (:complex, :ComplexFun, :Real, 1)] @eval begin $(op){Tx<:$(TF),Ty<:$(TF)}(x::AbstractSparseVector{Tx}, y::AbstractSparseVector{Ty}) = @@ -1435,7 +1435,7 @@ At_mul_B!{Tx,Ty}(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVect At_mul_B!(one(Tx), A, x, zero(Ty), y) At_mul_B!{Tx,Ty}(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}, β::Number, y::StridedVector{Ty}) = - _At_or_Ac_mul_B!(MulFun(), α, A, x, β, y) + _At_or_Ac_mul_B!(*, α, A, x, β, y) 
Ac_mul_B!{Tx,Ty}(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) = Ac_mul_B!(one(Tx), A, x, zero(Ty), y) @@ -1480,7 +1480,7 @@ function *(A::SparseMatrixCSC, x::AbstractSparseVector) end At_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) = - _At_or_Ac_mul_B(MulFun(), A, x) + _At_or_Ac_mul_B(*, A, x) Ac_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) = _At_or_Ac_mul_B(DotFun(), A, x) diff --git a/base/statistics.jl b/base/statistics.jl index 0e8472ff0719a..1edd67af2dc10 100644 --- a/base/statistics.jl +++ b/base/statistics.jl @@ -100,9 +100,9 @@ immutable CentralizedAbs2Fun{T<:Number} <: Func{1} end (f::CentralizedAbs2Fun)(x) = abs2(x - f.m) centralize_sumabs2(A::AbstractArray, m::Number) = - mapreduce(CentralizedAbs2Fun(m), AddFun(), A) + mapreduce(CentralizedAbs2Fun(m), +, A) centralize_sumabs2(A::AbstractArray, m::Number, ifirst::Int, ilast::Int) = - mapreduce_impl(CentralizedAbs2Fun(m), AddFun(), A, ifirst, ilast) + mapreduce_impl(CentralizedAbs2Fun(m), +, A, ifirst, ilast) @generated function centralize_sumabs2!{S,T,N}(R::AbstractArray{S}, A::AbstractArray{T,N}, means::AbstractArray) quote diff --git a/test/arrayops.jl b/test/arrayops.jl index 9867e062af864..656977672f92e 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -1300,9 +1300,9 @@ module RetTypeDecl (.*){T}(x::MeterUnits{T,1}, y::MeterUnits{T,1}) = MeterUnits{T,2}(x.val*y.val) zero{T,pow}(x::MeterUnits{T,pow}) = MeterUnits{T,pow}(zero(T)) - Base.promote_op{R,S}(::Base.AddFun, ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),1} - Base.promote_op{R,S}(::Base.MulFun, ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2} - Base.promote_op{R,S}(::Base.DotMulFun, ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2} + Base.promote_op{R,S}(::typeof(+), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),1} + Base.promote_op{R,S}(::typeof(*), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2} + Base.promote_op{R,S}(::typeof(.*), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2} @test @inferred(m+[m,m]) == [m+m,m+m] @test @inferred([m,m]+m) == [m+m,m+m] diff --git a/test/core.jl b/test/core.jl index e4548ceeb2bcb..a777df1980ad2 100644 --- a/test/core.jl +++ b/test/core.jl @@ -3333,8 +3333,8 @@ end @noinline function foo13855(x) @m13855() end -@test foo13855(Base.AddFun())() == Base.AddFun() -@test foo13855(Base.MulFun())() == Base.MulFun() +@test foo13855(+)() == + +@test foo13855(*)() == * # issue #8487 @test [x for x in 1:3] == [x for x ∈ 1:3] == [x for x = 1:3] diff --git a/test/functors.jl b/test/functors.jl index 7e7819861dc21..8e33ad244e362 100644 --- a/test/functors.jl +++ b/test/functors.jl @@ -1,18 +1,3 @@ # This file is a part of Julia. 
License is MIT: http://julialang.org/license ## Testing functors and specialization-by-value - -for op in (+, -, *, /, \, div, ^, &, |) - @test Base.specialized_binary(op)(2,10) == Base.specialized_binary((x,y)->op(x,y))(2,10) == op(2,10) -end - -for op in (&, *, min, |, max, $, !=, >=, ^, <=, ==, <, >) - for p in (true, false), q in (true, false) - @test Base.specialized_bitwise_binary(op)(p, q) == Base.specialized_bitwise_binary((x,y)->op(x,y))(p, q) == op(p, q) - end -end - -for tt in (true, false), tf in (true, false), ft in (true, false), ff in (true, false) - functor = Base.BitFunctorBinary{tt,tf,ft,ff}() - @test (functor(0b1100, 0b1010) & 0b1111) == (Int(tt)<<3 | Int(tf)<<2 | Int(ft)<<1 | Int(ff)) -end diff --git a/test/linalg/matmul.jl b/test/linalg/matmul.jl index 7885d850e0f85..67dfdeae969c6 100644 --- a/test/linalg/matmul.jl +++ b/test/linalg/matmul.jl @@ -230,7 +230,7 @@ immutable RootInt end import Base: *, promote_op (*)(x::RootInt, y::RootInt) = x.i*y.i -promote_op(::Base.MulFun, ::Type{RootInt}, ::Type{RootInt}) = Int +promote_op(::typeof(*), ::Type{RootInt}, ::Type{RootInt}) = Int a = [RootInt(3)] C = [0] From e65762172ad4d81dae9851d09bc7474d37b976b5 Mon Sep 17 00:00:00 2001 From: Martin Holters Date: Fri, 8 Apr 2016 13:07:33 +0200 Subject: [PATCH 4/9] Remove (now empty) test/functors.jl --- test/choosetests.jl | 2 +- test/functors.jl | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) delete mode 100644 test/functors.jl diff --git a/test/choosetests.jl b/test/choosetests.jl index 5dcd20bc7bf78..892a57feeb830 100644 --- a/test/choosetests.jl +++ b/test/choosetests.jl @@ -30,7 +30,7 @@ function choosetests(choices = []) "euler", "show", "lineedit", "replcompletions", "repl", "replutil", "sets", "test", "goto", "llvmcall", "grisu", "nullable", "meta", "stacktraces", "profile", "libgit2", "docs", - "markdown", "base64", "serialize", "functors", "misc", "threads", + "markdown", "base64", "serialize", "misc", "threads", "enums", "cmdlineargs", "i18n", "workspace", "libdl", "int", "checked", "intset", "floatfuncs", "compile", "parallel", "inline", "boundscheck", "error" diff --git a/test/functors.jl b/test/functors.jl deleted file mode 100644 index 8e33ad244e362..0000000000000 --- a/test/functors.jl +++ /dev/null @@ -1,3 +0,0 @@ -# This file is a part of Julia. License is MIT: http://julialang.org/license - -## Testing functors and specialization-by-value From 7943016485c791b4d26b1f56fc4af4f131cfbf0c Mon Sep 17 00:00:00 2001 From: Martin Holters Date: Thu, 14 Apr 2016 09:21:10 +0200 Subject: [PATCH 5/9] Remove/deprecate special purpose functors and move Predicate to reduce.jl --- base/char.jl | 2 +- base/deprecated.jl | 6 ++++++ base/functors.jl | 13 ------------- base/operators.jl | 11 ++--------- base/reduce.jl | 17 +++++++++-------- base/sparse/sparsematrix.jl | 17 +++++------------ base/sparse/sparsevector.jl | 32 +++++++++++--------------------- base/statistics.jl | 9 +++------ 8 files changed, 37 insertions(+), 70 deletions(-) diff --git a/base/char.jl b/base/char.jl index 22196dbfdeee4..319e4b35257b8 100644 --- a/base/char.jl +++ b/base/char.jl @@ -18,7 +18,7 @@ length(c::Char) = 1 endof(c::Char) = 1 getindex(c::Char) = c getindex(c::Char, i::Integer) = i == 1 ? c : throw(BoundsError()) -getindex(c::Char, I::Integer...) = all(EqX(1), I) ? c : throw(BoundsError()) +getindex(c::Char, I::Integer...) = all(Predicate(x -> x == 1), I) ? 
c : throw(BoundsError()) first(c::Char) = c last(c::Char) = c eltype(::Type{Char}) = Char diff --git a/base/deprecated.jl b/base/deprecated.jl index 250e739084479..cf4afffe4d389 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -1051,12 +1051,18 @@ for (Fun, func) in [(:IdFun, :identity), (:MoreFun, :>), (:DotLSFun, :.<<), (:DotRSFun, :.>>), + (:ElementwiseMaxFun, :max), + (:ElementwiseMinFun, :min), + (:ComplexFun, :complex), + (:DotFun, :dot), ] @eval begin @deprecate_binding $(Fun) typeof($(func)) (::Type{typeof($(func))})() = $(func) end end +@deprecate_binding CentralizedAbs2Fun typeof(centralizedabs2fun(0)).name.primary +(::Type{typeof(centralizedabs2fun(0)).name.primary})(m::Number) = centralizedabs2fun(m) @deprecate specialized_unary(f::Function) f @deprecate specialized_binary(f::Function) f @deprecate specialized_bitwise_unary(f::Function) f diff --git a/base/functors.jl b/base/functors.jl index 1ccc1b3c8d37a..9103b005c6bcb 100644 --- a/base/functors.jl +++ b/base/functors.jl @@ -10,19 +10,6 @@ abstract Func{N} -# Special purpose functors - -immutable Predicate{F} <: Func{1} - f::F -end -(pred::Predicate)(x) = pred.f(x)::Bool - -immutable EqX{T} <: Func{1} - x::T -end - -(f::EqX)(y) = f.x == y - # More promote_op rules promote_op{T<:Integer}(::typeof(^), ::Type{Bool}, ::Type{T}) = Bool diff --git a/base/operators.jl b/base/operators.jl index 2e5826fd583f9..1e6af424173ed 100644 --- a/base/operators.jl +++ b/base/operators.jl @@ -96,19 +96,12 @@ function afoldl(op,a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,qs...) y end -immutable ElementwiseMaxFun end -(::ElementwiseMaxFun)(x, y) = max(x,y) - -immutable ElementwiseMinFun end -(::ElementwiseMinFun)(x, y) = min(x, y) - -for (op,F) in ((:+,:+), (:*,:*), (:&,:&), (:|,:|), - (:$,:$), (:min,:(ElementwiseMinFun())), (:max,:(ElementwiseMaxFun())), (:kron,:kron)) +for op in (:+, :*, :&, :|, :$, :min, :max, :kron) @eval begin # note: these definitions must not cause a dispatch loop when +(a,b) is # not defined, and must only try to call 2-argument definitions, so # that defining +(a,b) is sufficient for full functionality. - ($op)(a, b, c, xs...) = afoldl($F, ($op)(($op)(a,b),c), xs...) + ($op)(a, b, c, xs...) = afoldl($op, ($op)(($op)(a,b),c), xs...) # a further concern is that it's easy for a type like (Int,Int...) # to match many definitions, so we need to keep the number of # definitions down to avoid losing type information. diff --git a/base/reduce.jl b/base/reduce.jl index 99a26dd9a8cbc..45e0c7f66fdb7 100644 --- a/base/reduce.jl +++ b/base/reduce.jl @@ -165,8 +165,12 @@ reduce(op, a::Number) = a ## conditions and results of short-circuiting +immutable Predicate{F} <: Func{1} + f::F +end +(pred::Predicate)(x) = pred.f(x)::Bool + const ShortCircuiting = Union{typeof(&), typeof(|)} -const ReturnsBool = Union{EqX, Predicate} shortcircuits(::typeof(&), x::Bool) = !x shortcircuits(::typeof(|), x::Bool) = x @@ -201,8 +205,8 @@ end mapreduce_no_sc(f, op, itr::Any) = mapfoldl(f, op, itr) mapreduce_no_sc(f, op, itr::AbstractArray) = _mapreduce(f, op, itr) -mapreduce_sc(f::Function, op, itr) = mapreduce_no_sc(f, op, itr) -mapreduce_sc(f::ReturnsBool, op, itr) = mapreduce_sc_impl(f, op, itr) +mapreduce_sc(f::Function, op, itr) = mapreduce_no_sc(f, op, itr) +mapreduce_sc(f::Predicate, op, itr) = mapreduce_sc_impl(f, op, itr) mapreduce_sc(f::typeof(identity), op, itr) = eltype(itr) <: Bool ? 
@@ -411,7 +415,7 @@ all(f::typeof(identity), itr) = ## in & contains -in(x, itr) = any(EqX(x), itr) +in(x, itr) = any(Predicate(y -> y == x), itr) const ∈ = in ∉(x, itr)=!∈(x, itr) @@ -436,13 +440,10 @@ function count(pred, itr) return n end -immutable NotEqZero <: Func{1} end -(::NotEqZero)(x) = x != 0 - """ countnz(A) Counts the number of nonzero values in array `A` (dense or sparse). Note that this is not a constant-time operation. For sparse matrices, one should usually use `nnz`, which returns the number of stored values. """ -countnz(a) = count(NotEqZero(), a) +countnz(a) = count(x -> x != 0, a) diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl index 6f39094145e8d..73b46b65a87e8 100644 --- a/base/sparse/sparsematrix.jl +++ b/base/sparse/sparsematrix.jl @@ -786,30 +786,23 @@ function fkeep!(A::SparseMatrixCSC, f, other, trim::Bool = true) A end -immutable TrilFunc <: Base.Func{4} end -immutable TriuFunc <: Base.Func{4} end -(::TrilFunc){Tv,Ti}(i::Ti, j::Ti, x::Tv, k::Integer) = i + k >= j -(::TriuFunc){Tv,Ti}(i::Ti, j::Ti, x::Tv, k::Integer) = j >= i + k function tril!(A::SparseMatrixCSC, k::Integer = 0, trim::Bool = true) if k > A.n-1 || k < 1-A.m throw(ArgumentError("requested diagonal, $k, out of bounds in matrix of size ($(A.m),$(A.n))")) end - fkeep!(A, TrilFunc(), k, trim) + fkeep!(A, (i, j, x, k) -> i + k >= j, k, trim) end function triu!(A::SparseMatrixCSC, k::Integer = 0, trim::Bool = true) if k > A.n-1 || k < 1-A.m throw(ArgumentError("requested diagonal, $k, out of bounds in matrix of size ($(A.m),$(A.n))")) end - fkeep!(A, TriuFunc(), k, trim) + fkeep!(A, (i, j, x, k) -> j >= i + k, k, trim) end -immutable DroptolFunc <: Base.Func{4} end -(::DroptolFunc){Tv,Ti}(i::Ti, j::Ti, x::Tv, tol::Real) = abs(x) > tol -droptol!(A::SparseMatrixCSC, tol, trim::Bool = true) = fkeep!(A, DroptolFunc(), tol, trim) +droptol!(A::SparseMatrixCSC, tol, trim::Bool = true) = + fkeep!(A, (i, j, x, tol) -> abs(x) > tol, tol, trim) -immutable DropzerosFunc <: Base.Func{4} end -(::DropzerosFunc){Tv,Ti}(i::Ti, j::Ti, x::Tv, other) = x != 0 -dropzeros!(A::SparseMatrixCSC, trim::Bool = true) = fkeep!(A, DropzerosFunc(), nothing, trim) +dropzeros!(A::SparseMatrixCSC, trim::Bool = true) = fkeep!(A, (i, j, x, other) -> x != 0, nothing, trim) dropzeros(A::SparseMatrixCSC, trim::Bool = true) = dropzeros!(copy(A), trim) diff --git a/base/sparse/sparsevector.jl b/base/sparse/sparsevector.jl index fa90504b0b167..ac4bff5d3d460 100644 --- a/base/sparse/sparsevector.jl +++ b/base/sparse/sparsevector.jl @@ -4,12 +4,6 @@ import Base: Func, scalarmax, scalarmin, sort -immutable ComplexFun <: Func{2} end -(::ComplexFun)(x::Real, y::Real) = complex(x, y) - -immutable DotFun <: Func{2} end -(::DotFun)(x::Number, y::Number) = conj(x) * y - typealias UnaryOp Union{Function, Func{1}} typealias BinaryOp Union{Function, Func{2}} @@ -1122,16 +1116,16 @@ end # definition of other binary functions -for (op, fun, TF, mode) in [(:max, :(typeof(scalarmax)), :Real, 2), - (:min, :(typeof(scalarmin)), :Real, 2), - (:complex, :ComplexFun, :Real, 1)] +for (op, TF, mode) in [(:max, :Real, 2), + (:min, :Real, 2), + (:complex, :Real, 1)] @eval begin $(op){Tx<:$(TF),Ty<:$(TF)}(x::AbstractSparseVector{Tx}, y::AbstractSparseVector{Ty}) = - _binarymap($(fun)(), x, y, $mode) + _binarymap($(op), x, y, $mode) $(op){Tx<:$(TF),Ty<:$(TF)}(x::StridedVector{Tx}, y::AbstractSparseVector{Ty}) = - _binarymap($(fun)(), x, y, $mode) + _binarymap($(op), x, y, $mode) $(op){Tx<:$(TF),Ty<:$(TF)}(x::AbstractSparseVector{Tx}, 
y::StridedVector{Ty}) = - _binarymap($(fun)(), x, y, $mode) + _binarymap($(op), x, y, $mode) end end @@ -1284,7 +1278,7 @@ function dot{Tx<:Number,Ty<:Number}(x::AbstractSparseVector{Tx}, y::AbstractSpar xnzval = nonzeros(x) ynzval = nonzeros(y) - _spdot(DotFun(), + _spdot(dot, 1, length(xnzind), xnzind, xnzval, 1, length(ynzind), ynzind, ynzval) end @@ -1441,7 +1435,7 @@ Ac_mul_B!{Tx,Ty}(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVect Ac_mul_B!(one(Tx), A, x, zero(Ty), y) Ac_mul_B!{Tx,Ty}(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}, β::Number, y::StridedVector{Ty}) = - _At_or_Ac_mul_B!(DotFun(), α, A, x, β, y) + _At_or_Ac_mul_B!(dot, α, A, x, β, y) function _At_or_Ac_mul_B!{Tx,Ty}(tfun::BinaryOp, α::Number, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}, @@ -1483,7 +1477,7 @@ At_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) = _At_or_Ac_mul_B(*, A, x) Ac_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) = - _At_or_Ac_mul_B(DotFun(), A, x) + _At_or_Ac_mul_B(dot, A, x) function _At_or_Ac_mul_B{TvA,TiA,TvX,TiX}(tfun::BinaryOp, A::SparseMatrixCSC{TvA,TiA}, x::AbstractSparseVector{TvX,TiX}) m, n = size(A) @@ -1686,11 +1680,7 @@ function fkeep!(x::SparseVector, f, other, trim::Bool = true) x end -immutable DroptolFuncVec <: Base.Func{3} end -(::DroptolFuncVec){Tv,Ti}(i::Ti, x::Tv, tol::Real) = abs(x) > tol -droptol!(x::SparseVector, tol, trim::Bool = true) = fkeep!(x, DroptolFuncVec(), tol, trim) +droptol!(x::SparseVector, tol, trim::Bool = true) = fkeep!(x, (i, x, tol) -> abs(x) > tol, tol, trim) -immutable DropzerosFuncVec <: Base.Func{3} end -(::DropzerosFuncVec){Tv,Ti}(i::Ti, x::Tv, other) = x != 0 -dropzeros!(x::SparseVector, trim::Bool = true) = fkeep!(x, DropzerosFuncVec(), nothing, trim) +dropzeros!(x::SparseVector, trim::Bool = true) = fkeep!(x, (i, x, other) -> x != 0, nothing, trim) dropzeros(x::SparseVector, trim::Bool = true) = dropzeros!(copy(x), trim) diff --git a/base/statistics.jl b/base/statistics.jl index 1edd67af2dc10..2556abb448bdd 100644 --- a/base/statistics.jl +++ b/base/statistics.jl @@ -95,14 +95,11 @@ end varzm{T}(A::AbstractArray{T}, region; corrected::Bool=true) = varzm!(reducedim_initarray(A, region, 0, real(momenttype(T))), A; corrected=corrected) -immutable CentralizedAbs2Fun{T<:Number} <: Func{1} - m::T -end -(f::CentralizedAbs2Fun)(x) = abs2(x - f.m) +centralizedabs2fun(m::Number) = x -> abs2(x - m) centralize_sumabs2(A::AbstractArray, m::Number) = - mapreduce(CentralizedAbs2Fun(m), +, A) + mapreduce(centralizedabs2fun(m), +, A) centralize_sumabs2(A::AbstractArray, m::Number, ifirst::Int, ilast::Int) = - mapreduce_impl(CentralizedAbs2Fun(m), +, A, ifirst, ilast) + mapreduce_impl(centralizedabs2fun(m), +, A, ifirst, ilast) @generated function centralize_sumabs2!{S,T,N}(R::AbstractArray{S}, A::AbstractArray{T,N}, means::AbstractArray) quote From 1207e646d9a643e10e396fcd6d58e6fa376f09f2 Mon Sep 17 00:00:00 2001 From: Martin Holters Date: Fri, 8 Apr 2016 14:48:50 +0200 Subject: [PATCH 6/9] Deprecate Func --- base/deprecated.jl | 2 ++ base/functors.jl | 8 -------- base/reduce.jl | 10 +++++----- base/reducedim.jl | 4 ++-- base/sparse.jl | 1 - base/sparse/sparsematrix.jl | 4 ++-- base/sparse/sparsevector.jl | 33 +++++++++++++++------------------ 7 files changed, 26 insertions(+), 36 deletions(-) diff --git a/base/deprecated.jl b/base/deprecated.jl index cf4afffe4d389..5b4c4b1ec691c 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -1021,6 +1021,8 @@ function pmap(f, c...; err_retry=nothing, err_stop=nothing, pids=nothing) 
end # 15692 +typealias Func{N} Function +deprecate(:Func) for (Fun, func) in [(:IdFun, :identity), (:AbsFun, :abs), (:Abs2Fun, :abs2), diff --git a/base/functors.jl b/base/functors.jl index 9103b005c6bcb..9d26a90478f2b 100644 --- a/base/functors.jl +++ b/base/functors.jl @@ -2,14 +2,6 @@ ###### Function Objects ("Functors") ###### -# Note that function objects are merely used as internal machinery to -# enhance code reuse and improve performance of map/reduce. -# They are not exported. -# When function arguments can be inlined, the use of function objects -# can be removed. - -abstract Func{N} - # More promote_op rules promote_op{T<:Integer}(::typeof(^), ::Type{Bool}, ::Type{T}) = Bool diff --git a/base/reduce.jl b/base/reduce.jl index 45e0c7f66fdb7..7d8278aa73431 100644 --- a/base/reduce.jl +++ b/base/reduce.jl @@ -165,7 +165,7 @@ reduce(op, a::Number) = a ## conditions and results of short-circuiting -immutable Predicate{F} <: Func{1} +immutable Predicate{F} f::F end (pred::Predicate)(x) = pred.f(x)::Bool @@ -241,7 +241,7 @@ sum_pairwise_blocksize(::typeof(abs2)) = 4096 mapreduce_impl(f, op::typeof(+), A::AbstractArray, ifirst::Int, ilast::Int) = mapreduce_pairwise_impl(f, op, A, ifirst, ilast, sum_pairwise_blocksize(f)) -sum(f::Union{Callable,Func{1}}, a) = mapreduce(f, +, a) +sum(f::Callable, a) = mapreduce(f, +, a) sum(a) = mapreduce(identity, +, a) sum(a::AbstractArray{Bool}) = countnz(a) sumabs(a) = mapreduce(abs, +, a) @@ -272,7 +272,7 @@ end ## prod -prod(f::Union{Callable,Func{1}}, a) = mapreduce(f, *, a) +prod(f::Callable, a) = mapreduce(f, *, a) prod(a) = mapreduce(identity, *, a) ## maximum & minimum @@ -317,8 +317,8 @@ function mapreduce_impl(f, op::typeof(scalarmin), A::AbstractArray, first::Int, v end -maximum(f::Union{Callable,Func{1}}, a) = mapreduce(f, scalarmax, a) -minimum(f::Union{Callable,Func{1}}, a) = mapreduce(f, scalarmin, a) +maximum(f::Callable, a) = mapreduce(f, scalarmax, a) +minimum(f::Callable, a) = mapreduce(f, scalarmin, a) maximum(a) = mapreduce(identity, scalarmax, a) minimum(a) = mapreduce(identity, scalarmin, a) diff --git a/base/reducedim.jl b/base/reducedim.jl index 05c3c6068ab87..fa2a1e6a1b832 100644 --- a/base/reducedim.jl +++ b/base/reducedim.jl @@ -245,11 +245,11 @@ for (fname, op) in [(:sum, :+), (:prod, :*), fname! 
= symbol(fname, '!') @eval begin - $(fname!)(f::Union{Function,Func{1}}, r::AbstractArray, A::AbstractArray; init::Bool=true) = + $(fname!)(f::Function, r::AbstractArray, A::AbstractArray; init::Bool=true) = mapreducedim!(f, $(op), initarray!(r, $(op), init), A) $(fname!)(r::AbstractArray, A::AbstractArray; init::Bool=true) = $(fname!)(identity, r, A; init=init) - $(fname)(f::Union{Function,Func{1}}, A::AbstractArray, region) = + $(fname)(f::Function, A::AbstractArray, region) = mapreducedim(f, $(op), A, region) $(fname)(A::AbstractArray, region) = $(fname)(identity, A, region) end diff --git a/base/sparse.jl b/base/sparse.jl index b148fb394707f..4a16213bda971 100644 --- a/base/sparse.jl +++ b/base/sparse.jl @@ -2,7 +2,6 @@ module SparseArrays -using Base: Func using Base: ReshapedArray using Base.Sort: Forward using Base.LinAlg: AbstractTriangular, PosDefException diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl index 73b46b65a87e8..e30634f806956 100644 --- a/base/sparse/sparsematrix.jl +++ b/base/sparse/sparsematrix.jl @@ -320,7 +320,7 @@ sparse_IJ_sorted!(I,J,V::AbstractVector{Bool},m,n) = sparse_IJ_sorted!(I,J,V,m,n function sparse_IJ_sorted!{Ti<:Integer}(I::AbstractVector{Ti}, J::AbstractVector{Ti}, V::AbstractVector, - m::Integer, n::Integer, combine::Union{Function,Func}) + m::Integer, n::Integer, combine::Function) m = m < 0 ? 0 : m n = n < 0 ? 0 : n @@ -596,7 +596,7 @@ sparse(I,J,V::AbstractVector,m,n) = sparse(I, J, V, Int(m), Int(n), +) sparse(I,J,V::AbstractVector{Bool},m,n) = sparse(I, J, V, Int(m), Int(n), |) -sparse(I,J,v::Number,m,n,combine::Union{Function,Func}) = sparse(I, J, fill(v,length(I)), Int(m), Int(n), combine) +sparse(I,J,v::Number,m,n,combine::Function) = sparse(I, J, fill(v,length(I)), Int(m), Int(n), combine) function sparse(T::SymTridiagonal) m = length(T.dv) diff --git a/base/sparse/sparsevector.jl b/base/sparse/sparsevector.jl index ac4bff5d3d460..a1efa36bee8ad 100644 --- a/base/sparse/sparsevector.jl +++ b/base/sparse/sparsevector.jl @@ -2,10 +2,7 @@ ### Common definitions -import Base: Func, scalarmax, scalarmin, sort - -typealias UnaryOp Union{Function, Func{1}} -typealias BinaryOp Union{Function, Func{2}} +import Base: scalarmax, scalarmin, sort ### The SparseVector @@ -58,7 +55,7 @@ function _sparsevector!{Ti<:Integer}(I::Vector{Ti}, V::Vector, len::Integer) SparseVector(len, I, V) end -function _sparsevector!{Tv,Ti<:Integer}(I::Vector{Ti}, V::Vector{Tv}, len::Integer, combine::BinaryOp) +function _sparsevector!{Tv,Ti<:Integer}(I::Vector{Ti}, V::Vector{Tv}, len::Integer, combine::Function) if !isempty(I) p = sortperm(I) permute!(I, p) @@ -99,7 +96,7 @@ Duplicates are combined using the `combine` function, which defaults to `+` if no `combine` argument is provided, unless the elements of `V` are Booleans in which case `combine` defaults to `|`. 
""" -function sparsevec{Tv,Ti<:Integer}(I::AbstractVector{Ti}, V::AbstractVector{Tv}, combine::BinaryOp) +function sparsevec{Tv,Ti<:Integer}(I::AbstractVector{Ti}, V::AbstractVector{Tv}, combine::Function) length(I) == length(V) || throw(ArgumentError("index and value vectors must be the same length")) len = 0 @@ -112,7 +109,7 @@ function sparsevec{Tv,Ti<:Integer}(I::AbstractVector{Ti}, V::AbstractVector{Tv}, _sparsevector!(collect(Ti, I), collect(Tv, V), len, combine) end -function sparsevec{Tv,Ti<:Integer}(I::AbstractVector{Ti}, V::AbstractVector{Tv}, len::Integer, combine::BinaryOp) +function sparsevec{Tv,Ti<:Integer}(I::AbstractVector{Ti}, V::AbstractVector{Tv}, len::Integer, combine::Function) length(I) == length(V) || throw(ArgumentError("index and value vectors must be the same length")) maxi = convert(Ti, len) @@ -136,10 +133,10 @@ sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, V::Union{Bool, AbstractVector{Bool len::Integer) = sparsevec(I, V, len, |) -sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, v::Number, combine::BinaryOp) = +sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, v::Number, combine::Function) = sparsevec(I, fill(v, length(I)), combine) -sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, v::Number, len::Integer, combine::BinaryOp) = +sparsevec{Ti<:Integer}(I::AbstractVector{Ti}, v::Number, len::Integer, combine::Function) = sparsevec(I, fill(v, length(I)), len, combine) @@ -857,7 +854,7 @@ end # 1: f(nz, nz) -> z/nz, f(z, nz) -> nz, f(nz, z) -> nz # 2: f(nz, nz) -> z/nz, f(z, nz) -> z/nz, f(nz, z) -> z/nz -function _binarymap{Tx,Ty}(f::BinaryOp, +function _binarymap{Tx,Ty}(f::Function, x::AbstractSparseVector{Tx}, y::AbstractSparseVector{Ty}, mode::Int) @@ -895,7 +892,7 @@ function _binarymap{Tx,Ty}(f::BinaryOp, return SparseVector(n, rind, rval) end -function _binarymap_mode_0!(f::BinaryOp, mx::Int, my::Int, +function _binarymap_mode_0!(f::Function, mx::Int, my::Int, xnzind, xnzval, ynzind, ynzval, rind, rval) # f(nz, nz) -> nz, f(z, nz) -> z, f(nz, z) -> z ir = 0; ix = 1; iy = 1 @@ -915,7 +912,7 @@ function _binarymap_mode_0!(f::BinaryOp, mx::Int, my::Int, return ir end -function _binarymap_mode_1!{Tx,Ty}(f::BinaryOp, mx::Int, my::Int, +function _binarymap_mode_1!{Tx,Ty}(f::Function, mx::Int, my::Int, xnzind, xnzval::AbstractVector{Tx}, ynzind, ynzval::AbstractVector{Ty}, rind, rval) @@ -953,7 +950,7 @@ function _binarymap_mode_1!{Tx,Ty}(f::BinaryOp, mx::Int, my::Int, return ir end -function _binarymap_mode_2!{Tx,Ty}(f::BinaryOp, mx::Int, my::Int, +function _binarymap_mode_2!{Tx,Ty}(f::Function, mx::Int, my::Int, xnzind, xnzval::AbstractVector{Tx}, ynzind, ynzval::AbstractVector{Ty}, rind, rval) @@ -999,7 +996,7 @@ function _binarymap_mode_2!{Tx,Ty}(f::BinaryOp, mx::Int, my::Int, return ir end -function _binarymap{Tx,Ty}(f::BinaryOp, +function _binarymap{Tx,Ty}(f::Function, x::AbstractVector{Tx}, y::AbstractSparseVector{Ty}, mode::Int) @@ -1042,7 +1039,7 @@ function _binarymap{Tx,Ty}(f::BinaryOp, return dst end -function _binarymap{Tx,Ty}(f::BinaryOp, +function _binarymap{Tx,Ty}(f::Function, x::AbstractSparseVector{Tx}, y::AbstractVector{Ty}, mode::Int) @@ -1247,7 +1244,7 @@ function dot{Tx<:Number,Ty<:Number}(x::AbstractSparseVector{Tx}, y::AbstractVect return s end -function _spdot(f::BinaryOp, +function _spdot(f::Function, xj::Int, xj_last::Int, xnzind, xnzval, yj::Int, yj_last::Int, ynzind, ynzval) # dot product between ranges of non-zeros, @@ -1437,7 +1434,7 @@ Ac_mul_B!{Tx,Ty}(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVect Ac_mul_B!{Tx,Ty}(α::Number, 
A::SparseMatrixCSC, x::AbstractSparseVector{Tx}, β::Number, y::StridedVector{Ty}) = _At_or_Ac_mul_B!(dot, α, A, x, β, y) -function _At_or_Ac_mul_B!{Tx,Ty}(tfun::BinaryOp, +function _At_or_Ac_mul_B!{Tx,Ty}(tfun::Function, α::Number, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}, β::Number, y::StridedVector{Ty}) m, n = size(A) @@ -1479,7 +1476,7 @@ At_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) = Ac_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) = _At_or_Ac_mul_B(dot, A, x) -function _At_or_Ac_mul_B{TvA,TiA,TvX,TiX}(tfun::BinaryOp, A::SparseMatrixCSC{TvA,TiA}, x::AbstractSparseVector{TvX,TiX}) +function _At_or_Ac_mul_B{TvA,TiA,TvX,TiX}(tfun::Function, A::SparseMatrixCSC{TvA,TiA}, x::AbstractSparseVector{TvX,TiX}) m, n = size(A) length(x) == m || throw(DimensionMismatch()) Tv = promote_type(TvA, TvX) From d8259d53d49dec022b24dd1a30988c08ff8cd06e Mon Sep 17 00:00:00 2001 From: Martin Holters Date: Fri, 8 Apr 2016 14:50:41 +0200 Subject: [PATCH 7/9] Move promote_op for Bool^Int to bool.jl --- base/bool.jl | 1 + base/functors.jl | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/base/bool.jl b/base/bool.jl index 915d67a97bb3f..67c3831f2d4e6 100644 --- a/base/bool.jl +++ b/base/bool.jl @@ -61,3 +61,4 @@ rem(x::Bool, y::Bool) = y ? false : throw(DivideError()) mod(x::Bool, y::Bool) = rem(x,y) promote_op(op, ::Type{Bool}, ::Type{Bool}) = typeof(op(true, true)) +promote_op{T<:Integer}(::typeof(^), ::Type{Bool}, ::Type{T}) = Bool diff --git a/base/functors.jl b/base/functors.jl index 9d26a90478f2b..b9b34e81c4363 100644 --- a/base/functors.jl +++ b/base/functors.jl @@ -1,7 +1,3 @@ # This file is a part of Julia. License is MIT: http://julialang.org/license ###### Function Objects ("Functors") ###### - -# More promote_op rules - -promote_op{T<:Integer}(::typeof(^), ::Type{Bool}, ::Type{T}) = Bool From 323a8d2a7aebb174ff54b6a481efaa9386b4c0f6 Mon Sep 17 00:00:00 2001 From: Martin Holters Date: Fri, 8 Apr 2016 15:27:58 +0200 Subject: [PATCH 8/9] Remove (now empty) base/functors.jl --- Makefile | 1 - base/coreimg.jl | 1 - base/functors.jl | 3 --- base/sysimg.jl | 1 - 4 files changed, 6 deletions(-) delete mode 100644 base/functors.jl diff --git a/Makefile b/Makefile index e96fa7ef69e0d..9b82834edbf6d 100644 --- a/Makefile +++ b/Makefile @@ -193,7 +193,6 @@ CORE_SRCS := $(addprefix $(JULIAHOME)/, \ base/essentials.jl \ base/generator.jl \ base/expr.jl \ - base/functors.jl \ base/hashing.jl \ base/inference.jl \ base/int.jl \ diff --git a/base/coreimg.jl b/base/coreimg.jl index 82df77054e6e2..09909f26ec4e9 100644 --- a/base/coreimg.jl +++ b/base/coreimg.jl @@ -56,7 +56,6 @@ include("nofloat_hashing.jl") macro simd(forloop) esc(forloop) end -include("functors.jl") include("reduce.jl") ## core structures diff --git a/base/functors.jl b/base/functors.jl deleted file mode 100644 index b9b34e81c4363..0000000000000 --- a/base/functors.jl +++ /dev/null @@ -1,3 +0,0 @@ -# This file is a part of Julia. 
License is MIT: http://julialang.org/license - -###### Function Objects ("Functors") ###### diff --git a/base/sysimg.jl b/base/sysimg.jl index cf7e55471ae0b..8e468f586fa70 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -48,7 +48,6 @@ include("operators.jl") include("pointer.jl") include("refpointer.jl") (::Type{T}){T}(arg) = convert(T, arg)::T -include("functors.jl") include("checked.jl") importall .Checked From 0429c381d73d815e61a88295faf17d78e3c24689 Mon Sep 17 00:00:00 2001 From: Martin Holters Date: Fri, 8 Apr 2016 15:28:51 +0200 Subject: [PATCH 9/9] Remove functor references from docs --- base/sparse/sparsematrix.jl | 9 --------- doc/devdocs/promote-op.rst | 10 ++++------ 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl index e30634f806956..062287ec41ea7 100644 --- a/base/sparse/sparsematrix.jl +++ b/base/sparse/sparsematrix.jl @@ -462,9 +462,6 @@ This method runs in `O(m, n, length(I))` time. The HALFPERM algorithm described F. Gustavson, "Two fast algorithms for sparse matrices: multiplication and permuted transposition," ACM TOMS 4(3), 250-269 (1978) inspired this method's use of a pair of counting sorts. - -Performance note: As of January 2016, `combine` should be a functor for this method to -perform well. This caveat may disappear when the work in `jb/functions` lands. """ function sparse!{Tv,Ti<:Integer}(I::AbstractVector{Ti}, J::AbstractVector{Ti}, V::AbstractVector{Tv}, m::Integer, n::Integer, combine, klasttouch::Vector{Ti}, @@ -633,9 +630,6 @@ This method implements the HALFPERM algorithm described in F. Gustavson, "Two fa algorithms for sparse matrices: multiplication and permuted transposition," ACM TOMS 4(3), 250-269 (1978). The algorithm runs in `O(A.m, A.n, nnz(A))` time and requires no space beyond that passed in. - -Performance note: As of January 2016, `f` should be a functor for this method to perform - well. This caveat may disappear when the work in `jb/functions` lands. """ function qftranspose!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC{Tv,Ti}, q::AbstractVector, f) # Attach source matrix @@ -741,9 +735,6 @@ and `other` is passed in from the call to `fkeep!`. This method makes a single s through `A`, requiring `O(A.n, nnz(A))`-time for matrices and `O(nnz(A))`-time for vectors and no space beyond that passed in. If `trim` is `true`, this method trims `A.rowval` or `A.nzind` and `A.nzval` to length `nnz(A)` after dropping elements. - -Performance note: As of January 2016, `f` should be a functor for this method to perform -well. This caveat may disappear when the work in `jb/functions` lands. """ function fkeep!(A::SparseMatrixCSC, f, other, trim::Bool = true) An = A.n diff --git a/doc/devdocs/promote-op.rst b/doc/devdocs/promote-op.rst index beb74fe07a56e..53a5a93768549 100644 --- a/doc/devdocs/promote-op.rst +++ b/doc/devdocs/promote-op.rst @@ -25,14 +25,12 @@ just the input types, ``promote_rule`` will be inadequate. 
Fortunately, it's possible to provide such definitions via ``promote_op``:: - Base.promote_op{R,S}(::Base.AddFun, ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),1} - Base.promote_op{R,S}(::Base.MulFun, ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2} - Base.promote_op{R,S}(::Base.DotMulFun, ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2} + Base.promote_op{R,S}(::typeof(+), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),1} + Base.promote_op{R,S}(::typeof(*), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2} + Base.promote_op{R,S}(::typeof(.*), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2} The first one defines the promotion rule for ``+``, and the second one -for ``*``. ``AddFun``, ``MulFun``, and ``DotMulFun`` are "functor -types" defined in `functor.jl -`_. +for ``*``. It's worth noting that as julia's internal representation of functions evolves, this interface may change in a future version of Julia.
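
As a minimal, self-contained sketch of the convention this series converges on (none of this code is part of the patches themselves; `MeterUnits` is a hypothetical unit-carrying number abridged from the `RetTypeDecl` test module touched above, and `PromoteOpSketch` is an arbitrary wrapper module name)::

    # Illustrative sketch only; not part of the patches. It shows the pattern the
    # series establishes: operations are identified by their function objects, so
    # specializations dispatch on `typeof(op)` rather than on functor types.
    module PromoteOpSketch

    import Base: +, *, promote_op

    # Hypothetical unit-carrying number; `P` tracks the power of the unit
    # (1 for meters, 2 for square meters).
    immutable MeterUnits{T,P} <: Number
        val::T
    end

    (+){T}(x::MeterUnits{T,1}, y::MeterUnits{T,1}) = MeterUnits{T,1}(x.val + y.val)
    (*){T}(x::MeterUnits{T,1}, y::MeterUnits{T,1}) = MeterUnits{T,2}(x.val * y.val)

    # Custom result types are declared by dispatching on the functions themselves,
    # replacing the old `::Base.AddFun` / `::Base.MulFun` signatures:
    promote_op{R,S}(::typeof(+), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) =
        MeterUnits{promote_type(R,S),1}
    promote_op{R,S}(::typeof(*), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) =
        MeterUnits{promote_type(R,S),2}

    end # module

With definitions like these, ``promote_op(*, MeterUnits{Float64,1}, MeterUnits{Int,1})`` is expected to yield ``MeterUnits{Float64,2}``. Call sites change in the same spirit: reduction code passes the functions directly, e.g. ``mapreduce(abs2, +, a)`` where ``mapreduce(abs2, AddFun(), a)`` was used before, and the deprecation shims above keep expressions such as ``Base.AddFun()`` working for one release by returning the function ``+`` itself.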