Start adding McCormick objects without (sub)gradients #49

Merged: 15 commits, Jun 3, 2021
3 changes: 3 additions & 0 deletions .github/workflows/ci.yml
@@ -34,6 +34,9 @@ jobs:
- version: '1.4'
os: ubuntu-latest
arch: x64
+ - version: '1.5'
+   os: ubuntu-latest
+   arch: x64
- version: '1'
os: windows-latest
arch: x64
2 changes: 1 addition & 1 deletion README.md
@@ -16,7 +16,7 @@ and `Diff` for differentiable relaxations ([Khan2016](https://link.springer.com/
## **Supported Operators**

In addition, to supporting the implicit relaxation routines of ([Stuber 2015](https://www.tandfonline.com/doi/abs/10.1080/10556788.2014.924514?journalCode=goms20)). This package
- supports the computation of convex/concave relaxations (and asssociated subgradients) for
+ supports the computation of convex/concave relaxations (and associated subgradients) for
expressions containing the following operations:

**Common algebraic expressions**: `inv`, `log`, `log2`, `log10`, `exp`, `exp2`, `exp10`,
91 changes: 90 additions & 1 deletion src/McCormick.jl
@@ -58,7 +58,7 @@ end
import Base.MathConstants.golden

# Export forward operators
- export MC, cc, cv, Intv, lo, hi, cc_grad, cv_grad, cnst, +, -, *, /, convert,
+ export MC, MCNoGrad, cc, cv, Intv, lo, hi, cc_grad, cv_grad, cnst, +, -, *, /, convert,
one, zero, dist, real, eps, mid, exp, exp2, exp10, expm1, log, log2,
log10, log1p, acosh, sqrt, sin, cos, tan, min, max, sec, csc, cot, ^,
abs, step, sign, pow, in, isempty, intersect, length, mid3,
@@ -115,6 +115,8 @@ abstract type RelaxTag end
struct NS <: RelaxTag end
struct MV <: RelaxTag end
struct Diff <: RelaxTag end
const ANYRELAX = Union{NS, MV, Diff}


const MC_ENV_MAX_INT = 100
const MC_ENV_TOL = 1E-10
@@ -389,6 +391,93 @@ function isone(x::MC)
return flag
end

"""
$(TYPEDEF)

`MCNoGrad <: Real` is a McCormick structure without a RelaxType tag or subgradients.
This structure is used for source-code transformation approaches to constructing
McCormick relaxations. Method definitions and calls should specify the
relaxation type used (e.g., `+(::NS, x::MCNoGrad, y::MCNoGrad)`). Moreover,
the kernel associated with a function returns all intermediate calculations
necessary to compute subgradient information, whereas the overloaded calculation
simply returns the `MCNoGrad` object. For univariate calculations without
tiepoints, `log2(::NS, x::MCNoGrad)::MCNoGrad` returns only the relaxation, whereas
`log2_kernel(::NS, x::MCNoGrad, ::Bool) = (::MCNoGrad, cv_id::Int, cc_id::Int, dcv, dcc)`.
Univariate NS functions follow the convention `(MCNoGrad, cv_id, cc_id, dcv, dcc,
tp1cv, tp1cc, ..., tpncv, tpncc)`, where `cv_id` is the subgradient selected
(1 = cv, 2 = cc, 3 = 0), `dcv` and `dcc` are derivatives (or elements of the subdifferential)
of the outside function evaluated per theorem at the point being evaluated, and
`tpicv`, `tpicc` are the ith tiepoints associated with computing the envelope
of the outside function.
$(TYPEDFIELDS)
"""
struct MCNoGrad <: Real
"Convex relaxation"
cv::Float64
"Concave relaxation"
cc::Float64
"Interval bounds"
Intv::Interval{Float64}
"Boolean indicating whether the relaxations are constant over the domain. True if bounding an interval/constant.
False, otherwise. This may change over the course of a calculation `cnst` for `zero(x)` is `true` even if `x.cnst`
is `false`."
cnst::Bool
function MCNoGrad(u::Float64, o::Float64, X::Interval{Float64}, b::Bool)
new(u, o, X, b)
end
end

"""
MCNoGrad(y::Interval{Float64})

Constructs McCormick relaxation with convex relaxation equal to `y.lo` and
concave relaxation equal to `y.hi`.
"""
function MCNoGrad(y::Interval{Float64})
MCNoGrad(y.lo, y.hi, y, true)
end

"""
MCNoGrad(y::Float64)

Constructs McCormick relaxation with convex relaxation equal to `y` and
concave relaxation equal to `y`.
"""
MCNoGrad(y::Float64) = MCNoGrad(Interval{Float64}(y))
function MCNoGrad(y::Y) where Y <: AbstractIrrational
MCNoGrad(Interval{Float64}(y))
end
MCNoGrad(y::Q) where Q <: NumberNotRelax = MCNoGrad(Interval{Float64}(y))

"""
MCNoGrad(cv::Float64, cc::Float64)

Constructs McCormick relaxation with convex relaxation equal to `cv` and
concave relaxation equal to `cc`.
"""
function MCNoGrad(cv::Float64, cc::Float64)
MCNoGrad(cv, cc, Interval{Float64}(cv, cc), true)
end

Intv(x::MCNoGrad) = x.Intv
lo(x::MCNoGrad) = x.Intv.lo
hi(x::MCNoGrad) = x.Intv.hi
cc(x::MCNoGrad) = x.cc
cv(x::MCNoGrad) = x.cv
cnst(x::MCNoGrad) = x.cnst

diam(x::MCNoGrad) = diam(x.Intv)
isthin(x::MCNoGrad) = isthin(x.Intv)

function isone(x::MCNoGrad)
flag = true
flag &= (x.Intv.lo == 1.0)
flag &= (x.Intv.hi == 1.0)
flag &= x.cnst
return flag
end
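
For orientation, here is a minimal usage sketch of the constructors and accessors above. The numeric values are illustrative only, and it assumes the package and its `IntervalArithmetic` dependency (which provides `Interval`) are loaded:

```julia
using McCormick
using IntervalArithmetic: Interval

# A relaxation of a value known to lie in [1, 4], with convex bound 2 and
# concave bound 3. `false` marks the relaxations as non-constant.
x = MCNoGrad(2.0, 3.0, Interval{Float64}(1.0, 4.0), false)

cv(x), cc(x)    # (2.0, 3.0): convex/concave relaxation values
lo(x), hi(x)    # (1.0, 4.0): interval bounds
cnst(x)         # false

# Scalar and interval constructors collapse to thin (degenerate) objects.
y = MCNoGrad(2.5)
isthin(y)             # true
isone(MCNoGrad(1.0))  # true
```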

"""
$(TYPEDSIGNATURES)

2 changes: 2 additions & 0 deletions src/forward_operators/arithmetic.jl
@@ -158,3 +158,5 @@ promote_rule(::Type{MC{N,T}}, ::Type{S}) where {S<:Real, N, T <: RelaxTag} = MC{

convert(::Type{MC{N,T}}, x::S) where {S<:NumberNotRelax, N, T <: RelaxTag} = MC{N,T}(Interval{Float64}(x))
convert(::Type{MC{N,T}}, x::S) where {S<:AbstractInterval, N, T <: RelaxTag} = MC{N,T}(Interval{Float64}(x.lo, x.hi))
Interval(x::MC{N,T}) where {N,T<:RelaxTag} = x.Intv
Interval{Float64}(x::MC{N,T}) where {N,T<:RelaxTag} = x.Intv
4 changes: 4 additions & 0 deletions src/forward_operators/forward.jl
@@ -16,3 +16,7 @@ include("set_bounds.jl")
include("comparison.jl")
include("trilinear.jl")
include("apriori_mult.jl")

include(joinpath(@__DIR__, "no_gradient", "arithmetic.jl"))
include(joinpath(@__DIR__, "no_gradient", "convex_increasing.jl"))
include(joinpath(@__DIR__, "no_gradient", "mixed_convexity.jl"))
180 changes: 180 additions & 0 deletions src/forward_operators/no_gradient/arithmetic.jl
@@ -0,0 +1,180 @@
# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber.
# This code is licensed under MIT license (see LICENSE.md for full details)
#############################################################################
# McCormick.jl
# A McCormick operator library in Julia
# See https://github.com/PSORLab/McCormick.jl
#############################################################################
# src/forward_operators/no_gradient/arithmetic.jl
# Contains definitions of +, -, /, *, promotions, conversion, one, zero.
#############################################################################

# Defines functions required for linear algebra packages
@inline nan(::ANYRELAX, ::Type{MCNoGrad}) = MCNoGrad(NaN, NaN, Interval{Float64}(NaN), true)
@inline nan(::ANYRELAX, x::MCNoGrad) = MCNoGrad(NaN, NaN, Interval{Float64}(NaN), true)

@inline one(::ANYRELAX, ::Type{MCNoGrad}) = MCNoGrad(1.0, 1.0, one(Interval{Float64}), true)
@inline one(::ANYRELAX, x::MCNoGrad) = MCNoGrad(1.0, 1.0, one(Interval{Float64}), true)

@inline zero(::ANYRELAX, ::Type{MCNoGrad}) = MCNoGrad(0.0, 0.0, zero(Interval{Float64}), true)
@inline zero(::T, x::MCNoGrad) where T<:ANYRELAX = zero(T(), MCNoGrad)

@inline real(::ANYRELAX, x::MCNoGrad) = x
@inline dist(::ANYRELAX, x1::MCNoGrad, x2::MCNoGrad) = max(abs(x1.cc - x2.cc), abs(x1.cv - x2.cv))
@inline eps(::ANYRELAX, x::MCNoGrad) = max(eps(x.cc), eps(x.cv))
@inline mid(::ANYRELAX, x::MCNoGrad) = mid(x.Intv)

# Unsafe addition
@inline function plus_kernel(::ANYRELAX, x::MCNoGrad, y::MCNoGrad, z::Interval{Float64})
MCNoGrad(x.cv + y.cv, x.cc + y.cc, z, x.cnst && y.cnst)
end
@inline +(::T, x::MCNoGrad, y::MCNoGrad) where T<:ANYRELAX = plus_kernel(T(), x, y, x.Intv + y.Intv)
@inline plus_kernel(::ANYRELAX, x::MCNoGrad, y::Interval{Float64}) = x
@inline +(::ANYRELAX, x::MCNoGrad) = x

@inline minus_kernel(::ANYRELAX, x::MCNoGrad, z::Interval{Float64}) = MCNoGrad(-x.cc, -x.cv, z, x.cnst)
@inline -(::T, x::MCNoGrad) where T<:ANYRELAX = minus_kernel(T(), x, -x.Intv)
@inline -(::T, x::MCNoGrad, y::MCNoGrad) where T<:ANYRELAX = minus_kernel(T(), x, y, x.Intv - y.Intv)

# Unsafe subtraction
@inline function minus_kernel(::ANYRELAX, x::MCNoGrad, y::MCNoGrad, z::Interval{Float64})
MCNoGrad(x.cv - y.cc, x.cc - y.cv, z, x.cnst && y.cnst)
end
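
A short sketch of how the tag-dispatched addition and subtraction above behave. Values are illustrative; the `NS` tag and `MCNoGrad` constructor come from src/McCormick.jl:

```julia
using McCormick
using IntervalArithmetic: Interval

x = MCNoGrad(1.0, 2.0, Interval{Float64}(0.0, 3.0), false)
y = MCNoGrad(0.5, 1.0, Interval{Float64}(0.0, 2.0), false)

# Addition sums the convex/concave bounds pointwise; the relaxation tag
# is passed explicitly as the first argument.
s = +(NS(), x, y)    # cv = 1.5, cc = 3.0, Intv = [0, 5]

# Subtraction pairs each convex bound with the other operand's concave bound.
d = -(NS(), x, y)    # cv = 1.0 - 1.0 = 0.0, cc = 2.0 - 0.5 = 1.5, Intv = [-2, 3]
```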

################## CONVERT THROUGH BINARY DEFINITIONS #########################
# Unsafe scalar addition
@inline function plus_kernel(::ANYRELAX, x::MCNoGrad, y::Float64, z::Interval{Float64})
MCNoGrad(x.cv + y, x.cc + y, z, x.cnst)
end
@inline +(::T, x::MCNoGrad, y::Float64) where T<:ANYRELAX = plus_kernel(T(), x, y, x.Intv + y)
@inline +(::T, y::Float64, x::MCNoGrad) where T<:ANYRELAX = plus_kernel(T(), x, y, x.Intv + y)
@inline +(::T, x::MCNoGrad, y::Interval{Float64}) where T<:ANYRELAX = +(T(), x, MCNoGrad(y))
@inline +(::T, y::Interval{Float64}, x::MCNoGrad) where T<:ANYRELAX = +(T(), x, MCNoGrad(y))

@inline plus_kernel(::T, x::MCNoGrad, y::C, z::Interval{Float64}) where {T<:ANYRELAX, C <: NumberNotRelax} = plus_kernel(T(), x, convert(Float64, y), z)
@inline plus_kernel(::T, x::C, y::MCNoGrad, z::Interval{Float64}) where {T<:ANYRELAX, C <: NumberNotRelax} = plus_kernel(T(), y, convert(Float64, x), z)
@inline +(::T, x::MCNoGrad, y::C) where {T<:ANYRELAX, C <: NumberNotRelax} = +(T(), x, convert(Float64, y))
@inline +(::T, y::C, x::MCNoGrad) where {T<:ANYRELAX, C <: NumberNotRelax} = +(T(), x, convert(Float64, y))

# Unsafe scalar subtraction
@inline function minus_kernel(::ANYRELAX, x::MCNoGrad, c::Float64, z::Interval{Float64})
MCNoGrad(x.cv - c, x.cc - c, z, x.cnst)
end
@inline function minus_kernel(::ANYRELAX, c::Float64, x::MCNoGrad, z::Interval{Float64})
MCNoGrad(c - x.cc, c - x.cv, z, x.cnst)
end
@inline -(::T, x::MCNoGrad, c::Float64) where T<:ANYRELAX = minus_kernel(T(), x, c, x.Intv - c)
@inline -(::T, c::Float64, x::MCNoGrad) where T<:ANYRELAX = minus_kernel(T(), c, x, c - x.Intv)
@inline -(::T, x::MCNoGrad, y::Interval{Float64}) where T<:ANYRELAX = -(T(), x, MCNoGrad(y))
@inline -(::T, y::Interval{Float64}, x::MCNoGrad) where T<:ANYRELAX = -(T(), MCNoGrad(y), x)

@inline minus_kernel(::T, x::MCNoGrad, y::C, z::Interval{Float64}) where {T<:ANYRELAX, C<:NumberNotRelax} = minus_kernel(T(), x, convert(Float64, y), z)
@inline minus_kernel(::T, y::C, x::MCNoGrad, z::Interval{Float64}) where {T<:ANYRELAX, C<:NumberNotRelax} = minus_kernel(T(), convert(Float64, y), x, z)
@inline -(::T, x::MCNoGrad, c::C) where {T<:ANYRELAX, C <: NumberNotRelax} = -(T(), x, convert(Float64, c))
@inline -(::T, c::C, x::MCNoGrad) where {T<:ANYRELAX, C <: NumberNotRelax} = -(T(), convert(Float64, c), x)

# Unsafe Scalar Multiplication
@inline function mult_kernel(::ANYRELAX, x::MCNoGrad, c::Float64, z::Interval{Float64})
if c >= 0.0
zMC = MCNoGrad(c*x.cv, c*x.cc, z, x.cnst)
else
zMC = MCNoGrad(c*x.cc, c*x.cv, z, x.cnst)
end
return zMC
end
@inline *(::T, x::MCNoGrad, c::Float64) where T <: ANYRELAX = mult_kernel(T(), x, c, c*x.Intv)
@inline *(::T, c::Float64, x::MCNoGrad) where T <: ANYRELAX = mult_kernel(T(), x, c, c*x.Intv)
@inline *(::T, x::MCNoGrad, y::Interval{Float64}) where T<:ANYRELAX = *(T(), x, MCNoGrad(y))
@inline *(::T, y::Interval{Float64}, x::MCNoGrad) where T<:ANYRELAX = *(T(), MCNoGrad(y), x)

@inline mult_kernel(::T, x::MCNoGrad, c::C, z::Interval{Float64}) where {T<:ANYRELAX, C<:NumberNotRelax} = mult_kernel(T(), x, convert(Float64, c), z)
@inline mult_kernel(::T, c::C, x::MCNoGrad, z::Interval{Float64}) where {T<:ANYRELAX, C<:NumberNotRelax} = mult_kernel(T(), x, convert(Float64, c), z)
@inline *(::T, c::C, x::MCNoGrad) where {T<:ANYRELAX, C <: NumberNotRelax} = *(T(), x, convert(Float64, c))
@inline *(::T, x::MCNoGrad, c::C) where {T<:ANYRELAX, C <: NumberNotRelax} = *(T(), x, convert(Float64, c))
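
The sign check in `mult_kernel` is what preserves the ordering `cv <= cc` under scaling; a small illustrative sketch:

```julia
using McCormick
using IntervalArithmetic: Interval

x = MCNoGrad(1.0, 2.0, Interval{Float64}(0.0, 3.0), false)

# A nonnegative scalar scales both bounds in place ...
p = *(NS(), x, 2.0)     # cv = 2.0, cc = 4.0, Intv = [0, 6]

# ... while a negative scalar swaps the convex and concave roles.
q = *(NS(), x, -2.0)    # cv = -4.0, cc = -2.0, Intv = [-6, 0]
```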

# Unsafe scalar division
@inline function div_kernel(::T, x::MCNoGrad, y::Float64, z::Interval{Float64}) where T<:ANYRELAX
    mult_kernel(T(), x, inv(y), z)
end
@inline function div_kernel(::T, x::Float64, y::MCNoGrad, z::Interval{Float64}) where T<:ANYRELAX
    mult_kernel(T(), inv(T(), y), x, z)
end
@inline function div_kernel(::T, x::MCNoGrad, y::C, z::Interval{Float64}) where {C<:NumberNotRelax, T<:ANYRELAX}
    mult_kernel(T(), x, inv(convert(Float64, y)), z)
end
@inline function div_kernel(::T, x::C, y::MCNoGrad, z::Interval{Float64}) where {C<:NumberNotRelax, T<:ANYRELAX}
    mult_kernel(T(), inv(T(), y), convert(Float64, x), z)
end
@inline /(::T, x::MCNoGrad, y::Float64) where T<:ANYRELAX = *(T(), x, inv(y))
@inline /(::T, x::Float64, y::MCNoGrad) where T<:ANYRELAX = *(T(), x, inv(T(), y))
@inline /(::T, x::MCNoGrad, y::C) where {C<:NumberNotRelax, T<:ANYRELAX} = *(T(), x, inv(convert(Float64, y)))
@inline /(::T, x::C, y::MCNoGrad) where {C<:NumberNotRelax, T<:ANYRELAX} = *(T(), convert(Float64, x), inv(T(), y))
@inline /(::T, x::MCNoGrad, y::Interval{Float64}) where T<:ANYRELAX = /(T(), x, MCNoGrad(y))
@inline /(::T, y::Interval{Float64}, x::MCNoGrad) where T<:ANYRELAX = /(T(), MCNoGrad(y), x)

# Maximization
@inline function max_kernel(::T, c::Float64, x::MCNoGrad, z::Interval{Float64}) where T<:ANYRELAX
max_kernel(T(), x, c, z)
end
@inline function max_kernel(::T, x::MCNoGrad, c::C, z::Interval{Float64}) where {C<:NumberNotRelax, T<:ANYRELAX}
max_kernel(T(), x, convert(Float64, c), z)
end
@inline function max_kernel(::T, c::C, x::MCNoGrad, z::Interval{Float64}) where {C<:NumberNotRelax, T<:ANYRELAX}
max_kernel(T(), x, convert(Float64, c), z)
end

@inline function max(::T, c::Float64, x::MCNoGrad) where T<:ANYRELAX
max_kernel(T(), x, c, max(x.Intv, c))
end
@inline function max(::T, x::MCNoGrad, c::C) where {C<:NumberNotRelax, T<:ANYRELAX}
max_kernel(T(), x, convert(Float64, c), max(x.Intv, c))
end
@inline function max(::T, c::C, x::MCNoGrad) where {C<:NumberNotRelax, T<:ANYRELAX}
max_kernel(T(), x, convert(Float64, c), max(x.Intv, c))
end
@inline max(::T, x::MCNoGrad, y::Interval{Float64}) where T<:ANYRELAX = max(T(), x, MCNoGrad(y))
@inline max(::T, y::Interval{Float64}, x::MCNoGrad) where T<:ANYRELAX = max(T(), MCNoGrad(y), x)

# Minimization
@inline function min_kernel(::T, x::MCNoGrad, c::C, z::Interval{Float64}) where {C<:NumberNotRelax, T<:ANYRELAX}
min_kernel(T(), x, convert(Float64, c), z)
end
@inline function min_kernel(::T, c::C, x::MCNoGrad, z::Interval{Float64}) where {C<:NumberNotRelax, T<:ANYRELAX}
min_kernel(T(), x, convert(Float64, c), z)
end

@inline min(::T, c::Float64, x::MCNoGrad) where T<:ANYRELAX = min_kernel(T(), x, c, min(x.Intv, c))
@inline function min(::T, x::MCNoGrad, c::C) where {C<:NumberNotRelax, T<:ANYRELAX}
min_kernel(T(), x, convert(Float64, c), min(x.Intv, c))
end
@inline function min(::T, c::C, x::MCNoGrad) where {C<:NumberNotRelax, T<:ANYRELAX}
min_kernel(T(), x, convert(Float64, c), min(x.Intv, c))
end
@inline min(::T, x::MCNoGrad, y::Interval{Float64}) where T<:ANYRELAX = min(T(), x, MCNoGrad(y))
@inline min(::T, y::Interval{Float64}, x::MCNoGrad) where T<:ANYRELAX = min(T(), MCNoGrad(y), x)

# Add fma function
@inline fma(::T, x::MCNoGrad, y::Float64, z::Float64) where T<:ANYRELAX = +(T(), *(T(), x, y), z)
@inline fma(::T, x::MCNoGrad, y::MCNoGrad, z::Float64) where T<:ANYRELAX = +(T(), *(T(), x, y), z)
@inline fma(::T, x::MCNoGrad, y::Float64, z::MCNoGrad) where T<:ANYRELAX = +(T(), *(T(), x, y), z)
@inline fma(::T, x::MCNoGrad, y::MCNoGrad, z::MCNoGrad) where T<:ANYRELAX = +(T(), *(T(), x, y), z)
@inline fma(::T, x::Float64, y::MCNoGrad, z::Float64) where T<:ANYRELAX = +(T(), *(T(), x, y), z)
@inline fma(::T, x::Float64, y::MCNoGrad, z::MCNoGrad) where T<:ANYRELAX = +(T(), *(T(), x, y), z)
@inline fma(::T, x::Float64, y::Float64, z::MCNoGrad) where T<:ANYRELAX = +(T(), x*y, z)

@inline fma(::T, x::MCNoGrad, y::Float64, z::Float64, q::Interval{Float64}) where T<:ANYRELAX = fma(T(), x, y, z)
@inline fma(::T, x::MCNoGrad, y::MCNoGrad, z::Float64, q::Interval{Float64}) where T<:ANYRELAX = fma(T(), x, y, z)
@inline fma(::T, x::MCNoGrad, y::Float64, z::MCNoGrad, q::Interval{Float64}) where T<:ANYRELAX = fma(T(), x, y, z)
@inline fma(::T, x::MCNoGrad, y::MCNoGrad, z::MCNoGrad, q::Interval{Float64}) where T<:ANYRELAX = fma(T(), x, y, z)
@inline fma(::T, x::Float64, y::MCNoGrad, z::Float64, q::Interval{Float64}) where T<:ANYRELAX = fma(T(), x, y, z)
@inline fma(::T, x::Float64, y::MCNoGrad, z::MCNoGrad, q::Interval{Float64}) where T<:ANYRELAX = fma(T(), x, y, z)
@inline fma(::T, x::Float64, y::Float64, z::MCNoGrad, q::Interval{Float64}) where T<:ANYRELAX = fma(T(), x, y, z)

# Promote and Convert
promote_rule(::Type{MCNoGrad}, ::Type{S}) where {S<:NumberNotRelax} = MCNoGrad
promote_rule(::Type{MCNoGrad}, ::Type{S}) where {S<:Real} = MCNoGrad

convert(::Type{MCNoGrad}, x::S) where {S<:NumberNotRelax} = MCNoGrad(Interval{Float64}(x))
convert(::Type{MCNoGrad}, x::S) where {S<:AbstractInterval} = MCNoGrad(Interval{Float64}(x.lo, x.hi))
Interval(x::MCNoGrad) = x.Intv
Interval{Float64}(x::MCNoGrad) = x.Intv
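
A brief sketch of what these conversion rules enable (illustrative; assumes the package is loaded and that `NumberNotRelax` covers the standard numeric types, per the package):

```julia
using McCormick
using IntervalArithmetic: Interval

convert(MCNoGrad, 2)                             # thin object: cv = cc = 2.0, Intv = [2, 2]
convert(MCNoGrad, Interval{Float64}(1.0, 3.0))   # cv = 1.0, cc = 3.0
Interval(MCNoGrad(1.0, 2.0))                     # recovers the stored interval [1, 2]
```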
66 changes: 66 additions & 0 deletions src/forward_operators/no_gradient/convex_increasing.jl
@@ -0,0 +1,66 @@
# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber.
# This code is licensed under MIT license (see LICENSE.md for full details)
#############################################################################
# McCormick.jl
# A McCormick operator library in Julia
# See https://github.com/PSORLab/McCormick.jl
#############################################################################
# src/forward_operators/no_gradient/convex_increasing.jl
# Contains definitions of exp, exp2, exp10, expm1.
#############################################################################

for f in (:exp, :exp2, :exp10, :expm1)
    f_kernel = Symbol(String(f)*"_kernel")
    df = diffrule(:Base, f, :mcv)   # Replace with cv ruleset
    @eval begin
        function ($f)(::Union{NS,MV}, x::MCNoGrad)
            X = x.Intv; xL = x.Intv.lo; xU = x.Intv.hi
            z = ($f)(X); zL = z.lo; zU = z.hi
            mcc = mid3v(x.cv, x.cc, xU)
            mcv = mid3v(x.cv, x.cc, xL)
            zcc = zU > zL ? (zL*(xU - mcc) + zU*(mcc - xL))/(xU - xL) : zU
            zcv = ($f)(mcv)
            (zL > zcv) && (zcv = zL)
            (zU < zcc) && (zcc = zU)
            return MCNoGrad(zcv, zcc, z, x.cnst)
        end
        function ($f)(::Diff, x::MCNoGrad)
            X = x.Intv; xL = x.Intv.lo; xU = x.Intv.hi
            z = ($f)(X); zL = z.lo; zU = z.hi
            mcc = mid3v(x.cv, x.cc, xU)
            mcv = mid3v(x.cv, x.cc, xL)
            zcc = zU > zL ? (zL*(xU - mcc) + zU*(mcc - xL))/(xU - xL) : zU
            zcv = ($f)(mcv)
            return MCNoGrad(zcv, zcc, z, x.cnst)
        end
        function ($f_kernel)(::Union{NS,MV}, x::MCNoGrad, z::Interval{Float64})
            xL = x.Intv.lo; xU = x.Intv.hi; zL = z.lo; zU = z.hi
            mcc, cci = mid3(x.cc, x.cv, xU)
            mcv, cvi = mid3(x.cc, x.cv, xL)
            zcc = zU > zL ? (zL*(xU - mcc) + zU*(mcc - xL))/(xU - xL) : zU
            zcv = ($f)(mcv)
            if zL > zcv
                cvi = 3
                dcv = 0.0
                zcv = zL
            else
                dcv = $df
            end
            if zU < zcc
                cci = 3
                dcc = 0.0
                zcc = zU
            else
                dcc = (zU - zL)/(xU - xL)
            end
            return MCNoGrad(zcv, zcc, z, x.cnst), cvi, cci, dcv, dcc
        end
        function ($f_kernel)(::Diff, x::MCNoGrad, z::Interval{Float64})
            xL = x.Intv.lo; xU = x.Intv.hi; zL = z.lo; zU = z.hi
            mcc, cci = mid3(x.cc, x.cv, xU)
            mcv, cvi = mid3(x.cc, x.cv, xL)
            zcc = zU > zL ? (zL*(xU - mcc) + zU*(mcc - xL))/(xU - xL) : zU
            zcv = ($f)(mcv)
            dcv = $df
            dcc = (zU - zL)/(xU - xL)
            return MCNoGrad(zcv, zcc, z, x.cnst), cvi, cci, dcv, dcc
        end
    end
end
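
To make the generated methods concrete, a hedged sketch of calling the relaxation and kernel forms for `exp` (values illustrative; assumes these no-gradient files are included by the package):

```julia
using McCormick
using IntervalArithmetic: Interval

x = MCNoGrad(0.5, 0.5, Interval{Float64}(0.0, 1.0), false)

# Overloaded form: returns only the relaxed object.
y = exp(NS(), x)
cv(y) <= exp(0.5) <= cc(y)   # true: the relaxations bracket the true value

# Kernel form: additionally returns subgradient bookkeeping, i.e. the
# selected branches cvi/cci (1 = cv, 2 = cc, 3 = 0) and derivatives dcv/dcc.
z, cvi, cci, dcv, dcc = exp_kernel(NS(), x, exp(x.Intv))
```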