Skip to content

Commit 8d6e093

Browse files
authored
Delete trailing whitespace (#234)
modified: src/Distances.jl modified: src/bregman.jl
1 parent 9e23809 commit 8d6e093

File tree

2 files changed

+20
-20
lines changed

2 files changed

+20
-20
lines changed

src/Distances.jl

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,8 +102,8 @@ export
102102

103103
if VERSION < v"1.2-"
104104
import Base: has_offset_axes
105-
require_one_based_indexing(A...) =
106-
!has_offset_axes(A...) ||
105+
require_one_based_indexing(A...) =
106+
!has_offset_axes(A...) ||
107107
throw(ArgumentError("offset arrays are not supported but got an array with index other than 1"))
108108
else
109109
import Base: require_one_based_indexing

src/bregman.jl

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,14 @@
1-
# Bregman divergence
1+
# Bregman divergence
22

33
"""
44
Implements the Bregman divergence, a friendly introduction to which can be found
5-
[here](http://mark.reid.name/blog/meet-the-bregman-divergences.html).
6-
Bregman divergences are a minimal implementation of the "mean-minimizer" property.
5+
[here](http://mark.reid.name/blog/meet-the-bregman-divergences.html).
6+
Bregman divergences are a minimal implementation of the "mean-minimizer" property.
77
8-
It is assumed that the (convex differentiable) function F maps vectors (of any type or size) to real numbers.
9-
The inner product used is `Base.dot`, but one can be passed in either by defining `inner` or by
10-
passing in a keyword argument. If an analytic gradient isn't available, Julia offers a suite
11-
of good automatic differentiation packages.
8+
It is assumed that the (convex differentiable) function F maps vectors (of any type or size) to real numbers.
9+
The inner product used is `Base.dot`, but one can be passed in either by defining `inner` or by
10+
passing in a keyword argument. If an analytic gradient isn't available, Julia offers a suite
11+
of good automatic differentiation packages.
1212
1313
function evaluate(dist::Bregman, p::AbstractVector, q::AbstractVector)
1414
"""
@@ -18,31 +18,31 @@ struct Bregman{T1 <: Function, T2 <: Function, T3 <: Function} <: PreMetric
1818
inner::T3
1919
end
2020

21-
# Default constructor.
21+
# Default constructor.
2222
Bregman(F, ∇) = Bregman(F, ∇, LinearAlgebra.dot)
2323

24-
# Evaluation function
24+
# Evaluation function
2525
function (dist::Bregman)(p, q)
2626
# Create cache vals.
2727
FP_val = dist.F(p)
28-
FQ_val = dist.F(q)
28+
FQ_val = dist.F(q)
2929
DQ_val = dist.∇(q)
3030
p_size = length(p)
31-
# Check F codomain.
31+
# Check F codomain.
3232
if !(isa(FP_val, Real) && isa(FQ_val, Real))
3333
throw(ArgumentError("F Codomain Error: F doesn't map the vectors to real numbers"))
34-
end
35-
# Check vector size.
34+
end
35+
# Check vector size.
3636
if p_size != length(q)
3737
throw(DimensionMismatch("The vector p ($(size(p))) and q ($(size(q))) are different sizes."))
3838
end
39-
# Check gradient size.
39+
# Check gradient size.
4040
if length(DQ_val) != p_size
4141
throw(DimensionMismatch("The gradient result is not the same size as p and q"))
42-
end
43-
# Return the Bregman divergence.
42+
end
43+
# Return the Bregman divergence.
4444
return FP_val - FQ_val - dist.inner(DQ_val, p .- q)
45-
end
45+
end
4646

47-
# Convenience function.
47+
# Convenience function.
4848
bregman(F, ∇, x, y; inner = LinearAlgebra.dot) = Bregman(F, ∇, inner)(x, y)

0 commit comments

Comments
 (0)