Skip to content

Commit 411ba0e

Browse files
committed
Patched most deprecations and blockers
1 parent f51ee46 commit 411ba0e

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

43 files changed

+596
-739
lines changed

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ if(ENABLE_PYTHON_SUPPORT)
118118
GIT_REPOSITORY
119119
git://github.com/pvieito/PythonKit
120120
GIT_TAG
121-
master
121+
6a05a15
122122
CMAKE_ARGS
123123
-D BUILD_SHARED_LIBS=YES
124124
-D CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}

Sources/TensorFlow/BackwardsCompatibility.swift

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@ import _Differentiation
2323
/// - Parameters:
2424
/// - predicted: Predicted outputs from a neural network.
2525
/// - expected: Expected values, i.e. targets, that correspond to the correct output.
26-
@differentiable(wrt: predicted)
27-
@differentiable(wrt: (predicted, expected))
26+
@differentiable(reverse, wrt: predicted)
27+
@differentiable(reverse, wrt: (predicted, expected))
2828
public func l1Loss<Scalar: TensorFlowFloatingPoint>(
2929
predicted: Tensor<Scalar>,
3030
expected: Tensor<Scalar>
@@ -37,8 +37,8 @@ public func l1Loss<Scalar: TensorFlowFloatingPoint>(
3737
/// - Parameters:
3838
/// - predicted: Predicted outputs from a neural network.
3939
/// - expected: Expected values, i.e. targets, that correspond to the correct output.
40-
@differentiable(wrt: predicted)
41-
@differentiable(wrt: (predicted, expected))
40+
@differentiable(reverse, wrt: predicted)
41+
@differentiable(reverse, wrt: (predicted, expected))
4242
public func l2Loss<Scalar: TensorFlowFloatingPoint>(
4343
predicted: Tensor<Scalar>,
4444
expected: Tensor<Scalar>
@@ -51,8 +51,8 @@ public func l2Loss<Scalar: TensorFlowFloatingPoint>(
5151
/// - Parameters:
5252
/// - predicted: Predicted outputs from a neural network.
5353
/// - expected: Expected values, i.e. targets, that correspond to the correct output.
54-
@differentiable(wrt: predicted)
55-
@differentiable(wrt: (predicted, expected))
54+
@differentiable(reverse, wrt: predicted)
55+
@differentiable(reverse, wrt: (predicted, expected))
5656
public func hingeLoss<Scalar: TensorFlowFloatingPoint>(
5757
predicted: Tensor<Scalar>,
5858
expected: Tensor<Scalar>
@@ -65,8 +65,8 @@ public func hingeLoss<Scalar: TensorFlowFloatingPoint>(
6565
/// - Parameters:
6666
/// - predicted: Predicted outputs from a neural network.
6767
/// - expected: Expected values, i.e. targets, that correspond to the correct output.
68-
@differentiable(wrt: predicted)
69-
@differentiable(wrt: (predicted, expected))
68+
@differentiable(reverse, wrt: predicted)
69+
@differentiable(reverse, wrt: (predicted, expected))
7070
public func squaredHingeLoss<Scalar: TensorFlowFloatingPoint>(
7171
predicted: Tensor<Scalar>,
7272
expected: Tensor<Scalar>
@@ -79,8 +79,8 @@ public func squaredHingeLoss<Scalar: TensorFlowFloatingPoint>(
7979
/// - Parameters:
8080
/// - predicted: Predicted outputs from a neural network.
8181
/// - expected: Expected values, i.e. targets, that correspond to the correct output.
82-
@differentiable(wrt: predicted)
83-
@differentiable(wrt: (predicted, expected))
82+
@differentiable(reverse, wrt: predicted)
83+
@differentiable(reverse, wrt: (predicted, expected))
8484
public func categoricalHingeLoss<Scalar: TensorFlowFloatingPoint>(
8585
predicted: Tensor<Scalar>,
8686
expected: Tensor<Scalar>
@@ -94,8 +94,8 @@ public func categoricalHingeLoss<Scalar: TensorFlowFloatingPoint>(
9494
/// - Parameters:
9595
/// - predicted: Predicted outputs from a neural network.
9696
/// - expected: Expected values, i.e. targets, that correspond to the correct output.
97-
@differentiable(wrt: predicted)
98-
@differentiable(wrt: (predicted, expected))
97+
@differentiable(reverse, wrt: predicted)
98+
@differentiable(reverse, wrt: (predicted, expected))
9999
public func logCoshLoss<Scalar: TensorFlowFloatingPoint>(
100100
predicted: Tensor<Scalar>,
101101
expected: Tensor<Scalar>
@@ -108,8 +108,8 @@ public func logCoshLoss<Scalar: TensorFlowFloatingPoint>(
108108
/// - Parameters:
109109
/// - predicted: Predicted outputs from a neural network.
110110
/// - expected: Expected values, i.e. targets, that correspond to the correct output.
111-
@differentiable(wrt: predicted)
112-
@differentiable(wrt: (predicted, expected))
111+
@differentiable(reverse, wrt: predicted)
112+
@differentiable(reverse, wrt: (predicted, expected))
113113
public func poissonLoss<Scalar: TensorFlowFloatingPoint>(
114114
predicted: Tensor<Scalar>,
115115
expected: Tensor<Scalar>
@@ -123,8 +123,8 @@ public func poissonLoss<Scalar: TensorFlowFloatingPoint>(
123123
/// - Parameters:
124124
/// - predicted: Predicted outputs from a neural network.
125125
/// - expected: Expected values, i.e. targets, that correspond to the correct output.
126-
@differentiable(wrt: predicted)
127-
@differentiable(wrt: (predicted, expected))
126+
@differentiable(reverse, wrt: predicted)
127+
@differentiable(reverse, wrt: (predicted, expected))
128128
public func kullbackLeiblerDivergence<Scalar: TensorFlowFloatingPoint>(
129129
predicted: Tensor<Scalar>,
130130
expected: Tensor<Scalar>
@@ -137,7 +137,7 @@ public func kullbackLeiblerDivergence<Scalar: TensorFlowFloatingPoint>(
137137
/// - Parameters:
138138
/// - logits: One-hot encoded outputs from a neural network.
139139
/// - labels: Indices (zero-indexed) of the correct outputs.
140-
@differentiable(wrt: logits)
140+
@differentiable(reverse, wrt: logits)
141141
public func softmaxCrossEntropy<Scalar: TensorFlowFloatingPoint>(
142142
logits: Tensor<Scalar>,
143143
probabilities: Tensor<Scalar>
@@ -149,8 +149,8 @@ public func softmaxCrossEntropy<Scalar: TensorFlowFloatingPoint>(
149149
/// - Parameters:
150150
/// - logits: The unscaled output of a neural network.
151151
/// - labels: Integer values that correspond to the correct output.
152-
@differentiable(wrt: logits)
153-
@differentiable(wrt: (logits, labels))
152+
@differentiable(reverse, wrt: logits)
153+
@differentiable(reverse, wrt: (logits, labels))
154154
public func sigmoidCrossEntropy<Scalar: TensorFlowFloatingPoint>(
155155
logits: Tensor<Scalar>,
156156
labels: Tensor<Scalar>

Sources/TensorFlow/Core/DifferentialOperators.swift

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,10 @@ import _Differentiation
2323
@inlinable
2424
public func valueWithGradient<T, R>(
2525
at x: T,
26-
in f: @differentiable (T) -> Tensor<R>
26+
in f: @differentiable(reverse) (T) -> Tensor<R>
2727
) -> (value: Tensor<R>, gradient: T.TangentVector)
2828
where T: Differentiable, R: TensorFlowFloatingPoint {
29-
let (y, pullback) = valueWithPullback(at: x, in: f)
29+
let (y, pullback) = valueWithPullback(at: x, of: f)
3030
precondition(
3131
y.rank == 0,
3232
"""
@@ -40,10 +40,10 @@ where T: Differentiable, R: TensorFlowFloatingPoint {
4040
public func valueWithGradient<T, U, R>(
4141
at x: T,
4242
_ y: U,
43-
in f: @differentiable (T, U) -> Tensor<R>
43+
in f: @differentiable(reverse) (T, U) -> Tensor<R>
4444
) -> (value: Tensor<R>, gradient: (T.TangentVector, U.TangentVector))
4545
where T: Differentiable, U: Differentiable, R: TensorFlowFloatingPoint {
46-
let (y, pullback) = valueWithPullback(at: x, y, in: f)
46+
let (y, pullback) = valueWithPullback(at: x, y, of: f)
4747
precondition(
4848
y.rank == 0,
4949
"""
@@ -58,10 +58,10 @@ public func valueWithGradient<T, U, V, R>(
5858
at x: T,
5959
_ y: U,
6060
_ z: V,
61-
in f: @differentiable (T, U, V) -> Tensor<R>
61+
in f: @differentiable(reverse) (T, U, V) -> Tensor<R>
6262
) -> (value: Tensor<R>, gradient: (T.TangentVector, U.TangentVector, V.TangentVector))
6363
where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloatingPoint {
64-
let (y, pullback) = valueWithPullback(at: x, y, z, in: f)
64+
let (y, pullback) = valueWithPullback(at: x, y, z, of: f)
6565
precondition(y.rank == 0)
6666
return (y, pullbackOfOneLikeY(y: y, pullback: pullback))
6767
}
@@ -70,23 +70,23 @@ where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloa
7070

7171
@inlinable
7272
public func valueWithGradient<T, R>(
73-
of f: @escaping @differentiable (T) -> Tensor<R>
73+
of f: @escaping @differentiable(reverse) (T) -> Tensor<R>
7474
) -> (T) -> (value: Tensor<R>, gradient: T.TangentVector)
7575
where T: Differentiable, R: TensorFlowFloatingPoint {
7676
return { x in valueWithGradient(at: x, in: f) }
7777
}
7878

7979
@inlinable
8080
public func valueWithGradient<T, U, R>(
81-
of f: @escaping @differentiable (T, U) -> Tensor<R>
81+
of f: @escaping @differentiable(reverse) (T, U) -> Tensor<R>
8282
) -> (T, U) -> (value: Tensor<R>, gradient: (T.TangentVector, U.TangentVector))
8383
where T: Differentiable, U: Differentiable, R: TensorFlowFloatingPoint {
8484
return { x, y in valueWithGradient(at: x, y, in: f) }
8585
}
8686

8787
@inlinable
8888
public func valueWithGradient<T, U, V, R>(
89-
of f: @escaping @differentiable (T, U, V) -> Tensor<R>
89+
of f: @escaping @differentiable(reverse) (T, U, V) -> Tensor<R>
9090
) -> (T, U, V) -> (
9191
value: Tensor<R>,
9292
gradient: (T.TangentVector, U.TangentVector, V.TangentVector)
@@ -100,7 +100,7 @@ where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloa
100100
@inlinable
101101
public func gradient<T, R>(
102102
at x: T,
103-
in f: @differentiable (T) -> Tensor<R>
103+
in f: @differentiable(reverse) (T) -> Tensor<R>
104104
) -> T.TangentVector where T: Differentiable, R: TensorFlowFloatingPoint {
105105
return valueWithGradient(at: x, in: f).1
106106
}
@@ -109,7 +109,7 @@ public func gradient<T, R>(
109109
public func gradient<T, U, R>(
110110
at x: T,
111111
_ y: U,
112-
in f: @differentiable (T, U) -> Tensor<R>
112+
in f: @differentiable(reverse) (T, U) -> Tensor<R>
113113
) -> (T.TangentVector, U.TangentVector)
114114
where T: Differentiable, U: Differentiable, R: TensorFlowFloatingPoint {
115115
return valueWithGradient(at: x, y, in: f).1
@@ -120,7 +120,7 @@ public func gradient<T, U, V, R>(
120120
at x: T,
121121
_ y: U,
122122
_ z: V,
123-
in f: @differentiable (T, U, V) -> Tensor<R>
123+
in f: @differentiable(reverse) (T, U, V) -> Tensor<R>
124124
) -> (T.TangentVector, U.TangentVector, V.TangentVector)
125125
where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloatingPoint {
126126
return valueWithGradient(at: x, y, z, in: f).1
@@ -130,22 +130,22 @@ where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloa
130130

131131
@inlinable
132132
public func gradient<T, R>(
133-
of f: @escaping @differentiable (T) -> Tensor<R>
133+
of f: @escaping @differentiable(reverse) (T) -> Tensor<R>
134134
) -> (T) -> T.TangentVector where T: Differentiable, R: TensorFlowFloatingPoint {
135135
return { x in gradient(at: x, in: f) }
136136
}
137137

138138
@inlinable
139139
public func gradient<T, U, R>(
140-
of f: @escaping @differentiable (T, U) -> Tensor<R>
140+
of f: @escaping @differentiable(reverse) (T, U) -> Tensor<R>
141141
) -> (T, U) -> (T.TangentVector, U.TangentVector)
142142
where T: Differentiable, U: Differentiable, R: TensorFlowFloatingPoint {
143143
return { x, y in gradient(at: x, y, in: f) }
144144
}
145145

146146
@inlinable
147147
public func gradient<T, U, V, R>(
148-
of f: @escaping @differentiable (T, U, V) -> Tensor<R>
148+
of f: @escaping @differentiable(reverse) (T, U, V) -> Tensor<R>
149149
) -> (T, U, V) -> (T.TangentVector, U.TangentVector, V.TangentVector)
150150
where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloatingPoint {
151151
return { x, y, z in gradient(at: x, y, z, in: f) }

Sources/TensorFlow/Core/MixedPrecision.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ extension Tensor {
153153

154154
/// Promotes a scalar to a tensor with the same device and precision as the given tensor.
155155
// TODO (SR-12968): Mark `tensor` with `@noDerivative` and remove custom vjp below.
156-
@differentiable(where Scalar: TensorFlowFloatingPoint)
156+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
157157
public init(_ value: Scalar, deviceAndPrecisionLike tensor: Tensor) {
158158
let device = tensor.device
159159
let tmp = Tensor(value, on: device)

Sources/TensorFlow/Core/Tensor.swift

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -40,12 +40,12 @@ public struct Tensor<Scalar: TensorFlowScalar> {
4040
@usableFromInline
4141
internal var _isScalarZero = false
4242

43-
/// An internal workaround for SR-13263: debug info generation crash.
44-
@usableFromInline
45-
class SR13263Workaround {}
43+
// /// An internal workaround for SR-13263: debug info generation crash.
44+
// @usableFromInline
45+
// class SR13263Workaround {}
4646

47-
/// An internal workaround for SR-13263: debug info generation crash.
48-
internal var _sr13263Workaround: SR13263Workaround?
47+
// /// An internal workaround for SR-13263: debug info generation crash.
48+
// internal var _sr13263Workaround: SR13263Workaround?
4949

5050
@inlinable
5151
public init(handle: TensorHandle<Scalar>) {
@@ -132,7 +132,7 @@ extension Tensor {
132132
/// Reshape to scalar.
133133
/// - Precondition: The tensor has exactly one scalar.
134134
@inlinable
135-
@differentiable(where Scalar: TensorFlowFloatingPoint)
135+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
136136
public func scalarized() -> Scalar {
137137
precondition(
138138
shape.contiguousSize == 1,
@@ -174,7 +174,7 @@ extension Tensor {
174174
return handle.makeHostCopy()
175175
}
176176

177-
@differentiable(where Scalar: TensorFlowFloatingPoint)
177+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
178178
public var scalars: [Scalar] {
179179
if handle.backend == .XLA {
180180
let (storage, _) = xlaTensor.fetchTensorValues(Scalar.self)
@@ -203,7 +203,7 @@ extension Tensor where Scalar: TensorFlowFloatingPoint {
203203

204204
extension Tensor {
205205
/// Creates a 0-D tensor from a scalar value.
206-
@differentiable(where Scalar: TensorFlowFloatingPoint)
206+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
207207
public init(_ value: Scalar, on device: Device = .default) {
208208
switch device.backend {
209209
case .XLA:
@@ -227,7 +227,7 @@ extension Tensor where Scalar: TensorFlowFloatingPoint {
227227
extension Tensor {
228228
/// Creates a 1D tensor from scalars.
229229
@inlinable
230-
@differentiable(where Scalar: TensorFlowFloatingPoint)
230+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
231231
public init(_ scalars: [Scalar], on device: Device = .default) {
232232
self.init(shape: [scalars.count], scalars: scalars, on: device)
233233
}
@@ -247,7 +247,7 @@ extension Tensor {
247247
/// - scalars: The scalar contents of the tensor.
248248
/// - Precondition: The product of the dimensions of the shape must equal the number of scalars.
249249
@inlinable
250-
@differentiable(where Scalar: TensorFlowFloatingPoint)
250+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
251251
public init(shape: TensorShape, scalars: [Scalar], on device: Device = .default) {
252252
precondition(
253253
shape.contiguousSize == scalars.count,
@@ -628,7 +628,7 @@ extension Tensor: AdditiveArithmetic where Scalar: Numeric {
628628
/// Adds two tensors and produces their sum.
629629
/// - Note: `+` supports broadcasting.
630630
@inlinable
631-
@differentiable(where Scalar: TensorFlowFloatingPoint)
631+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
632632
public static func + (lhs: Tensor, rhs: Tensor) -> Tensor {
633633
if lhs._isScalarZero {
634634
return rhs
@@ -641,7 +641,7 @@ extension Tensor: AdditiveArithmetic where Scalar: Numeric {
641641
/// Subtracts one tensor from another and produces their difference.
642642
/// - Note: `-` supports broadcasting.
643643
@inlinable
644-
@differentiable(where Scalar: TensorFlowFloatingPoint)
644+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
645645
public static func - (lhs: Tensor, rhs: Tensor) -> Tensor {
646646
if rhs._isScalarZero {
647647
return lhs
@@ -745,7 +745,7 @@ public protocol TensorProtocol {
745745
public protocol DifferentiableTensorProtocol:
746746
TensorProtocol & Differentiable & EuclideanDifferentiable
747747
where Scalar: TensorFlowFloatingPoint {
748-
@differentiable(wrt: self)
748+
@differentiable(reverse, wrt: self)
749749
func annotate(_ annotation: String) -> Self
750750
}
751751

@@ -773,7 +773,7 @@ where Scalar: TensorFlowFloatingPoint {
773773
///
774774
/// - Parameter annotation: The annotation to be added.
775775
/// - Returns: The annotated tensor.
776-
@differentiable(wrt: self)
776+
@differentiable(reverse, wrt: self)
777777
public func annotate(_ annotation: String) -> Tensor<Scalar> {
778778
switch handle.backend {
779779
case .XLA:

Sources/TensorFlow/Initializers.swift

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ extension Tensor {
3636
/// - repeatedValue: The scalar value to repeat.
3737
/// - shape: The dimensions of the tensor.
3838
@inlinable
39-
@differentiable(where Scalar: TensorFlowFloatingPoint)
39+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
4040
public init(
4141
repeating repeatedValue: Scalar, shape: TensorShape,
4242
on device: Device = .default
@@ -49,7 +49,7 @@ extension Tensor {
4949
/// Creates a tensor by broadcasting the given scalar to a given rank with
5050
/// all dimensions being 1.
5151
@inlinable
52-
// @differentiable(where Scalar: TensorFlowFloatingPoint)
52+
// @differentiable(reverse where Scalar: TensorFlowFloatingPoint)
5353
public init(broadcasting scalar: Scalar, rank: Int, on device: Device = .default) {
5454
self = Tensor(scalar, on: device).reshaped(to: TensorShape(repeating: 1, count: rank))
5555
}
@@ -93,7 +93,7 @@ extension Tensor where Scalar: Numeric {
9393

9494
/// Perform an element-wise conversion from another `Tensor`.
9595
@inlinable
96-
@differentiable(where Scalar: TensorFlowFloatingPoint, OtherScalar: TensorFlowFloatingPoint)
96+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint, OtherScalar: TensorFlowFloatingPoint)
9797
public init<OtherScalar: Numeric>(_ other: Tensor<OtherScalar>) {
9898
self = _Raw.cast(other)
9999
}
@@ -116,7 +116,7 @@ extension Tensor where Scalar: TensorFlowFloatingPoint {
116116
extension Tensor {
117117
/// Creates a tensor from an array of tensors (which may themselves be scalars).
118118
@inlinable
119-
@differentiable(where Scalar: TensorFlowFloatingPoint)
119+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
120120
public init(_ elements: [Tensor]) {
121121
self = _Raw.pack(elements)
122122
}
@@ -150,7 +150,7 @@ extension Tensor {
150150
///
151151
/// - Returns: The stacked tensor.
152152
@inlinable
153-
@differentiable(where Scalar: TensorFlowFloatingPoint)
153+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
154154
public init(stacking tensors: [Tensor], alongAxis axis: Int = 0) {
155155
self = _Raw.pack(tensors, axis: Int64(axis))
156156
}
@@ -188,7 +188,7 @@ extension Tensor {
188188
///
189189
/// - Returns: The concatenated tensor.
190190
@inlinable
191-
@differentiable(where Scalar: TensorFlowFloatingPoint)
191+
@differentiable(reverse where Scalar: TensorFlowFloatingPoint)
192192
public init(concatenating tensors: [Tensor], alongAxis axis: Int = 0) {
193193
precondition(tensors.count > 0)
194194
self = _Raw.concatV2(tensors, axis: Tensor<Int32>(Int32(axis), on: tensors.first!.device))

0 commit comments

Comments
 (0)