@@ -219,6 +219,7 @@ extension Complex /*: ElementaryFunctions */ {
219
219
}
220
220
221
221
// MARK: - log-like functions
222
+ @inlinable
222
223
public static func log( _ z: Complex ) -> Complex {
223
224
// If z is zero or infinite, the phase is undefined, so the result is
224
225
// the single exceptional value.
@@ -233,8 +234,68 @@ extension Complex /*: ElementaryFunctions */ {
233
234
return Complex ( . log( z. magnitude) + . log( w. lengthSquared) / 2 , θ)
234
235
}
235
236
237
  /// The complex logarithm of `1 + z`, computed so that accuracy is
  /// maintained even when `z` is small (where naively forming `1 + z`
  /// and calling `log` would lose all significant digits).
  ///
  /// - Parameter z: The displacement from 1 at which to evaluate the log.
  /// - Returns: `log(1 + z)`, with good componentwise relative accuracy
  ///   near `z = 0` and well-controlled error in the complex norm elsewhere.
  @inlinable
  public static func log(onePlus z: Complex) -> Complex {
    // Nevin proposed the idea for this implementation on the Swift forums:
    // https://forums.swift.org/t/elementaryfunctions-compliance-for-complex/37903/3
    //
    // Here's a quick explainer on why it works: in exact arithmetic,
    //
    //   log(1+z) = (log |1+z|, atan2(y, 1+x))
    //
    // where x and y are the real and imaginary parts of z, respectively.
    //
    // The first thing to note is that the expression for the imaginary
    // part works fine as is. If cancellation occurs (because x ≈ -1),
    // then 1+x is exact, and so we have good componentwise relative
    // accuracy. Otherwise, x is bounded away from -1 and 1+x has good
    // relative accuracy, and therefore so does atan2(y, 1+x).
    //
    // So the real part is the hard part (no surprise, just like expPlusOne).
    // Nevin's clever idea is simply to take advantage of the expansion:
    //
    //   Re(log 1+z) = (log 1+z + Conj(log 1+z))/2
    //
    // Log commutes with conjugation, so this becomes:
    //
    //   Re(log 1+z) = (log 1+z + log 1+z̅)/2
    //               = log((1+z)(1+z̅))/2
    //               = log(1+z+z̅+zz̅)/2
    //
    // This behaves well close to zero, because the z+z̅ term dominates
    // and is computed exactly. Away from zero, cancellation occurs near
    // the circle x(x+2) + y^2 = 0, but everywhere along this curve we
    // have |Im(log 1+z)| >= π/2, so the relative error in the complex
    // norm is well-controlled. We can take advantage of FMA to further
    // reduce the cancellation error and recover a good error bound.
    //
    // The other common implementation choice for log1p is Kahan's trick:
    //
    //   w := 1+z
    //   return z/(w-1) * log(w)
    //
    // But this actually doesn't do as well as Nevin's approach does,
    // and requires a complex division, which we want to avoid when we
    // can do so.
    //
    // Accumulate a = z + z̅ + zz̅ = 2x + (x² + y²). The exactly-computed
    // 2x term is seeded first; the squared terms are folded in below
    // via fused multiply-adds (addProduct) to avoid rounding the
    // products before they are summed.
    var a = 2 * z.x
    // We want to add the larger term first (contra usual guidance for
    // floating-point error optimization), because we're optimizing for
    // the catastrophic cancellation case; when that happens adding the
    // larger term via FMA is always exact. When cancellation doesn't
    // happen, the simple relative error bound carries through the
    // rest of the computation.
    let large = max(z.x.magnitude, z.y.magnitude)
    let small = min(z.x.magnitude, z.y.magnitude)
    a.addProduct(large, large)
    a.addProduct(small, small)
    // If a overflowed, then |z| ≫ 1, and so log(1+z) = log(z).
    guard a.isFinite else { return log(z) }
    // Unlike log(z), we do not need to worry about what happens if a
    // underflows.
    return Complex(
      RealType.log(onePlus: a) / 2,
      RealType.atan2(y: z.y, x: 1 + z.x)
    )
  }
239
300
240
301
public static func acos( _ z: Complex ) -> Complex {
0 commit comments