1 | | -export GradientConfig, GradientDiffResult, JacobianConfig, JacobianDiffResult, |
| 1 | +export GradientConfig, GradientDiffResult, config, |
2 | 2 | gradient!, evaluate, evaluate!, jacobian, jacobian!, value |
3 | 3 |
4 | 4 | const Index = UInt16 |
@@ -75,20 +75,9 @@ function PolyConfig(g::Polynomial{T}, reduced_exponents::Matrix{UInt16}, big_loo |
75 | 75 | grad_monomials, |
76 | 76 | reduced_exponents_delimiters, |
77 | 77 | reduced_exponents_map, |
78 | | - zeros(promote_type(T, S), n)) |
| 78 | + zeros(typeof(one(T) * one(S) + one(T) * one(S)), n)) |
79 | 79 | end |
80 | 80 |
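The changed line above replaces `promote_type(T, S)` with the type of an actual multiply-and-add, so the buffer's element type matches what evaluating a polynomial (products of coefficients and variables, summed over terms) really produces. A minimal sketch of the idea, assuming ordinary number types (not part of this commit):

```julia
T, S = Int, Complex{Float64}

# Pure type promotion of the coefficient type T and the value type S.
promote_type(T, S)                          # Complex{Float64}

# Element type derived from the arithmetic itself: a coefficient times a
# value plus another such product, mirroring polynomial evaluation.
typeof(one(T) * one(S) + one(T) * one(S))   # Complex{Float64}

# Both agree here; the arithmetic form also stays correct for number types
# whose `*`/`+` result types are not captured by `promote_type` alone.
```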
81 | | -# function Base.deepcopy(cfg::PolyConfig) |
82 | | -# PolyConfig( |
83 | | -# deepcopy(cfg.monomials_delimiters), |
84 | | -# deepcopy(cfg.monomials), |
85 | | -# deepcopy(cfg.grad_monomials_delimiters), |
86 | | -# deepcopy(cfg.grad_monomials), |
87 | | -# deepcopy(cfg.reduced_exponents_delimiters), |
88 | | -# deepcopy(cfg.reduced_exponents_map), |
89 | | -# deepcopy(cfg.reduced_values)) |
90 | | -# end |
91 | | - |
92 | 81 | @inline function fillreduced_values!( |
93 | 82 | cfg::PolyConfig{T}, |
94 | 83 | g::Polynomial, |
@@ -255,6 +244,15 @@ function GradientConfig(f::Polynomial{T}, ::Type{S}) where {T, S} |
255 | 244 | GradientConfig(poly, diffs, diffs_values) |
256 | 245 | end |
257 | 246 |
| 247 | +""" |
| 248 | +    config(f::Polynomial, x)
| 249 | +
| 250 | +Construct a `GradientConfig` for the evaluation of `f` with values like `x`. Note that `x` is only used to determine the output type of `f(x)`.
| 251 | +""" |
| 252 | +function config(f::Polynomial{T}, x::AbstractVector{S}) where {S, T} |
| 253 | + GradientConfig(f, typeof(one(T) * one(S) + one(T) * one(S))) |
| 254 | +end |
| 255 | + |
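The new `config` helper simply forwards to the `GradientConfig` constructor with that same arithmetic result type. A hedged usage sketch (the polynomial `f` and the point `x` are assumed to exist already; neither is defined in this commit):

```julia
x   = rand(3)              # a point matching the number of variables of f
cfg = config(f, x)         # GradientConfig whose buffers match typeof(f(x))
v   = evaluate(f, x, cfg)  # evaluate f at x, reusing the precomputed tables
r   = GradientDiffResult(cfg)
gradient!(r, f, x, cfg)    # value(r) == f(x); the gradient of f at x is stored in r
```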
258 | 256 | function differences(f::Polynomial{T}, ::Type{S}) where {T, S} |
259 | 257 | exponents = f.exponents |
260 | 258 | reduced_exponents = convert.(UInt16, max.(exponents .- 1, 0)) |
@@ -415,224 +413,3 @@ function gradient!(diffresult::GradientDiffResult, g::Polynomial, x::AbstractVec |
415 | 413 | _gradient!(diffresult.grad, x, cfg.poly) |
416 | 414 | diffresult |
417 | 415 | end |
418 | | - |
419 | | - |
420 | | -""" |
421 | | -
422 | | - JacobianConfig(F::Vector{Polynomial{T}}, [x::AbstractVector{S}]) |
423 | | -
424 | | -A data structure with which the jacobian of a `Vector` `F` of `Polynomial`s can be |
425 | | -evaluated efficiently. Note that `x` is only used to determine the |
426 | | -output type of `F(x)`. |
427 | | -
428 | | - JacobianConfig(F::Vector{Polynomial{T}}, [S]) |
429 | | -
430 | | -Instead of a vector `x` a type can also be given directly. |
431 | | -""" |
432 | | -mutable struct JacobianConfig{T} |
433 | | - polys::Vector{PolyConfig{T}} |
434 | | - differences::Matrix{UInt8} |
435 | | - differences_values::Matrix{T} |
436 | | -end |
437 | | - |
438 | | - |
439 | | -function JacobianConfig(f::Vector{Polynomial{T}}, ::AbstractArray{S}) where {T, S} |
440 | | - JacobianConfig(f, S) |
441 | | -end |
442 | | -JacobianConfig(f::Vector{Polynomial{T}}) where T = JacobianConfig(f, T) |
443 | | -function JacobianConfig(F::Vector{Polynomial{T}}, ::Type{S}) where {T, S} |
444 | | - diffs, diffs_values, big_lookups, reduced_exponents = differences(F, S) |
445 | | - polys = broadcast(PolyConfig, F, reduced_exponents, big_lookups, S) |
446 | | - |
447 | | - JacobianConfig(polys, diffs, diffs_values) |
448 | | -end |
449 | | - |
450 | | - |
451 | | -function differences(F::Vector{Polynomial{T}}, ::Type{S}) where {T, S} |
452 | | - reduced_exponents = map(F) do f |
453 | | - convert.(UInt16, max.(f.exponents .- 1, 0)) |
454 | | - end |
455 | | - differences, big_lookups = computetables(reduced_exponents) |
456 | | - differences_values = convert.(promote_type(T, S), differences) |
457 | | - |
458 | | - differences, differences_values, big_lookups, reduced_exponents |
459 | | -end |
460 | | - |
461 | | -function Base.deepcopy(cfg::JacobianConfig) |
462 | | - JacobianConfig( |
463 | | - deepcopy(cfg.polys), |
464 | | - deepcopy(cfg.differences), |
465 | | - deepcopy(cfg.differences_values)) |
466 | | -end |
467 | | - |
468 | | -""" |
469 | | - evaluate(F, x, cfg::JacobianConfig [, precomputed=false]) |
470 | | -
471 | | -Evaluate the system `F` at `x` using the precomputated values in `cfg`. |
472 | | -Note that this is usually signifcant faster than `map(f -> evaluate(f, x), F)`. |
473 | | -The return vector is constructed using `similar(x, T)`. |
474 | | -
475 | | -### Example |
476 | | -```julia |
477 | | -cfg = JacobianConfig(F) |
478 | | -evaluate(F, x, cfg) |
479 | | -``` |
480 | | -
481 | | -With `precomputed=true` we rely on the previous intermediate results in `cfg`. Therefore |
482 | | -the result is only correct if you previouls called `evaluate`, or `jacobian` with the same |
483 | | -`x`. |
484 | | -""" |
485 | | -function evaluate(G::Vector{<:Polynomial}, x::AbstractVector, cfg::JacobianConfig{T}, precomputed=false) where T |
486 | | - evaluate!(similar(x, T, length(G)), G, x, cfg, precomputed) |
487 | | -end |
488 | | - |
489 | | -""" |
490 | | - evaluate!(u, F, x, cfg::JacobianConfig [, precomputed=false]) |
491 | | -
492 | | -Evaluate the system `F` at `x` using the precomputated values in `cfg` |
493 | | -and store the result in `u`. |
494 | | -Note that this is usually signifcant faster than `map!(u, f -> evaluate(f, x), F)`. |
495 | | -
496 | | -### Example |
497 | | -```julia |
498 | | -cfg = JacobianConfig(F) |
499 | | -evaluate!(u, F, x, cfg) |
500 | | -``` |
501 | | -
502 | | -With `precomputed=true` we rely on the previous intermediate results in `cfg`. Therefore |
503 | | -the result is only correct if you previouls called `evaluate`, or `jacobian` with the same |
504 | | -`x`. |
505 | | -""" |
506 | | -function evaluate!(u::AbstractVector, G::Vector{<:Polynomial}, x::AbstractVector, cfg::JacobianConfig{T}, precomputed=false) where T |
507 | | - if !precomputed |
508 | | - fillvalues!(cfg.differences_values, x, cfg.differences) |
509 | | - for i=1:length(cfg.polys) |
510 | | - fillreduced_values!(cfg.polys[i], G[i], x, cfg.differences_values) |
511 | | - u[i] = _evaluate(x, cfg.polys[i]) |
512 | | - end |
513 | | - else |
514 | | - for i=1:length(cfg.polys) |
515 | | - u[i] = _evaluate(x, cfg.polys[i]) |
516 | | - end |
517 | | - end |
518 | | - u |
519 | | -end |
520 | | - |
521 | | -""" |
522 | | - jacobian(u, F, x, cfg::JacobianConfig [, precomputed=false]) |
523 | | -
524 | | -Evaluate the jacobian of `F` at `x` using the precomputated values in `cfg`. The return |
525 | | -matrix is constructed using `similar(x, T, m, n)`. |
526 | | -
527 | | -### Example |
528 | | -```julia |
529 | | -cfg = JacobianConfig(F) |
530 | | -jacobian(F, x, cfg) |
531 | | -``` |
532 | | -
533 | | -With `precomputed=true` we rely on the previous intermediate results in `cfg`. Therefore |
534 | | -the result is only correct if you previouls called `evaluate`, or `jacobian` with the same |
535 | | -`x`. |
536 | | -""" |
537 | | -function jacobian(g::Vector{<:Polynomial}, x::AbstractVector, cfg::JacobianConfig{T}, precomputed=false) where T |
538 | | - u = similar(x, T, (length(g), length(x))) |
539 | | - jacobian!(u, g, x, cfg, precomputed) |
540 | | - u |
541 | | -end |
542 | | - |
543 | | -""" |
544 | | - jacobian!(u, F, x, cfg::JacobianConfig [, precomputed=false]) |
545 | | -
546 | | -Evaluate the jacobian of `F` at `x` using the precomputated values in `cfg` |
547 | | -and store the result in `u`. |
548 | | -
549 | | -### Example |
550 | | -```julia |
551 | | -cfg = JacobianConfig(F) |
552 | | -jacobian!(u, F, x, cfg) |
553 | | -``` |
554 | | -
555 | | -With `precomputed=true` we rely on the previous intermediate results in `cfg`. Therefore |
556 | | -the result is only correct if you previouls called `evaluate`, or `jacobian` with the same |
557 | | -`x`. |
558 | | -""" |
559 | | -function jacobian!(u::AbstractMatrix, G::Vector{<:Polynomial}, x::AbstractVector, cfg::JacobianConfig{T}, precomputed=false) where T |
560 | | - if !precomputed |
561 | | - fillvalues!(cfg.differences_values, x, cfg.differences) |
562 | | - for i=1:length(G) |
563 | | - fillreduced_values!(cfg.polys[i], G[i], x, cfg.differences_values) |
564 | | - gradient_row!(u, x, cfg.polys[i], i) |
565 | | - end |
566 | | - else |
567 | | - for i=1:length(G) |
568 | | - gradient_row!(u, x, cfg.polys[i], i) |
569 | | - end |
570 | | - end |
571 | | - u |
572 | | -end |
573 | | - |
574 | | -""" |
575 | | - JacobianDiffResult(cfg::GradientConfig) |
576 | | -
577 | | -During the computation of the jacobian ``J_F(x)`` we compute nearly everything we need for the evaluation of |
578 | | -``F(x)``. `JacobianDiffResult` allocates memory to hold both values. |
579 | | -This structure also signals `jacobian!` to store ``F(x)`` and ``J_F(x)``. |
580 | | -
581 | | -### Example |
582 | | -
583 | | -```julia |
584 | | -cfg = JacobianConfig(F, x) |
585 | | -r = JacobianDiffResult(cfg) |
586 | | -jacobian!(r, F, x, cfg) |
587 | | -
588 | | -value(r) == map(f -> f(x), F) |
589 | | -jacobian(r) == jacobian(F, x, cfg) |
590 | | -``` |
591 | | -
592 | | - JacobianDiffResult(value::AbstractVector, jacobian::AbstractMatrix) |
593 | | -
594 | | -Allocate the memory to hold the value and the jacobian by yourself. |
595 | | -""" |
596 | | -mutable struct JacobianDiffResult{T, AV<:AbstractVector{T}, AM<:AbstractMatrix{T}} |
597 | | - value::AV |
598 | | - jacobian::AM |
599 | | -end |
600 | | - |
601 | | -function JacobianDiffResult(cfg::JacobianConfig{T}) where T |
602 | | - JacobianDiffResult{T, Vector{T}, Matrix{T}}( |
603 | | - zeros(T, length(cfg.polys)), |
604 | | - zeros(T, length(cfg.polys), size(cfg.differences, 1))) |
605 | | -end |
606 | | - |
607 | | -function JacobianDiffResult(value::AbstractVector{T}, jacobian::AbstractMatrix{T}) where T |
608 | | - JacobianDiffResult{T, typeof(value), typeof(jacobian)}(value, jacobian) |
609 | | -end |
610 | | -value(r::JacobianDiffResult) = r.value |
611 | | -jacobian(r::JacobianDiffResult) = r.jacobian |
612 | | - |
613 | | -""" |
614 | | - jacobian!(r::JacobianDiffResult, F, x, cfg::JacobianConfig) |
615 | | -
616 | | -Compute ``F(x)`` and the jacobian of `F` at `x` at once using the precomputated values in `cfg` |
617 | | -and store thre result in `r`. This is faster than computing both values separetely. |
618 | | -
619 | | -### Example |
620 | | -```julia |
621 | | -cfg = GradientConfig(g) |
622 | | -r = GradientDiffResult(cfg) |
623 | | -gradient!(r, g, x, cfg) |
624 | | -
625 | | -value(r) == g(x) |
626 | | -gradient(r) == gradient(g, x, cfg) |
627 | | -``` |
628 | | -""" |
629 | | -function jacobian!(r::JacobianDiffResult, G::Vector{<:Polynomial}, x::AbstractVector, cfg::JacobianConfig{T}) where T |
630 | | - fillvalues!(cfg.differences_values, x, cfg.differences) |
631 | | - for i=1:length(G) |
632 | | - fillreduced_values!(cfg.polys[i], G[i], x, cfg.differences_values) |
633 | | - gradient_row!(r.jacobian, x, cfg.polys[i], i) |
634 | | - r.value[i] = _evaluate(x, cfg.polys[i]) |
635 | | - end |
636 | | - |
637 | | - r |
638 | | -end |