 #include "config.h"
+#include <ccan/asort/asort.h>
 #include <ccan/tal/str/str.h>
 #include <common/gossmap.h>
 #include <plugins/askrene/askrene.h>
 #include <plugins/askrene/flow.h>
 #include <plugins/askrene/refine.h>
 #include <plugins/askrene/reserve.h>
 
+/* Channel data for fast retrieval. */
+struct channel_data {
+	struct amount_msat htlc_min, htlc_max, liquidity_max;
+	u32 fee_base_msat, fee_proportional_millionths;
+	struct short_channel_id_dir scidd;
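+	/* Direction-aware index of this channel: scidd.dir plus twice the
+	 * gossmap channel index (see new_channel_path_cache below). */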
+	u32 idx;
+};
+
 /* We (ab)use the reservation system to place temporary reservations
  * on channels while we are refining each flow. This has the effect
  * of making flows aware of each other. */
@@ -604,3 +613,217 @@ refine_with_fees_and_limits(const tal_t *ctx, |
 	tal_free(reservations);
 	return ret;
 }
+
+/* Cache channel data along the path used by this flow. */
+static struct channel_data *new_channel_path_cache(const tal_t *ctx,
+						    struct route_query *rq,
+						    struct flow *flow)
+{
+	const size_t pathlen = tal_count(flow->path);
+	struct channel_data *path = tal_arr(ctx, struct channel_data, pathlen);
+
+	for (size_t i = 0; i < pathlen; i++) {
+		/* knowledge of the liquidity bounds */
+		struct amount_msat known_min, known_max;
+		const struct half_chan *h = flow_edge(flow, i);
+		struct short_channel_id_dir scidd;
+
+		get_scidd(rq->gossmap, flow, i, &scidd);
+		get_constraints(rq, flow->path[i], flow->dirs[i], &known_min,
+				&known_max);
+
+		path[i].htlc_min = get_chan_htlc_min(rq, flow->path[i], &scidd);
+		path[i].htlc_max = get_chan_htlc_max(rq, flow->path[i], &scidd);
+		path[i].fee_base_msat = h->base_fee;
+		path[i].fee_proportional_millionths = h->proportional_fee;
+		path[i].liquidity_max = known_max;
+		path[i].scidd = scidd;
+		path[i].idx = scidd.dir +
+			      2 * gossmap_chan_idx(rq->gossmap, flow->path[i]);
+	}
+	return path;
+}
+
+/* Cache channel data along multiple paths. */
+static struct channel_data **new_channel_mpp_cache(const tal_t *ctx,
+						    struct route_query *rq,
+						    struct flow **flows)
+{
+	const size_t npaths = tal_count(flows);
+	struct channel_data **paths =
+	    tal_arr(ctx, struct channel_data *, npaths);
+	for (size_t i = 0; i < npaths; i++) {
+		paths[i] = new_channel_path_cache(paths, rq, flows[i]);
+	}
+	return paths;
+}
+
+/* Reverse order: bigger first */
+static int revcmp_flows(const size_t *a, const size_t *b, struct flow **flows)
+{
+	if (amount_msat_eq(flows[*a]->delivers, flows[*b]->delivers))
+		return 0;
+	if (amount_msat_greater(flows[*a]->delivers, flows[*b]->delivers))
+		return -1;
+	return 1;
+}
+
+// TODO: unit test:
+// -> make a path
+// -> compute x = path_max_deliverable
+// -> check that all htlc_max constraints are satisfied
+// -> check that with (x+1) at least one htlc_max is violated
+/* Given the channel constraints, return the maximum amount that can be
+ * delivered. */
+static struct amount_msat path_max_deliverable(struct channel_data *path)
+{
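+	/* Start from the maximum representable msat amount and tighten it
+	 * with each channel's fee, htlc_max and known liquidity. */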
+	struct amount_msat deliver = AMOUNT_MSAT(-1);
+	for (size_t i = 0; i < tal_count(path); i++) {
+		deliver =
+		    amount_msat_sub_fee(deliver, path[i].fee_base_msat,
+					path[i].fee_proportional_millionths);
+		deliver = amount_msat_min(deliver, path[i].htlc_max);
+		deliver = amount_msat_min(deliver, path[i].liquidity_max);
+	}
+	return deliver;
+}
+
+static struct amount_msat sum_all_deliver(struct flow **flows,
+					  size_t *flows_index)
+{
+	struct amount_msat all_deliver = AMOUNT_MSAT(0);
+	for (size_t i = 0; i < tal_count(flows_index); i++) {
+		if (!amount_msat_accumulate(&all_deliver,
+					    flows[flows_index[i]]->delivers))
+			abort();
+	}
+	return all_deliver;
+}
+
+/* Reduce the amounts of the flows and/or remove some flows so that the
+ * total delivered does not exceed max_deliver. At least one flow is left.
+ * Returns the total delivered amount. */
+static struct amount_msat remove_excess(struct flow **flows,
+					size_t **flows_index,
+					struct amount_msat max_deliver)
+{
+	if (tal_count(flows) == 0)
+		return AMOUNT_MSAT(0);
+
+	struct amount_msat all_deliver, excess;
+	all_deliver = sum_all_deliver(flows, *flows_index);
+
+	/* early exit: there is no excess */
+	if (!amount_msat_sub(&excess, all_deliver, max_deliver) ||
+	    amount_msat_is_zero(excess))
+		return all_deliver;
+
+	asort(*flows_index, tal_count(*flows_index), revcmp_flows, flows);
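+	/* flows_index is now ordered by delivered amount, largest first. */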
+
+	/* Starting from the smallest, drop whole flows while each one
+	 * delivers no more than the remaining excess. */
+	for (int i = tal_count(*flows_index) - 1; i >= 0; i--) {
+		if (!amount_msat_sub(&excess, excess,
+				     flows[(*flows_index)[i]]->delivers))
+			break;
+		if (!amount_msat_sub(&all_deliver, all_deliver,
+				     flows[(*flows_index)[i]]->delivers))
+			abort();
+		tal_arr_remove(flows_index, i);
+	}
+
+	/* If we still have some excess, remove it from the remaining flows
+	 * in proportion to each flow's contribution to the total. */
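+	/* For example (illustrative numbers): if the remaining flows deliver
+	 * 60k and 40k msat and the excess is 10k msat, they lose 6k and 4k
+	 * msat respectively. */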
+	struct amount_msat old_excess = excess;
+	struct amount_msat old_deliver = all_deliver;
+	for (size_t i = 0; i < tal_count(*flows_index); i++) {
+		double fraction = amount_msat_ratio(
+		    flows[(*flows_index)[i]]->delivers, old_deliver);
+		struct amount_msat remove;
+
+		if (!amount_msat_scale(&remove, old_excess, fraction))
+			abort();
+
+		/* rounding errors: don't remove more than excess */
+		remove = amount_msat_min(remove, excess);
+
+		if (!amount_msat_sub(&excess, excess, remove))
+			abort();
+
+		if (!amount_msat_sub(&all_deliver, all_deliver, remove) ||
+		    !amount_msat_sub(&flows[(*flows_index)[i]]->delivers,
+				     flows[(*flows_index)[i]]->delivers,
+				     remove))
+			abort();
+	}
+
+	/* Take any rounding error left over from the first (largest) flow. */
+	assert(tal_count(*flows_index) > 0);
+	if (!amount_msat_sub(&all_deliver, all_deliver, excess) ||
+	    !amount_msat_sub(&flows[(*flows_index)[0]]->delivers,
+			     flows[(*flows_index)[0]]->delivers, excess))
+		abort();
+	return all_deliver;
+}
+
+/* FIXME: on failure return error message */
+const char *refine_flows(const tal_t *ctx, struct route_query *rq,
+			 struct amount_msat deliver, struct flow ***flows)
+{
+	const tal_t *working_ctx = tal(ctx, tal_t);
+	struct amount_msat *max_deliverable;
+	struct channel_data **channel_mpp_cache;
+	size_t *flows_index;
+
+	/* We might need to access this data multiple times, so we cache
+	 * it. */
+	channel_mpp_cache = new_channel_mpp_cache(working_ctx, rq, *flows);
+	max_deliverable = tal_arrz(working_ctx, struct amount_msat,
+				   tal_count(channel_mpp_cache));
+	flows_index = tal_arrz(working_ctx, size_t, tal_count(*flows));
+	for (size_t i = 0; i < tal_count(channel_mpp_cache); i++) {
+		// FIXME: does path_max_deliverable work for a single
+		// channel with 0 fees?
+		max_deliverable[i] = path_max_deliverable(channel_mpp_cache[i]);
+		/* We use an array of indexes to keep track of the order
+		 * of the flows; it also lets us remove flows by simply
+		 * shrinking the flows_index array. */
+		flows_index[i] = i;
+	}
+
+	/* Do not deliver more than each path allows (htlc_max, fees and
+	 * known liquidity). */
+	for (size_t i = 0; i < tal_count(flows_index); i++) {
+		(*flows)[flows_index[i]]->delivers =
+		    amount_msat_min((*flows)[flows_index[i]]->delivers,
+				    max_deliverable[flows_index[i]]);
+	}
+
+	/* Remove any excess due to the MCF granularity. */
+	remove_excess(*flows, &flows_index, deliver);
+
+	/* Remove zero-amount flows, if any. */
+	asort(flows_index, tal_count(flows_index), revcmp_flows, *flows);
+	for (int i = tal_count(flows_index) - 1; i >= 0; i--) {
+		if (!amount_msat_is_zero((*flows)[flows_index[i]]->delivers))
+			break;
+		tal_arr_remove(&flows_index, i);
+	}
+
+	/* Finally, write the remaining flows back. */
+	struct flow **tmp_flows = tal_arr(working_ctx, struct flow *, 0);
+	for (size_t i = 0; i < tal_count(flows_index); i++) {
+		tal_arr_expand(&tmp_flows, (*flows)[flows_index[i]]);
+		(*flows)[flows_index[i]] = NULL;
+	}
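+	/* Whatever is left in *flows at this point was dropped: free it. */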
+	for (size_t i = 0; i < tal_count(*flows); i++) {
+		(*flows)[i] = tal_free((*flows)[i]);
+	}
+	tal_resize(flows, 0);
+	for (size_t i = 0; i < tal_count(tmp_flows); i++) {
+		tal_arr_expand(flows, tmp_flows[i]);
+	}
+
+	tal_free(working_ctx);
+	return NULL;
+}