diff --git a/.github/workflows/num-bigint-ci.yaml b/.github/workflows/num-bigint-ci.yaml new file mode 100644 index 000000000..757dbfe48 --- /dev/null +++ b/.github/workflows/num-bigint-ci.yaml @@ -0,0 +1,105 @@ +name: num-bigint-generic +on: merge_group + +jobs: + + test: + name: Test + runs-on: ubuntu-latest + strategy: + matrix: + rust: [ + 1.60.0, # MSRV + stable, + beta, + nightly + ] + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + if: startsWith(matrix.rust, '1') + with: + path: ~/.cargo/registry/index + key: cargo-${{ matrix.rust }}-git-index + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - run: cargo build + - run: vendor/num-bigint-generic/ci/test_full.sh + + # try a target with `BigDigit = u32` + i686: + name: Test (i686) + runs-on: ubuntu-latest + steps: + - run: | + sudo apt-get update + sudo apt-get install gcc-multilib + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: stable-i686-unknown-linux-gnu + - run: cargo build + - run: vendor/num-bigint-generic/ci/test_full.sh + + # try building the x32 target -- x86_64 with target_pointer_width="32" + # (we can't execute without kernel CONFIG_X86_X32_ABI though) + x32: + name: Test (x32) + runs-on: ubuntu-latest + steps: + - run: | + sudo apt-get update + sudo apt-get install gcc-multilib + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: x86_64-unknown-linux-gnux32 + - run: cargo build --target x86_64-unknown-linux-gnux32 --all-features + - run: cargo test --no-run --target x86_64-unknown-linux-gnux32 --all-features + + # try a target that doesn't have std at all, but does have alloc + no_std: + name: No Std + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: thumbv6m-none-eabi + - run: cargo build --target thumbv6m-none-eabi --no-default-features --features "serde rand" + + fmt: + name: Format + runs-on: 
ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@1.62.0 + with: + components: rustfmt + - run: cargo fmt --all --check + + doc: + name: Docs.rs + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + - run: cargo doc --features std,serde,rand,quickcheck,arbitrary + env: + RUSTDOCFLAGS: --cfg docsrs + + # One job that "summarizes" the success state of this pipeline. This can then be added to branch + # protection, rather than having to add each job separately. + success: + name: Success + runs-on: ubuntu-latest + needs: [test, i686, x32, no_std, fmt, doc] + # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency + # failed" as success. So we have to do some contortions to ensure the job fails if any of its + # dependencies fails. + if: always() # make sure this is never "skipped" + steps: + # Manually check the status of all dependencies. `if: failure()` does not work. 
+ - name: check if any dependency failed + run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/CHANGELOG.md b/CHANGELOG.md index e312672ee..14153170b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -106,6 +106,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#1230](https://github.com/o1-labs/openmina/pull/1230)) - bump itertools from 0.10.5 to 0.12.0 #1228 ([#1228](https://github.com/o1-labs/openmina/pull/1228)) +- vendor num-bigint#1996d080 and rename it in num-bigint-generic + ([#1379](https://github.com/o1-labs/mina-rust/pull/1379/)) ### Other diff --git a/Cargo.lock b/Cargo.lock index d2877a792..b6c468eb2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -220,6 +220,12 @@ version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" +[[package]] +name = "arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" + [[package]] name = "arc-swap" version = "1.7.1" @@ -6043,6 +6049,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "num-bigint-generic" +version = "0.4.6" +dependencies = [ + "arbitrary", + "num-integer", + "num-traits", + "quickcheck", + "rand", + "serde", + "tinyvec", +] + [[package]] name = "num-complex" version = "0.4.4" @@ -6090,6 +6109,16 @@ dependencies = [ "serde", ] +[[package]] +name = "num-rational-generic" +version = "0.4.2" +dependencies = [ + "num-bigint-generic", + "num-integer", + "num-traits", + "serde", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -7199,6 +7228,15 @@ dependencies = [ "unsigned-varint 0.7.2", ] +[[package]] +name = "quickcheck" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +dependencies = [ + "rand", 
+] + [[package]] name = "quinn" version = "0.10.2" diff --git a/Cargo.toml b/Cargo.toml index ead3c17d2..7a1ed095d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,8 @@ members = [ "producer-dashboard", "fuzzer", + "vendor/num-bigint-generic", + "vendor/num-rational-generic", ] resolver = "2" diff --git a/vendor/num-bigint-generic/.github/workflows/master.yaml b/vendor/num-bigint-generic/.github/workflows/master.yaml new file mode 100644 index 000000000..034e37936 --- /dev/null +++ b/vendor/num-bigint-generic/.github/workflows/master.yaml @@ -0,0 +1,28 @@ +name: master +on: + push: + branches: + - master + schedule: + - cron: '0 0 * * 0' # 00:00 Sunday + +jobs: + + test: + name: Test + runs-on: ubuntu-latest + strategy: + matrix: + rust: [1.60.0, stable] + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + if: startsWith(matrix.rust, '1') + with: + path: ~/.cargo/registry/index + key: cargo-${{ matrix.rust }}-git-index + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - run: cargo build + - run: ./ci/test_full.sh diff --git a/vendor/num-bigint-generic/.github/workflows/pr.yaml b/vendor/num-bigint-generic/.github/workflows/pr.yaml new file mode 100644 index 000000000..baac64847 --- /dev/null +++ b/vendor/num-bigint-generic/.github/workflows/pr.yaml @@ -0,0 +1,49 @@ +name: PR +on: + pull_request: + +jobs: + + test: + name: Test + runs-on: ubuntu-latest + strategy: + matrix: + rust: [1.60.0, stable] + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + if: startsWith(matrix.rust, '1') + with: + path: ~/.cargo/registry/index + key: cargo-${{ matrix.rust }}-git-index + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - run: cargo build + - run: ./ci/test_full.sh + + fmt: + name: Format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@1.62.0 + with: + components: rustfmt + - run: cargo fmt --all --check + + # One job that 
"summarizes" the success state of this pipeline. This can then be added to branch + # protection, rather than having to add each job separately. + success: + name: Success + runs-on: ubuntu-latest + needs: [test, fmt] + # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency + # failed" as success. So we have to do some contortions to ensure the job fails if any of its + # dependencies fails. + if: always() # make sure this is never "skipped" + steps: + # Manually check the status of all dependencies. `if: failure()` does not work. + - name: check if any dependency failed + run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/vendor/num-bigint-generic/.gitignore b/vendor/num-bigint-generic/.gitignore new file mode 100644 index 000000000..6809567db --- /dev/null +++ b/vendor/num-bigint-generic/.gitignore @@ -0,0 +1,4 @@ +Cargo.lock +target +*.bk +*.orig diff --git a/vendor/num-bigint-generic/Cargo.toml b/vendor/num-bigint-generic/Cargo.toml new file mode 100644 index 000000000..e6d670f62 --- /dev/null +++ b/vendor/num-bigint-generic/Cargo.toml @@ -0,0 +1,82 @@ +[package] +authors = ["The Rust Project Developers"] +description = "Big integer implementation for Rust" +documentation = "https://docs.rs/num-bigint" +homepage = "https://github.com/rust-num/num-bigint" +keywords = ["mathematics", "numerics", "bignum"] +categories = ["algorithms", "data-structures", "science"] +license = "MIT OR Apache-2.0" +name = "num-bigint-generic" +repository = "https://github.com/rust-num/num-bigint" +version = "0.4.6" +readme = "README.md" +exclude = ["/ci/*", "/.github/*"] +edition = "2021" +rust-version = "1.60" + +[features] +default = ["std"] +std = ["num-integer/std", "num-traits/std"] +arbitrary = ["dep:arbitrary"] +quickcheck = ["dep:quickcheck"] +rand = ["dep:rand"] +serde = ["dep:serde"] +nightly = [] + +[package.metadata.docs.rs] +features = ["std", "serde", "rand", "quickcheck", "arbitrary"] 
+rustdoc-args = ["--cfg", "docsrs"] + +[[bench]] +name = "bigint" +required-features = ["nightly"] + +[[bench]] +name = "factorial" +required-features = ["nightly"] + +[[bench]] +name = "gcd" +required-features = ["nightly"] + +[[bench]] +name = "roots" +required-features = ["nightly"] + +[[bench]] +harness = false +name = "shootout-pidigits" +required-features = ["nightly"] + +[dependencies] +tinyvec = { version = "1", features = ["alloc", "rustc_1_55"] } + +[dependencies.num-integer] +default-features = false +features = ["i128"] +version = "0.1.46" + +[dependencies.num-traits] +default-features = false +features = ["i128"] +version = "0.2.18" + +[dependencies.rand] +default-features = false +optional = true +version = "0.8" + +[dependencies.serde] +default-features = false +optional = true +version = "1.0" + +[dependencies.quickcheck] +default-features = false +optional = true +version = "1" + +[dependencies.arbitrary] +default-features = false +optional = true +version = "1" diff --git a/vendor/num-bigint-generic/LICENSE-APACHE b/vendor/num-bigint-generic/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/vendor/num-bigint-generic/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/num-bigint-generic/LICENSE-MIT b/vendor/num-bigint-generic/LICENSE-MIT new file mode 100644 index 000000000..39d4bdb5a --- /dev/null +++ b/vendor/num-bigint-generic/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/num-bigint-generic/README.md b/vendor/num-bigint-generic/README.md new file mode 100644 index 000000000..e31510f07 --- /dev/null +++ b/vendor/num-bigint-generic/README.md @@ -0,0 +1,83 @@ +# num-bigint + +[![crate](https://img.shields.io/crates/v/num-bigint.svg)](https://crates.io/crates/num-bigint) +[![documentation](https://docs.rs/num-bigint/badge.svg)](https://docs.rs/num-bigint) +[![minimum rustc 1.60](https://img.shields.io/badge/rustc-1.60+-red.svg)](https://rust-lang.github.io/rfcs/2495-min-rust-version.html) +[![build status](https://github.com/rust-num/num-bigint/workflows/master/badge.svg)](https://github.com/rust-num/num-bigint/actions) + +Big integer types for Rust, `BigInt` and `BigUint`. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +num-bigint = "0.4" +``` + +## Features + +The `std` crate feature is enabled by default, and is mandatory before Rust 1.36 +and the stabilized `alloc` crate. If you depend on `num-bigint` with +`default-features = false`, you must manually enable the `std` feature yourself +if your compiler is not new enough. + +### Random Generation + +`num-bigint` supports the generation of random big integers when the `rand` +feature is enabled. To enable it include rand as + +```toml +rand = "0.8" +num-bigint = { version = "0.4", features = ["rand"] } +``` + +Note that you must use the version of `rand` that `num-bigint` is compatible +with: `0.8`. + +## Releases + +Release notes are available in [RELEASES.md](RELEASES.md). + +## Compatibility + +The `num-bigint` crate is tested for rustc 1.60 and greater. + +## Alternatives + +While `num-bigint` strives for good performance in pure Rust code, other crates +may offer better performance with different trade-offs. The following table +offers a brief comparison to a few alternatives. 
+ +| Crate | License | Min rustc | Implementation | Features | +| :---------------- | :------------- | :-------- | :--------------------------------- | :-------------------------------------------------------- | +| **`num-bigint`** | MIT/Apache-2.0 | 1.60 | pure rust | dynamic width, number theoretical functions | +| [`awint`] | MIT/Apache-2.0 | 1.66 | pure rust | fixed width, heap or stack, concatenation macros | +| [`bnum`] | MIT/Apache-2.0 | 1.65 | pure rust | fixed width, parity with Rust primitives including floats | +| [`crypto-bigint`] | MIT/Apache-2.0 | 1.73 | pure rust | fixed width, stack only | +| [`ibig`] | MIT/Apache-2.0 | 1.49 | pure rust | dynamic width, number theoretical functions | +| [`rug`] | LGPL-3.0+ | 1.65 | bundles [GMP] via [`gmp-mpfr-sys`] | all the features of GMP, MPFR, and MPC | + +[`awint`]: https://crates.io/crates/awint +[`bnum`]: https://crates.io/crates/bnum +[`crypto-bigint`]: https://crates.io/crates/crypto-bigint +[`ibig`]: https://crates.io/crates/ibig +[`rug`]: https://crates.io/crates/rug +[GMP]: https://gmplib.org/ +[`gmp-mpfr-sys`]: https://crates.io/crates/gmp-mpfr-sys + +## License + +Licensed under either of + +- [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) +- [MIT license](http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
diff --git a/vendor/num-bigint-generic/RELEASES.md b/vendor/num-bigint-generic/RELEASES.md new file mode 100644 index 000000000..8b472b5ed --- /dev/null +++ b/vendor/num-bigint-generic/RELEASES.md @@ -0,0 +1,348 @@ +# Release 0.4.6 (2024-06-27) + +- [Fixed compilation on `x86_64-unknown-linux-gnux32`.][312] + +**Contributors**: @cuviper, @ralphtandetzky, @yhx-12243 + +[312]: https://github.com/rust-num/num-bigint/pull/312 + +# Release 0.4.5 (2024-05-06) + +- [Upgrade to 2021 edition, **MSRV 1.60**][292] +- [Add `const ZERO` and implement `num_traits::ConstZero`][298] +- [Add `modinv` methods for the modular inverse][288] +- [Optimize multiplication with imbalanced operands][295] +- [Optimize scalar division on x86 and x86-64][236] + +**Contributors**: @cuviper, @joelonsql, @waywardmonkeys + +[236]: https://github.com/rust-num/num-bigint/pull/236 +[288]: https://github.com/rust-num/num-bigint/pull/288 +[292]: https://github.com/rust-num/num-bigint/pull/292 +[295]: https://github.com/rust-num/num-bigint/pull/295 +[298]: https://github.com/rust-num/num-bigint/pull/298 + +# Release 0.4.4 (2023-08-22) + +- [Implemented `From` for `BigInt` and `BigUint`.][239] +- [Implemented `num_traits::Euclid` and `CheckedEuclid` for `BigInt` and + `BigUint`.][245] +- [Implemented ties-to-even for `BigInt` and `BigUint::to_f32` and + `to_f64`.][271] +- [Implemented `num_traits::FromBytes` and `ToBytes` for `BigInt` and + `BigUint`.][276] +- Limited pre-allocation from serde size hints against potential OOM. +- Miscellaneous other code cleanups and maintenance tasks. 
+ +**Contributors**: @AaronKutch, @archseer, @cuviper, @dramforever, @icecream17, +@icedrocket, @janmarthedal, @jaybosamiya, @OliveIsAWord, @PatrickNorton, +@smoelius, @waywardmonkeys + +[239]: https://github.com/rust-num/num-bigint/pull/239 +[245]: https://github.com/rust-num/num-bigint/pull/245 +[271]: https://github.com/rust-num/num-bigint/pull/271 +[276]: https://github.com/rust-num/num-bigint/pull/276 + +# Release 0.4.3 (2021-11-02) + +- [GHSA-v935-pqmr-g8v9]: [Fix unexpected panics in multiplication.][228] + +**Contributors**: @arvidn, @cuviper, @guidovranken + +[228]: https://github.com/rust-num/num-bigint/pull/228 +[GHSA-v935-pqmr-g8v9]: + https://github.com/rust-num/num-bigint/security/advisories/GHSA-v935-pqmr-g8v9 + +# Release 0.4.2 (2021-09-03) + +- [Use explicit `Integer::div_ceil` to avoid the new unstable method.][219] + +**Contributors**: @catenacyber, @cuviper + +[219]: https://github.com/rust-num/num-bigint/pull/219 + +# Release 0.4.1 (2021-08-27) + +- [Fixed scalar divide-by-zero panics.][200] +- [Implemented `DoubleEndedIterator` for `U32Digits` and `U64Digits`.][208] +- [Optimized multiplication to avoid unnecessary allocations.][199] +- [Optimized string formatting for very large values.][216] + +**Contributors**: @cuviper, @PatrickNorton + +[199]: https://github.com/rust-num/num-bigint/pull/199 +[200]: https://github.com/rust-num/num-bigint/pull/200 +[208]: https://github.com/rust-num/num-bigint/pull/208 +[216]: https://github.com/rust-num/num-bigint/pull/216 + +# Release 0.4.0 (2021-03-05) + +### Breaking Changes + +- Updated public dependences on [arbitrary, quickcheck][194], and [rand][185]: + - `arbitrary` support has been updated to 1.0, requiring Rust 1.40. + - `quickcheck` support has been updated to 1.0, requiring Rust 1.46. + - `rand` support has been updated to 0.8, requiring Rust 1.36. +- [`Debug` now shows plain numeric values for `BigInt` and `BigUint`][195], + rather than the raw list of internal digits. 
+ +**Contributors**: @cuviper, @Gelbpunkt + +[185]: https://github.com/rust-num/num-bigint/pull/185 +[194]: https://github.com/rust-num/num-bigint/pull/194 +[195]: https://github.com/rust-num/num-bigint/pull/195 + +# Release 0.3.3 (2021-09-03) + +- [Use explicit `Integer::div_ceil` to avoid the new unstable method.][219] + +**Contributors**: @catenacyber, @cuviper + +# Release 0.3.2 (2021-03-04) + +- [The new `BigUint` methods `count_ones` and `trailing_ones`][175] return the + number of `1` bits in the entire value or just its least-significant tail, + respectively. +- [The new `BigInt` and `BigUint` methods `bit` and `set_bit`][183] will read + and write individual bits of the value. For negative `BigInt`, bits are + determined as if they were in the two's complement representation. +- [The `from_radix_le` and `from_radix_be` methods][187] now accept empty + buffers to represent zero. +- [`BigInt` and `BigUint` can now iterate digits as `u32` or `u64`][192], + regardless of the actual internal digit size. + +**Contributors**: @BartMassey, @cuviper, @janmarthedal, @sebastianv89, @Speedy37 + +[175]: https://github.com/rust-num/num-bigint/pull/175 +[183]: https://github.com/rust-num/num-bigint/pull/183 +[187]: https://github.com/rust-num/num-bigint/pull/187 +[192]: https://github.com/rust-num/num-bigint/pull/192 + +# Release 0.3.1 (2020-11-03) + +- [Addition and subtraction now uses intrinsics][141] for performance on `x86` + and `x86_64` when built with Rust 1.33 or later. +- [Conversions `to_f32` and `to_f64` now return infinity][163] for very large + numbers, rather than `None`. This does preserve the sign too, so a large + negative `BigInt` will convert to negative infinity. +- [The optional `arbitrary` feature implements `arbitrary::Arbitrary`][166], + distinct from `quickcheck::Arbitrary`. +- [The division algorithm has been optimized][170] to reduce the number of + temporary allocations and improve the internal guesses at each step. 
+- [`BigInt` and `BigUint` will opportunistically shrink capacity][171] if the + internal vector is much larger than needed. + +**Contributors**: @cuviper, @e00E, @ejmahler, @notoria, @tczajka + +[141]: https://github.com/rust-num/num-bigint/pull/141 +[163]: https://github.com/rust-num/num-bigint/pull/163 +[166]: https://github.com/rust-num/num-bigint/pull/166 +[170]: https://github.com/rust-num/num-bigint/pull/170 +[171]: https://github.com/rust-num/num-bigint/pull/171 + +# Release 0.3.0 (2020-06-12) + +### Enhancements + +- [The internal `BigDigit` may now be either `u32` or `u64`][62], although that + implementation detail is not exposed in the API. For now, this is chosen to + match the target pointer size, but may change in the future. +- [No-`std` is now supported with the `alloc` crate on Rust 1.36][101]. +- [`Pow` is now implemented for bigint values][137], not just references. +- [`TryFrom` is now implemented on Rust 1.34 and later][123], converting signed + integers to unsigned, and narrowing big integers to primitives. +- [`Shl` and `Shr` are now implemented for a variety of shift types][142]. +- A new `trailing_zeros()` returns the number of consecutive zeros from the + least significant bit. +- The new `BigInt::magnitude` and `into_parts` methods give access to its + `BigUint` part as the magnitude. + +### Breaking Changes + +- `num-bigint` now requires Rust 1.31 or greater. + - The "i128" opt-in feature was removed, now always available. +- [Updated public dependences][110]: + - `rand` support has been updated to 0.7, requiring Rust 1.32. + - `quickcheck` support has been updated to 0.9, requiring Rust 1.34. +- [Removed `impl Neg for BigUint`][145], which only ever panicked. +- [Bit counts are now `u64` instead of `usize`][143]. 
+ +**Contributors**: @cuviper, @dignifiedquire, @hansihe, @kpcyrd, @milesand, +@tech6hutch + +[62]: https://github.com/rust-num/num-bigint/pull/62 +[101]: https://github.com/rust-num/num-bigint/pull/101 +[110]: https://github.com/rust-num/num-bigint/pull/110 +[123]: https://github.com/rust-num/num-bigint/pull/123 +[137]: https://github.com/rust-num/num-bigint/pull/137 +[142]: https://github.com/rust-num/num-bigint/pull/142 +[143]: https://github.com/rust-num/num-bigint/pull/143 +[145]: https://github.com/rust-num/num-bigint/pull/145 + +# Release 0.2.6 (2020-01-27) + +- [Fix the promotion of negative `isize` in `BigInt` assign-ops][133]. + +**Contributors**: @cuviper, @HactarCE + +[133]: https://github.com/rust-num/num-bigint/pull/133 + +# Release 0.2.5 (2020-01-09) + +- [Updated the `autocfg` build dependency to 1.0][126]. + +**Contributors**: @cuviper, @tspiteri + +[126]: https://github.com/rust-num/num-bigint/pull/126 + +# Release 0.2.4 (2020-01-01) + +- [The new `BigUint::to_u32_digits` method][104] returns the number as a + little-endian vector of base-232 digits. The same method on + `BigInt` also returns the sign. +- [`BigUint::modpow` now applies a modulus even for exponent 1][113], which also + affects `BigInt::modpow`. +- [`BigInt::modpow` now returns the correct sign for negative bases with even + exponents][114]. + +[104]: https://github.com/rust-num/num-bigint/pull/104 +[113]: https://github.com/rust-num/num-bigint/pull/113 +[114]: https://github.com/rust-num/num-bigint/pull/114 + +**Contributors**: @alex-ozdemir, @cuviper, @dingelish, @Speedy37, @youknowone + +# Release 0.2.3 (2019-09-03) + +- [`Pow` is now implemented for `BigUint` exponents][77]. +- [The optional `quickcheck` feature enables implementations of + `Arbitrary`][99]. +- See the [full comparison][compare-0.2.3] for performance enhancements and + more! 
+ +[77]: https://github.com/rust-num/num-bigint/pull/77 +[99]: https://github.com/rust-num/num-bigint/pull/99 +[compare-0.2.3]: + https://github.com/rust-num/num-bigint/compare/num-bigint-0.2.2...num-bigint-0.2.3 + +**Contributors**: @cuviper, @lcnr, @maxbla, @mikelodder7, @mikong, +@TheLetterTheta, @tspiteri, @XAMPPRocky, @youknowone + +# Release 0.2.2 (2018-12-14) + +- [The `Roots` implementations now use better initial guesses][71]. +- [Fixed `to_signed_bytes_*` for some positive numbers][72], where the + most-significant byte is `0x80` and the rest are `0`. + +[71]: https://github.com/rust-num/num-bigint/pull/71 +[72]: https://github.com/rust-num/num-bigint/pull/72 + +**Contributors**: @cuviper, @leodasvacas + +# Release 0.2.1 (2018-11-02) + +- [`RandBigInt` now uses `Rng::fill_bytes`][53] to improve performance, instead + of repeated `gen::` calls. The also affects the implementations of the + other `rand` traits. This may potentially change the values produced by some + seeded RNGs on previous versions, but the values were tested to be stable with + `ChaChaRng`, `IsaacRng`, and `XorShiftRng`. +- [`BigInt` and `BigUint` now implement `num_integer::Roots`][56]. +- [`BigInt` and `BigUint` now implement `num_traits::Pow`][54]. +- [`BigInt` and `BigUint` now implement operators with 128-bit integers][64]. + +**Contributors**: @cuviper, @dignifiedquire, @mancabizjak, @Robbepop, +@TheIronBorn, @thomwiggers + +[53]: https://github.com/rust-num/num-bigint/pull/53 +[54]: https://github.com/rust-num/num-bigint/pull/54 +[56]: https://github.com/rust-num/num-bigint/pull/56 +[64]: https://github.com/rust-num/num-bigint/pull/64 + +# Release 0.2.0 (2018-05-25) + +### Enhancements + +- [`BigInt` and `BigUint` now implement `Product` and `Sum`][22] for iterators + of any item that we can `Mul` and `Add`, respectively. 
For example, a + factorial can now be simply: `let f: BigUint = (1u32..1000).product();` +- [`BigInt` now supports two's-complement logic operations][26], namely + `BitAnd`, `BitOr`, `BitXor`, and `Not`. These act conceptually as if each + number had an infinite prefix of `0` or `1` bits for positive or negative. +- [`BigInt` now supports assignment operators][41] like `AddAssign`. +- [`BigInt` and `BigUint` now support conversions with `i128` and `u128`][44], + if sufficient compiler support is detected. +- [`BigInt` and `BigUint` now implement rand's `SampleUniform` trait][48], and + [a custom `RandomBits` distribution samples by bit size][49]. +- The release also includes other miscellaneous improvements to performance. + +### Breaking Changes + +- [`num-bigint` now requires rustc 1.15 or greater][23]. +- [The crate now has a `std` feature, and won't build without it][46]. This is + in preparation for someday supporting `#![no_std]` with `alloc`. +- [The `serde` dependency has been updated to 1.0][24], still disabled by + default. The `rustc-serialize` crate is no longer supported by `num-bigint`. +- [The `rand` dependency has been updated to 0.5][48], now disabled by default. + This requires rustc 1.22 or greater for `rand`'s own requirement. +- [`Shr for BigInt` now rounds down][8] rather than toward zero, matching the + behavior of the primitive integers for negative values. +- [`ParseBigIntError` is now an opaque type][37]. +- [The `big_digit` module is no longer public][38], nor are the `BigDigit` and + `DoubleBigDigit` types and `ZERO_BIG_DIGIT` constant that were re-exported in + the crate root. Public APIs which deal in digits, like `BigUint::from_slice`, + will now always be base-`u32`. 
+ +**Contributors**: @clarcharr, @cuviper, @dodomorandi, @tiehuis, @tspiteri + +[8]: https://github.com/rust-num/num-bigint/pull/8 +[22]: https://github.com/rust-num/num-bigint/pull/22 +[23]: https://github.com/rust-num/num-bigint/pull/23 +[24]: https://github.com/rust-num/num-bigint/pull/24 +[26]: https://github.com/rust-num/num-bigint/pull/26 +[37]: https://github.com/rust-num/num-bigint/pull/37 +[38]: https://github.com/rust-num/num-bigint/pull/38 +[41]: https://github.com/rust-num/num-bigint/pull/41 +[44]: https://github.com/rust-num/num-bigint/pull/44 +[46]: https://github.com/rust-num/num-bigint/pull/46 +[48]: https://github.com/rust-num/num-bigint/pull/48 +[49]: https://github.com/rust-num/num-bigint/pull/49 + +# Release 0.1.44 (2018-05-14) + +- [Division with single-digit divisors is now much faster.][42] +- The README now compares [`ramp`, `rug`, `rust-gmp`][20], and [`apint`][21]. + +**Contributors**: @cuviper, @Robbepop + +[20]: https://github.com/rust-num/num-bigint/pull/20 +[21]: https://github.com/rust-num/num-bigint/pull/21 +[42]: https://github.com/rust-num/num-bigint/pull/42 + +# Release 0.1.43 (2018-02-08) + +- [The new `BigInt::modpow`][18] performs signed modular exponentiation, using + the existing `BigUint::modpow` and rounding negatives similar to `mod_floor`. + +**Contributors**: @cuviper + +[18]: https://github.com/rust-num/num-bigint/pull/18 + +# Release 0.1.42 (2018-02-07) + +- [num-bigint now has its own source repository][num-356] at + [rust-num/num-bigint][home]. +- [`lcm` now avoids creating a large intermediate product][num-350]. +- [`gcd` now uses Stein's algorithm][15] with faster shifts instead of division. +- [`rand` support is now extended to 0.4][11] (while still allowing 0.3). 
+ +**Contributors**: @cuviper, @Emerentius, @ignatenkobrain, @mhogrefe + +[home]: https://github.com/rust-num/num-bigint +[num-350]: https://github.com/rust-num/num/pull/350 +[num-356]: https://github.com/rust-num/num/pull/356 +[11]: https://github.com/rust-num/num-bigint/pull/11 +[15]: https://github.com/rust-num/num-bigint/pull/15 + +# Prior releases + +No prior release notes were kept. Thanks all the same to the many contributors +that have made this crate what it is! diff --git a/vendor/num-bigint-generic/benches/bigint.rs b/vendor/num-bigint-generic/benches/bigint.rs new file mode 100644 index 000000000..8cba6847b --- /dev/null +++ b/vendor/num-bigint-generic/benches/bigint.rs @@ -0,0 +1,450 @@ +#![feature(test)] +#![cfg(feature = "rand")] + +extern crate test; + +use num_bigint_generic::{BigInt, BigUint, RandBigInt}; +use num_traits::{FromPrimitive, Num, One, Zero}; +use std::mem::replace; +use test::Bencher; + +mod rng; +use rng::get_rng; + +fn multiply_bench(b: &mut Bencher, xbits: u64, ybits: u64) { + let mut rng = get_rng(); + let x = rng.gen_bigint(xbits); + let y = rng.gen_bigint(ybits); + + b.iter(|| &x * &y); +} + +fn divide_bench(b: &mut Bencher, xbits: u64, ybits: u64) { + let mut rng = get_rng(); + let x = rng.gen_bigint(xbits); + let y = rng.gen_bigint(ybits); + + b.iter(|| &x / &y); +} + +fn remainder_bench(b: &mut Bencher, xbits: u64, ybits: u64) { + let mut rng = get_rng(); + let x = rng.gen_bigint(xbits); + let y = rng.gen_bigint(ybits); + + b.iter(|| &x % &y); +} + +fn factorial(n: usize) -> BigUint { + let mut f: BigUint = One::one(); + for i in 1..=n { + let bu: BigUint = FromPrimitive::from_usize(i).unwrap(); + f *= bu; + } + f +} + +/// Compute Fibonacci numbers +fn fib(n: usize) -> BigUint { + let mut f0: BigUint = Zero::zero(); + let mut f1: BigUint = One::one(); + for _ in 0..n { + let f2 = f0 + &f1; + f0 = replace(&mut f1, f2); + } + f0 +} + +/// Compute Fibonacci numbers with two ops per iteration +/// (add and subtract, like issue 
#200) +fn fib2(n: usize) -> BigUint { + let mut f0: BigUint = Zero::zero(); + let mut f1: BigUint = One::one(); + for _ in 0..n { + f1 += &f0; + f0 = &f1 - f0; + } + f0 +} + +#[bench] +fn multiply_0(b: &mut Bencher) { + multiply_bench(b, 1 << 8, 1 << 8); +} + +#[bench] +fn multiply_1(b: &mut Bencher) { + multiply_bench(b, 1 << 8, 1 << 16); +} + +#[bench] +fn multiply_2(b: &mut Bencher) { + multiply_bench(b, 1 << 16, 1 << 16); +} + +#[bench] +fn multiply_3(b: &mut Bencher) { + multiply_bench(b, 1 << 16, 1 << 17); +} + +#[bench] +fn multiply_4(b: &mut Bencher) { + multiply_bench(b, 1 << 12, 1 << 13); +} + +#[bench] +fn multiply_5(b: &mut Bencher) { + multiply_bench(b, 1 << 12, 1 << 14); +} + +#[bench] +fn divide_0(b: &mut Bencher) { + divide_bench(b, 1 << 8, 1 << 6); +} + +#[bench] +fn divide_1(b: &mut Bencher) { + divide_bench(b, 1 << 12, 1 << 8); +} + +#[bench] +fn divide_2(b: &mut Bencher) { + divide_bench(b, 1 << 16, 1 << 12); +} + +#[bench] +fn divide_big_little(b: &mut Bencher) { + divide_bench(b, 1 << 16, 1 << 4); +} + +#[bench] +fn remainder_0(b: &mut Bencher) { + remainder_bench(b, 1 << 8, 1 << 6); +} + +#[bench] +fn remainder_1(b: &mut Bencher) { + remainder_bench(b, 1 << 12, 1 << 8); +} + +#[bench] +fn remainder_2(b: &mut Bencher) { + remainder_bench(b, 1 << 16, 1 << 12); +} + +#[bench] +fn remainder_big_little(b: &mut Bencher) { + remainder_bench(b, 1 << 16, 1 << 4); +} + +#[bench] +fn factorial_100(b: &mut Bencher) { + b.iter(|| factorial(100)); +} + +#[bench] +fn fib_100(b: &mut Bencher) { + b.iter(|| fib(100)); +} + +#[bench] +fn fib_1000(b: &mut Bencher) { + b.iter(|| fib(1000)); +} + +#[bench] +fn fib_10000(b: &mut Bencher) { + b.iter(|| fib(10000)); +} + +#[bench] +fn fib2_100(b: &mut Bencher) { + b.iter(|| fib2(100)); +} + +#[bench] +fn fib2_1000(b: &mut Bencher) { + b.iter(|| fib2(1000)); +} + +#[bench] +fn fib2_10000(b: &mut Bencher) { + b.iter(|| fib2(10000)); +} + +#[bench] +fn fac_to_string(b: &mut Bencher) { + let fac = factorial(100); + 
b.iter(|| fac.to_string()); +} + +#[bench] +fn fib_to_string(b: &mut Bencher) { + let fib = fib(100); + b.iter(|| fib.to_string()); +} + +fn to_str_radix_bench(b: &mut Bencher, radix: u32, bits: u64) { + let mut rng = get_rng(); + let x = rng.gen_bigint(bits); + b.iter(|| x.to_str_radix(radix)); +} + +#[bench] +fn to_str_radix_02(b: &mut Bencher) { + to_str_radix_bench(b, 2, 1009); +} + +#[bench] +fn to_str_radix_08(b: &mut Bencher) { + to_str_radix_bench(b, 8, 1009); +} + +#[bench] +fn to_str_radix_10(b: &mut Bencher) { + to_str_radix_bench(b, 10, 1009); +} + +#[bench] +fn to_str_radix_10_2(b: &mut Bencher) { + to_str_radix_bench(b, 10, 10009); +} + +#[bench] +fn to_str_radix_16(b: &mut Bencher) { + to_str_radix_bench(b, 16, 1009); +} + +#[bench] +fn to_str_radix_36(b: &mut Bencher) { + to_str_radix_bench(b, 36, 1009); +} + +fn from_str_radix_bench(b: &mut Bencher, radix: u32) { + let mut rng = get_rng(); + let x = rng.gen_bigint(1009); + let s = x.to_str_radix(radix); + assert_eq!(x, BigInt::from_str_radix(&s, radix).unwrap()); + b.iter(|| BigInt::from_str_radix(&s, radix)); +} + +#[bench] +fn from_str_radix_02(b: &mut Bencher) { + from_str_radix_bench(b, 2); +} + +#[bench] +fn from_str_radix_08(b: &mut Bencher) { + from_str_radix_bench(b, 8); +} + +#[bench] +fn from_str_radix_10(b: &mut Bencher) { + from_str_radix_bench(b, 10); +} + +#[bench] +fn from_str_radix_16(b: &mut Bencher) { + from_str_radix_bench(b, 16); +} + +#[bench] +fn from_str_radix_36(b: &mut Bencher) { + from_str_radix_bench(b, 36); +} + +fn rand_bench(b: &mut Bencher, bits: u64) { + let mut rng = get_rng(); + + b.iter(|| rng.gen_bigint(bits)); +} + +#[bench] +fn rand_64(b: &mut Bencher) { + rand_bench(b, 1 << 6); +} + +#[bench] +fn rand_256(b: &mut Bencher) { + rand_bench(b, 1 << 8); +} + +#[bench] +fn rand_1009(b: &mut Bencher) { + rand_bench(b, 1009); +} + +#[bench] +fn rand_2048(b: &mut Bencher) { + rand_bench(b, 1 << 11); +} + +#[bench] +fn rand_4096(b: &mut Bencher) { + rand_bench(b, 1 << 
12); +} + +#[bench] +fn rand_8192(b: &mut Bencher) { + rand_bench(b, 1 << 13); +} + +#[bench] +fn rand_65536(b: &mut Bencher) { + rand_bench(b, 1 << 16); +} + +#[bench] +fn rand_131072(b: &mut Bencher) { + rand_bench(b, 1 << 17); +} + +#[bench] +fn shl(b: &mut Bencher) { + let n = BigUint::one() << 1000u32; + let mut m = n.clone(); + b.iter(|| { + m.clone_from(&n); + for i in 0..50 { + m <<= i; + } + }) +} + +#[bench] +fn shr(b: &mut Bencher) { + let n = BigUint::one() << 2000u32; + let mut m = n.clone(); + b.iter(|| { + m.clone_from(&n); + for i in 0..50 { + m >>= i; + } + }) +} + +#[bench] +fn hash(b: &mut Bencher) { + use std::collections::HashSet; + let mut rng = get_rng(); + let v: Vec = (1000..2000).map(|bits| rng.gen_bigint(bits)).collect(); + b.iter(|| { + let h: HashSet<&BigInt> = v.iter().collect(); + assert_eq!(h.len(), v.len()); + }); +} + +#[bench] +fn pow_bench(b: &mut Bencher) { + b.iter(|| { + let upper = 100_u32; + let mut i_big = BigUint::from(1u32); + for _i in 2..=upper { + i_big += 1u32; + for j in 2..=upper { + i_big.pow(j); + } + } + }); +} + +#[bench] +fn pow_bench_bigexp(b: &mut Bencher) { + use num_traits::Pow; + + b.iter(|| { + let upper = 100_u32; + let mut i_big = BigUint::from(1u32); + for _i in 2..=upper { + i_big += 1u32; + let mut j_big = BigUint::from(1u32); + for _j in 2..=upper { + j_big += 1u32; + Pow::pow(&i_big, &j_big); + } + } + }); +} + +#[bench] +fn pow_bench_1e1000(b: &mut Bencher) { + b.iter(|| BigUint::from(10u32).pow(1_000)); +} + +#[bench] +fn pow_bench_1e10000(b: &mut Bencher) { + b.iter(|| BigUint::from(10u32).pow(10_000)); +} + +#[bench] +fn pow_bench_1e100000(b: &mut Bencher) { + b.iter(|| BigUint::from(10u32).pow(100_000)); +} + +/// This modulus is the prime from the 2048-bit MODP DH group: +/// https://tools.ietf.org/html/rfc3526#section-3 +const RFC3526_2048BIT_MODP_GROUP: &str = "\ + FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\ + 29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\ + 
EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\ + E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\ + EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\ + C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\ + 83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\ + 670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\ + E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\ + DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\ + 15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF"; + +#[bench] +fn modpow(b: &mut Bencher) { + let mut rng = get_rng(); + let base = rng.gen_biguint(2048); + let e = rng.gen_biguint(2048); + let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap(); + + b.iter(|| base.modpow(&e, &m)); +} + +#[bench] +fn modpow_even(b: &mut Bencher) { + let mut rng = get_rng(); + let base = rng.gen_biguint(2048); + let e = rng.gen_biguint(2048); + // Make the modulus even, so monty (base-2^32) doesn't apply. + let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap() - 1u32; + + b.iter(|| base.modpow(&e, &m)); +} + +#[bench] +fn to_u32_digits(b: &mut Bencher) { + let mut rng = get_rng(); + let n = rng.gen_biguint(2048); + + b.iter(|| n.to_u32_digits()); +} + +#[bench] +fn iter_u32_digits(b: &mut Bencher) { + let mut rng = get_rng(); + let n = rng.gen_biguint(2048); + + b.iter(|| n.iter_u32_digits().max()); +} + +#[bench] +fn to_u64_digits(b: &mut Bencher) { + let mut rng = get_rng(); + let n = rng.gen_biguint(2048); + + b.iter(|| n.to_u64_digits()); +} + +#[bench] +fn iter_u64_digits(b: &mut Bencher) { + let mut rng = get_rng(); + let n = rng.gen_biguint(2048); + + b.iter(|| n.iter_u64_digits().max()); +} diff --git a/vendor/num-bigint-generic/benches/factorial.rs b/vendor/num-bigint-generic/benches/factorial.rs new file mode 100644 index 000000000..f05a41c3f --- /dev/null +++ b/vendor/num-bigint-generic/benches/factorial.rs @@ -0,0 +1,42 @@ +#![feature(test)] + +extern crate test; + +use num_bigint_generic::BigUint; 
+use num_traits::One; +use std::ops::{Div, Mul}; +use test::Bencher; + +#[bench] +fn factorial_mul_biguint(b: &mut Bencher) { + b.iter(|| { + (1u32..1000) + .map(BigUint::from) + .fold(BigUint::one(), Mul::mul) + }); +} + +#[bench] +fn factorial_mul_u32(b: &mut Bencher) { + b.iter(|| (1u32..1000).fold(BigUint::one(), Mul::mul)); +} + +// The division test is inspired by this blog comparison: +// + +#[bench] +fn factorial_div_biguint(b: &mut Bencher) { + let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul); + b.iter(|| { + (1u32..1000) + .rev() + .map(BigUint::from) + .fold(n.clone(), Div::div) + }); +} + +#[bench] +fn factorial_div_u32(b: &mut Bencher) { + let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul); + b.iter(|| (1u32..1000).rev().fold(n.clone(), Div::div)); +} diff --git a/vendor/num-bigint-generic/benches/gcd.rs b/vendor/num-bigint-generic/benches/gcd.rs new file mode 100644 index 000000000..ad5b656a4 --- /dev/null +++ b/vendor/num-bigint-generic/benches/gcd.rs @@ -0,0 +1,76 @@ +#![feature(test)] +#![cfg(feature = "rand")] + +extern crate test; + +use num_bigint_generic::{BigUint, RandBigInt}; +use num_integer::Integer; +use num_traits::Zero; +use test::Bencher; + +mod rng; +use rng::get_rng; + +fn bench(b: &mut Bencher, bits: u64, gcd: fn(&BigUint, &BigUint) -> BigUint) { + let mut rng = get_rng(); + let x = rng.gen_biguint(bits); + let y = rng.gen_biguint(bits); + + assert_eq!(euclid(&x, &y), x.gcd(&y)); + + b.iter(|| gcd(&x, &y)); +} + +fn euclid(x: &BigUint, y: &BigUint) -> BigUint { + // Use Euclid's algorithm + let mut m = x.clone(); + let mut n = y.clone(); + while !m.is_zero() { + let temp = m; + m = n % &temp; + n = temp; + } + n +} + +#[bench] +fn gcd_euclid_0064(b: &mut Bencher) { + bench(b, 64, euclid); +} + +#[bench] +fn gcd_euclid_0256(b: &mut Bencher) { + bench(b, 256, euclid); +} + +#[bench] +fn gcd_euclid_1024(b: &mut Bencher) { + bench(b, 1024, euclid); +} + +#[bench] +fn gcd_euclid_4096(b: &mut Bencher) { + bench(b, 
4096, euclid); +} + +// Integer for BigUint now uses Stein for gcd + +#[bench] +fn gcd_stein_0064(b: &mut Bencher) { + bench(b, 64, BigUint::gcd); +} + +#[bench] +fn gcd_stein_0256(b: &mut Bencher) { + bench(b, 256, BigUint::gcd); +} + +#[bench] +fn gcd_stein_1024(b: &mut Bencher) { + bench(b, 1024, BigUint::gcd); +} + +#[bench] +fn gcd_stein_4096(b: &mut Bencher) { + bench(b, 4096, BigUint::gcd); +} diff --git a/vendor/num-bigint-generic/benches/rng/mod.rs b/vendor/num-bigint-generic/benches/rng/mod.rs new file mode 100644 index 000000000..33e4f0fad --- /dev/null +++ b/vendor/num-bigint-generic/benches/rng/mod.rs @@ -0,0 +1,38 @@ +use rand::RngCore; + +pub(crate) fn get_rng() -> impl RngCore { + XorShiftStar { + a: 0x0123_4567_89AB_CDEF, + } +} + +/// Simple `Rng` for benchmarking without additional dependencies +struct XorShiftStar { + a: u64, +} + +impl RngCore for XorShiftStar { + fn next_u32(&mut self) -> u32 { + self.next_u64() as u32 + } + + fn next_u64(&mut self) -> u64 { + // https://en.wikipedia.org/wiki/Xorshift#xorshift* + self.a ^= self.a >> 12; + self.a ^= self.a << 25; + self.a ^= self.a >> 27; + self.a.wrapping_mul(0x2545_F491_4F6C_DD1D) + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + for chunk in dest.chunks_mut(8) { + let bytes = self.next_u64().to_le_bytes(); + let slice = &bytes[..chunk.len()]; + chunk.copy_from_slice(slice) + } + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> { + Ok(self.fill_bytes(dest)) + } +} diff --git a/vendor/num-bigint-generic/benches/roots.rs b/vendor/num-bigint-generic/benches/roots.rs new file mode 100644 index 000000000..d567252c8 --- /dev/null +++ b/vendor/num-bigint-generic/benches/roots.rs @@ -0,0 +1,166 @@ +#![feature(test)] +#![cfg(feature = "rand")] + +extern crate test; + +use num_bigint_generic::{BigUint, RandBigInt}; +use test::Bencher; + +mod rng; +use rng::get_rng; + +// The `big64` cases demonstrate the speed of cases where the value +// can be converted to a `u64` 
primitive for faster calculation. +// +// The `big1k` cases demonstrate those that can convert to `f64` for +// a better initial guess of the actual value. +// +// The `big2k` and `big4k` cases are too big for `f64`, and use a simpler guess. + +fn check(x: &BigUint, n: u32) { + let root = x.nth_root(n); + if n == 2 { + assert_eq!(root, x.sqrt()) + } else if n == 3 { + assert_eq!(root, x.cbrt()) + } + + let lo = root.pow(n); + assert!(lo <= *x); + assert_eq!(lo.nth_root(n), root); + assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32); + + let hi = (&root + 1u32).pow(n); + assert!(hi > *x); + assert_eq!(hi.nth_root(n), &root + 1u32); + assert_eq!((&hi - 1u32).nth_root(n), root); +} + +fn bench_sqrt(b: &mut Bencher, bits: u64) { + let x = get_rng().gen_biguint(bits); + eprintln!("bench_sqrt({})", x); + + check(&x, 2); + b.iter(|| x.sqrt()); +} + +#[bench] +fn big64_sqrt(b: &mut Bencher) { + bench_sqrt(b, 64); +} + +#[bench] +fn big1k_sqrt(b: &mut Bencher) { + bench_sqrt(b, 1024); +} + +#[bench] +fn big2k_sqrt(b: &mut Bencher) { + bench_sqrt(b, 2048); +} + +#[bench] +fn big4k_sqrt(b: &mut Bencher) { + bench_sqrt(b, 4096); +} + +fn bench_cbrt(b: &mut Bencher, bits: u64) { + let x = get_rng().gen_biguint(bits); + eprintln!("bench_cbrt({})", x); + + check(&x, 3); + b.iter(|| x.cbrt()); +} + +#[bench] +fn big64_cbrt(b: &mut Bencher) { + bench_cbrt(b, 64); +} + +#[bench] +fn big1k_cbrt(b: &mut Bencher) { + bench_cbrt(b, 1024); +} + +#[bench] +fn big2k_cbrt(b: &mut Bencher) { + bench_cbrt(b, 2048); +} + +#[bench] +fn big4k_cbrt(b: &mut Bencher) { + bench_cbrt(b, 4096); +} + +fn bench_nth_root(b: &mut Bencher, bits: u64, n: u32) { + let x = get_rng().gen_biguint(bits); + eprintln!("bench_{}th_root({})", n, x); + + check(&x, n); + b.iter(|| x.nth_root(n)); +} + +#[bench] +fn big64_nth_10(b: &mut Bencher) { + bench_nth_root(b, 64, 10); +} + +#[bench] +fn big1k_nth_10(b: &mut Bencher) { + bench_nth_root(b, 1024, 10); +} + +#[bench] +fn big1k_nth_100(b: &mut Bencher) { + 
bench_nth_root(b, 1024, 100); +} + +#[bench] +fn big1k_nth_1000(b: &mut Bencher) { + bench_nth_root(b, 1024, 1000); +} + +#[bench] +fn big1k_nth_10000(b: &mut Bencher) { + bench_nth_root(b, 1024, 10000); +} + +#[bench] +fn big2k_nth_10(b: &mut Bencher) { + bench_nth_root(b, 2048, 10); +} + +#[bench] +fn big2k_nth_100(b: &mut Bencher) { + bench_nth_root(b, 2048, 100); +} + +#[bench] +fn big2k_nth_1000(b: &mut Bencher) { + bench_nth_root(b, 2048, 1000); +} + +#[bench] +fn big2k_nth_10000(b: &mut Bencher) { + bench_nth_root(b, 2048, 10000); +} + +#[bench] +fn big4k_nth_10(b: &mut Bencher) { + bench_nth_root(b, 4096, 10); +} + +#[bench] +fn big4k_nth_100(b: &mut Bencher) { + bench_nth_root(b, 4096, 100); +} + +#[bench] +fn big4k_nth_1000(b: &mut Bencher) { + bench_nth_root(b, 4096, 1000); +} + +#[bench] +fn big4k_nth_10000(b: &mut Bencher) { + bench_nth_root(b, 4096, 10000); +} diff --git a/vendor/num-bigint-generic/benches/shootout-pidigits.rs b/vendor/num-bigint-generic/benches/shootout-pidigits.rs new file mode 100644 index 000000000..8f3b7cc90 --- /dev/null +++ b/vendor/num-bigint-generic/benches/shootout-pidigits.rs @@ -0,0 +1,137 @@ +// The Computer Language Benchmarks Game +// http://benchmarksgame.alioth.debian.org/ +// +// contributed by the Rust Project Developers + +// Copyright (c) 2013-2014 The Rust Project Developers +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// - Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// - Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the +// distribution. 
+// +// - Neither the name of "The Computer Language Benchmarks Game" nor +// the name of "The Computer Language Shootout Benchmarks" nor the +// names of its contributors may be used to endorse or promote +// products derived from this software without specific prior +// written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use std::{io, str::FromStr}; + +use num_bigint_generic::BigInt; +use num_integer::Integer; +use num_traits::{FromPrimitive, One, ToPrimitive, Zero}; + +struct Context { + numer: BigInt, + accum: BigInt, + denom: BigInt, +} + +impl Context { + fn new() -> Context { + Context { + numer: One::one(), + accum: Zero::zero(), + denom: One::one(), + } + } + + fn from_i32(i: i32) -> BigInt { + FromPrimitive::from_i32(i).unwrap() + } + + fn extract_digit(&self) -> i32 { + if self.numer > self.accum { + return -1; + } + let (q, r) = (&self.numer * Context::from_i32(3) + &self.accum).div_rem(&self.denom); + if r + &self.numer >= self.denom { + return -1; + } + q.to_i32().unwrap() + } + + fn next_term(&mut self, k: i32) { + let y2 = Context::from_i32(k * 2 + 1); + self.accum = (&self.accum + (&self.numer << 1)) * &y2; + self.numer = &self.numer * Context::from_i32(k); + self.denom = &self.denom * y2; + } + + fn eliminate_digit(&mut self, d: i32) { + let d = Context::from_i32(d); + let ten = Context::from_i32(10); + self.accum = (&self.accum - &self.denom * d) * &ten; + self.numer = &self.numer * ten; + } +} + +fn pidigits(n: isize, out: &mut dyn io::Write) -> io::Result<()> { + let mut k = 0; + let mut context = Context::new(); + + for i in 1..=n { + let mut d; + loop { + k += 1; + context.next_term(k); + d = context.extract_digit(); + if d != -1 { + break; + } + } + + write!(out, "{}", d)?; + if i % 10 == 0 { + writeln!(out, "\t:{}", i)?; + } + + context.eliminate_digit(d); + } + + let m = n % 10; + if m != 0 { + for _ in m..10 { + write!(out, " ")?; + } + writeln!(out, "\t:{}", n)?; + } + Ok(()) +} + +const DEFAULT_DIGITS: isize = 512; + +fn main() { + let args = std::env::args().collect::>(); + let n = if args.len() < 2 { + DEFAULT_DIGITS + } else if args[1] == "--bench" { + return pidigits(DEFAULT_DIGITS, &mut std::io::sink()).unwrap(); + } else { + FromStr::from_str(&args[1]).unwrap() + }; + pidigits(n, &mut std::io::stdout()).unwrap(); +} diff --git 
a/vendor/num-bigint-generic/ci/big_quickcheck/Cargo.toml b/vendor/num-bigint-generic/ci/big_quickcheck/Cargo.toml new file mode 100644 index 000000000..a6498260a --- /dev/null +++ b/vendor/num-bigint-generic/ci/big_quickcheck/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "big_quickcheck" +version = "0.1.0" +authors = ["Josh Stone "] +edition = "2018" + +[dependencies] +num-integer = "0.1.42" +num-traits = "0.2.11" +quickcheck_macros = "1" + +[dependencies.quickcheck] +default-features = false +version = "1" + +[dependencies.num-bigint] +features = ["quickcheck"] +path = "../.." diff --git a/vendor/num-bigint-generic/ci/big_quickcheck/src/lib.rs b/vendor/num-bigint-generic/ci/big_quickcheck/src/lib.rs new file mode 100644 index 000000000..41018a693 --- /dev/null +++ b/vendor/num-bigint-generic/ci/big_quickcheck/src/lib.rs @@ -0,0 +1,433 @@ +//! Quickcheck of `BigUint` and `BigInt` +//! +//! This test is in a completely separate crate so we can use `quickcheck_macros` only when +//! `quickcheck` is active. The main crate can't have optional dev-dependencies, and it's +//! better not to expose it as a "feature" optional dependency. 
+ +#![cfg(test)] + +use num_bigint::{BigInt, BigUint}; +use num_integer::Integer; +use num_traits::{Num, One, Signed, ToPrimitive, Zero}; +use quickcheck::{Gen, QuickCheck, TestResult}; +use quickcheck_macros::quickcheck; + +#[quickcheck] +fn quickcheck_unsigned_eq_reflexive(a: BigUint) -> bool { + a == a +} + +#[quickcheck] +fn quickcheck_signed_eq_reflexive(a: BigInt) -> bool { + a == a +} + +#[quickcheck] +fn quickcheck_unsigned_eq_symmetric(a: BigUint, b: BigUint) -> bool { + if a == b { + b == a + } else { + b != a + } +} + +#[quickcheck] +fn quickcheck_signed_eq_symmetric(a: BigInt, b: BigInt) -> bool { + if a == b { + b == a + } else { + b != a + } +} + +#[test] +fn quickcheck_arith_primitive() { + let gen = Gen::new(usize::MAX); + let mut qc = QuickCheck::new().gen(gen); + + fn test_unsigned_add_primitive(a: usize, b: usize) -> TestResult { + let actual = BigUint::from(a) + BigUint::from(b); + match a.checked_add(b) { + None => TestResult::discard(), + Some(expected) => TestResult::from_bool(BigUint::from(expected) == actual), + } + } + + fn test_signed_add_primitive(a: isize, b: isize) -> TestResult { + let actual = BigInt::from(a) + BigInt::from(b); + match a.checked_add(b) { + None => TestResult::discard(), + Some(expected) => TestResult::from_bool(BigInt::from(expected) == actual), + } + } + + fn test_unsigned_mul_primitive(a: u64, b: u64) -> bool { + //maximum value of u64 means no overflow + BigUint::from(a as u128 * b as u128) == BigUint::from(a) * BigUint::from(b) + } + + fn test_signed_mul_primitive(a: i64, b: i64) -> bool { + //maximum value of i64 means no overflow + BigInt::from(a as i128 * b as i128) == BigInt::from(a) * BigInt::from(b) + } + + fn test_unsigned_sub_primitive(a: u128, b: u128) -> bool { + if b < a { + BigUint::from(a - b) == BigUint::from(a) - BigUint::from(b) + } else { + BigUint::from(b - a) == BigUint::from(b) - BigUint::from(a) + } + } + + fn test_signed_sub_primitive(a: i128, b: i128) -> TestResult { + let actual = 
BigInt::from(a) - BigInt::from(b); + match a.checked_sub(b) { + None => TestResult::discard(), + Some(expected) => TestResult::from_bool(BigInt::from(expected) == actual), + } + } + + fn test_unsigned_div_primitive(a: u128, b: u128) -> TestResult { + if b == 0 { + TestResult::discard() + } else { + TestResult::from_bool(BigUint::from(a / b) == BigUint::from(a) / BigUint::from(b)) + } + } + + fn test_signed_div_primitive(a: i128, b: i128) -> TestResult { + if b == 0 || (a == i128::MIN && b == -1) { + TestResult::discard() + } else { + TestResult::from_bool(BigInt::from(a / b) == BigInt::from(a) / BigInt::from(b)) + } + } + + qc.quickcheck(test_unsigned_add_primitive as fn(usize, usize) -> TestResult); + qc.quickcheck(test_signed_add_primitive as fn(isize, isize) -> TestResult); + qc.quickcheck(test_unsigned_mul_primitive as fn(u64, u64) -> bool); + qc.quickcheck(test_signed_mul_primitive as fn(i64, i64) -> bool); + qc.quickcheck(test_unsigned_sub_primitive as fn(u128, u128) -> bool); + qc.quickcheck(test_signed_sub_primitive as fn(i128, i128) -> TestResult); + qc.quickcheck(test_unsigned_div_primitive as fn(u128, u128) -> TestResult); + qc.quickcheck(test_signed_div_primitive as fn(i128, i128) -> TestResult); +} + +#[quickcheck] +fn quickcheck_unsigned_add_commutative(a: BigUint, b: BigUint) -> bool { + &a + &b == b + a +} + +#[quickcheck] +fn quickcheck_signed_add_commutative(a: BigInt, b: BigInt) -> bool { + &a + &b == b + a +} + +#[quickcheck] +fn quickcheck_unsigned_add_zero(a: BigUint) -> bool { + a == &a + BigUint::zero() +} + +#[quickcheck] +fn quickcheck_signed_add_zero(a: BigInt) -> bool { + a == &a + BigInt::zero() +} + +#[quickcheck] +fn quickcheck_unsigned_add_associative(a: BigUint, b: BigUint, c: BigUint) -> bool { + (&a + &b) + &c == a + (b + c) +} + +#[quickcheck] +fn quickcheck_signed_add_associative(a: BigInt, b: BigInt, c: BigInt) -> bool { + (&a + &b) + &c == a + (b + c) +} + +#[quickcheck] +fn quickcheck_unsigned_mul_zero(a: BigUint) -> bool { + 
a * BigUint::zero() == BigUint::zero() +} + +#[quickcheck] +fn quickcheck_signed_mul_zero(a: BigInt) -> bool { + a * BigInt::zero() == BigInt::zero() +} + +#[quickcheck] +fn quickcheck_unsigned_mul_one(a: BigUint) -> bool { + &a * BigUint::one() == a +} + +#[quickcheck] +fn quickcheck_signed_mul_one(a: BigInt) -> bool { + &a * BigInt::one() == a +} + +#[quickcheck] +fn quickcheck_unsigned_mul_commutative(a: BigUint, b: BigUint) -> bool { + &a * &b == b * a +} + +#[quickcheck] +fn quickcheck_signed_mul_commutative(a: BigInt, b: BigInt) -> bool { + &a * &b == b * a +} + +#[quickcheck] +fn quickcheck_unsigned_mul_associative(a: BigUint, b: BigUint, c: BigUint) -> bool { + (&a * &b) * &c == a * (b * c) +} + +#[quickcheck] +fn quickcheck_signed_mul_associative(a: BigInt, b: BigInt, c: BigInt) -> bool { + (&a * &b) * &c == a * (b * c) +} + +#[quickcheck] +fn quickcheck_unsigned_distributive(a: BigUint, b: BigUint, c: BigUint) -> bool { + &a * (&b + &c) == &a * b + a * c +} + +#[quickcheck] +fn quickcheck_signed_distributive(a: BigInt, b: BigInt, c: BigInt) -> bool { + &a * (&b + &c) == &a * b + a * c +} + +#[quickcheck] +///Tests that exactly one of ab a=b is true +fn quickcheck_unsigned_ge_le_eq_mut_exclusive(a: BigUint, b: BigUint) -> bool { + let gt_lt_eq = vec![a > b, a < b, a == b]; + gt_lt_eq + .iter() + .fold(0, |acc, e| if *e { acc + 1 } else { acc }) + == 1 +} + +#[quickcheck] +///Tests that exactly one of ab a=b is true +fn quickcheck_signed_ge_le_eq_mut_exclusive(a: BigInt, b: BigInt) -> bool { + let gt_lt_eq = vec![a > b, a < b, a == b]; + gt_lt_eq + .iter() + .fold(0, |acc, e| if *e { acc + 1 } else { acc }) + == 1 +} + +#[quickcheck] +/// Tests correctness of subtraction assuming addition is correct +fn quickcheck_unsigned_sub(a: BigUint, b: BigUint) -> bool { + if b < a { + &a - &b + b == a + } else { + &b - &a + a == b + } +} + +#[quickcheck] +/// Tests correctness of subtraction assuming addition is correct +fn quickcheck_signed_sub(a: BigInt, b: BigInt) 
-> bool { + if b < a { + &a - &b + b == a + } else { + &b - &a + a == b + } +} + +#[quickcheck] +fn quickcheck_unsigned_pow_zero(a: BigUint) -> bool { + a.pow(0_u32) == BigUint::one() +} + +#[quickcheck] +fn quickcheck_unsigned_pow_one(a: BigUint) -> bool { + a.pow(1_u32) == a +} + +#[quickcheck] +fn quickcheck_unsigned_sqrt(a: BigUint) -> bool { + (&a * &a).sqrt() == a +} + +#[quickcheck] +fn quickcheck_unsigned_cbrt(a: BigUint) -> bool { + (&a * &a * &a).cbrt() == a +} + +#[quickcheck] +fn quickcheck_signed_cbrt(a: BigInt) -> bool { + (&a * &a * &a).cbrt() == a +} + +#[quickcheck] +fn quickcheck_unsigned_conversion(a: BigUint, radix: u8) -> TestResult { + let radix = radix as u32; + if radix > 36 || radix < 2 { + return TestResult::discard(); + } + let string = a.to_str_radix(radix); + TestResult::from_bool(a == BigUint::from_str_radix(&string, radix).unwrap()) +} + +#[quickcheck] +fn quickcheck_signed_conversion(a: BigInt, radix: u8) -> TestResult { + let radix = radix as u32; + if radix > 36 || radix < 2 { + return TestResult::discard(); + } + let string = a.to_str_radix(radix); + TestResult::from_bool(a == BigInt::from_str_radix(&string, radix).unwrap()) +} + +#[test] +fn quicktest_shift() { + let gen = Gen::new(usize::MAX); + let mut qc = QuickCheck::new().gen(gen); + + fn test_shr_unsigned(a: u64, shift: u8) -> TestResult { + let shift = (shift % 64) as usize; //shift at most 64 bits + let big_a = BigUint::from(a); + TestResult::from_bool(BigUint::from(a >> shift) == big_a >> shift) + } + + fn test_shr_signed(a: i64, shift: u8) -> TestResult { + let shift = (shift % 64) as usize; //shift at most 64 bits + let big_a = BigInt::from(a); + TestResult::from_bool(BigInt::from(a >> shift) == big_a >> shift) + } + + fn test_shl_unsigned(a: u32, shift: u8) -> TestResult { + let shift = (shift % 32) as usize; //shift at most 32 bits + let a = a as u64; //leave room for the shifted bits + let big_a = BigUint::from(a); + TestResult::from_bool(BigUint::from(a >> shift) 
== big_a >> shift) + } + + fn test_shl_signed(a: i32, shift: u8) -> TestResult { + let shift = (shift % 32) as usize; + let a = a as u64; //leave room for the shifted bits + let big_a = BigInt::from(a); + TestResult::from_bool(BigInt::from(a >> shift) == big_a >> shift) + } + + qc.quickcheck(test_shr_unsigned as fn(u64, u8) -> TestResult); + qc.quickcheck(test_shr_signed as fn(i64, u8) -> TestResult); + qc.quickcheck(test_shl_unsigned as fn(u32, u8) -> TestResult); + qc.quickcheck(test_shl_signed as fn(i32, u8) -> TestResult); +} + +#[test] +fn quickcheck_modpow() { + let gen = Gen::new(usize::MAX); + let mut qc = QuickCheck::new().gen(gen); + + fn simple_modpow(base: &BigInt, exponent: &BigInt, modulus: &BigInt) -> BigInt { + assert!(!exponent.is_negative()); + let mut result = BigInt::one().mod_floor(modulus); + let mut base = base.mod_floor(modulus); + let mut exponent = exponent.clone(); + while !exponent.is_zero() { + if exponent.is_odd() { + result = (result * &base).mod_floor(modulus); + } + base = (&base * &base).mod_floor(modulus); + exponent >>= 1; + } + result + } + + fn test_modpow(base: i128, exponent: u128, modulus: i128) -> TestResult { + if modulus.is_zero() { + TestResult::discard() + } else { + let base = BigInt::from(base); + let exponent = BigInt::from(exponent); + let modulus = BigInt::from(modulus); + let modpow = base.modpow(&exponent, &modulus); + let simple = simple_modpow(&base, &exponent, &modulus); + if modpow != simple { + eprintln!("{}.modpow({}, {})", base, exponent, modulus); + eprintln!(" expected {}", simple); + eprintln!(" actual {}", modpow); + TestResult::failed() + } else { + TestResult::passed() + } + } + } + + qc.quickcheck(test_modpow as fn(i128, u128, i128) -> TestResult); +} + +#[test] +fn quickcheck_modinv() { + let gen = Gen::new(usize::MAX); + let mut qc = QuickCheck::new().gen(gen); + + fn test_modinv(value: i128, modulus: i128) -> TestResult { + if modulus.is_zero() { + TestResult::discard() + } else { + let value = 
BigInt::from(value); + let modulus = BigInt::from(modulus); + match (value.modinv(&modulus), value.gcd(&modulus).is_one()) { + (None, false) => TestResult::passed(), + (None, true) => { + eprintln!("{}.modinv({}) -> None, expected Some(_)", value, modulus); + TestResult::failed() + } + (Some(inverse), false) => { + eprintln!( + "{}.modinv({}) -> Some({}), expected None", + value, modulus, inverse + ); + TestResult::failed() + } + (Some(inverse), true) => { + // The inverse should either be in [0,m) or (m,0] + let zero = BigInt::zero(); + if (modulus.is_positive() && !(zero <= inverse && inverse < modulus)) + || (modulus.is_negative() && !(modulus < inverse && inverse <= zero)) + { + eprintln!( + "{}.modinv({}) -> Some({}) is out of range", + value, modulus, inverse + ); + return TestResult::failed(); + } + + // We don't know the expected inverse, but we can verify the product ≡ 1 + let product = (&value * &inverse).mod_floor(&modulus); + let mod_one = BigInt::one().mod_floor(&modulus); + if product != mod_one { + eprintln!("{}.modinv({}) -> Some({})", value, modulus, inverse); + eprintln!( + "{} * {} ≡ {}, expected {}", + value, inverse, product, mod_one + ); + return TestResult::failed(); + } + TestResult::passed() + } + } + } + } + + qc.quickcheck(test_modinv as fn(i128, i128) -> TestResult); +} + +#[test] +fn quickcheck_to_float_equals_i128_cast() { + let gen = Gen::new(usize::MAX); + let mut qc = QuickCheck::new().gen(gen).tests(1_000_000); + + fn to_f32_equals_i128_cast(value: i128) -> bool { + BigInt::from(value).to_f32() == Some(value as f32) + } + + fn to_f64_equals_i128_cast(value: i128) -> bool { + BigInt::from(value).to_f64() == Some(value as f64) + } + + qc.quickcheck(to_f32_equals_i128_cast as fn(i128) -> bool); + qc.quickcheck(to_f64_equals_i128_cast as fn(i128) -> bool); +} diff --git a/vendor/num-bigint-generic/ci/big_rand/Cargo.toml b/vendor/num-bigint-generic/ci/big_rand/Cargo.toml new file mode 100644 index 000000000..d2bfe3530 --- /dev/null +++ 
b/vendor/num-bigint-generic/ci/big_rand/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "big_rand" +version = "0.1.0" +authors = ["Josh Stone "] +edition = "2018" + +[dependencies] +num-traits = "0.2.11" +rand = "0.8" +rand_chacha = "0.3" +rand_isaac = "0.3" +rand_xorshift = "0.3" + +[dependencies.num-bigint] +features = ["rand"] +path = "../.." diff --git a/vendor/num-bigint-generic/ci/big_rand/src/lib.rs b/vendor/num-bigint-generic/ci/big_rand/src/lib.rs new file mode 100644 index 000000000..69728777f --- /dev/null +++ b/vendor/num-bigint-generic/ci/big_rand/src/lib.rs @@ -0,0 +1,384 @@ +//! Test randomization of `BigUint` and `BigInt` +//! +//! This test is in a completely separate crate so `rand::thread_rng()` +//! can be available without "infecting" the rest of the build with +//! `rand`'s default features, especially not `rand/std`. + +#![cfg(test)] + +mod torture; + +mod biguint { + use num_bigint::{BigUint, RandBigInt, RandomBits}; + use num_traits::Zero; + use rand::distributions::{Distribution, Uniform}; + use rand::thread_rng; + use rand::{Rng, SeedableRng}; + + #[test] + fn test_rand() { + let mut rng = thread_rng(); + let n: BigUint = rng.gen_biguint(137); + assert!(n.bits() <= 137); + assert!(rng.gen_biguint(0).is_zero()); + } + + #[test] + fn test_rand_bits() { + let mut rng = thread_rng(); + let n: BigUint = rng.sample(&RandomBits::new(137)); + assert!(n.bits() <= 137); + let z: BigUint = rng.sample(&RandomBits::new(0)); + assert!(z.is_zero()); + } + + #[test] + fn test_rand_range() { + let mut rng = thread_rng(); + + for _ in 0..10 { + assert_eq!( + rng.gen_biguint_range(&BigUint::from(236u32), &BigUint::from(237u32)), + BigUint::from(236u32) + ); + } + + let l = BigUint::from(403469000u32 + 2352); + let u = BigUint::from(403469000u32 + 3513); + for _ in 0..1000 { + let n: BigUint = rng.gen_biguint_below(&u); + assert!(n < u); + + let n: BigUint = rng.gen_biguint_range(&l, &u); + assert!(n >= l); + assert!(n < u); + } + } + + #[test] + 
#[should_panic] + fn test_zero_rand_range() { + thread_rng().gen_biguint_range(&BigUint::from(54u32), &BigUint::from(54u32)); + } + + #[test] + #[should_panic] + fn test_negative_rand_range() { + let mut rng = thread_rng(); + let l = BigUint::from(2352u32); + let u = BigUint::from(3513u32); + // Switching u and l should fail: + let _n: BigUint = rng.gen_biguint_range(&u, &l); + } + + #[test] + fn test_rand_uniform() { + let mut rng = thread_rng(); + + let tiny = Uniform::new(BigUint::from(236u32), BigUint::from(237u32)); + for _ in 0..10 { + assert_eq!(rng.sample(&tiny), BigUint::from(236u32)); + } + + let l = BigUint::from(403469000u32 + 2352); + let u = BigUint::from(403469000u32 + 3513); + let below = Uniform::new(BigUint::zero(), u.clone()); + let range = Uniform::new(l.clone(), u.clone()); + for _ in 0..1000 { + let n: BigUint = rng.sample(&below); + assert!(n < u); + + let n: BigUint = rng.sample(&range); + assert!(n >= l); + assert!(n < u); + } + } + + fn seeded_value_stability(expected: &[&str]) { + let mut seed = ::default(); + for (i, x) in seed.as_mut().iter_mut().enumerate() { + *x = (i as u8).wrapping_mul(191); + } + let mut rng = R::from_seed(seed); + for (i, &s) in expected.iter().enumerate() { + let n: BigUint = s.parse().unwrap(); + let r = rng.gen_biguint((1 << i) + i as u64); + assert_eq!(n, r); + } + } + + #[test] + fn test_chacha_value_stability() { + const EXPECTED: &[&str] = &[ + "0", + "0", + "52", + "84", + "23780", + "86502865016", + "187057847319509867386", + "34045731223080904464438757488196244981910", + "23813754422987836414755953516143692594193066497413249270287126597896871975915808", + "57401636903146945411652549098818446911814352529449356393690984105383482703074355\ + 67088360974672291353736011718191813678720755501317478656550386324355699624671", + ]; + use rand_chacha::ChaChaRng; + seeded_value_stability::(EXPECTED); + } + + #[test] + fn test_isaac_value_stability() { + const EXPECTED: &[&str] = &[ + "1", + "4", + "3", + "649", + 
"89116", + "7730042024", + "20773149082453254949", + "35999009049239918667571895439206839620281", + "10191757312714088681302309313551624007714035309632506837271600807524767413673006", + "37805949268912387809989378008822038725134260145886913321084097194957861133272558\ + 43458183365174899239251448892645546322463253898288141861183340823194379722556", + ]; + use rand_isaac::IsaacRng; + seeded_value_stability::(EXPECTED); + } + + #[test] + fn test_xorshift_value_stability() { + const EXPECTED: &[&str] = &[ + "1", + "0", + "37", + "395", + "181116", + "122718231117", + "1068467172329355695001", + "28246925743544411614293300167064395633287", + "12750053187017853048648861493745244146555950255549630854523304068318587267293038", + "53041498719137109355568081064978196049094604705283682101683207799515709404788873\ + 53417136457745727045473194367732849819278740266658219147356315674940229288531", + ]; + use rand_xorshift::XorShiftRng; + seeded_value_stability::(EXPECTED); + } + + #[test] + fn test_roots_rand() { + fn check>(x: T, n: u32) { + let x: BigUint = x.into(); + let root = x.nth_root(n); + println!("check {}.nth_root({}) = {}", x, n, root); + + if n == 2 { + assert_eq!(root, x.sqrt()) + } else if n == 3 { + assert_eq!(root, x.cbrt()) + } + + let lo = root.pow(n); + assert!(lo <= x); + assert_eq!(lo.nth_root(n), root); + if !lo.is_zero() { + assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32); + } + + let hi = (&root + 1u32).pow(n); + assert!(hi > x); + assert_eq!(hi.nth_root(n), &root + 1u32); + assert_eq!((&hi - 1u32).nth_root(n), root); + } + + let mut rng = thread_rng(); + let bit_range = Uniform::new(0, 2048); + let sample_bits: Vec<_> = bit_range.sample_iter(&mut rng).take(100).collect(); + for bits in sample_bits { + let x = rng.gen_biguint(bits); + for n in 2..11 { + check(x.clone(), n); + } + check(x.clone(), 100); + } + } +} + +mod bigint { + use num_bigint::{BigInt, RandBigInt, RandomBits}; + use num_traits::Zero; + use rand::distributions::Uniform; + use 
rand::thread_rng; + use rand::{Rng, SeedableRng}; + + #[test] + fn test_rand() { + let mut rng = thread_rng(); + let n: BigInt = rng.gen_bigint(137); + assert!(n.bits() <= 137); + assert!(rng.gen_bigint(0).is_zero()); + } + + #[test] + fn test_rand_bits() { + let mut rng = thread_rng(); + let n: BigInt = rng.sample(&RandomBits::new(137)); + assert!(n.bits() <= 137); + let z: BigInt = rng.sample(&RandomBits::new(0)); + assert!(z.is_zero()); + } + + #[test] + fn test_rand_range() { + let mut rng = thread_rng(); + + for _ in 0..10 { + assert_eq!( + rng.gen_bigint_range(&BigInt::from(236), &BigInt::from(237)), + BigInt::from(236) + ); + } + + fn check(l: BigInt, u: BigInt) { + let mut rng = thread_rng(); + for _ in 0..1000 { + let n: BigInt = rng.gen_bigint_range(&l, &u); + assert!(n >= l); + assert!(n < u); + } + } + let l: BigInt = BigInt::from(403469000 + 2352); + let u: BigInt = BigInt::from(403469000 + 3513); + check(l.clone(), u.clone()); + check(-l.clone(), u.clone()); + check(-u, -l); + } + + #[test] + #[should_panic] + fn test_zero_rand_range() { + thread_rng().gen_bigint_range(&BigInt::from(54), &BigInt::from(54)); + } + + #[test] + #[should_panic] + fn test_negative_rand_range() { + let mut rng = thread_rng(); + let l = BigInt::from(2352); + let u = BigInt::from(3513); + // Switching u and l should fail: + let _n: BigInt = rng.gen_bigint_range(&u, &l); + } + + #[test] + fn test_rand_uniform() { + let mut rng = thread_rng(); + + let tiny = Uniform::new(BigInt::from(236u32), BigInt::from(237u32)); + for _ in 0..10 { + assert_eq!(rng.sample(&tiny), BigInt::from(236u32)); + } + + fn check(l: BigInt, u: BigInt) { + let mut rng = thread_rng(); + let range = Uniform::new(l.clone(), u.clone()); + for _ in 0..1000 { + let n: BigInt = rng.sample(&range); + assert!(n >= l); + assert!(n < u); + } + } + let l: BigInt = BigInt::from(403469000 + 2352); + let u: BigInt = BigInt::from(403469000 + 3513); + check(l.clone(), u.clone()); + check(-l.clone(), u.clone()); + 
check(-u, -l); + } + + fn seeded_value_stability(expected: &[&str]) { + let mut seed = ::default(); + for (i, x) in seed.as_mut().iter_mut().enumerate() { + *x = (i as u8).wrapping_mul(191); + } + let mut rng = R::from_seed(seed); + for (i, &s) in expected.iter().enumerate() { + let n: BigInt = s.parse().unwrap(); + let r = rng.gen_bigint((1 << i) + i as u64); + assert_eq!(n, r); + } + } + + #[test] + fn test_chacha_value_stability() { + const EXPECTED: &[&str] = &[ + "0", + "-6", + "-1", + "1321", + "-147247", + "8486373526", + "-272736656290199720696", + "2731152629387534140535423510744221288522", + "-28820024790651190394679732038637785320661450462089347915910979466834461433196572", + "501454570554170484799723603981439288209930393334472085317977614690773821680884844\ + 8530978478667288338327570972869032358120588620346111979053742269317702532328", + ]; + use rand_chacha::ChaChaRng; + seeded_value_stability::(EXPECTED); + } + + #[test] + fn test_isaac_value_stability() { + const EXPECTED: &[&str] = &[ + "1", + "0", + "5", + "113", + "-132240", + "-36348760761", + "-365690596708430705434", + "-14090753008246284277803606722552430292432", + "-26313941628626248579319341019368550803676255307056857978955881718727601479436059", + "-14563174552421101848999036239003801073335703811160945137332228646111920972691151\ + 88341090358094331641182310792892459091016794928947242043358702692294695845817", + ]; + use rand_isaac::IsaacRng; + seeded_value_stability::(EXPECTED); + } + + #[test] + fn test_xorshift_value_stability() { + const EXPECTED: &[&str] = &[ + "-1", + "-4", + "11", + "-1802", + "966495", + "-62592045703", + "-602281783447192077116", + "-34335811410223060575607987996861632509125", + "29156580925282215857325937227200350542000244609280383263289720243118706105351199", + "49920038676141573457451407325930326489996232208489690499754573826911037849083623\ + 24546142615325187412887314466195222441945661833644117700809693098722026764846", + ]; + use rand_xorshift::XorShiftRng; 
+ seeded_value_stability::(EXPECTED); + } + + #[test] + fn test_random_shr() { + use rand::distributions::Standard; + use rand::Rng; + let rng = rand::thread_rng(); + + for p in rng.sample_iter::(&Standard).take(1000) { + let big = BigInt::from(p); + let bigger = &big << 1000; + assert_eq!(&bigger >> 1000, big); + for i in 0..64 { + let answer = BigInt::from(p >> i); + assert_eq!(&big >> i, answer); + assert_eq!(&bigger >> (1000 + i), answer); + } + } + } +} diff --git a/vendor/num-bigint-generic/ci/big_rand/src/torture.rs b/vendor/num-bigint-generic/ci/big_rand/src/torture.rs new file mode 100644 index 000000000..72059ef20 --- /dev/null +++ b/vendor/num-bigint-generic/ci/big_rand/src/torture.rs @@ -0,0 +1,70 @@ +use num_bigint::RandBigInt; +use num_traits::Zero; +use rand::prelude::*; +use rand_xorshift::XorShiftRng; + +fn get_rng() -> XorShiftRng { + XorShiftRng::seed_from_u64(0x1234_5678_9abc_def0) +} + +fn test_mul_divide_torture_count(count: usize) { + let bits_max = 1 << 12; + let mut rng = get_rng(); + + for _ in 0..count { + // Test with numbers of random sizes: + let xbits = rng.gen_range(0..bits_max); + let ybits = rng.gen_range(0..bits_max); + + let x = rng.gen_biguint(xbits); + let y = rng.gen_biguint(ybits); + + if x.is_zero() || y.is_zero() { + continue; + } + + let prod = &x * &y; + assert_eq!(&prod / &x, y); + assert_eq!(&prod / &y, x); + } +} + +#[test] +fn test_mul_divide_torture() { + test_mul_divide_torture_count(1_000); +} + +#[test] +#[ignore] +fn test_mul_divide_torture_long() { + test_mul_divide_torture_count(1_000_000); +} + +fn test_factored_mul_torture_count(count: usize) { + let bits = 1 << 16; + let mut rng = get_rng(); + + for _ in 0..count { + let w = rng.gen_biguint(bits); + let x = rng.gen_biguint(bits); + let y = rng.gen_biguint(bits); + let z = rng.gen_biguint(bits); + + let prod1 = (&w * &x) * (&y * &z); + let prod2 = (&w * &y) * (&x * &z); + let prod3 = (&w * &z) * (&x * &y); + assert_eq!(prod1, prod2); + assert_eq!(prod2, 
prod3); + } +} + +#[test] +fn test_factored_mul_torture() { + test_factored_mul_torture_count(50); +} + +#[test] +#[ignore] +fn test_factored_mul_torture_long() { + test_factored_mul_torture_count(1_000); +} diff --git a/vendor/num-bigint-generic/ci/big_serde/Cargo.toml b/vendor/num-bigint-generic/ci/big_serde/Cargo.toml new file mode 100644 index 000000000..f18413a2f --- /dev/null +++ b/vendor/num-bigint-generic/ci/big_serde/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "big_serde" +version = "0.1.0" +authors = ["Josh Stone "] +edition = "2018" + +[dependencies] +num-traits = "0.2.11" +serde = "1.0" +serde_test = "1.0" + +[dependencies.num-bigint] +features = ["serde"] +path = "../.." diff --git a/vendor/num-bigint-generic/ci/big_serde/src/lib.rs b/vendor/num-bigint-generic/ci/big_serde/src/lib.rs new file mode 100644 index 000000000..4ee7b632c --- /dev/null +++ b/vendor/num-bigint-generic/ci/big_serde/src/lib.rs @@ -0,0 +1,163 @@ +//! Test serialization and deserialization of `BigUint` and `BigInt` +//! +//! The serialized formats should not change, even if we change our +//! internal representation, because we want to preserve forward and +//! backward compatibility of serialized data! +//! +//! This test is in a completely separate crate so its `serde_test` +//! dependency does not "infect" the rest of the build with `serde`'s +//! default features, especially not `serde/std`. 
+ +#![cfg(test)] + +use num_bigint::{BigInt, BigUint}; +use num_traits::{One, Zero}; +use serde::{de::DeserializeOwned, Serialize}; +use serde_test::{assert_de_tokens, assert_ser_tokens, assert_tokens, Token}; +use std::{fmt::Debug, panic::catch_unwind}; + +#[test] +fn biguint_zero() { + let tokens = [Token::Seq { len: Some(0) }, Token::SeqEnd]; + assert_tokens(&BigUint::zero(), &tokens); +} + +#[test] +fn bigint_zero() { + let tokens = [ + Token::Tuple { len: 2 }, + Token::I8(0), + Token::Seq { len: Some(0) }, + Token::SeqEnd, + Token::TupleEnd, + ]; + assert_tokens(&BigInt::zero(), &tokens); +} + +#[test] +fn biguint_one() { + let tokens = [Token::Seq { len: Some(1) }, Token::U32(1), Token::SeqEnd]; + assert_tokens(&BigUint::one(), &tokens); +} + +#[test] +fn bigint_one() { + let tokens = [ + Token::Tuple { len: 2 }, + Token::I8(1), + Token::Seq { len: Some(1) }, + Token::U32(1), + Token::SeqEnd, + Token::TupleEnd, + ]; + assert_tokens(&BigInt::one(), &tokens); +} + +#[test] +fn bigint_negone() { + let tokens = [ + Token::Tuple { len: 2 }, + Token::I8(-1), + Token::Seq { len: Some(1) }, + Token::U32(1), + Token::SeqEnd, + Token::TupleEnd, + ]; + assert_tokens(&-BigInt::one(), &tokens); +} + +// Generated independently from python `hex(factorial(100))` +const FACTORIAL_100: &[u32] = &[ + 0x00000000, 0x00000000, 0x00000000, 0x2735c61a, 0xee8b02ea, 0xb3b72ed2, 0x9420c6ec, 0x45570cca, + 0xdf103917, 0x943a321c, 0xeb21b5b2, 0x66ef9a70, 0xa40d16e9, 0x28d54bbd, 0xdc240695, 0x964ec395, + 0x1b30, +]; + +#[test] +fn biguint_factorial_100() { + let n: BigUint = (1u8..101).product(); + + let mut tokens = vec![]; + tokens.push(Token::Seq { + len: Some(FACTORIAL_100.len()), + }); + tokens.extend(FACTORIAL_100.iter().map(|&u| Token::U32(u))); + tokens.push(Token::SeqEnd); + + assert_tokens(&n, &tokens); +} + +#[test] +fn bigint_factorial_100() { + let n: BigInt = (1i8..101).product(); + + let mut tokens = vec![]; + tokens.push(Token::Tuple { len: 2 }); + 
tokens.push(Token::I8(1)); + tokens.push(Token::Seq { + len: Some(FACTORIAL_100.len()), + }); + tokens.extend(FACTORIAL_100.iter().map(|&u| Token::U32(u))); + tokens.push(Token::SeqEnd); + tokens.push(Token::TupleEnd); + + assert_tokens(&n, &tokens); +} + +#[test] +fn big_digits() { + // Try a few different lengths for u32/u64 digit coverage + for len in 1..10 { + let digits = 1u32..=len; + let n = BigUint::new(digits.clone().collect()); + + let mut tokens = vec![]; + tokens.push(Token::Seq { + len: Some(len as usize), + }); + tokens.extend(digits.map(Token::U32)); + tokens.push(Token::SeqEnd); + + assert_tokens(&n, &tokens); + + let n = BigInt::from(n); + tokens.insert(0, Token::Tuple { len: 2 }); + tokens.insert(1, Token::I8(1)); + tokens.push(Token::TupleEnd); + assert_tokens(&n, &tokens); + + tokens[1] = Token::I8(-1); + assert_tokens(&-n, &tokens); + } +} + +#[test] +fn bad_size_hint_int() { + bad_size_hint::(&[Token::Tuple { len: 2 }, Token::I8(1)], &[Token::TupleEnd]); +} + +#[test] +fn bad_size_hint_uint() { + bad_size_hint::(&[], &[]); +} + +fn bad_size_hint( + prefix: &[Token], + suffix: &[Token], +) { + let mut tokens = [ + prefix, + &[Token::Seq { len: Some(1) }, Token::U32(1), Token::SeqEnd], + suffix, + ] + .concat(); + + assert_tokens(&T::one(), &tokens); + + tokens[prefix.len()] = Token::Seq { + len: Some(usize::MAX), + }; + + catch_unwind(|| assert_ser_tokens(&T::one(), &tokens)).unwrap_err(); + assert_de_tokens(&T::one(), &tokens); +} diff --git a/vendor/num-bigint-generic/ci/rustup.sh b/vendor/num-bigint-generic/ci/rustup.sh new file mode 100755 index 000000000..144042bc0 --- /dev/null +++ b/vendor/num-bigint-generic/ci/rustup.sh @@ -0,0 +1,10 @@ +#!/bin/sh +# Use rustup to locally run the same suite of tests as .github/workflows/ +# (You should first install/update all of the versions below.) 
+ +set -ex + +ci=$(dirname "$0") +for version in 1.60.0 stable beta nightly; do + rustup run "$version" "$ci/test_full.sh" +done diff --git a/vendor/num-bigint-generic/ci/test_full.sh b/vendor/num-bigint-generic/ci/test_full.sh new file mode 100755 index 000000000..9483395b8 --- /dev/null +++ b/vendor/num-bigint-generic/ci/test_full.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +set -e + +CRATE=num-bigint +MSRV=1.60 + +get_rust_version() { + local array + IFS=' ' read -ra array <<< "$(rustc --version)" + echo "${array[1]}"; + return 0; +} +RUST_VERSION=$(get_rust_version) + +check_version() { + IFS=. read -ra rust <<< "$RUST_VERSION" + IFS=. read -ra want <<< "$1" + [[ "${rust[0]}" -gt "${want[0]}" || + ( "${rust[0]}" -eq "${want[0]}" && + "${rust[1]}" -ge "${want[1]}" ) + ]] +} + +echo "Testing $CRATE on rustc $RUST_VERSION" +if ! check_version $MSRV ; then + echo "The minimum for $CRATE is rustc $MSRV" + exit 1 +fi + +STD_FEATURES=(arbitrary quickcheck rand serde) +NO_STD_FEATURES=(serde rand) +echo "Testing supported features: ${STD_FEATURES[*]}" +if [ -n "${NO_STD_FEATURES[*]}" ]; then + echo " no_std supported features: ${NO_STD_FEATURES[*]}" +fi + +# arbitrary 1.1.4 started using array::from_fn +check_version 1.63.0 || cargo update -p arbitrary --precise 1.1.3 + +set -x + +# test the default with std +cargo build +cargo test + +# test each isolated feature with std +for feature in "${STD_FEATURES[@]}"; do + cargo build --no-default-features --features="std $feature" + cargo test --no-default-features --features="std $feature" +done + +# test all supported features with std +cargo build --no-default-features --features="std ${STD_FEATURES[*]}" +cargo test --no-default-features --features="std ${STD_FEATURES[*]}" + + +if [ -n "${NO_STD_FEATURES[*]}" ]; then + # test minimal `no_std` + cargo build --no-default-features + cargo test --no-default-features + + # test each isolated feature without std + for feature in "${NO_STD_FEATURES[@]}"; do + cargo build 
--no-default-features --features="$feature" + cargo test --no-default-features --features="$feature" + done + + # test all supported features without std + cargo build --no-default-features --features="${NO_STD_FEATURES[*]}" + cargo test --no-default-features --features="${NO_STD_FEATURES[*]}" +fi + + +# make sure benchmarks can be built and sanity-tested +if rustc --version | grep -q nightly; then + cargo test --all-features --benches +fi + +case "${STD_FEATURES[*]}" in + *serde*) ( + cd ci/big_serde + cargo test + ) ;;& + *rand*) cargo test --manifest-path ci/big_rand/Cargo.toml ;;& + *quickcheck*) ( + cd ci/big_quickcheck + cargo test + ) ;;& +esac diff --git a/vendor/num-bigint-generic/src/bigint.rs b/vendor/num-bigint-generic/src/bigint.rs new file mode 100644 index 000000000..ac1c8d918 --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint.rs @@ -0,0 +1,1250 @@ +// `Add`/`Sub` ops may flip from `BigInt` to its `BigUint` magnitude +#![allow(clippy::suspicious_arithmetic_impl)] + +use alloc::{string::String, vec::Vec}; +use core::{ + cmp::Ordering::{self, Equal}, + default::Default, + fmt, hash, + ops::{Neg, Not}, + str, +}; +use tinyvec::TinyVec; + +use num_integer::{Integer, Roots}; +use num_traits::{Num, One, Pow, Signed, Zero}; + +use self::Sign::{Minus, NoSign, Plus}; + +use crate::{ + big_digit::BigDigit, + biguint::{to_str_radix_reversed, BigUint, U32Digits, U64Digits, NLIMBS}, +}; + +mod addition; +mod division; +mod multiplication; +mod subtraction; + +mod arbitrary; +mod bits; +mod convert; +mod power; +mod serde; +mod shift; + +/// A `Sign` is a [`BigInt`]'s composing element. +#[derive(PartialEq, PartialOrd, Eq, Ord, Copy, Clone, Debug, Hash)] +pub enum Sign { + Minus, + NoSign, + Plus, +} + +impl Neg for Sign { + type Output = Sign; + + /// Negate `Sign` value. 
+ #[inline] + fn neg(self) -> Sign { + match self { + Minus => Plus, + NoSign => NoSign, + Plus => Minus, + } + } +} + +impl BigInt { + pub fn to_digits(&self) -> BigInt { + BigInt { + sign: self.sign, + data: self.data.to_digits(), + } + } +} + +/// A big signed integer type. +pub struct BigInt { + sign: Sign, + data: BigUint, +} + +// Note: derived `Clone` doesn't specialize `clone_from`, +// but we want to keep the allocation in `data`. +impl Clone for BigInt { + #[inline] + fn clone(&self) -> Self { + BigInt { + sign: self.sign, + data: self.data.clone(), + } + } + + #[inline] + fn clone_from(&mut self, other: &Self) { + self.sign = other.sign; + self.data.clone_from(&other.data); + } +} + +impl hash::Hash for BigInt { + #[inline] + fn hash(&self, state: &mut H) { + debug_assert!((self.sign != NoSign) ^ self.data.is_zero()); + self.sign.hash(state); + if self.sign != NoSign { + self.data.hash(state); + } + } +} + +impl PartialEq for BigInt { + #[inline] + fn eq(&self, other: &BigInt) -> bool { + debug_assert!((self.sign != NoSign) ^ self.data.is_zero()); + debug_assert!((other.sign != NoSign) ^ other.data.is_zero()); + self.sign == other.sign && (self.sign == NoSign || self.data == other.data) + } +} + +impl Eq for BigInt {} + +impl PartialOrd for BigInt { + #[inline] + fn partial_cmp(&self, other: &BigInt) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for BigInt { + #[inline] + fn cmp(&self, other: &BigInt) -> Ordering { + debug_assert!((self.sign != NoSign) ^ self.data.is_zero()); + debug_assert!((other.sign != NoSign) ^ other.data.is_zero()); + let scmp = self.sign.cmp(&other.sign); + if scmp != Equal { + return scmp; + } + + match self.sign { + NoSign => Equal, + Plus => self.data.cmp(&other.data), + Minus => other.data.cmp(&self.data), + } + } +} + +impl Default for BigInt { + #[inline] + fn default() -> BigInt { + Self::zero() + } +} + +impl fmt::Debug for BigInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + 
fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for BigInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad_integral(!self.is_negative(), "", &self.data.to_str_radix(10)) + } +} + +impl fmt::Binary for BigInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad_integral(!self.is_negative(), "0b", &self.data.to_str_radix(2)) + } +} + +impl fmt::Octal for BigInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad_integral(!self.is_negative(), "0o", &self.data.to_str_radix(8)) + } +} + +impl fmt::LowerHex for BigInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad_integral(!self.is_negative(), "0x", &self.data.to_str_radix(16)) + } +} + +impl fmt::UpperHex for BigInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut s = self.data.to_str_radix(16); + s.make_ascii_uppercase(); + f.pad_integral(!self.is_negative(), "0x", &s) + } +} + +// !-2 = !...f fe = ...0 01 = +1 +// !-1 = !...f ff = ...0 00 = 0 +// ! 
0 = !...0 00 = ...f ff = -1 +// !+1 = !...0 01 = ...f fe = -2 +impl Not for BigInt { + type Output = BigInt; + + fn not(mut self) -> BigInt { + match self.sign { + NoSign | Plus => { + self.data += 1u32; + self.sign = Minus; + } + Minus => { + self.data -= 1u32; + self.sign = if self.data.is_zero() { NoSign } else { Plus }; + } + } + self + } +} + +impl Not for &BigInt { + type Output = BigInt; + + fn not(self) -> BigInt { + match self.sign { + NoSign => -BigInt::one(), + Plus => -BigInt::from(&self.data + 1u32), + Minus => BigInt::from(&self.data - 1u32), + } + } +} + +impl Zero for BigInt { + #[inline] + fn zero() -> BigInt { + Self::zero() + } + + #[inline] + fn set_zero(&mut self) { + self.data.set_zero(); + self.sign = NoSign; + } + + #[inline] + fn is_zero(&self) -> bool { + self.sign == NoSign + } +} + +// impl ConstZero for BigInt { +// // forward to the inherent const +// const ZERO: Self = Self::zero(); +// } + +impl One for BigInt { + #[inline] + fn one() -> BigInt { + BigInt { + sign: Plus, + data: BigUint::one(), + } + } + + #[inline] + fn set_one(&mut self) { + self.data.set_one(); + self.sign = Plus; + } + + #[inline] + fn is_one(&self) -> bool { + self.sign == Plus && self.data.is_one() + } +} + +impl Signed for BigInt { + #[inline] + fn abs(&self) -> BigInt { + match self.sign { + Plus | NoSign => self.clone(), + Minus => BigInt::from(self.data.clone()), + } + } + + #[inline] + fn abs_sub(&self, other: &BigInt) -> BigInt { + if *self <= *other { + Self::zero() + } else { + self - other + } + } + + #[inline] + fn signum(&self) -> BigInt { + match self.sign { + Plus => BigInt::one(), + Minus => -BigInt::one(), + NoSign => Self::zero(), + } + } + + #[inline] + fn is_positive(&self) -> bool { + self.sign == Plus + } + + #[inline] + fn is_negative(&self) -> bool { + self.sign == Minus + } +} + +trait UnsignedAbs { + type Unsigned; + + fn checked_uabs(self) -> CheckedUnsignedAbs; +} + +enum CheckedUnsignedAbs { + Positive(T), + Negative(T), +} +use 
self::CheckedUnsignedAbs::{Negative, Positive}; + +macro_rules! impl_unsigned_abs { + ($Signed:ty, $Unsigned:ty) => { + impl UnsignedAbs for $Signed { + type Unsigned = $Unsigned; + + #[inline] + fn checked_uabs(self) -> CheckedUnsignedAbs { + if self >= 0 { + Positive(self as $Unsigned) + } else { + Negative(self.wrapping_neg() as $Unsigned) + } + } + } + }; +} +impl_unsigned_abs!(i8, u8); +impl_unsigned_abs!(i16, u16); +impl_unsigned_abs!(i32, u32); +impl_unsigned_abs!(i64, u64); +impl_unsigned_abs!(i128, u128); +impl_unsigned_abs!(isize, usize); + +impl Neg for BigInt { + type Output = BigInt; + + #[inline] + fn neg(mut self) -> BigInt { + self.sign = -self.sign; + self + } +} + +impl Neg for &BigInt { + type Output = BigInt; + + #[inline] + fn neg(self) -> BigInt { + -self.clone() + } +} + +impl Integer for BigInt { + #[inline] + fn div_rem(&self, other: &BigInt) -> (BigInt, BigInt) { + // r.sign == self.sign + let (d_ui, r_ui) = self.data.div_rem(&other.data); + let d = BigInt::from_biguint(self.sign, d_ui); + let r = BigInt::from_biguint(self.sign, r_ui); + if other.is_negative() { + (-d, r) + } else { + (d, r) + } + } + + #[inline] + fn div_floor(&self, other: &BigInt) -> BigInt { + let (d_ui, m) = self.data.div_mod_floor(&other.data); + let d = BigInt::from(d_ui); + match (self.sign, other.sign) { + (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => d, + (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => { + if m.is_zero() { + -d + } else { + -d - 1u32 + } + } + (_, NoSign) => unreachable!(), + } + } + + #[inline] + fn mod_floor(&self, other: &BigInt) -> BigInt { + // m.sign == other.sign + let m_ui = self.data.mod_floor(&other.data); + let m = BigInt::from_biguint(other.sign, m_ui); + match (self.sign, other.sign) { + (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => m, + (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => { + if m.is_zero() { + m + } else { + other - m + } + } + (_, NoSign) => unreachable!(), + } + } + + fn div_mod_floor(&self, other: 
&BigInt) -> (BigInt, BigInt) { + // m.sign == other.sign + let (d_ui, m_ui) = self.data.div_mod_floor(&other.data); + let d = BigInt::from(d_ui); + let m = BigInt::from_biguint(other.sign, m_ui); + match (self.sign, other.sign) { + (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => (d, m), + (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => { + if m.is_zero() { + (-d, m) + } else { + (-d - 1u32, other - m) + } + } + (_, NoSign) => unreachable!(), + } + } + + #[inline] + fn div_ceil(&self, other: &Self) -> Self { + let (d_ui, m) = self.data.div_mod_floor(&other.data); + let d = BigInt::from(d_ui); + match (self.sign, other.sign) { + (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => -d, + (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => { + if m.is_zero() { + d + } else { + d + 1u32 + } + } + (_, NoSign) => unreachable!(), + } + } + + /// Calculates the Greatest Common Divisor (GCD) of the number and `other`. + /// + /// The result is always positive. + #[inline] + fn gcd(&self, other: &BigInt) -> BigInt { + BigInt::from(self.data.gcd(&other.data)) + } + + /// Calculates the Lowest Common Multiple (LCM) of the number and `other`. + #[inline] + fn lcm(&self, other: &BigInt) -> BigInt { + BigInt::from(self.data.lcm(&other.data)) + } + + /// Calculates the Greatest Common Divisor (GCD) and + /// Lowest Common Multiple (LCM) together. + #[inline] + fn gcd_lcm(&self, other: &BigInt) -> (BigInt, BigInt) { + let (gcd, lcm) = self.data.gcd_lcm(&other.data); + (BigInt::from(gcd), BigInt::from(lcm)) + } + + /// Greatest common divisor, least common multiple, and Bézout coefficients. + #[inline] + fn extended_gcd_lcm( + &self, + other: &BigInt, + ) -> (num_integer::ExtendedGcd>, BigInt) { + let egcd = self.extended_gcd(other); + let lcm = if egcd.gcd.is_zero() { + Self::zero() + } else { + BigInt::from(&self.data / &egcd.gcd.data * &other.data) + }; + (egcd, lcm) + } + + /// Deprecated, use `is_multiple_of` instead. 
+ #[inline] + fn divides(&self, other: &BigInt) -> bool { + self.is_multiple_of(other) + } + + /// Returns `true` if the number is a multiple of `other`. + #[inline] + fn is_multiple_of(&self, other: &BigInt) -> bool { + self.data.is_multiple_of(&other.data) + } + + /// Returns `true` if the number is divisible by `2`. + #[inline] + fn is_even(&self) -> bool { + self.data.is_even() + } + + /// Returns `true` if the number is not divisible by `2`. + #[inline] + fn is_odd(&self) -> bool { + self.data.is_odd() + } + + /// Rounds up to nearest multiple of argument. + #[inline] + fn next_multiple_of(&self, other: &Self) -> Self { + let m = self.mod_floor(other); + if m.is_zero() { + self.clone() + } else { + self + (other - m) + } + } + /// Rounds down to nearest multiple of argument. + #[inline] + fn prev_multiple_of(&self, other: &Self) -> Self { + self - self.mod_floor(other) + } + + fn dec(&mut self) { + *self -= 1u32; + } + + fn inc(&mut self) { + *self += 1u32; + } +} + +impl Roots for BigInt { + fn nth_root(&self, n: u32) -> Self { + assert!( + !(self.is_negative() && n.is_even()), + "root of degree {} is imaginary", + n + ); + + BigInt::from_biguint(self.sign, self.data.nth_root(n)) + } + + fn sqrt(&self) -> Self { + assert!(!self.is_negative(), "square root is imaginary"); + + BigInt::from_biguint(self.sign, self.data.sqrt()) + } + + fn cbrt(&self) -> Self { + BigInt::from_biguint(self.sign, self.data.cbrt()) + } +} + +impl BigInt { + #[inline] + pub(crate) fn digits(&self) -> &[BigDigit] { + self.data.digits() + } + #[inline] + pub(crate) fn digits_mut(&mut self) -> &mut TinyVec<[BigDigit; N]> { + self.data.digits_mut() + } + #[inline] + pub(crate) fn normalize(&mut self) { + self.data.normalize(); + if self.data.is_zero() { + self.sign = NoSign; + } + } + #[inline] + pub(crate) fn capacity(&self) -> usize { + self.data.capacity() + } + #[inline] + pub(crate) fn len(&self) -> usize { + self.data.len() + } +} + +/// A generic trait for converting a value to a 
[`BigInt`]. This may return +/// `None` when converting from `f32` or `f64`, and will always succeed +/// when converting from any integer or unsigned primitive, or [`BigUint`]. +pub trait ToBigInt { + /// Converts the value of `self` to a [`BigInt`]. + fn to_bigint(&self) -> Option; +} + +impl BigInt { + // A constant `BigInt` with value 0, useful for static initialization. + // pub const ZERO: Self = BigInt { + // sign: NoSign, + // data: BigUint::zero(), + // }; + + pub fn zero() -> Self { + Self { + sign: NoSign, + data: BigUint::zero(), + } + } + + /// Creates and initializes a [`BigInt`]. + /// + /// The base 232 digits are ordered least significant digit first. + #[inline] + pub fn new(sign: Sign, digits: Vec) -> BigInt { + BigInt::from_biguint(sign, BigUint::::new(digits)) + } + + /// Creates and initializes a [`BigInt`]. + /// + /// The base 232 digits are ordered least significant digit first. + #[inline] + pub fn from_biguint(mut sign: Sign, mut data: BigUint) -> BigInt { + if sign == NoSign { + data.assign_from_slice(&[]); + } else if data.is_zero() { + sign = NoSign; + } + + BigInt { sign, data } + } + + /// Creates and initializes a [`BigInt`]. + /// + /// The base 232 digits are ordered least significant digit first. + #[inline] + pub fn from_slice(sign: Sign, slice: &[u32]) -> BigInt { + BigInt::from_biguint(sign, BigUint::::from_slice(slice)) + } + + /// Reinitializes a [`BigInt`]. + /// + /// The base 232 digits are ordered least significant digit first. + #[inline] + pub fn assign_from_slice(&mut self, sign: Sign, slice: &[u32]) { + if sign == NoSign { + self.set_zero(); + } else { + self.data.assign_from_slice(slice); + self.sign = if self.data.is_zero() { NoSign } else { sign }; + } + } + + /// Creates and initializes a [`BigInt`]. + /// + /// The bytes are in big-endian byte order. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, Sign}; + /// + /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"A"), + /// BigInt::parse_bytes(b"65", 10).unwrap()); + /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AA"), + /// BigInt::parse_bytes(b"16705", 10).unwrap()); + /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AB"), + /// BigInt::parse_bytes(b"16706", 10).unwrap()); + /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"Hello world!"), + /// BigInt::parse_bytes(b"22405534230753963835153736737", 10).unwrap()); + /// ``` + #[inline] + pub fn from_bytes_be(sign: Sign, bytes: &[u8]) -> BigInt { + BigInt::from_biguint(sign, BigUint::from_bytes_be(bytes)) + } + + /// Creates and initializes a [`BigInt`]. + /// + /// The bytes are in little-endian byte order. + #[inline] + pub fn from_bytes_le(sign: Sign, bytes: &[u8]) -> BigInt { + BigInt::from_biguint(sign, BigUint::from_bytes_le(bytes)) + } + + /// Creates and initializes a [`BigInt`] from an array of bytes in + /// two's complement binary representation. + /// + /// The digits are in big-endian base 28. + #[inline] + pub fn from_signed_bytes_be(digits: &[u8]) -> BigInt { + convert::from_signed_bytes_be(digits) + } + + /// Creates and initializes a [`BigInt`] from an array of bytes in two's complement. + /// + /// The digits are in little-endian base 28. + #[inline] + pub fn from_signed_bytes_le(digits: &[u8]) -> BigInt { + convert::from_signed_bytes_le(digits) + } + + /// Creates and initializes a [`BigInt`]. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, ToBigInt}; + /// + /// assert_eq!(BigInt::parse_bytes(b"1234", 10), ToBigInt::to_bigint(&1234)); + /// assert_eq!(BigInt::parse_bytes(b"ABCD", 16), ToBigInt::to_bigint(&0xABCD)); + /// assert_eq!(BigInt::parse_bytes(b"G", 16), None); + /// ``` + #[inline] + pub fn parse_bytes(buf: &[u8], radix: u32) -> Option> { + let s = str::from_utf8(buf).ok()?; + BigInt::::from_str_radix(s, radix).ok() + } + + /// Creates and initializes a [`BigInt`]. Each `u8` of the input slice is + /// interpreted as one digit of the number + /// and must therefore be less than `radix`. + /// + /// The bytes are in big-endian byte order. + /// `radix` must be in the range `2...256`. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, Sign}; + /// + /// let inbase190 = vec![15, 33, 125, 12, 14]; + /// let a = BigInt::from_radix_be(Sign::Minus, &inbase190, 190).unwrap(); + /// assert_eq!(a.to_radix_be(190), (Sign:: Minus, inbase190)); + /// ``` + pub fn from_radix_be(sign: Sign, buf: &[u8], radix: u32) -> Option> { + let u = BigUint::from_radix_be(buf, radix)?; + Some(BigInt::from_biguint(sign, u)) + } + + /// Creates and initializes a [`BigInt`]. Each `u8` of the input slice is + /// interpreted as one digit of the number + /// and must therefore be less than `radix`. + /// + /// The bytes are in little-endian byte order. + /// `radix` must be in the range `2...256`. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, Sign}; + /// + /// let inbase190 = vec![14, 12, 125, 33, 15]; + /// let a = BigInt::from_radix_be(Sign::Minus, &inbase190, 190).unwrap(); + /// assert_eq!(a.to_radix_be(190), (Sign::Minus, inbase190)); + /// ``` + pub fn from_radix_le(sign: Sign, buf: &[u8], radix: u32) -> Option> { + let u = BigUint::from_radix_le(buf, radix)?; + Some(BigInt::from_biguint(sign, u)) + } + + /// Returns the sign and the byte representation of the [`BigInt`] in big-endian byte order. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::{ToBigInt, Sign}; + /// + /// let i = -1125.to_bigint().unwrap(); + /// assert_eq!(i.to_bytes_be(), (Sign::Minus, vec![4, 101])); + /// ``` + #[inline] + pub fn to_bytes_be(&self) -> (Sign, Vec) { + (self.sign, self.data.to_bytes_be()) + } + + /// Returns the sign and the byte representation of the [`BigInt`] in little-endian byte order. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{ToBigInt, Sign}; + /// + /// let i = -1125.to_bigint().unwrap(); + /// assert_eq!(i.to_bytes_le(), (Sign::Minus, vec![101, 4])); + /// ``` + #[inline] + pub fn to_bytes_le(&self) -> (Sign, Vec) { + (self.sign, self.data.to_bytes_le()) + } + + /// Returns the sign and the `u32` digits representation of the [`BigInt`] ordered least + /// significant digit first. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, Sign}; + /// + /// assert_eq!(BigInt::from(-1125).to_u32_digits(), (Sign::Minus, vec![1125])); + /// assert_eq!(BigInt::from(4294967295u32).to_u32_digits(), (Sign::Plus, vec![4294967295])); + /// assert_eq!(BigInt::from(4294967296u64).to_u32_digits(), (Sign::Plus, vec![0, 1])); + /// assert_eq!(BigInt::from(-112500000000i64).to_u32_digits(), (Sign::Minus, vec![830850304, 26])); + /// assert_eq!(BigInt::from(112500000000i64).to_u32_digits(), (Sign::Plus, vec![830850304, 26])); + /// ``` + #[inline] + pub fn to_u32_digits(&self) -> (Sign, Vec) { + (self.sign, self.data.to_u32_digits()) + } + + /// Returns the sign and the `u64` digits representation of the [`BigInt`] ordered least + /// significant digit first. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, Sign}; + /// + /// assert_eq!(BigInt::from(-1125).to_u64_digits(), (Sign::Minus, vec![1125])); + /// assert_eq!(BigInt::from(4294967295u32).to_u64_digits(), (Sign::Plus, vec![4294967295])); + /// assert_eq!(BigInt::from(4294967296u64).to_u64_digits(), (Sign::Plus, vec![4294967296])); + /// assert_eq!(BigInt::from(-112500000000i64).to_u64_digits(), (Sign::Minus, vec![112500000000])); + /// assert_eq!(BigInt::from(112500000000i64).to_u64_digits(), (Sign::Plus, vec![112500000000])); + /// assert_eq!(BigInt::from(1u128 << 64).to_u64_digits(), (Sign::Plus, vec![0, 1])); + /// ``` + #[inline] + pub fn to_u64_digits(&self) -> (Sign, Vec) { + (self.sign, self.data.to_u64_digits()) + } + + /// Returns an iterator of `u32` digits representation of the [`BigInt`] ordered least + /// significant digit first. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigInt; + /// + /// assert_eq!(BigInt::from(-1125).iter_u32_digits().collect::>(), vec![1125]); + /// assert_eq!(BigInt::from(4294967295u32).iter_u32_digits().collect::>(), vec![4294967295]); + /// assert_eq!(BigInt::from(4294967296u64).iter_u32_digits().collect::>(), vec![0, 1]); + /// assert_eq!(BigInt::from(-112500000000i64).iter_u32_digits().collect::>(), vec![830850304, 26]); + /// assert_eq!(BigInt::from(112500000000i64).iter_u32_digits().collect::>(), vec![830850304, 26]); + /// ``` + #[inline] + pub fn iter_u32_digits(&self) -> U32Digits<'_> { + self.data.iter_u32_digits() + } + + /// Returns an iterator of `u64` digits representation of the [`BigInt`] ordered least + /// significant digit first. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigInt; + /// + /// assert_eq!(BigInt::from(-1125).iter_u64_digits().collect::>(), vec![1125u64]); + /// assert_eq!(BigInt::from(4294967295u32).iter_u64_digits().collect::>(), vec![4294967295u64]); + /// assert_eq!(BigInt::from(4294967296u64).iter_u64_digits().collect::>(), vec![4294967296u64]); + /// assert_eq!(BigInt::from(-112500000000i64).iter_u64_digits().collect::>(), vec![112500000000u64]); + /// assert_eq!(BigInt::from(112500000000i64).iter_u64_digits().collect::>(), vec![112500000000u64]); + /// assert_eq!(BigInt::from(1u128 << 64).iter_u64_digits().collect::>(), vec![0, 1]); + /// ``` + #[inline] + pub fn iter_u64_digits(&self) -> U64Digits<'_> { + self.data.iter_u64_digits() + } + + /// Returns the two's-complement byte representation of the [`BigInt`] in big-endian byte order. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::ToBigInt; + /// + /// let i = -1125.to_bigint().unwrap(); + /// assert_eq!(i.to_signed_bytes_be(), vec![251, 155]); + /// ``` + #[inline] + pub fn to_signed_bytes_be(&self) -> Vec { + convert::to_signed_bytes_be(self) + } + + /// Returns the two's-complement byte representation of the [`BigInt`] in little-endian byte order. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::ToBigInt; + /// + /// let i = -1125.to_bigint().unwrap(); + /// assert_eq!(i.to_signed_bytes_le(), vec![155, 251]); + /// ``` + #[inline] + pub fn to_signed_bytes_le(&self) -> Vec { + convert::to_signed_bytes_le(self) + } + + /// Returns the integer formatted as a string in the given radix. + /// `radix` must be in the range `2...36`. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigInt; + /// + /// let i = BigInt::parse_bytes(b"ff", 16).unwrap(); + /// assert_eq!(i.to_str_radix(16), "ff"); + /// ``` + #[inline] + pub fn to_str_radix(&self, radix: u32) -> String { + let mut v = to_str_radix_reversed(&self.data, radix); + + if self.is_negative() { + v.push(b'-'); + } + + v.reverse(); + unsafe { String::from_utf8_unchecked(v) } + } + + /// Returns the integer in the requested base in big-endian digit order. + /// The output is not given in a human readable alphabet but as a zero + /// based `u8` number. + /// `radix` must be in the range `2...256`. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, Sign}; + /// + /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_be(159), + /// (Sign::Minus, vec![2, 94, 27])); + /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27 + /// ``` + #[inline] + pub fn to_radix_be(&self, radix: u32) -> (Sign, Vec) { + (self.sign, self.data.to_radix_be(radix)) + } + + /// Returns the integer in the requested base in little-endian digit order. + /// The output is not given in a human readable alphabet but as a zero + /// based `u8` number. + /// `radix` must be in the range `2...256`. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, Sign}; + /// + /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_le(159), + /// (Sign::Minus, vec![27, 94, 2])); + /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2) + /// ``` + #[inline] + pub fn to_radix_le(&self, radix: u32) -> (Sign, Vec) { + (self.sign, self.data.to_radix_le(radix)) + } + + /// Returns the sign of the [`BigInt`] as a [`Sign`]. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, Sign}; + /// + /// assert_eq!(BigInt::from(1234).sign(), Sign::Plus); + /// assert_eq!(BigInt::from(-4321).sign(), Sign::Minus); + /// assert_eq!(BigInt::ZERO.sign(), Sign::NoSign); + /// ``` + #[inline] + pub fn sign(&self) -> Sign { + self.sign + } + + /// Returns the magnitude of the [`BigInt`] as a [`BigUint`]. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, BigUint}; + /// use num_traits::Zero; + /// + /// assert_eq!(BigInt::from(1234).magnitude(), &BigUint::from(1234u32)); + /// assert_eq!(BigInt::from(-4321).magnitude(), &BigUint::from(4321u32)); + /// assert!(BigInt::ZERO.magnitude().is_zero()); + /// ``` + #[inline] + pub fn magnitude(&self) -> &BigUint { + &self.data + } + + /// Convert this [`BigInt`] into its [`Sign`] and [`BigUint`] magnitude, + /// the reverse of [`BigInt::from_biguint()`]. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, BigUint, Sign}; + /// + /// assert_eq!(BigInt::from(1234).into_parts(), (Sign::Plus, BigUint::from(1234u32))); + /// assert_eq!(BigInt::from(-4321).into_parts(), (Sign::Minus, BigUint::from(4321u32))); + /// assert_eq!(BigInt::ZERO.into_parts(), (Sign::NoSign, BigUint::ZERO)); + /// ``` + #[inline] + pub fn into_parts(self) -> (Sign, BigUint) { + (self.sign, self.data) + } + + /// Determines the fewest bits necessary to express the [`BigInt`], + /// not including the sign. + #[inline] + pub fn bits(&self) -> u64 { + self.data.bits() + } + + /// Converts this [`BigInt`] into a [`BigUint`], if it's not negative. 
+ #[inline] + pub fn to_biguint(&self) -> Option> { + match self.sign { + Plus => Some(self.data.clone()), + NoSign => Some(BigUint::zero()), + Minus => None, + } + } + + #[inline] + pub fn checked_add(&self, v: &BigInt) -> Option> { + Some(self + v) + } + + #[inline] + pub fn checked_sub(&self, v: &BigInt) -> Option> { + Some(self - v) + } + + #[inline] + pub fn checked_mul(&self, v: &BigInt) -> Option> { + Some(self * v) + } + + #[inline] + pub fn checked_div(&self, v: &BigInt) -> Option> { + if v.is_zero() { + return None; + } + Some(self / v) + } + + /// Returns `self ^ exponent`. + pub fn pow(&self, exponent: u32) -> Self { + Pow::pow(self, exponent) + } + + /// Returns `(self ^ exponent) mod modulus` + /// + /// Note that this rounds like `mod_floor`, not like the `%` operator, + /// which makes a difference when given a negative `self` or `modulus`. + /// The result will be in the interval `[0, modulus)` for `modulus > 0`, + /// or in the interval `(modulus, 0]` for `modulus < 0` + /// + /// Panics if the exponent is negative or the modulus is zero. + pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self { + power::modpow::(self, exponent, modulus) + } + + /// Returns the modular multiplicative inverse if it exists, otherwise `None`. + /// + /// This solves for `x` such that `self * x ≡ 1 (mod modulus)`. + /// Note that this rounds like `mod_floor`, not like the `%` operator, + /// which makes a difference when given a negative `self` or `modulus`. + /// The solution will be in the interval `[0, modulus)` for `modulus > 0`, + /// or in the interval `(modulus, 0]` for `modulus < 0`, + /// and it exists if and only if `gcd(self, modulus) == 1`. 
+ /// + /// ``` + /// use num_bigint::BigInt; + /// use num_integer::Integer; + /// use num_traits::{One, Zero}; + /// + /// let m = BigInt::from(383); + /// + /// // Trivial cases + /// assert_eq!(BigInt::zero().modinv(&m), None); + /// assert_eq!(BigInt::one().modinv(&m), Some(BigInt::one())); + /// let neg1 = &m - 1u32; + /// assert_eq!(neg1.modinv(&m), Some(neg1)); + /// + /// // Positive self and modulus + /// let a = BigInt::from(271); + /// let x = a.modinv(&m).unwrap(); + /// assert_eq!(x, BigInt::from(106)); + /// assert_eq!(x.modinv(&m).unwrap(), a); + /// assert_eq!((&a * x).mod_floor(&m), BigInt::one()); + /// + /// // Negative self and positive modulus + /// let b = -&a; + /// let x = b.modinv(&m).unwrap(); + /// assert_eq!(x, BigInt::from(277)); + /// assert_eq!((&b * x).mod_floor(&m), BigInt::one()); + /// + /// // Positive self and negative modulus + /// let n = -&m; + /// let x = a.modinv(&n).unwrap(); + /// assert_eq!(x, BigInt::from(-277)); + /// assert_eq!((&a * x).mod_floor(&n), &n + 1); + /// + /// // Negative self and modulus + /// let x = b.modinv(&n).unwrap(); + /// assert_eq!(x, BigInt::from(-106)); + /// assert_eq!((&b * x).mod_floor(&n), &n + 1); + /// ``` + pub fn modinv(&self, modulus: &Self) -> Option { + let result = self.data.modinv(&modulus.data)?; + // The sign of the result follows the modulus, like `mod_floor`. + let (sign, mag) = match (self.is_negative(), modulus.is_negative()) { + (false, false) => (Plus, result), + (true, false) => (Plus, &modulus.data - result), + (false, true) => (Minus, &modulus.data - result), + (true, true) => (Minus, result), + }; + Some(BigInt::from_biguint(sign, mag)) + } + + /// Returns the truncated principal square root of `self` -- + /// see [`num_integer::Roots::sqrt()`]. + pub fn sqrt(&self) -> Self { + Roots::sqrt(self) + } + + /// Returns the truncated principal cube root of `self` -- + /// see [`num_integer::Roots::cbrt()`]. 
+ pub fn cbrt(&self) -> Self { + Roots::cbrt(self) + } + + /// Returns the truncated principal `n`th root of `self` -- + /// See [`num_integer::Roots::nth_root()`]. + pub fn nth_root(&self, n: u32) -> Self { + Roots::nth_root(self, n) + } + + /// Returns the number of least-significant bits that are zero, + /// or `None` if the entire number is zero. + pub fn trailing_zeros(&self) -> Option { + self.data.trailing_zeros() + } + + /// Returns whether the bit in position `bit` is set, + /// using the two's complement for negative numbers + pub fn bit(&self, bit: u64) -> bool { + if self.is_negative() { + // Let the binary representation of a number be + // ... 0 x 1 0 ... 0 + // Then the two's complement is + // ... 1 !x 1 0 ... 0 + // where !x is obtained from x by flipping each bit + if bit >= u64::from(crate::big_digit::BITS) * self.len() as u64 { + true + } else { + let trailing_zeros = self.data.trailing_zeros().unwrap(); + match Ord::cmp(&bit, &trailing_zeros) { + Ordering::Less => false, + Ordering::Equal => true, + Ordering::Greater => !self.data.bit(bit), + } + } + } else { + self.data.bit(bit) + } + } + + /// Sets or clears the bit in the given position, + /// using the two's complement for negative numbers + /// + /// Note that setting/clearing a bit (for positive/negative numbers, + /// respectively) greater than the current bit length, a reallocation + /// may be needed to store the new digits + pub fn set_bit(&mut self, bit: u64, value: bool) { + match self.sign { + Sign::Plus => self.data.set_bit(bit, value), + Sign::Minus => bits::set_negative_bit(self, bit, value), + Sign::NoSign => { + if value { + self.data.set_bit(bit, true); + self.sign = Sign::Plus; + } else { + // Clearing a bit for zero is a no-op + } + } + } + // The top bit may have been cleared, so normalize + self.normalize(); + } +} + +impl num_traits::FromBytes for BigInt { + type Bytes = [u8]; + + fn from_be_bytes(bytes: &Self::Bytes) -> Self { + Self::from_signed_bytes_be(bytes) + } + + 
fn from_le_bytes(bytes: &Self::Bytes) -> Self { + Self::from_signed_bytes_le(bytes) + } +} + +impl num_traits::ToBytes for BigInt { + type Bytes = Vec; + + fn to_be_bytes(&self) -> Self::Bytes { + self.to_signed_bytes_be() + } + + fn to_le_bytes(&self) -> Self::Bytes { + self.to_signed_bytes_le() + } +} + +#[test] +fn test_from_biguint() { + fn check(inp_s: Sign, inp_n: usize, ans_s: Sign, ans_n: usize) { + let inp: BigInt = BigInt::from_biguint(inp_s, BigUint::from(inp_n)); + let ans = BigInt { + sign: ans_s, + data: BigUint::from(ans_n), + }; + assert_eq!(inp, ans); + } + check(Plus, 1, Plus, 1); + check(Plus, 0, NoSign, 0); + check(Minus, 1, Minus, 1); + check(NoSign, 1, NoSign, 0); +} + +#[test] +fn test_from_slice() { + fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) { + let inp: BigInt = BigInt::from_slice(inp_s, &[inp_n]); + let ans = BigInt { + sign: ans_s, + data: BigUint::from(ans_n), + }; + assert_eq!(inp, ans); + } + check(Plus, 1, Plus, 1); + check(Plus, 0, NoSign, 0); + check(Minus, 1, Minus, 1); + check(NoSign, 1, NoSign, 0); +} + +#[test] +fn test_assign_from_slice() { + fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) { + let mut inp: BigInt = BigInt::from_slice(Minus, &[2627_u32, 0_u32, 9182_u32, 42_u32]); + inp.assign_from_slice(inp_s, &[inp_n]); + let ans = BigInt { + sign: ans_s, + data: BigUint::from(ans_n), + }; + assert_eq!(inp, ans); + } + check(Plus, 1, Plus, 1); + check(Plus, 0, NoSign, 0); + check(Minus, 1, Minus, 1); + check(NoSign, 1, NoSign, 0); +} diff --git a/vendor/num-bigint-generic/src/bigint/addition.rs b/vendor/num-bigint-generic/src/bigint/addition.rs new file mode 100644 index 000000000..4f49ecf54 --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint/addition.rs @@ -0,0 +1,249 @@ +use super::{ + BigInt, + CheckedUnsignedAbs::{Negative, Positive}, + Sign::{Minus, NoSign, Plus}, + UnsignedAbs, +}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::{ + cmp::Ordering::{Equal, Greater, Less}, + 
iter::Sum, + mem, + ops::{Add, AddAssign}, +}; +use num_traits::CheckedAdd; + +// We want to forward to BigUint::add, but it's not clear how that will go until +// we compare both sign and magnitude. So we duplicate this body for every +// val/ref combination, deferring that decision to BigUint's own forwarding. +macro_rules! bigint_add { + ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { + match ($a.sign, $b.sign) { + (_, NoSign) => $a_owned, + (NoSign, _) => $b_owned, + // same sign => keep the sign with the sum of magnitudes + (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data), + // opposite signs => keep the sign of the larger with the difference of magnitudes + (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) { + Less => BigInt::from_biguint($b.sign, $b_data - $a_data), + Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), + Equal => BigInt::zero(), + }, + } + }; +} + +impl Add<&BigInt> for &BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: &BigInt) -> BigInt { + bigint_add!( + self, + self.clone(), + &self.data, + other, + other.clone(), + &other.data + ) + } +} + +impl Add> for &BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: BigInt) -> BigInt { + bigint_add!(self, self.clone(), &self.data, other, other, other.data) + } +} + +impl Add<&BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: &BigInt) -> BigInt { + bigint_add!(self, self, self.data, other, other.clone(), &other.data) + } +} + +impl Add> for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: BigInt) -> BigInt { + bigint_add!(self, self, self.data, other, other, other.data) + } +} + +impl AddAssign<&BigInt> for BigInt { + #[inline] + fn add_assign(&mut self, other: &BigInt) { + let n = mem::replace(self, Self::zero()); + *self = n + other; + } +} +impl AddAssign> for BigInt { + #[inline] + fn add_assign(&mut self, other: BigInt) { 
+ self.add_assign(&other); + } +} + +promote_all_scalars!(impl Add for BigInt, add); +promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: u32) -> BigInt { + match self.sign { + NoSign => From::from(other), + Plus => BigInt::::from(self.data + other), + Minus => match self.data.cmp(&From::from(other)) { + Equal => Self::zero(), + Less => BigInt::::from(other - self.data), + Greater => -BigInt::::from(self.data - other), + }, + } + } +} + +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: u32) { + let n = mem::replace(self, Self::zero()); + *self = n + other; + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: u64) -> BigInt { + match self.sign { + NoSign => From::from(other), + Plus => BigInt::::from(self.data + other), + Minus => match self.data.cmp(&From::from(other)) { + Equal => Self::zero(), + Less => BigInt::::from(other - self.data), + Greater => -BigInt::::from(self.data - other), + }, + } + } +} + +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: u64) { + let n = mem::replace(self, Self::zero()); + *self = n + other; + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: u128) -> BigInt { + match self.sign { + NoSign => BigInt::::from(other), + Plus => BigInt::::from(self.data + other), + Minus => match self.data.cmp(&From::from(other)) { + Equal => Self::zero(), + Less => BigInt::::from(other - self.data), + Greater => -BigInt::::from(self.data - other), + }, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: u128) { + let n = mem::replace(self, Self::zero()); + *self = n 
+ other; + } +} + +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self + u, + Negative(u) => self - u, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self += u, + Negative(u) => *self -= u, + } + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self + u, + Negative(u) => self - u, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self += u, + Negative(u) => *self -= u, + } + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self + u, + Negative(u) => self - u, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self += u, + Negative(u) => *self -= u, + } + } +} + +impl CheckedAdd for BigInt { + #[inline] + fn checked_add(&self, v: &BigInt) -> Option> { + Some(self.add(v)) + } +} + +impl_sum_iter_type!(BigInt); diff --git a/vendor/num-bigint-generic/src/bigint/arbitrary.rs b/vendor/num-bigint-generic/src/bigint/arbitrary.rs new file mode 100644 index 000000000..3cb90b304 --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint/arbitrary.rs @@ -0,0 +1,43 @@ +#![cfg(any(feature = "quickcheck", feature = "arbitrary"))] + +use super::{BigInt, Sign}; +use crate::BigUint; + +#[cfg(feature = "quickcheck")] +use alloc::boxed::Box; + +#[cfg(feature = "quickcheck")] 
+#[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))] +impl quickcheck::Arbitrary for BigInt { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + let positive = bool::arbitrary(g); + let sign = if positive { Sign::Plus } else { Sign::Minus }; + Self::from_biguint(sign, BigUint::arbitrary(g)) + } + + fn shrink(&self) -> Box> { + let sign = self.sign(); + let unsigned_shrink = self.data.shrink(); + Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x))) + } +} + +#[cfg(feature = "arbitrary")] +#[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))] +impl arbitrary::Arbitrary<'_> for BigInt { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + let positive = bool::arbitrary(u)?; + let sign = if positive { Sign::Plus } else { Sign::Minus }; + Ok(Self::from_biguint(sign, BigUint::arbitrary(u)?)) + } + + fn arbitrary_take_rest(mut u: arbitrary::Unstructured<'_>) -> arbitrary::Result { + let positive = bool::arbitrary(&mut u)?; + let sign = if positive { Sign::Plus } else { Sign::Minus }; + Ok(Self::from_biguint(sign, BigUint::arbitrary_take_rest(u)?)) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and(bool::size_hint(depth), BigUint::size_hint(depth)) + } +} diff --git a/vendor/num-bigint-generic/src/bigint/bits.rs b/vendor/num-bigint-generic/src/bigint/bits.rs new file mode 100644 index 000000000..97dc036ee --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint/bits.rs @@ -0,0 +1,539 @@ +use super::{ + BigInt, + Sign::{Minus, NoSign, Plus}, +}; + +use crate::big_digit::{self, BigDigit, DoubleBigDigit}; + +use core::{ + cmp::Ordering::{Equal, Greater, Less}, + ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign}, +}; +use num_traits::{ToPrimitive, Zero}; +use tinyvec::TinyVec; + +// Negation in two's complement. +// acc must be initialized as 1 for least-significant digit. +// +// When negating, a carry (acc == 1) means that all the digits +// considered to this point were zero. 
This means that if all the +// digits of a negative BigInt have been considered, carry must be +// zero as we cannot have negative zero. +// +// 01 -> ...f ff +// ff -> ...f 01 +// 01 00 -> ...f ff 00 +// 01 01 -> ...f fe ff +// 01 ff -> ...f fe 01 +// ff 00 -> ...f 01 00 +// ff 01 -> ...f 00 ff +// ff ff -> ...f 00 01 +#[inline] +fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { + *acc += DoubleBigDigit::from(!a); + let lo = *acc as BigDigit; + *acc >>= big_digit::BITS; + lo +} + +// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1 +// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff +// answer is pos, has length of a +fn bitand_pos_neg(a: &mut [BigDigit], b: &[BigDigit]) { + let mut carry_b = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_b = negate_carry(bi, &mut carry_b); + *ai &= twos_b; + } + debug_assert!(b.len() > a.len() || carry_b == 0); +} + +// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff +// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1 +// answer is pos, has length of b +fn bitand_neg_pos(a: &mut TinyVec<[BigDigit; N]>, b: &[BigDigit]) { + let mut carry_a = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = twos_a & bi; + } + debug_assert!(a.len() > b.len() || carry_a == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => a.truncate(b.len()), + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().cloned()); + } + } +} + +// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff +// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff +// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100 +// answer is neg, has length of longest with a possible carry +fn bitand_neg_neg(a: &mut TinyVec<[BigDigit; N]>, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_b = 1; + let mut carry_and = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(twos_a & 
twos_b, &mut carry_and); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a, &mut carry_and); + } + debug_assert!(carry_a == 0); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_b = negate_carry(bi, &mut carry_b); + negate_carry(twos_b, &mut carry_and) + })); + debug_assert!(carry_b == 0); + } + } + if carry_and != 0 { + a.push(1); + } +} + +forward_val_val_binop!(impl BitAnd for BigInt, bitand); +forward_ref_val_binop!(impl BitAnd for BigInt, bitand); + +// do not use forward_ref_ref_binop_commutative! for bitand so that we can +// clone as needed, avoiding over-allocation +impl BitAnd<&BigInt> for &BigInt { + type Output = BigInt; + + #[inline] + fn bitand(self, other: &BigInt) -> BigInt { + match (self.sign, other.sign) { + (NoSign, _) | (_, NoSign) => BigInt::zero(), + (Plus, Plus) => BigInt::from(&self.data & &other.data), + (Plus, Minus) => self.clone() & other, + (Minus, Plus) => other.clone() & self, + (Minus, Minus) => { + // forward to val-ref, choosing the larger to clone + if self.len() >= other.len() { + self.clone() & other + } else { + other.clone() & self + } + } + } + } +} + +impl BitAnd<&BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn bitand(mut self, other: &BigInt) -> BigInt { + self &= other; + self + } +} + +impl BitAndAssign> for BigInt { + #[inline] + fn bitand_assign(&mut self, other: BigInt) { + self.bitand_assign(&other); + } +} + +impl BitAndAssign<&BigInt> for BigInt { + fn bitand_assign(&mut self, other: &BigInt) { + match (self.sign, other.sign) { + (NoSign, _) => {} + (_, NoSign) => self.set_zero(), + (Plus, Plus) => { + self.data &= &other.data; + if self.data.is_zero() { + self.sign = NoSign; + } + } + (Plus, Minus) => { + 
bitand_pos_neg(self.digits_mut(), other.digits()); + self.normalize(); + } + (Minus, Plus) => { + bitand_neg_pos(self.digits_mut(), other.digits()); + self.sign = Plus; + self.normalize(); + } + (Minus, Minus) => { + bitand_neg_neg(self.digits_mut(), other.digits()); + self.normalize(); + } + } + } +} + +// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff +// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1 +// answer is neg, has length of b +fn bitor_pos_neg(a: &mut TinyVec<[BigDigit; N]>, b: &[BigDigit]) { + let mut carry_b = 1; + let mut carry_or = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(*ai | twos_b, &mut carry_or); + } + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + a.truncate(b.len()); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_b = negate_carry(bi, &mut carry_b); + negate_carry(twos_b, &mut carry_or) + })); + debug_assert!(carry_b == 0); + } + } + // for carry_or to be non-zero, we would need twos_b == 0 + debug_assert!(carry_or == 0); +} + +// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1 +// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff +// answer is neg, has length of a +fn bitor_neg_pos(a: &mut [BigDigit], b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_or = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a | bi, &mut carry_or); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + if a.len() > b.len() { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a, &mut carry_or); + } + debug_assert!(carry_a == 0); + } + // for carry_or to be non-zero, we would need twos_a == 0 + debug_assert!(carry_or == 0); +} + +// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1 +// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1 +// answer 
is neg, has length of shortest +fn bitor_neg_neg(a: &mut TinyVec<[BigDigit; N]>, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_b = 1; + let mut carry_or = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(twos_a | twos_b, &mut carry_or); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + debug_assert!(b.len() > a.len() || carry_b == 0); + if a.len() > b.len() { + a.truncate(b.len()); + } + // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0 + debug_assert!(carry_or == 0); +} + +forward_val_val_binop!(impl BitOr for BigInt, bitor); +forward_ref_val_binop!(impl BitOr for BigInt, bitor); + +// do not use forward_ref_ref_binop_commutative! for bitor so that we can +// clone as needed, avoiding over-allocation +impl BitOr<&BigInt> for &BigInt { + type Output = BigInt; + + #[inline] + fn bitor(self, other: &BigInt) -> BigInt { + match (self.sign, other.sign) { + (NoSign, _) => other.clone(), + (_, NoSign) => self.clone(), + (Plus, Plus) => BigInt::::from(&self.data | &other.data), + (Plus, Minus) => other.clone() | self, + (Minus, Plus) => self.clone() | other, + (Minus, Minus) => { + // forward to val-ref, choosing the smaller to clone + if self.len() <= other.len() { + self.clone() | other + } else { + other.clone() | self + } + } + } + } +} + +impl BitOr<&BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn bitor(mut self, other: &BigInt) -> BigInt { + self |= other; + self + } +} + +forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign); + +impl BitOrAssign<&BigInt> for BigInt { + fn bitor_assign(&mut self, other: &BigInt) { + match (self.sign, other.sign) { + (_, NoSign) => {} + (NoSign, _) => self.clone_from(other), + (Plus, Plus) => self.data |= &other.data, + (Plus, Minus) => { + bitor_pos_neg(self.digits_mut(), other.digits()); + self.sign = Minus; + self.normalize(); + } + (Minus, Plus) => 
{ + bitor_neg_pos(self.digits_mut(), other.digits()); + self.normalize(); + } + (Minus, Minus) => { + bitor_neg_neg(self.digits_mut(), other.digits()); + self.normalize(); + } + } + } +} + +// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100 +// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100 +// answer is neg, has length of longest with a possible carry +fn bitxor_pos_neg(a: &mut TinyVec<[BigDigit; N]>, b: &[BigDigit]) { + let mut carry_b = 1; + let mut carry_xor = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); + } + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_b = !0; + *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); + } + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_b = negate_carry(bi, &mut carry_b); + negate_carry(twos_b, &mut carry_xor) + })); + debug_assert!(carry_b == 0); + } + } + if carry_xor != 0 { + a.push(1); + } +} + +// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100 +// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100 +// answer is neg, has length of longest with a possible carry +fn bitxor_neg_pos(a: &mut TinyVec<[BigDigit; N]>, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_xor = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a ^ bi, &mut carry_xor); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a, &mut carry_xor); + } + debug_assert!(carry_a == 0); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_a = !0; + negate_carry(twos_a ^ bi, &mut carry_xor) + })); + } + } + if 
carry_xor != 0 { + a.push(1); + } +} + +// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe +// -ff & - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe +// answer is pos, has length of longest +fn bitxor_neg_neg(a: &mut TinyVec<[BigDigit; N]>, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_b = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = negate_carry(bi, &mut carry_b); + *ai = twos_a ^ twos_b; + } + debug_assert!(a.len() > b.len() || carry_a == 0); + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = !0; + *ai = twos_a ^ twos_b; + } + debug_assert!(carry_a == 0); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_a = !0; + let twos_b = negate_carry(bi, &mut carry_b); + twos_a ^ twos_b + })); + debug_assert!(carry_b == 0); + } + } +} + +forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor); + +impl BitXor<&BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn bitxor(mut self, other: &BigInt) -> BigInt { + self ^= other; + self + } +} + +forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign); + +impl BitXorAssign<&BigInt> for BigInt { + fn bitxor_assign(&mut self, other: &BigInt) { + match (self.sign, other.sign) { + (_, NoSign) => {} + (NoSign, _) => self.clone_from(other), + (Plus, Plus) => { + self.data ^= &other.data; + if self.data.is_zero() { + self.sign = NoSign; + } + } + (Plus, Minus) => { + bitxor_pos_neg(self.digits_mut(), other.digits()); + self.sign = Minus; + self.normalize(); + } + (Minus, Plus) => { + bitxor_neg_pos(self.digits_mut(), other.digits()); + self.normalize(); + } + (Minus, Minus) => { + bitxor_neg_neg(self.digits_mut(), other.digits()); + self.sign = Plus; + self.normalize(); + } + } + } +} + +pub(super) fn set_negative_bit(x: 
&mut BigInt, bit: u64, value: bool) { + debug_assert_eq!(x.sign, Minus); + let data = &mut x.data; + + let bits_per_digit = u64::from(big_digit::BITS); + if bit >= bits_per_digit * data.len() as u64 { + if !value { + data.set_bit(bit, true); + } + } else { + // If the Uint number is + // ... 0 x 1 0 ... 0 + // then the two's complement is + // ... 1 !x 1 0 ... 0 + // |-- bit at position 'trailing_zeros' + // where !x is obtained from x by flipping each bit + let trailing_zeros = data.trailing_zeros().unwrap(); + if bit > trailing_zeros { + data.set_bit(bit, !value); + } else if bit == trailing_zeros && !value { + // Clearing the bit at position `trailing_zeros` is dealt with by doing + // similarly to what `bitand_neg_pos` does, except we start at digit + // `bit_index`. All digits below `bit_index` are guaranteed to be zero, + // so initially we have `carry_in` = `carry_out` = 1. Furthermore, we + // stop traversing the digits when there are no more carries. + let bit_index = (bit / bits_per_digit).to_usize().unwrap(); + let bit_mask = (1 as BigDigit) << (bit % bits_per_digit); + let mut digit_iter = data.digits_mut().iter_mut().skip(bit_index); + let mut carry_in = 1; + let mut carry_out = 1; + + let digit = digit_iter.next().unwrap(); + let twos_in = negate_carry(*digit, &mut carry_in); + let twos_out = twos_in & !bit_mask; + *digit = negate_carry(twos_out, &mut carry_out); + + for digit in digit_iter { + if carry_in == 0 && carry_out == 0 { + // Exit the loop since no more digits can change + break; + } + let twos = negate_carry(*digit, &mut carry_in); + *digit = negate_carry(twos, &mut carry_out); + } + + if carry_out != 0 { + // All digits have been traversed and there is a carry + debug_assert_eq!(carry_in, 0); + data.digits_mut().push(1); + } + } else if bit < trailing_zeros && value { + // Flip each bit from position 'bit' to 'trailing_zeros', both inclusive + // ... 1 !x 1 0 ... 0 ... 
0 + // |-- bit at position 'bit' + // |-- bit at position 'trailing_zeros' + // bit_mask: 1 1 ... 1 0 .. 0 + // This is done by xor'ing with the bit_mask + let index_lo = (bit / bits_per_digit).to_usize().unwrap(); + let index_hi = (trailing_zeros / bits_per_digit).to_usize().unwrap(); + let bit_mask_lo = big_digit::MAX << (bit % bits_per_digit); + let bit_mask_hi = + big_digit::MAX >> (bits_per_digit - 1 - (trailing_zeros % bits_per_digit)); + let digits = data.digits_mut(); + + if index_lo == index_hi { + digits[index_lo] ^= bit_mask_lo & bit_mask_hi; + } else { + digits[index_lo] = bit_mask_lo; + for digit in &mut digits[index_lo + 1..index_hi] { + *digit = big_digit::MAX; + } + digits[index_hi] ^= bit_mask_hi; + } + } else { + // We end up here in two cases: + // bit == trailing_zeros && value: Bit is already set + // bit < trailing_zeros && !value: Bit is already cleared + } + } +} diff --git a/vendor/num-bigint-generic/src/bigint/convert.rs b/vendor/num-bigint-generic/src/bigint/convert.rs new file mode 100644 index 000000000..1ac5b2fc6 --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint/convert.rs @@ -0,0 +1,506 @@ +use super::{ + BigInt, + Sign::{self, Minus, NoSign, Plus}, + ToBigInt, +}; + +use crate::{BigUint, ParseBigIntError, ToBigUint, TryFromBigIntError}; + +use alloc::vec::Vec; +use core::{ + cmp::Ordering::{Equal, Greater, Less}, + convert::TryFrom, + str::{self, FromStr}, +}; +use num_traits::{FromPrimitive, Num, One, ToPrimitive, Zero}; + +impl FromStr for BigInt { + type Err = ParseBigIntError; + + #[inline] + fn from_str(s: &str) -> Result { + BigInt::from_str_radix(s, 10) + } +} + +impl Num for BigInt { + type FromStrRadixErr = ParseBigIntError; + + /// Creates and initializes a [`BigInt`]. 
+ #[inline] + fn from_str_radix(mut s: &str, radix: u32) -> Result, ParseBigIntError> { + let sign = if let Some(tail) = s.strip_prefix('-') { + if !tail.starts_with('+') { + s = tail + } + Minus + } else { + Plus + }; + let bu = BigUint::from_str_radix(s, radix)?; + Ok(BigInt::from_biguint(sign, bu)) + } +} + +impl ToPrimitive for BigInt { + #[inline] + fn to_i64(&self) -> Option { + match self.sign { + Plus => self.data.to_i64(), + NoSign => Some(0), + Minus => { + let n = self.data.to_u64()?; + let m: u64 = 1 << 63; + match n.cmp(&m) { + Less => Some(-(n as i64)), + Equal => Some(i64::MIN), + Greater => None, + } + } + } + } + + #[inline] + fn to_i128(&self) -> Option { + match self.sign { + Plus => self.data.to_i128(), + NoSign => Some(0), + Minus => { + let n = self.data.to_u128()?; + let m: u128 = 1 << 127; + match n.cmp(&m) { + Less => Some(-(n as i128)), + Equal => Some(i128::MIN), + Greater => None, + } + } + } + } + + #[inline] + fn to_u64(&self) -> Option { + match self.sign { + Plus => self.data.to_u64(), + NoSign => Some(0), + Minus => None, + } + } + + #[inline] + fn to_u128(&self) -> Option { + match self.sign { + Plus => self.data.to_u128(), + NoSign => Some(0), + Minus => None, + } + } + + #[inline] + fn to_f32(&self) -> Option { + let n = self.data.to_f32()?; + Some(if self.sign == Minus { -n } else { n }) + } + + #[inline] + fn to_f64(&self) -> Option { + let n = self.data.to_f64()?; + Some(if self.sign == Minus { -n } else { n }) + } +} + +macro_rules! 
impl_try_from_bigint { + ($T:ty, $to_ty:path) => { + impl TryFrom<&BigInt> for $T { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: &BigInt) -> Result<$T, TryFromBigIntError<()>> { + $to_ty(value).ok_or(TryFromBigIntError::new(())) + } + } + + impl TryFrom> for $T { + type Error = TryFromBigIntError>; + + #[inline] + fn try_from(value: BigInt) -> Result<$T, TryFromBigIntError>> { + <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) + } + } + }; +} + +impl_try_from_bigint!(u8, ToPrimitive::to_u8); +impl_try_from_bigint!(u16, ToPrimitive::to_u16); +impl_try_from_bigint!(u32, ToPrimitive::to_u32); +impl_try_from_bigint!(u64, ToPrimitive::to_u64); +impl_try_from_bigint!(usize, ToPrimitive::to_usize); +impl_try_from_bigint!(u128, ToPrimitive::to_u128); + +impl_try_from_bigint!(i8, ToPrimitive::to_i8); +impl_try_from_bigint!(i16, ToPrimitive::to_i16); +impl_try_from_bigint!(i32, ToPrimitive::to_i32); +impl_try_from_bigint!(i64, ToPrimitive::to_i64); +impl_try_from_bigint!(isize, ToPrimitive::to_isize); +impl_try_from_bigint!(i128, ToPrimitive::to_i128); + +impl FromPrimitive for BigInt { + #[inline] + fn from_i64(n: i64) -> Option> { + Some(BigInt::::from(n)) + } + + #[inline] + fn from_i128(n: i128) -> Option> { + Some(BigInt::::from(n)) + } + + #[inline] + fn from_u64(n: u64) -> Option> { + Some(BigInt::::from(n)) + } + + #[inline] + fn from_u128(n: u128) -> Option> { + Some(BigInt::::from(n)) + } + + #[inline] + fn from_f64(n: f64) -> Option> { + if n >= 0.0 { + BigUint::::from_f64(n).map(BigInt::::from) + } else { + let x = BigUint::::from_f64(-n)?; + Some(-BigInt::::from(x)) + } + } +} + +impl From for BigInt { + #[inline] + fn from(n: i64) -> Self { + if n >= 0 { + BigInt::::from(n as u64) + } else { + let u = u64::MAX - (n as u64) + 1; + BigInt { + sign: Minus, + data: BigUint::from(u), + } + } + } +} + +impl From for BigInt { + #[inline] + fn from(n: i128) -> Self { + if n >= 0 { + BigInt::::from(n as u128) + } else { + 
let u = u128::MAX - (n as u128) + 1; + BigInt { + sign: Minus, + data: BigUint::::from(u), + } + } + } +} + +macro_rules! impl_bigint_from_int { + ($T:ty) => { + impl From<$T> for BigInt { + #[inline] + fn from(n: $T) -> Self { + BigInt::from(n as i64) + } + } + }; +} + +impl_bigint_from_int!(i8); +impl_bigint_from_int!(i16); +impl_bigint_from_int!(i32); +impl_bigint_from_int!(isize); + +impl From for BigInt { + #[inline] + fn from(n: u64) -> Self { + if n > 0 { + BigInt { + sign: Plus, + data: BigUint::from(n), + } + } else { + Self::zero() + } + } +} + +impl From for BigInt { + #[inline] + fn from(n: u128) -> Self { + if n > 0 { + BigInt { + sign: Plus, + data: BigUint::from(n), + } + } else { + Self::zero() + } + } +} + +macro_rules! impl_bigint_from_uint { + ($T:ty) => { + impl From<$T> for BigInt { + #[inline] + fn from(n: $T) -> Self { + BigInt::from(n as u64) + } + } + }; +} + +impl_bigint_from_uint!(u8); +impl_bigint_from_uint!(u16); +impl_bigint_from_uint!(u32); +impl_bigint_from_uint!(usize); + +impl From> for BigInt { + #[inline] + fn from(n: BigUint) -> Self { + if n.is_zero() { + Self::zero() + } else { + BigInt { + sign: Plus, + data: n, + } + } + } +} + +impl ToBigInt for BigInt { + #[inline] + fn to_bigint(&self) -> Option { + Some(self.clone()) + } +} +impl ToBigInt for BigUint { + #[inline] + fn to_bigint(&self) -> Option { + if self.is_zero() { + Some(BigInt::zero()) + } else { + Some(BigInt { + sign: Plus, + data: self.clone(), + }) + } + } +} +impl ToBigUint for BigInt { + #[inline] + fn to_biguint(&self) -> Option { + match self.sign() { + Plus => Some(self.data.clone()), + NoSign => Some(BigUint::zero()), + Minus => None, + } + } +} + +// impl BigInt { +// #[inline] +// fn to_bigint(&self) -> Option> { +// Some(self.clone()) +// } +// } + +// impl BigUint { +// #[inline] +// fn to_bigint(&self) -> Option> { +// if self.is_zero() { +// Some(BigInt::zero()) +// } else { +// Some(BigInt { +// sign: Plus, +// data: self.clone(), +// }) +// } +// 
} +// } + +// impl BigInt { +// #[inline] +// fn to_biguint(&self) -> Option> { +// match self.sign() { +// Plus => Some(self.data.clone()), +// NoSign => Some(BigUint::zero()), +// Minus => None, +// } +// } +// } + +impl TryFrom<&BigInt> for BigUint { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: &BigInt) -> Result> { + value + .to_biguint() + .ok_or_else(|| TryFromBigIntError::new(())) + } +} + +impl TryFrom for BigUint { + type Error = TryFromBigIntError; + + #[inline] + fn try_from(value: BigInt) -> Result> { + if value.sign() == Sign::Minus { + Err(TryFromBigIntError::new(value)) + } else { + Ok(value.data) + } + } +} + +macro_rules! impl_to_bigint { + ($T:ty, $from_ty:path) => { + impl ToBigInt for $T { + #[inline] + fn to_bigint(&self) -> Option { + $from_ty(*self) + } + } + }; +} + +impl_to_bigint!(isize, FromPrimitive::from_isize); +impl_to_bigint!(i8, FromPrimitive::from_i8); +impl_to_bigint!(i16, FromPrimitive::from_i16); +impl_to_bigint!(i32, FromPrimitive::from_i32); +impl_to_bigint!(i64, FromPrimitive::from_i64); +impl_to_bigint!(i128, FromPrimitive::from_i128); + +impl_to_bigint!(usize, FromPrimitive::from_usize); +impl_to_bigint!(u8, FromPrimitive::from_u8); +impl_to_bigint!(u16, FromPrimitive::from_u16); +impl_to_bigint!(u32, FromPrimitive::from_u32); +impl_to_bigint!(u64, FromPrimitive::from_u64); +impl_to_bigint!(u128, FromPrimitive::from_u128); + +impl_to_bigint!(f32, FromPrimitive::from_f32); +impl_to_bigint!(f64, FromPrimitive::from_f64); + +impl From for BigInt { + fn from(x: bool) -> Self { + if x { + One::one() + } else { + Self::zero() + } + } +} + +#[inline] +pub(super) fn from_signed_bytes_be(digits: &[u8]) -> BigInt { + let sign = match digits.first() { + Some(v) if *v > 0x7f => Sign::Minus, + Some(_) => Sign::Plus, + None => return BigInt::zero(), + }; + + if sign == Sign::Minus { + // two's-complement the content to retrieve the magnitude + let mut digits = Vec::from(digits); + twos_complement_be(&mut 
digits); + BigInt::from_biguint(sign, BigUint::from_bytes_be(&digits)) + } else { + BigInt::from_biguint(sign, BigUint::from_bytes_be(digits)) + } +} + +#[inline] +pub(super) fn from_signed_bytes_le(digits: &[u8]) -> BigInt { + let sign = match digits.last() { + Some(v) if *v > 0x7f => Sign::Minus, + Some(_) => Sign::Plus, + None => return BigInt::zero(), + }; + + if sign == Sign::Minus { + // two's-complement the content to retrieve the magnitude + let mut digits = Vec::from(digits); + twos_complement_le(&mut digits); + BigInt::from_biguint(sign, BigUint::from_bytes_le(&digits)) + } else { + BigInt::from_biguint(sign, BigUint::from_bytes_le(digits)) + } +} + +#[inline] +pub(super) fn to_signed_bytes_be(x: &BigInt) -> Vec { + let mut bytes = x.data.to_bytes_be(); + let first_byte = bytes.first().cloned().unwrap_or(0); + if first_byte > 0x7f + && !(first_byte == 0x80 && bytes.iter().skip(1).all(Zero::is_zero) && x.sign == Sign::Minus) + { + // msb used by magnitude, extend by 1 byte + bytes.insert(0, 0); + } + if x.sign == Sign::Minus { + twos_complement_be(&mut bytes); + } + bytes +} + +#[inline] +pub(super) fn to_signed_bytes_le(x: &BigInt) -> Vec { + let mut bytes = x.data.to_bytes_le(); + let last_byte = bytes.last().cloned().unwrap_or(0); + if last_byte > 0x7f + && !(last_byte == 0x80 + && bytes.iter().rev().skip(1).all(Zero::is_zero) + && x.sign == Sign::Minus) + { + // msb used by magnitude, extend by 1 byte + bytes.push(0); + } + if x.sign == Sign::Minus { + twos_complement_le(&mut bytes); + } + bytes +} + +/// Perform in-place two's complement of the given binary representation, +/// in little-endian byte order. +#[inline] +fn twos_complement_le(digits: &mut [u8]) { + twos_complement(digits) +} + +/// Perform in-place two's complement of the given binary representation +/// in big-endian byte order. 
+#[inline] +fn twos_complement_be(digits: &mut [u8]) { + twos_complement(digits.iter_mut().rev()) +} + +/// Perform in-place two's complement of the given digit iterator +/// starting from the least significant byte. +#[inline] +fn twos_complement<'a, I>(digits: I) +where + I: IntoIterator, +{ + let mut carry = true; + for d in digits { + *d = !*d; + if carry { + *d = d.wrapping_add(1); + carry = d.is_zero(); + } + } +} diff --git a/vendor/num-bigint-generic/src/bigint/division.rs b/vendor/num-bigint-generic/src/bigint/division.rs new file mode 100644 index 000000000..9df979766 --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint/division.rs @@ -0,0 +1,522 @@ +use super::{ + BigInt, + CheckedUnsignedAbs::{Negative, Positive}, + Sign::NoSign, + UnsignedAbs, +}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::ops::{Div, DivAssign, Rem, RemAssign}; +use num_integer::Integer; +use num_traits::{CheckedDiv, CheckedEuclid, Euclid, Signed, ToPrimitive, Zero}; + +forward_all_binop_to_ref_ref!(impl Div for BigInt, div); + +impl Div<&BigInt> for &BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: &BigInt) -> BigInt { + let (q, _) = self.div_rem(other); + q + } +} + +impl DivAssign<&BigInt> for BigInt { + #[inline] + fn div_assign(&mut self, other: &BigInt) { + *self = &*self / other; + } +} + +forward_val_assign!(impl DivAssign for BigInt, div_assign); + +promote_all_scalars!(impl Div for BigInt, div); +promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: u32) -> BigInt { + BigInt::from_biguint(self.sign, self.data / other) + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: u32) { + self.data /= other; + if 
self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Div> for u32 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + BigInt::from_biguint(other.sign, self / other.data) + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: u64) -> BigInt { + BigInt::from_biguint(self.sign, self.data / other) + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: u64) { + self.data /= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Div> for u64 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + BigInt::from_biguint(other.sign, self / other.data) + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: u128) -> BigInt { + BigInt::from_biguint(self.sign, self.data / other) + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: u128) { + self.data /= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Div> for u128 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + BigInt::from_biguint(other.sign, self / other.data) + } +} + +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self / u, + Negative(u) => -self / u, + } + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self /= u, + Negative(u) => { + self.sign = -self.sign; + *self /= u; + } + } + } +} + +impl Div> for i32 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u / other, + Negative(u) => u 
/ -other, + } + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self / u, + Negative(u) => -self / u, + } + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self /= u, + Negative(u) => { + self.sign = -self.sign; + *self /= u; + } + } + } +} + +impl Div> for i64 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u / other, + Negative(u) => u / -other, + } + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self / u, + Negative(u) => -self / u, + } + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self /= u, + Negative(u) => { + self.sign = -self.sign; + *self /= u; + } + } + } +} + +impl Div> for i128 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u / other, + Negative(u) => u / -other, + } + } +} + +forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem); + +impl Rem<&BigInt> for &BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: &BigInt) -> BigInt { + if let Some(other) = other.to_u32() { + self % other + } else if let Some(other) = other.to_i32() { + self % other + } else { + let (_, r) = self.div_rem(other); + r + } + } +} + +impl RemAssign<&BigInt> for BigInt { + #[inline] + fn rem_assign(&mut self, other: &BigInt) { + *self = &*self % other; + } +} +impl RemAssign> for BigInt { + #[inline] + fn rem_assign(&mut self, other: BigInt) { + self.rem_assign(&other); + } +} + +promote_all_scalars!(impl Rem for BigInt, rem); +promote_all_scalars_assign!(impl RemAssign for BigInt, rem_assign); 
+forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: u32) -> BigInt { + BigInt::from_biguint(self.sign, self.data % other) + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: u32) { + self.data %= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Rem> for u32 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + BigInt::from(self % other.data) + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: u64) -> BigInt { + BigInt::from_biguint(self.sign, self.data % other) + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: u64) { + self.data %= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Rem> for u64 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + BigInt::from(self % other.data) + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: u128) -> BigInt { + BigInt::from_biguint(self.sign, self.data % other) + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: u128) { + self.data %= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Rem> for u128 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + BigInt::from(self % other.data) + } +} + +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: i32) -> BigInt { + self % other.unsigned_abs() + } +} + +impl RemAssign for BigInt { + #[inline] + fn 
rem_assign(&mut self, other: i32) { + *self %= other.unsigned_abs(); + } +} + +impl Rem> for i32 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u % other, + Negative(u) => -(u % other), + } + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: i64) -> BigInt { + self % other.unsigned_abs() + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: i64) { + *self %= other.unsigned_abs(); + } +} + +impl Rem> for i64 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u % other, + Negative(u) => -(u % other), + } + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: i128) -> BigInt { + self % other.unsigned_abs() + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: i128) { + *self %= other.unsigned_abs(); + } +} + +impl Rem> for i128 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u % other, + Negative(u) => -(u % other), + } + } +} + +impl CheckedDiv for BigInt { + #[inline] + fn checked_div(&self, v: &BigInt) -> Option> { + if v.is_zero() { + return None; + } + Some(self.div(v)) + } +} + +impl CheckedEuclid for BigInt { + #[inline] + fn checked_div_euclid(&self, v: &BigInt) -> Option> { + if v.is_zero() { + return None; + } + Some(self.div_euclid(v)) + } + + #[inline] + fn checked_rem_euclid(&self, v: &BigInt) -> Option> { + if v.is_zero() { + return None; + } + Some(self.rem_euclid(v)) + } + + fn checked_div_rem_euclid(&self, v: &Self) -> Option<(Self, Self)> { + Some(self.div_rem_euclid(v)) + } +} + +impl Euclid for BigInt { + #[inline] + fn div_euclid(&self, v: &BigInt) -> BigInt { + let (q, r) = self.div_rem(v); + if r.is_negative() { + if v.is_positive() { + q - 1 + } else { + q + 1 + } + } else 
{ + q + } + } + + #[inline] + fn rem_euclid(&self, v: &BigInt) -> BigInt { + let r = self % v; + if r.is_negative() { + if v.is_positive() { + r + v + } else { + r - v + } + } else { + r + } + } + + fn div_rem_euclid(&self, v: &Self) -> (Self, Self) { + let (q, r) = self.div_rem(v); + if r.is_negative() { + if v.is_positive() { + (q - 1, r + v) + } else { + (q + 1, r - v) + } + } else { + (q, r) + } + } +} diff --git a/vendor/num-bigint-generic/src/bigint/multiplication.rs b/vendor/num-bigint-generic/src/bigint/multiplication.rs new file mode 100644 index 000000000..bdcf84bdf --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint/multiplication.rs @@ -0,0 +1,240 @@ +use super::{ + BigInt, + CheckedUnsignedAbs::{Negative, Positive}, + Sign::{self, Minus, NoSign, Plus}, + UnsignedAbs, +}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::{ + iter::Product, + ops::{Mul, MulAssign}, +}; +use num_traits::{CheckedMul, One, Zero}; + +impl Mul for Sign { + type Output = Sign; + + #[inline] + fn mul(self, other: Sign) -> Sign { + match (self, other) { + (NoSign, _) | (_, NoSign) => NoSign, + (Plus, Plus) | (Minus, Minus) => Plus, + (Plus, Minus) | (Minus, Plus) => Minus, + } + } +} + +impl Mul> for BigInt { + type Output = BigInt; + #[inline] + fn mul(self, other: BigInt) -> BigInt { + let BigInt { data: x, .. } = self; + let BigInt { data: y, .. } = other; + BigInt::from_biguint(self.sign * other.sign, x * y) + } +} +impl Mul> for &BigInt { + type Output = BigInt; + #[inline] + fn mul(self, other: BigInt) -> BigInt { + let BigInt { data: x, .. } = self; + let BigInt { data: y, .. } = other; + BigInt::from_biguint(self.sign * other.sign, x * y) + } +} +impl Mul<&BigInt> for BigInt { + type Output = BigInt; + #[inline] + fn mul(self, other: &BigInt) -> BigInt { + let BigInt { data: x, .. } = self; + let BigInt { data: y, .. 
} = other; + BigInt::from_biguint(self.sign * other.sign, x * y) + } +} +impl Mul<&BigInt> for &BigInt { + type Output = BigInt; + #[inline] + fn mul(self, other: &BigInt) -> BigInt { + let BigInt { data: x, .. } = self; + let BigInt { data: y, .. } = other; + BigInt::from_biguint(self.sign * other.sign, x * y) + } +} + +impl MulAssign> for BigInt { + #[inline] + fn mul_assign(&mut self, other: BigInt) { + let BigInt { data: y, .. } = other; + self.data *= y; + if self.data.is_zero() { + self.sign = NoSign; + } else { + self.sign = self.sign * other.sign; + } + } +} +impl MulAssign<&BigInt> for BigInt { + #[inline] + fn mul_assign(&mut self, other: &BigInt) { + let BigInt { data: y, .. } = other; + self.data *= y; + if self.data.is_zero() { + self.sign = NoSign; + } else { + self.sign = self.sign * other.sign; + } + } +} + +promote_all_scalars!(impl Mul for BigInt, mul); +promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: u32) -> BigInt { + BigInt::from_biguint(self.sign, self.data * other) + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: u32) { + self.data *= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: u64) -> BigInt { + BigInt::from_biguint(self.sign, self.data * other) + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: u64) { + self.data *= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: u128) -> BigInt { + BigInt::from_biguint(self.sign, self.data * other) + } 
+} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: u128) { + self.data *= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self * u, + Negative(u) => -self * u, + } + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self *= u, + Negative(u) => { + self.sign = -self.sign; + self.data *= u; + } + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self * u, + Negative(u) => -self * u, + } + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self *= u, + Negative(u) => { + self.sign = -self.sign; + self.data *= u; + } + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self * u, + Negative(u) => -self * u, + } + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self *= u, + Negative(u) => { + self.sign = -self.sign; + self.data *= u; + } + } + } +} + +impl CheckedMul for BigInt { + #[inline] + fn checked_mul(&self, v: &BigInt) -> Option> { + Some(self.mul(v)) + } +} + +impl_product_iter_type!(BigInt); diff --git a/vendor/num-bigint-generic/src/bigint/power.rs b/vendor/num-bigint-generic/src/bigint/power.rs new file mode 100644 index 000000000..30876d51f --- /dev/null +++ 
b/vendor/num-bigint-generic/src/bigint/power.rs @@ -0,0 +1,100 @@ +use super::{ + BigInt, + Sign::{self, Minus, Plus}, +}; + +use crate::BigUint; + +use num_integer::Integer; +use num_traits::{Pow, Signed, Zero}; + +/// Help function for pow +/// +/// Computes the effect of the exponent on the sign. +#[inline] +fn powsign(sign: Sign, other: &T) -> Sign { + if other.is_zero() { + Plus + } else if sign != Minus || other.is_odd() { + sign + } else { + -sign + } +} + +macro_rules! pow_impl { + ($T:ty) => { + impl Pow<$T> for BigInt { + type Output = BigInt; + + #[inline] + fn pow(self, rhs: $T) -> BigInt { + BigInt::::from_biguint(powsign(self.sign, &rhs), self.data.pow(rhs)) + } + } + + impl Pow<&$T> for BigInt { + type Output = BigInt; + + #[inline] + fn pow(self, rhs: &$T) -> BigInt { + BigInt::::from_biguint(powsign(self.sign, rhs), self.data.pow(rhs)) + } + } + + impl Pow<$T> for &BigInt { + type Output = BigInt; + + #[inline] + fn pow(self, rhs: $T) -> BigInt { + BigInt::::from_biguint(powsign(self.sign, &rhs), Pow::pow(&self.data, rhs)) + } + } + + impl Pow<&$T> for &BigInt { + type Output = BigInt; + + #[inline] + fn pow(self, rhs: &$T) -> BigInt { + BigInt::::from_biguint(powsign(self.sign, rhs), Pow::pow(&self.data, rhs)) + } + } + }; +} + +pow_impl!(u8); +pow_impl!(u16); +pow_impl!(u32); +pow_impl!(u64); +pow_impl!(usize); +pow_impl!(u128); +pow_impl!(BigUint); + +pub(super) fn modpow( + x: &BigInt, + exponent: &BigInt, + modulus: &BigInt, +) -> BigInt { + assert!( + !exponent.is_negative(), + "negative exponentiation is not supported!" + ); + assert!( + !modulus.is_zero(), + "attempt to calculate with zero modulus!" + ); + + let result = x.data.modpow(&exponent.data, &modulus.data); + if result.is_zero() { + return BigInt::zero(); + } + + // The sign of the result follows the modulus, like `mod_floor`. 
+ let (sign, mag) = match (x.is_negative() && exponent.is_odd(), modulus.is_negative()) { + (false, false) => (Plus, result), + (true, false) => (Plus, &modulus.data - result), + (false, true) => (Minus, &modulus.data - result), + (true, true) => (Minus, result), + }; + BigInt::from_biguint(sign, mag) +} diff --git a/vendor/num-bigint-generic/src/bigint/serde.rs b/vendor/num-bigint-generic/src/bigint/serde.rs new file mode 100644 index 000000000..fa70502d8 --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint/serde.rs @@ -0,0 +1,63 @@ +#![cfg(feature = "serde")] +#![cfg_attr(docsrs, doc(cfg(feature = "serde")))] + +use super::{BigInt, Sign}; + +use serde::{ + de::{Error, Unexpected}, + Deserialize, Deserializer, Serialize, Serializer, +}; + +impl Serialize for Sign { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Note: do not change the serialization format, or it may break + // forward and backward compatibility of serialized data! + match *self { + Sign::Minus => (-1i8).serialize(serializer), + Sign::NoSign => 0i8.serialize(serializer), + Sign::Plus => 1i8.serialize(serializer), + } + } +} + +impl<'de> Deserialize<'de> for Sign { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let sign = i8::deserialize(deserializer)?; + match sign { + -1 => Ok(Sign::Minus), + 0 => Ok(Sign::NoSign), + 1 => Ok(Sign::Plus), + _ => Err(D::Error::invalid_value( + Unexpected::Signed(sign.into()), + &"a sign of -1, 0, or 1", + )), + } + } +} + +impl Serialize for BigInt { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Note: do not change the serialization format, or it may break + // forward and backward compatibility of serialized data! 
+ (self.sign, &self.data).serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for BigInt { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let (sign, data) = Deserialize::deserialize(deserializer)?; + Ok(BigInt::from_biguint(sign, data)) + } +} diff --git a/vendor/num-bigint-generic/src/bigint/shift.rs b/vendor/num-bigint-generic/src/bigint/shift.rs new file mode 100644 index 000000000..2e75ba02f --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint/shift.rs @@ -0,0 +1,106 @@ +use super::{BigInt, Sign::NoSign}; + +use core::ops::{Shl, ShlAssign, Shr, ShrAssign}; +use num_traits::{PrimInt, Signed, Zero}; + +macro_rules! impl_shift { + (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => { + impl $Shx<&$rhs> for BigInt { + type Output = BigInt; + + #[inline] + fn $shx(self, rhs: &$rhs) -> BigInt { + $Shx::$shx(self, *rhs) + } + } + impl $Shx<&$rhs> for &BigInt { + type Output = BigInt; + + #[inline] + fn $shx(self, rhs: &$rhs) -> BigInt { + $Shx::$shx(self, *rhs) + } + } + impl $ShxAssign<&$rhs> for BigInt { + #[inline] + fn $shx_assign(&mut self, rhs: &$rhs) { + $ShxAssign::$shx_assign(self, *rhs); + } + } + }; + ($($rhs:ty),+) => {$( + impl Shl<$rhs> for BigInt { + type Output = BigInt; + + #[inline] + fn shl(self, rhs: $rhs) -> BigInt { + BigInt::::from_biguint(self.sign, self.data << rhs) + } + } + impl Shl<$rhs> for &BigInt { + type Output = BigInt; + + #[inline] + fn shl(self, rhs: $rhs) -> BigInt { + BigInt::::from_biguint(self.sign, &self.data << rhs) + } + } + impl ShlAssign<$rhs> for BigInt { + #[inline] + fn shl_assign(&mut self, rhs: $rhs) { + self.data <<= rhs + } + } + impl_shift! 
{ @ref Shl::shl, ShlAssign::shl_assign, $rhs } + + impl Shr<$rhs> for BigInt { + type Output = BigInt; + + #[inline] + fn shr(self, rhs: $rhs) -> BigInt { + let round_down = shr_round_down(&self, rhs); + let data = self.data >> rhs; + let data = if round_down { data + 1u8 } else { data }; + BigInt::from_biguint(self.sign, data) + } + } + impl Shr<$rhs> for &BigInt { + type Output = BigInt; + + #[inline] + fn shr(self, rhs: $rhs) -> BigInt { + let round_down = shr_round_down(self, rhs); + let data = &self.data >> rhs; + let data = if round_down { data + 1u8 } else { data }; + BigInt::from_biguint(self.sign, data) + } + } + impl ShrAssign<$rhs> for BigInt { + #[inline] + fn shr_assign(&mut self, rhs: $rhs) { + let round_down = shr_round_down(self, rhs); + self.data >>= rhs; + if round_down { + self.data += 1u8; + } else if self.data.is_zero() { + self.sign = NoSign; + } + } + } + impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs } + )*}; +} + +impl_shift! { u8, u16, u32, u64, u128, usize } +impl_shift! { i8, i16, i32, i64, i128, isize } + +// Negative values need a rounding adjustment if there are any ones in the +// bits that are getting shifted out. 
+fn shr_round_down(i: &BigInt, shift: T) -> bool { + if i.is_negative() { + let zeros = i.trailing_zeros().expect("negative values are non-zero"); + shift > T::zero() && shift.to_u64().map(|shift| zeros < shift).unwrap_or(true) + } else { + false + } +} diff --git a/vendor/num-bigint-generic/src/bigint/subtraction.rs b/vendor/num-bigint-generic/src/bigint/subtraction.rs new file mode 100644 index 000000000..bf9140ce3 --- /dev/null +++ b/vendor/num-bigint-generic/src/bigint/subtraction.rs @@ -0,0 +1,305 @@ +use super::{ + BigInt, + CheckedUnsignedAbs::{Negative, Positive}, + Sign::{Minus, NoSign, Plus}, + UnsignedAbs, +}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::{ + cmp::Ordering::{Equal, Greater, Less}, + mem, + ops::{Sub, SubAssign}, +}; +use num_traits::CheckedSub; + +// We want to forward to BigUint::sub, but it's not clear how that will go until +// we compare both sign and magnitude. So we duplicate this body for every +// val/ref combination, deferring that decision to BigUint's own forwarding. +macro_rules! 
bigint_sub { + ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { + match ($a.sign, $b.sign) { + (_, NoSign) => $a_owned, + (NoSign, _) => -$b_owned, + // opposite signs => keep the sign of the left with the sum of magnitudes + (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data), + // same sign => keep or toggle the sign of the left with the difference of magnitudes + (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) { + Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data), + Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), + Equal => BigInt::zero(), + }, + } + }; +} + +impl Sub<&BigInt> for &BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: &BigInt) -> BigInt { + bigint_sub!( + self, + self.clone(), + &self.data, + other, + other.clone(), + &other.data + ) + } +} + +impl Sub> for &BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + bigint_sub!(self, self.clone(), &self.data, other, other, other.data) + } +} + +impl Sub<&BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: &BigInt) -> BigInt { + bigint_sub!(self, self, self.data, other, other.clone(), &other.data) + } +} + +impl Sub> for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + bigint_sub!(self, self, self.data, other, other, other.data) + } +} + +impl SubAssign<&BigInt> for BigInt { + #[inline] + fn sub_assign(&mut self, other: &BigInt) { + let n = mem::replace(self, Self::zero()); + *self = n - other; + } +} +forward_val_assign!(impl SubAssign for BigInt, sub_assign); + +promote_all_scalars!(impl Sub for BigInt, sub); +promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); + +impl Sub for 
BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: u32) -> BigInt { + match self.sign { + NoSign => -BigInt::::from(other), + Minus => -BigInt::::from(self.data + other), + Plus => match self.data.cmp(&From::from(other)) { + Equal => Self::zero(), + Greater => BigInt::::from(self.data - other), + Less => -BigInt::::from(other - self.data), + }, + } + } +} +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: u32) { + let n = mem::replace(self, Self::zero()); + *self = n - other; + } +} + +impl Sub> for u32 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + -(other - self) + } +} + +impl Sub> for u64 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + -(other - self) + } +} + +impl Sub> for u128 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + -(other - self) + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: u64) -> BigInt { + match self.sign { + NoSign => -BigInt::::from(other), + Minus => -BigInt::::from(self.data + other), + Plus => match self.data.cmp(&From::from(other)) { + Equal => Self::zero(), + Greater => BigInt::::from(self.data - other), + Less => -BigInt::::from(other - self.data), + }, + } + } +} + +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: u64) { + let n = mem::replace(self, Self::zero()); + *self = n - other; + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: u128) -> BigInt { + match self.sign { + NoSign => -BigInt::from(other), + Minus => -BigInt::from(self.data + other), + Plus => match self.data.cmp(&From::from(other)) { + Equal => Self::zero(), + Greater => BigInt::from(self.data - other), + Less => -BigInt::from(other - self.data), + }, + } + } +} + +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: u128) { + let n = mem::replace(self, Self::zero()); + *self = n - other; + 
} +} + +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self - u, + Negative(u) => self + u, + } + } +} +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self -= u, + Negative(u) => *self += u, + } + } +} + +impl Sub> for i32 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u - other, + Negative(u) => -other - u, + } + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self - u, + Negative(u) => self + u, + } + } +} +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self -= u, + Negative(u) => *self += u, + } + } +} + +impl Sub> for i64 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u - other, + Negative(u) => -other - u, + } + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self - u, + Negative(u) => self + u, + } + } +} + +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self -= u, + Negative(u) => *self += u, + } + } +} + +impl Sub> for i128 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u - other, + Negative(u) => -other - u, + } + } +} + +impl CheckedSub for BigInt { + #[inline] + fn 
checked_sub(&self, v: &BigInt) -> Option> { + Some(self.sub(v)) + } +} diff --git a/vendor/num-bigint-generic/src/bigrand.rs b/vendor/num-bigint-generic/src/bigrand.rs new file mode 100644 index 000000000..52d058e0d --- /dev/null +++ b/vendor/num-bigint-generic/src/bigrand.rs @@ -0,0 +1,286 @@ +//! Randomization of big integers +#![cfg(feature = "rand")] +#![cfg_attr(docsrs, doc(cfg(feature = "rand")))] + +use rand::{ + distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler}, + prelude::*, +}; + +use crate::{BigInt, BigUint, Sign::*}; + +use crate::biguint::biguint_from_tinyvec; + +use num_integer::Integer; +use num_traits::{ToPrimitive, Zero}; + +/// A trait for sampling random big integers. +/// +/// The `rand` feature must be enabled to use this. See crate-level documentation for details. +pub trait RandBigInt { + /// Generate a random [`BigUint`] of the given bit size. + fn gen_biguint(&mut self, bit_size: u64) -> BigUint; + + /// Generate a random [ BigInt`] of the given bit size. + fn gen_bigint(&mut self, bit_size: u64) -> BigInt; + + /// Generate a random [`BigUint`] less than the given bound. Fails + /// when the bound is zero. + fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint; + + /// Generate a random [`BigUint`] within the given range. The lower + /// bound is inclusive; the upper bound is exclusive. Fails when + /// the upper bound is not greater than the lower bound. + fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint; + + /// Generate a random [`BigInt`] within the given range. The lower + /// bound is inclusive; the upper bound is exclusive. Fails when + /// the upper bound is not greater than the lower bound. 
+ fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt; +} + +fn gen_bits(rng: &mut R, data: &mut [u32], rem: u64) { + // `fill` is faster than many `gen::` calls + rng.fill(data); + if rem > 0 { + let last = data.len() - 1; + data[last] >>= 32 - rem; + } +} + +impl RandBigInt for R { + cfg_digit!( + fn gen_biguint(&mut self, bit_size: u64) -> BigUint { + let (digits, rem) = bit_size.div_rem(&32); + let len = (digits + (rem > 0) as u64) + .to_usize() + .expect("capacity overflow"); + let mut data = vec![0u32; len]; + gen_bits(self, &mut data, rem); + biguint_from_vec(data) + } + + fn gen_biguint(&mut self, bit_size: u64) -> BigUint { + use core::slice; + + let (digits, rem) = bit_size.div_rem(&32); + let len = (digits + (rem > 0) as u64) + .to_usize() + .expect("capacity overflow"); + let native_digits = Integer::div_ceil(&bit_size, &64); + let native_len = native_digits.to_usize().expect("capacity overflow"); + let mut data = tinyvec::TinyVec::new(); + // let mut data = vec![0u64; native_len]; + unsafe { + // Generate bits in a `&mut [u32]` slice for value stability + let ptr = data.as_mut_ptr() as *mut u32; + debug_assert!(native_len * 2 >= len); + let data = slice::from_raw_parts_mut(ptr, len); + gen_bits(self, data, rem); + } + #[cfg(target_endian = "big")] + for digit in &mut data { + // swap u32 digits into u64 endianness + *digit = (*digit << 32) | (*digit >> 32); + } + biguint_from_tinyvec(data) + } + ); + + fn gen_bigint(&mut self, bit_size: u64) -> BigInt { + loop { + // Generate a random BigUint... + let biguint = self.gen_biguint(bit_size); + // ...and then randomly assign it a Sign... + let sign = if biguint.is_zero() { + // ...except that if the BigUint is zero, we need to try + // again with probability 0.5. This is because otherwise, + // the probability of generating a zero BigInt would be + // double that of any other number. 
+ if self.gen() { + continue; + } else { + NoSign + } + } else if self.gen() { + Plus + } else { + Minus + }; + return BigInt::from_biguint(sign, biguint); + } + } + + fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint { + assert!(!bound.is_zero()); + let bits = bound.bits(); + loop { + let n = self.gen_biguint(bits); + if n < *bound { + return n; + } + } + } + + fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint { + assert!(*lbound < *ubound); + if lbound.is_zero() { + self.gen_biguint_below(ubound) + } else { + lbound + self.gen_biguint_below(&(ubound - lbound)) + } + } + + fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt { + assert!(*lbound < *ubound); + if lbound.is_zero() { + BigInt::from(self.gen_biguint_below(ubound.magnitude())) + } else if ubound.is_zero() { + lbound + BigInt::from(self.gen_biguint_below(lbound.magnitude())) + } else { + let delta = ubound - lbound; + lbound + BigInt::from(self.gen_biguint_below(delta.magnitude())) + } + } +} + +/// The back-end implementing rand's [`UniformSampler`] for [`BigUint`]. 
+#[derive(Clone, Debug)] +pub struct UniformBigUint { + base: BigUint, + len: BigUint, +} + +impl UniformSampler for UniformBigUint { + type X = BigUint; + + #[inline] + fn new(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = low_b.borrow(); + let high = high_b.borrow(); + assert!(low < high); + UniformBigUint { + len: high - low, + base: low.clone(), + } + } + + #[inline] + fn new_inclusive(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = low_b.borrow(); + let high = high_b.borrow(); + assert!(low <= high); + Self::new(low, high + 1u32) + } + + #[inline] + fn sample(&self, rng: &mut R) -> Self::X { + &self.base + rng.gen_biguint_below(&self.len) + } + + #[inline] + fn sample_single(low: B1, high: B2, rng: &mut R) -> Self::X + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + rng.gen_biguint_range(low.borrow(), high.borrow()) + } +} + +impl SampleUniform for BigUint { + type Sampler = UniformBigUint; +} + +/// The back-end implementing rand's [`UniformSampler`] for [`BigInt`]. 
+#[derive(Clone, Debug)] +pub struct UniformBigInt { + base: BigInt, + len: BigUint, +} + +impl UniformSampler for UniformBigInt { + type X = BigInt; + + #[inline] + fn new(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = low_b.borrow(); + let high = high_b.borrow(); + assert!(low < high); + UniformBigInt { + len: (high - low).into_parts().1, + base: low.clone(), + } + } + + #[inline] + fn new_inclusive(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = low_b.borrow(); + let high = high_b.borrow(); + assert!(low <= high); + Self::new(low, high + 1u32) + } + + #[inline] + fn sample(&self, rng: &mut R) -> Self::X { + &self.base + BigInt::from(rng.gen_biguint_below(&self.len)) + } + + #[inline] + fn sample_single(low: B1, high: B2, rng: &mut R) -> Self::X + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + rng.gen_bigint_range(low.borrow(), high.borrow()) + } +} + +impl SampleUniform for BigInt { + type Sampler = UniformBigInt; +} + +/// A random distribution for [`BigUint`] and [`BigInt`] values of a particular bit size. +/// +/// The `rand` feature must be enabled to use this. See crate-level documentation for details. 
+#[derive(Clone, Copy, Debug)] +pub struct RandomBits { + bits: u64, +} + +impl RandomBits { + #[inline] + pub fn new(bits: u64) -> RandomBits { + RandomBits { bits } + } +} + +impl Distribution for RandomBits { + #[inline] + fn sample(&self, rng: &mut R) -> BigUint { + rng.gen_biguint(self.bits) + } +} + +impl Distribution for RandomBits { + #[inline] + fn sample(&self, rng: &mut R) -> BigInt { + rng.gen_bigint(self.bits) + } +} diff --git a/vendor/num-bigint-generic/src/biguint.rs b/vendor/num-bigint-generic/src/biguint.rs new file mode 100644 index 000000000..e450dedbc --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint.rs @@ -0,0 +1,1223 @@ +use crate::big_digit::{self, BigDigit}; + +use alloc::{string::String, vec::Vec}; +use core::{cmp, cmp::Ordering, default::Default, fmt, hash, mem, str}; +use tinyvec::{tiny_vec, TinyVec}; + +use num_integer::{Integer, Roots}; +use num_traits::{Num, One, Pow, ToPrimitive, Unsigned, Zero}; + +mod addition; +mod division; +mod multiplication; +mod subtraction; + +mod arbitrary; +mod bits; +mod convert; +mod iter; +mod monty; +mod power; +mod serde; +mod shift; + +pub(crate) use self::convert::to_str_radix_reversed; +pub use self::iter::{U32Digits, U64Digits}; + +pub const NLIMBS: usize = 32; + +/// A big unsigned integer type. +pub struct BigUint { + data: TinyVec<[u64; N]>, +} + +impl BigUint { + pub fn to_digits(&self) -> BigUint { + BigUint { + data: self.data.iter().copied().collect(), + } + } +} + +// Note: derived `Clone` doesn't specialize `clone_from`, +// but we want to keep the allocation in `data`. 
+impl Clone for BigUint { + #[inline] + fn clone(&self) -> Self { + BigUint { + data: self.data.clone(), + } + } + + #[inline] + fn clone_from(&mut self, other: &Self) { + self.data.clone_from(&other.data); + } +} + +impl hash::Hash for BigUint { + #[inline] + fn hash(&self, state: &mut H) { + debug_assert!(self.data.last() != Some(&0)); + self.data.hash(state); + } +} + +impl PartialEq for BigUint { + #[inline] + fn eq(&self, other: &BigUint) -> bool { + debug_assert!(self.data.last() != Some(&0)); + debug_assert!(other.data.last() != Some(&0)); + self.data == other.data + } +} +impl Eq for BigUint {} + +impl PartialOrd for BigUint { + #[inline] + fn partial_cmp(&self, other: &BigUint) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for BigUint { + #[inline] + fn cmp(&self, other: &BigUint) -> Ordering { + cmp_slice(&self.data[..], &other.data[..]) + } +} + +#[inline] +fn cmp_slice(a: &[BigDigit], b: &[BigDigit]) -> Ordering { + debug_assert!(a.last() != Some(&0)); + debug_assert!(b.last() != Some(&0)); + + match Ord::cmp(&a.len(), &b.len()) { + Ordering::Equal => Iterator::cmp(a.iter().rev(), b.iter().rev()), + other => other, + } +} + +impl Default for BigUint { + #[inline] + fn default() -> BigUint { + Self::zero() + } +} + +impl fmt::Debug for BigUint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for BigUint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad_integral(true, "", &self.to_str_radix(10)) + } +} + +impl fmt::LowerHex for BigUint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad_integral(true, "0x", &self.to_str_radix(16)) + } +} + +impl fmt::UpperHex for BigUint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut s = self.to_str_radix(16); + s.make_ascii_uppercase(); + f.pad_integral(true, "0x", &s) + } +} + +impl fmt::Binary for BigUint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + 
f.pad_integral(true, "0b", &self.to_str_radix(2)) + } +} + +impl fmt::Octal for BigUint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad_integral(true, "0o", &self.to_str_radix(8)) + } +} + +impl Zero for BigUint { + #[inline] + fn zero() -> BigUint { + Self::zero() + } + + #[inline] + fn set_zero(&mut self) { + self.data.clear(); + } + + #[inline] + fn is_zero(&self) -> bool { + self.data.is_empty() + } +} + +// impl ConstZero for BigUint { +// // forward to the inherent const +// const ZERO: Self = Self::ZERO; // BigUint { data: Vec::new() }; +// } + +impl One for BigUint { + #[inline] + fn one() -> BigUint { + BigUint { data: tiny_vec![1] } + } + + #[inline] + fn set_one(&mut self) { + self.data.clear(); + self.data.push(1); + } + + #[inline] + fn is_one(&self) -> bool { + self.data[..] == [1] + } +} + +impl Unsigned for BigUint {} + +impl Integer for BigUint { + #[inline] + fn div_rem(&self, other: &BigUint) -> (BigUint, BigUint) { + division::div_rem_ref(self, other) + } + + #[inline] + fn div_floor(&self, other: &BigUint) -> BigUint { + let (d, _) = division::div_rem_ref(self, other); + d + } + + #[inline] + fn mod_floor(&self, other: &BigUint) -> BigUint { + let (_, m) = division::div_rem_ref(self, other); + m + } + + #[inline] + fn div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint) { + division::div_rem_ref(self, other) + } + + #[inline] + fn div_ceil(&self, other: &BigUint) -> BigUint { + let (d, m) = division::div_rem_ref(self, other); + if m.is_zero() { + d + } else { + d + 1u32 + } + } + + /// Calculates the Greatest Common Divisor (GCD) of the number and `other`. + /// + /// The result is always positive. 
+ #[inline] + fn gcd(&self, other: &Self) -> Self { + #[inline] + fn twos(x: &BigUint) -> u64 { + x.trailing_zeros().unwrap_or(0) + } + + // Stein's algorithm + if self.is_zero() { + return other.clone(); + } + if other.is_zero() { + return self.clone(); + } + let mut m = self.clone(); + let mut n = other.clone(); + + // find common factors of 2 + let shift = cmp::min(twos(&n), twos(&m)); + + // divide m and n by 2 until odd + // m inside loop + n >>= twos(&n); + + while !m.is_zero() { + m >>= twos(&m); + if n > m { + mem::swap(&mut n, &mut m) + } + m -= &n; + } + + n << shift + } + + /// Calculates the Lowest Common Multiple (LCM) of the number and `other`. + #[inline] + fn lcm(&self, other: &BigUint) -> BigUint { + if self.is_zero() && other.is_zero() { + Self::zero() + } else { + self / self.gcd(other) * other + } + } + + /// Calculates the Greatest Common Divisor (GCD) and + /// Lowest Common Multiple (LCM) together. + #[inline] + fn gcd_lcm(&self, other: &Self) -> (Self, Self) { + let gcd = self.gcd(other); + let lcm = if gcd.is_zero() { + Self::zero() + } else { + self / &gcd * other + }; + (gcd, lcm) + } + + /// Deprecated, use `is_multiple_of` instead. + #[inline] + fn divides(&self, other: &BigUint) -> bool { + self.is_multiple_of(other) + } + + /// Returns `true` if the number is a multiple of `other`. + #[inline] + fn is_multiple_of(&self, other: &BigUint) -> bool { + if other.is_zero() { + return self.is_zero(); + } + (self % other).is_zero() + } + + /// Returns `true` if the number is divisible by `2`. + #[inline] + fn is_even(&self) -> bool { + // Considering only the last digit. + match self.data.first() { + Some(x) => x.is_even(), + None => true, + } + } + + /// Returns `true` if the number is not divisible by `2`. + #[inline] + fn is_odd(&self) -> bool { + !self.is_even() + } + + /// Rounds up to nearest multiple of argument. 
+ #[inline] + fn next_multiple_of(&self, other: &Self) -> Self { + let m = self.mod_floor(other); + if m.is_zero() { + self.clone() + } else { + self + (other - m) + } + } + /// Rounds down to nearest multiple of argument. + #[inline] + fn prev_multiple_of(&self, other: &Self) -> Self { + self - self.mod_floor(other) + } + + fn dec(&mut self) { + *self -= 1u32; + } + + fn inc(&mut self) { + *self += 1u32; + } +} + +#[inline] +fn fixpoint(mut x: BigUint, max_bits: u64, f: F) -> BigUint +where + F: Fn(&BigUint) -> BigUint, +{ + let mut xn = f(&x); + + // If the value increased, then the initial guess must have been low. + // Repeat until we reverse course. + while x < xn { + // Sometimes an increase will go way too far, especially with large + // powers, and then take a long time to walk back. We know an upper + // bound based on bit size, so saturate on that. + x = if xn.bits() > max_bits { + BigUint::one() << max_bits + } else { + xn + }; + xn = f(&x); + } + + // Now keep repeating while the estimate is decreasing. + while x > xn { + x = xn; + xn = f(&x); + } + x +} + +impl Roots for BigUint { + // nth_root, sqrt and cbrt use Newton's method to compute + // principal root of a given degree for a given integer. + + // Reference: + // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.14 + fn nth_root(&self, n: u32) -> Self { + assert!(n > 0, "root degree n must be at least 1"); + + if self.is_zero() || self.is_one() { + return self.clone(); + } + + match n { + // Optimize for small n + 1 => return self.clone(), + 2 => return self.sqrt(), + 3 => return self.cbrt(), + _ => (), + } + + // The root of non-zero values less than 2ⁿ can only be 1. + let bits = self.bits(); + let n64 = u64::from(n); + if bits <= n64 { + return BigUint::one(); + } + + // If we fit in `u64`, compute the root that way. 
+ if let Some(x) = self.to_u64() { + return x.nth_root(n).into(); + } + + let max_bits = bits / n64 + 1; + + #[cfg(feature = "std")] + let guess = match self.to_f64() { + Some(f) if f.is_finite() => { + use num_traits::FromPrimitive; + + // We fit in `f64` (lossy), so get a better initial guess from that. + BigUint::from_f64((f.ln() / f64::from(n)).exp()).unwrap() + } + _ => { + // Try to guess by scaling down such that it does fit in `f64`. + // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ) + let extra_bits = bits - (f64::MAX_EXP as u64 - 1); + let root_scale = Integer::div_ceil(&extra_bits, &n64); + let scale = root_scale * n64; + if scale < bits && bits - scale > n64 { + (self >> scale).nth_root(n) << root_scale + } else { + BigUint::one() << max_bits + } + } + }; + + #[cfg(not(feature = "std"))] + let guess = BigUint::one() << max_bits; + + let n_min_1 = n - 1; + fixpoint(guess, max_bits, move |s| { + let q = self / s.pow(n_min_1); + let t = n_min_1 * s + q; + t / n + }) + } + + // Reference: + // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13 + fn sqrt(&self) -> Self { + if self.is_zero() || self.is_one() { + return self.clone(); + } + + // If we fit in `u64`, compute the root that way. + if let Some(x) = self.to_u64() { + return x.sqrt().into(); + } + + let bits = self.bits(); + let max_bits = bits / 2 + 1; + + #[cfg(feature = "std")] + let guess = match self.to_f64() { + Some(f) if f.is_finite() => { + use num_traits::FromPrimitive; + + // We fit in `f64` (lossy), so get a better initial guess from that. + BigUint::from_f64(f.sqrt()).unwrap() + } + _ => { + // Try to guess by scaling down such that it does fit in `f64`. 
+ // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ) + let extra_bits = bits - (f64::MAX_EXP as u64 - 1); + let root_scale = (extra_bits + 1) / 2; + let scale = root_scale * 2; + (self >> scale).sqrt() << root_scale + } + }; + + #[cfg(not(feature = "std"))] + let guess = BigUint::one() << max_bits; + + fixpoint(guess, max_bits, move |s| { + let q = self / s; + let t = s + q; + t >> 1 + }) + } + + fn cbrt(&self) -> Self { + if self.is_zero() || self.is_one() { + return self.clone(); + } + + // If we fit in `u64`, compute the root that way. + if let Some(x) = self.to_u64() { + return x.cbrt().into(); + } + + let bits = self.bits(); + let max_bits = bits / 3 + 1; + + #[cfg(feature = "std")] + let guess = match self.to_f64() { + Some(f) if f.is_finite() => { + use num_traits::FromPrimitive; + + // We fit in `f64` (lossy), so get a better initial guess from that. + BigUint::from_f64(f.cbrt()).unwrap() + } + _ => { + // Try to guess by scaling down such that it does fit in `f64`. + // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ) + let extra_bits = bits - (f64::MAX_EXP as u64 - 1); + let root_scale = (extra_bits + 2) / 3; + let scale = root_scale * 3; + (self >> scale).cbrt() << root_scale + } + }; + + #[cfg(not(feature = "std"))] + let guess = BigUint::one() << max_bits; + + fixpoint(guess, max_bits, move |s| { + let q = self / (s * s); + let t = (s << 1) + q; + t / 3u32 + }) + } +} + +/// A generic trait for converting a value to a [`BigUint`]. +pub trait ToBigUint { + /// Converts the value of `self` to a [`BigUint`]. + fn to_biguint(&self) -> Option; +} + +/// Creates and initializes a [`BigUint`]. +/// +/// The digits are in little-endian base matching `BigDigit`. 
+// #[inline] +// pub(crate) fn biguint_from_vec(digits: Vec) -> BigUint { +// BigUint { data: digits.into_iter().collect() }.normalized() +// } +#[inline] +pub(crate) fn biguint_from_tinyvec(digits: TinyVec<[BigDigit; N]>) -> BigUint { + BigUint { data: digits }.normalized() +} + +impl BigUint { + pub fn zero() -> Self { + BigUint { + data: [].into_iter().collect(), + } + } + + // /// A constant `BigUint` with value 0, useful for static initialization. + // pub const ZERO: Self = BigUint { data: TinyVec::Inline(ArrayVec::from_array_empty([0; 8])) }; + + /// Creates and initializes a [`BigUint`]. + /// + /// The base 232 digits are ordered least significant digit first. + #[inline] + pub fn new(digits: Vec) -> BigUint { + let mut big = Self::zero(); + + cfg_digit_expr!( + { + big.data = digits; + big.normalize(); + }, + big.assign_from_slice(&digits) + ); + + big + } + + /// Creates and initializes a [`BigUint`]. + /// + /// The base 232 digits are ordered least significant digit first. + #[inline] + pub fn from_slice(slice: &[u32]) -> BigUint { + let mut big = Self::zero(); + big.assign_from_slice(slice); + big + } + + /// Assign a value to a [`BigUint`]. + /// + /// The base 232 digits are ordered least significant digit first. + #[inline] + pub fn assign_from_slice(&mut self, slice: &[u32]) { + self.data.clear(); + + cfg_digit_expr!( + self.data.extend_from_slice(slice), + self.data.extend(slice.chunks(2).map(u32_chunk_to_u64)) + ); + + self.normalize(); + } + + /// Creates and initializes a [`BigUint`]. + /// + /// The bytes are in big-endian byte order. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from_bytes_be(b"A"), + /// BigUint::parse_bytes(b"65", 10).unwrap()); + /// assert_eq!(BigUint::from_bytes_be(b"AA"), + /// BigUint::parse_bytes(b"16705", 10).unwrap()); + /// assert_eq!(BigUint::from_bytes_be(b"AB"), + /// BigUint::parse_bytes(b"16706", 10).unwrap()); + /// assert_eq!(BigUint::from_bytes_be(b"Hello world!"), + /// BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap()); + /// ``` + #[inline] + pub fn from_bytes_be(bytes: &[u8]) -> BigUint { + if bytes.is_empty() { + Self::zero() + } else { + let mut v = bytes.to_vec(); + v.reverse(); + BigUint::from_bytes_le(&v) + } + } + + /// Creates and initializes a [`BigUint`]. + /// + /// The bytes are in little-endian byte order. + #[inline] + pub fn from_bytes_le(bytes: &[u8]) -> BigUint { + if bytes.is_empty() { + Self::zero() + } else { + convert::from_bitwise_digits_le::(bytes, 8) + } + } + + /// Creates and initializes a [`BigUint`]. The input slice must contain + /// ascii/utf8 characters in [0-9a-zA-Z]. + /// `radix` must be in the range `2...36`. + /// + /// The function `from_str_radix` from the `Num` trait provides the same logic + /// for `&str` buffers. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigUint, ToBigUint}; + /// + /// assert_eq!(BigUint::parse_bytes(b"1234", 10), ToBigUint::to_biguint(&1234)); + /// assert_eq!(BigUint::parse_bytes(b"ABCD", 16), ToBigUint::to_biguint(&0xABCD)); + /// assert_eq!(BigUint::parse_bytes(b"G", 16), None); + /// ``` + #[inline] + pub fn parse_bytes(buf: &[u8], radix: u32) -> Option> { + let s = str::from_utf8(buf).ok()?; + BigUint::from_str_radix(s, radix).ok() + } + + /// Creates and initializes a [`BigUint`]. Each `u8` of the input slice is + /// interpreted as one digit of the number + /// and must therefore be less than `radix`. + /// + /// The bytes are in big-endian byte order. + /// `radix` must be in the range `2...256`. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigUint}; + /// + /// let inbase190 = &[15, 33, 125, 12, 14]; + /// let a = BigUint::from_radix_be(inbase190, 190).unwrap(); + /// assert_eq!(a.to_radix_be(190), inbase190); + /// ``` + pub fn from_radix_be(buf: &[u8], radix: u32) -> Option> { + convert::from_radix_be(buf, radix) + } + + /// Creates and initializes a [`BigUint`]. Each `u8` of the input slice is + /// interpreted as one digit of the number + /// and must therefore be less than `radix`. + /// + /// The bytes are in little-endian byte order. + /// `radix` must be in the range `2...256`. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigUint}; + /// + /// let inbase190 = &[14, 12, 125, 33, 15]; + /// let a = BigUint::from_radix_be(inbase190, 190).unwrap(); + /// assert_eq!(a.to_radix_be(190), inbase190); + /// ``` + pub fn from_radix_le(buf: &[u8], radix: u32) -> Option> { + convert::from_radix_le(buf, radix) + } + + /// Returns the byte representation of the [`BigUint`] in big-endian byte order. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// let i = BigUint::parse_bytes(b"1125", 10).unwrap(); + /// assert_eq!(i.to_bytes_be(), vec![4, 101]); + /// ``` + #[inline] + pub fn to_bytes_be(&self) -> Vec { + let mut v = self.to_bytes_le(); + v.reverse(); + v + } + + /// Returns the byte representation of the [`BigUint`] in little-endian byte order. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// let i = BigUint::parse_bytes(b"1125", 10).unwrap(); + /// assert_eq!(i.to_bytes_le(), vec![101, 4]); + /// ``` + #[inline] + pub fn to_bytes_le(&self) -> Vec { + if self.is_zero() { + vec![0] + } else { + convert::to_bitwise_digits_le(self, 8) + } + } + + /// Returns the `u32` digits representation of the [`BigUint`] ordered least significant digit + /// first. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from(1125u32).to_u32_digits(), vec![1125]); + /// assert_eq!(BigUint::from(4294967295u32).to_u32_digits(), vec![4294967295]); + /// assert_eq!(BigUint::from(4294967296u64).to_u32_digits(), vec![0, 1]); + /// assert_eq!(BigUint::from(112500000000u64).to_u32_digits(), vec![830850304, 26]); + /// ``` + #[inline] + pub fn to_u32_digits(&self) -> Vec { + self.iter_u32_digits().collect() + } + + /// Returns the `u64` digits representation of the [`BigUint`] ordered least significant digit + /// first. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from(1125u32).to_u64_digits(), vec![1125]); + /// assert_eq!(BigUint::from(4294967295u32).to_u64_digits(), vec![4294967295]); + /// assert_eq!(BigUint::from(4294967296u64).to_u64_digits(), vec![4294967296]); + /// assert_eq!(BigUint::from(112500000000u64).to_u64_digits(), vec![112500000000]); + /// assert_eq!(BigUint::from(1u128 << 64).to_u64_digits(), vec![0, 1]); + /// ``` + #[inline] + pub fn to_u64_digits(&self) -> Vec { + self.iter_u64_digits().collect() + } + + /// Returns an iterator of `u32` digits representation of the [`BigUint`] ordered least + /// significant digit first. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from(1125u32).iter_u32_digits().collect::>(), vec![1125]); + /// assert_eq!(BigUint::from(4294967295u32).iter_u32_digits().collect::>(), vec![4294967295]); + /// assert_eq!(BigUint::from(4294967296u64).iter_u32_digits().collect::>(), vec![0, 1]); + /// assert_eq!(BigUint::from(112500000000u64).iter_u32_digits().collect::>(), vec![830850304, 26]); + /// ``` + #[inline] + pub fn iter_u32_digits(&self) -> U32Digits<'_> { + U32Digits::new(self.data.as_slice()) + } + + /// Returns an iterator of `u64` digits representation of the [`BigUint`] ordered least + /// significant digit first. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from(1125u32).iter_u64_digits().collect::>(), vec![1125]); + /// assert_eq!(BigUint::from(4294967295u32).iter_u64_digits().collect::>(), vec![4294967295]); + /// assert_eq!(BigUint::from(4294967296u64).iter_u64_digits().collect::>(), vec![4294967296]); + /// assert_eq!(BigUint::from(112500000000u64).iter_u64_digits().collect::>(), vec![112500000000]); + /// assert_eq!(BigUint::from(1u128 << 64).iter_u64_digits().collect::>(), vec![0, 1]); + /// ``` + #[inline] + pub fn iter_u64_digits(&self) -> U64Digits<'_> { + U64Digits::new(self.data.as_slice()) + } + + /// Returns the integer formatted as a string in the given radix. + /// `radix` must be in the range `2...36`. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// let i = BigUint::parse_bytes(b"ff", 16).unwrap(); + /// assert_eq!(i.to_str_radix(16), "ff"); + /// ``` + #[inline] + pub fn to_str_radix(&self, radix: u32) -> String { + let mut v = to_str_radix_reversed(self, radix); + v.reverse(); + unsafe { String::from_utf8_unchecked(v) } + } + + /// Returns the integer in the requested base in big-endian digit order. + /// The output is not given in a human readable alphabet but as a zero + /// based `u8` number. + /// `radix` must be in the range `2...256`. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_be(159), + /// vec![2, 94, 27]); + /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27 + /// ``` + #[inline] + pub fn to_radix_be(&self, radix: u32) -> Vec { + let mut v = convert::to_radix_le(self, radix); + v.reverse(); + v + } + + /// Returns the integer in the requested base in little-endian digit order. + /// The output is not given in a human readable alphabet but as a zero + /// based u8 number. + /// `radix` must be in the range `2...256`. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_le(159), + /// vec![27, 94, 2]); + /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2) + /// ``` + #[inline] + pub fn to_radix_le(&self, radix: u32) -> Vec { + convert::to_radix_le(self, radix) + } + + /// Determines the fewest bits necessary to express the [`BigUint`]. + #[inline] + pub fn bits(&self) -> u64 { + if self.is_zero() { + return 0; + } + let zeros: u64 = self.data.last().unwrap().leading_zeros().into(); + self.data.len() as u64 * u64::from(big_digit::BITS) - zeros + } + + /// Strips off trailing zero bigdigits - comparisons require the last element in the vector to + /// be nonzero. + #[inline] + pub(crate) fn normalize(&mut self) { + if let Some(&0) = self.data.last() { + let len = self.data.iter().rposition(|&d| d != 0).map_or(0, |i| i + 1); + self.data.truncate(len); + } + if self.data.len() < self.data.capacity() / 4 { + self.data.shrink_to_fit(); + } + } + + /// Returns a normalized [`BigUint`]. + #[inline] + fn normalized(mut self) -> BigUint { + self.normalize(); + self + } + + /// Returns `self ^ exponent`. + pub fn pow(&self, exponent: u32) -> Self { + Pow::pow(self, exponent) + } + + /// Returns `(self ^ exponent) % modulus`. + /// + /// Panics if the modulus is zero. + pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self { + power::modpow(self, exponent, modulus) + } + + /// Returns the modular multiplicative inverse if it exists, otherwise `None`. + /// + /// This solves for `x` in the interval `[0, modulus)` such that `self * x ≡ 1 (mod modulus)`. + /// The solution exists if and only if `gcd(self, modulus) == 1`. 
+ /// + /// ``` + /// use num_bigint::BigUint; + /// use num_traits::{One, Zero}; + /// + /// let m = BigUint::from(383_u32); + /// + /// // Trivial cases + /// assert_eq!(BigUint::zero().modinv(&m), None); + /// assert_eq!(BigUint::one().modinv(&m), Some(BigUint::one())); + /// let neg1 = &m - 1u32; + /// assert_eq!(neg1.modinv(&m), Some(neg1)); + /// + /// let a = BigUint::from(271_u32); + /// let x = a.modinv(&m).unwrap(); + /// assert_eq!(x, BigUint::from(106_u32)); + /// assert_eq!(x.modinv(&m).unwrap(), a); + /// assert!((a * x % m).is_one()); + /// ``` + pub fn modinv(&self, modulus: &Self) -> Option { + // Based on the inverse pseudocode listed here: + // https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers + // TODO: consider Binary or Lehmer's GCD algorithms for optimization. + + assert!( + !modulus.is_zero(), + "attempt to calculate with zero modulus!" + ); + if modulus.is_one() { + return Some(Self::zero()); + } + + let mut r0; // = modulus.clone(); + let mut r1 = self % modulus; + let mut t0; // = Self::zero(); + let mut t1; // = Self::one(); + + // Lift and simplify the first iteration to avoid some initial allocations. 
+ if r1.is_zero() { + return None; + } else if r1.is_one() { + return Some(r1); + } else { + let (q, r2) = modulus.div_rem(&r1); + if r2.is_zero() { + return None; + } + r0 = r1; + r1 = r2; + t0 = Self::one(); + t1 = modulus - q; + } + + while !r1.is_zero() { + let (q, r2) = r0.div_rem(&r1); + r0 = r1; + r1 = r2; + + // let t2 = (t0 - q * t1) % modulus; + let qt1 = q * &t1 % modulus; + let t2 = if t0 < qt1 { + t0 + (modulus - qt1) + } else { + t0 - qt1 + }; + t0 = t1; + t1 = t2; + } + + if r0.is_one() { + Some(t0) + } else { + None + } + } + + /// Returns the truncated principal square root of `self` -- + /// see [Roots::sqrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.sqrt) + pub fn sqrt(&self) -> Self { + Roots::sqrt(self) + } + + /// Returns the truncated principal cube root of `self` -- + /// see [Roots::cbrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.cbrt). + pub fn cbrt(&self) -> Self { + Roots::cbrt(self) + } + + /// Returns the truncated principal `n`th root of `self` -- + /// see [Roots::nth_root](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#tymethod.nth_root). + pub fn nth_root(&self, n: u32) -> Self { + Roots::nth_root(self, n) + } + + /// Returns the number of least-significant bits that are zero, + /// or `None` if the entire number is zero. + pub fn trailing_zeros(&self) -> Option { + let i = self.data.iter().position(|&digit| digit != 0)?; + let zeros: u64 = self.data[i].trailing_zeros().into(); + Some(i as u64 * u64::from(big_digit::BITS) + zeros) + } + + /// Returns the number of least-significant bits that are ones. + pub fn trailing_ones(&self) -> u64 { + if let Some(i) = self.data.iter().position(|&digit| !digit != 0) { + let ones: u64 = self.data[i].trailing_ones().into(); + i as u64 * u64::from(big_digit::BITS) + ones + } else { + self.data.len() as u64 * u64::from(big_digit::BITS) + } + } + + /// Returns the number of one bits. 
+ pub fn count_ones(&self) -> u64 { + self.data.iter().map(|&d| u64::from(d.count_ones())).sum() + } + + /// Returns whether the bit in the given position is set + pub fn bit(&self, bit: u64) -> bool { + let bits_per_digit = u64::from(big_digit::BITS); + if let Some(digit_index) = (bit / bits_per_digit).to_usize() { + if let Some(digit) = self.data.get(digit_index) { + let bit_mask = (1 as BigDigit) << (bit % bits_per_digit); + return (digit & bit_mask) != 0; + } + } + false + } + + /// Sets or clears the bit in the given position + /// + /// Note that setting a bit greater than the current bit length, a reallocation may be needed + /// to store the new digits + pub fn set_bit(&mut self, bit: u64, value: bool) { + // Note: we're saturating `digit_index` and `new_len` -- any such case is guaranteed to + // fail allocation, and that's more consistent than adding our own overflow panics. + let bits_per_digit = u64::from(big_digit::BITS); + let digit_index = (bit / bits_per_digit).to_usize().unwrap_or(usize::MAX); + let bit_mask = (1 as BigDigit) << (bit % bits_per_digit); + if value { + if digit_index >= self.data.len() { + let new_len = digit_index.saturating_add(1); + self.data.resize(new_len, 0); + } + self.data[digit_index] |= bit_mask; + } else if digit_index < self.data.len() { + self.data[digit_index] &= !bit_mask; + // the top bit may have been cleared, so normalize + self.normalize(); + } + } +} + +impl num_traits::FromBytes for BigUint { + type Bytes = [u8]; + + fn from_be_bytes(bytes: &Self::Bytes) -> Self { + Self::from_bytes_be(bytes) + } + + fn from_le_bytes(bytes: &Self::Bytes) -> Self { + Self::from_bytes_le(bytes) + } +} + +impl num_traits::ToBytes for BigUint { + type Bytes = Vec; + + fn to_be_bytes(&self) -> Self::Bytes { + self.to_bytes_be() + } + + fn to_le_bytes(&self) -> Self::Bytes { + self.to_bytes_le() + } +} + +// pub(crate) trait IntDigits { +// fn digits(&self) -> &[BigDigit]; +// fn digits_mut(&mut self) -> &mut TinyVec<[BigDigit; 
NLIMBS]>; +// fn normalize(&mut self); +// fn capacity(&self) -> usize; +// fn len(&self) -> usize; +// } + +// impl IntDigits for BigUint { +impl BigUint { + // const N: usize = N; + + #[inline] + pub(crate) fn digits(&self) -> &[BigDigit] { + &self.data + } + #[inline] + pub(crate) fn digits_mut(&mut self) -> &mut TinyVec<[BigDigit; N]> { + &mut self.data + } + // #[inline] + // pub(crate) fn normalize(&mut self) { + // self.normalize(); + // } + #[inline] + pub(crate) fn capacity(&self) -> usize { + self.data.capacity() + } + #[inline] + pub(crate) fn len(&self) -> usize { + self.data.len() + } +} + +/// Convert a `u32` chunk (len is either 1 or 2) to a single `u64` digit +#[inline] +fn u32_chunk_to_u64(chunk: &[u32]) -> u64 { + // raw could have odd length + let mut digit = chunk[0] as u64; + if let Some(&hi) = chunk.get(1) { + digit |= (hi as u64) << 32; + } + digit +} + +cfg_32_or_test!( + /// Combine four `u32`s into a single `u128`. + #[inline] + fn u32_to_u128(a: u32, b: u32, c: u32, d: u32) -> u128 { + u128::from(d) | (u128::from(c) << 32) | (u128::from(b) << 64) | (u128::from(a) << 96) + } +); + +cfg_32_or_test!( + /// Split a single `u128` into four `u32`. 
+ #[inline] + fn u32_from_u128(n: u128) -> (u32, u32, u32, u32) { + ( + (n >> 96) as u32, + (n >> 64) as u32, + (n >> 32) as u32, + n as u32, + ) + } +); + +cfg_digit!( + #[test] + fn test_from_slice() { + fn check(slice: &[u32], data: &[BigDigit]) { + assert_eq!(BigUint::from_slice(slice).data, data); + } + check(&[1], &[1]); + check(&[0, 0, 0], &[]); + check(&[1, 2, 0, 0], &[1, 2]); + check(&[0, 0, 1, 2], &[0, 0, 1, 2]); + check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]); + check(&[-1i32 as u32], &[-1i32 as BigDigit]); + } + + #[test] + fn test_from_slice() { + fn check(slice: &[u32], data: &[BigDigit]) { + assert_eq!( + BigUint::<32>::from_slice(slice).data, + data, + "from {:?}, to {:?}", + slice, + data + ); + } + check(&[1], &[1]); + check(&[0, 0, 0], &[]); + check(&[1, 2], &[8_589_934_593]); + check(&[1, 2, 0, 0], &[8_589_934_593]); + check(&[0, 0, 1, 2], &[0, 8_589_934_593]); + check(&[0, 0, 1, 2, 0, 0], &[0, 8_589_934_593]); + check(&[-1i32 as u32], &[(-1i32 as u32) as BigDigit]); + } +); + +#[test] +fn test_u32_u128() { + assert_eq!(u32_from_u128(0u128), (0, 0, 0, 0)); + assert_eq!( + u32_from_u128(u128::MAX), + (u32::MAX, u32::MAX, u32::MAX, u32::MAX) + ); + + assert_eq!(u32_from_u128(u32::MAX as u128), (0, 0, 0, u32::MAX)); + + assert_eq!(u32_from_u128(u64::MAX as u128), (0, 0, u32::MAX, u32::MAX)); + + assert_eq!( + u32_from_u128((u64::MAX as u128) + u32::MAX as u128), + (0, 1, 0, u32::MAX - 1) + ); + + assert_eq!(u32_from_u128(36_893_488_151_714_070_528), (0, 2, 1, 0)); +} + +#[test] +fn test_u128_u32_roundtrip() { + // roundtrips + let values = vec![ + 0u128, + 1u128, + u64::MAX as u128 * 3, + u32::MAX as u128, + u64::MAX as u128, + (u64::MAX as u128) + u32::MAX as u128, + u128::MAX, + ]; + + for val in &values { + let (a, b, c, d) = u32_from_u128(*val); + assert_eq!(u32_to_u128(a, b, c, d), *val); + } +} diff --git a/vendor/num-bigint-generic/src/biguint/addition.rs b/vendor/num-bigint-generic/src/biguint/addition.rs new file mode 100644 index 
000000000..b1fff7c1a --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/addition.rs @@ -0,0 +1,260 @@ +use super::BigUint; + +use crate::{ + big_digit::{self, BigDigit}, + UsizePromotion, +}; + +use core::{ + iter::Sum, + ops::{Add, AddAssign}, +}; +use num_traits::CheckedAdd; + +#[cfg(target_arch = "x86_64")] +use core::arch::x86_64 as arch; + +#[cfg(target_arch = "x86")] +use core::arch::x86 as arch; + +// Add with carry: +#[cfg(target_arch = "x86_64")] +cfg_64!( + #[inline] + fn adc(carry: u8, a: u64, b: u64, out: &mut u64) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_addcarry_u64`. + // It's just unsafe for API consistency with other intrinsics. + unsafe { arch::_addcarry_u64(carry, a, b, out) } + } +); + +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +cfg_32!( + #[inline] + fn adc(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_addcarry_u32`. + // It's just unsafe for API consistency with other intrinsics. + unsafe { arch::_addcarry_u32(carry, a, b, out) } + } +); + +// fallback for environments where we don't have an addcarry intrinsic +// (copied from the standard library's `carrying_add`) +#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +#[inline] +fn adc(carry: u8, lhs: BigDigit, rhs: BigDigit, out: &mut BigDigit) -> u8 { + let (a, b) = lhs.overflowing_add(rhs); + let (c, d) = a.overflowing_add(carry as BigDigit); + *out = c; + u8::from(b || d) +} + +/// Two argument addition of raw slices, `a += b`, returning the carry. +/// +/// This is used when the data `Vec` might need to resize to push a non-zero carry, so we perform +/// the addition first hoping that it will fit. +/// +/// The caller _must_ ensure that `a` is at least as long as `b`. 
+#[inline] +pub(super) fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit { + debug_assert!(a.len() >= b.len()); + + let mut carry = 0; + let (a_lo, a_hi) = a.split_at_mut(b.len()); + + for (a, b) in a_lo.iter_mut().zip(b) { + carry = adc(carry, *a, *b, a); + } + + if carry != 0 { + for a in a_hi { + carry = adc(carry, *a, 0, a); + if carry == 0 { + break; + } + } + } + + carry as BigDigit +} + +/// Two argument addition of raw slices: +/// a += b +/// +/// The caller _must_ ensure that a is big enough to store the result - typically this means +/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry. +pub(super) fn add2(a: &mut [BigDigit], b: &[BigDigit]) { + let carry = __add2(a, b); + + debug_assert!(carry == 0); +} + +forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add); +forward_val_assign!(impl AddAssign for BigUint, add_assign); + +impl Add<&BigUint> for BigUint { + type Output = BigUint; + + fn add(mut self, other: &BigUint) -> BigUint { + self += other; + self + } +} +impl AddAssign<&BigUint> for BigUint { + #[inline] + fn add_assign(&mut self, other: &BigUint) { + let self_len = self.data.len(); + let carry = if self_len < other.data.len() { + let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]); + self.data.extend_from_slice(&other.data[self_len..]); + __add2(&mut self.data[self_len..], &[lo_carry]) + } else { + __add2(&mut self.data[..], &other.data[..]) + }; + if carry != 0 { + self.data.push(carry); + } + } +} + +promote_unsigned_scalars!(impl Add for BigUint, add); +promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); + +impl Add for BigUint { + type Output = BigUint; + + #[inline] + fn add(mut self, other: u32) -> BigUint { + self += other; + self + 
} +} + +impl AddAssign for BigUint { + #[inline] + fn add_assign(&mut self, other: u32) { + if other != 0 { + if self.data.is_empty() { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[other as BigDigit]); + if carry != 0 { + self.data.push(carry); + } + } + } +} + +impl Add for BigUint { + type Output = BigUint; + + #[inline] + fn add(mut self, other: u64) -> BigUint { + self += other; + self + } +} + +impl AddAssign for BigUint { + cfg_digit!( + #[inline] + fn add_assign(&mut self, other: u64) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + if hi == 0 { + *self += lo; + } else { + while self.data.len() < 2 { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[lo, hi]); + if carry != 0 { + self.data.push(carry); + } + } + } + + #[inline] + fn add_assign(&mut self, other: u64) { + if other != 0 { + if self.data.is_empty() { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[other as BigDigit]); + if carry != 0 { + self.data.push(carry); + } + } + } + ); +} + +impl Add for BigUint { + type Output = BigUint; + + #[inline] + fn add(mut self, other: u128) -> BigUint { + self += other; + self + } +} + +impl AddAssign for BigUint { + cfg_digit!( + #[inline] + fn add_assign(&mut self, other: u128) { + if other <= u128::from(u64::MAX) { + *self += other as u64 + } else { + let (a, b, c, d) = super::u32_from_u128(other); + let carry = if a > 0 { + while self.data.len() < 4 { + self.data.push(0); + } + __add2(&mut self.data, &[d, c, b, a]) + } else { + debug_assert!(b > 0); + while self.data.len() < 3 { + self.data.push(0); + } + __add2(&mut self.data, &[d, c, b]) + }; + + if carry != 0 { + self.data.push(carry); + } + } + } + + #[inline] + fn add_assign(&mut self, other: u128) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + if hi == 0 { + *self += lo; + } else { + while self.data.len() < 2 { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[lo, hi]); + if carry != 0 { + 
self.data.push(carry); + } + } + } + ); +} + +impl CheckedAdd for BigUint { + #[inline] + fn checked_add(&self, v: &BigUint) -> Option> { + Some(self.add(v)) + } +} + +impl_sum_iter_type!(BigUint); diff --git a/vendor/num-bigint-generic/src/biguint/arbitrary.rs b/vendor/num-bigint-generic/src/biguint/arbitrary.rs new file mode 100644 index 000000000..11e523f2a --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/arbitrary.rs @@ -0,0 +1,38 @@ +#![cfg(any(feature = "quickcheck", feature = "arbitrary"))] + +use super::{biguint_from_vec, BigUint}; + +use crate::big_digit::BigDigit; +#[cfg(feature = "quickcheck")] +use alloc::boxed::Box; +use alloc::vec::Vec; + +#[cfg(feature = "quickcheck")] +#[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))] +impl quickcheck::Arbitrary for BigUint { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + // Use arbitrary from Vec + biguint_from_vec(Vec::::arbitrary(g)) + } + + fn shrink(&self) -> Box> { + // Use shrinker from Vec + Box::new(self.data.shrink().map(biguint_from_vec)) + } +} + +#[cfg(feature = "arbitrary")] +#[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))] +impl arbitrary::Arbitrary<'_> for BigUint { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(biguint_from_vec(Vec::::arbitrary(u)?)) + } + + fn arbitrary_take_rest(u: arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(biguint_from_vec(Vec::::arbitrary_take_rest(u)?)) + } + + fn size_hint(depth: usize) -> (usize, Option) { + Vec::::size_hint(depth) + } +} diff --git a/vendor/num-bigint-generic/src/biguint/bits.rs b/vendor/num-bigint-generic/src/biguint/bits.rs new file mode 100644 index 000000000..28b27e370 --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/bits.rs @@ -0,0 +1,93 @@ +use super::BigUint; + +use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign}; + +forward_val_val_binop!(impl BitAnd for BigUint, bitand); +forward_ref_val_binop!(impl BitAnd for BigUint, bitand); + +// do not use 
forward_ref_ref_binop_commutative! for bitand so that we can +// clone the smaller value rather than the larger, avoiding over-allocation +impl BitAnd<&BigUint> for &BigUint { + type Output = BigUint; + + #[inline] + fn bitand(self, other: &BigUint) -> BigUint { + // forward to val-ref, choosing the smaller to clone + if self.data.len() <= other.data.len() { + self.clone() & other + } else { + other.clone() & self + } + } +} + +forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign); + +impl BitAnd<&BigUint> for BigUint { + type Output = BigUint; + + #[inline] + fn bitand(mut self, other: &BigUint) -> BigUint { + self &= other; + self + } +} +impl BitAndAssign<&BigUint> for BigUint { + #[inline] + fn bitand_assign(&mut self, other: &BigUint) { + for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { + *ai &= bi; + } + self.data.truncate(other.data.len()); + self.normalize(); + } +} + +forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor); +forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign); + +impl BitOr<&BigUint> for BigUint { + type Output = BigUint; + + fn bitor(mut self, other: &BigUint) -> BigUint { + self |= other; + self + } +} +impl BitOrAssign<&BigUint> for BigUint { + #[inline] + fn bitor_assign(&mut self, other: &BigUint) { + for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { + *ai |= bi; + } + if other.data.len() > self.data.len() { + let extra = &other.data[self.data.len()..]; + self.data.extend(extra.iter().cloned()); + } + } +} + +forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor); +forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign); + +impl BitXor<&BigUint> for BigUint { + type Output = BigUint; + + fn bitxor(mut self, other: &BigUint) -> BigUint { + self ^= other; + self + } +} +impl BitXorAssign<&BigUint> for BigUint { + #[inline] + fn bitxor_assign(&mut self, other: &BigUint) { + for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { + *ai 
^= bi; + } + if other.data.len() > self.data.len() { + let extra = &other.data[self.data.len()..]; + self.data.extend(extra.iter().cloned()); + } + self.normalize(); + } +} diff --git a/vendor/num-bigint-generic/src/biguint/convert.rs b/vendor/num-bigint-generic/src/biguint/convert.rs new file mode 100644 index 000000000..9aca2ec78 --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/convert.rs @@ -0,0 +1,873 @@ +// This uses stdlib features higher than the MSRV +#![allow(clippy::manual_range_contains)] // 1.35 + +use super::{biguint_from_tinyvec, BigUint, ToBigUint}; + +use super::{ + addition::add2, + division::{div_rem_digit, FAST_DIV_WIDE}, + multiplication::mac_with_carry, +}; + +use crate::{ + big_digit::{self, BigDigit}, + ParseBigIntError, TryFromBigIntError, +}; + +use alloc::vec::Vec; +use core::{ + cmp::Ordering::{Equal, Greater, Less}, + convert::TryFrom, + mem, + str::FromStr, +}; +use num_integer::{Integer, Roots}; +use num_traits::{float::FloatCore, FromPrimitive, Num, One, PrimInt, ToPrimitive, Zero}; +use tinyvec::TinyVec; + +/// Find last set bit +/// fls(0) == 0, fls(u32::MAX) == 32 +fn fls(v: T) -> u8 { + mem::size_of::() as u8 * 8 - v.leading_zeros() as u8 +} + +fn ilog2(v: T) -> u8 { + fls(v) - 1 +} + +impl FromStr for BigUint { + type Err = ParseBigIntError; + + #[inline] + fn from_str(s: &str) -> Result, ParseBigIntError> { + BigUint::from_str_radix(s, 10) + } +} + +// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides +// BigDigit::BITS +pub(super) fn from_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { + debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0); + debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); + + let digits_per_big_digit = big_digit::BITS / bits; + + let data = v + .chunks(digits_per_big_digit.into()) + .map(|chunk| { + chunk + .iter() + .rev() + .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c)) + }) + .collect(); + + biguint_from_tinyvec(data) 
+} + +// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide +// BigDigit::BITS +fn from_inexact_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { + debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0); + debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); + + // let total_bits = (v.len() as u64).saturating_mul(bits.into()); + // let big_digits = Integer::div_ceil(&total_bits, &big_digit::BITS.into()) + // .to_usize() + // .unwrap_or(usize::MAX); + let mut data = TinyVec::new(); + // let mut data = Vec::with_capacity(big_digits); + + let mut d = 0; + let mut dbits = 0; // number of bits we currently have in d + + // walk v accumululating bits in d; whenever we accumulate big_digit::BITS in d, spit out a + // big_digit: + for &c in v { + d |= BigDigit::from(c) << dbits; + dbits += bits; + + if dbits >= big_digit::BITS { + data.push(d); + dbits -= big_digit::BITS; + // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit + // in d) - grab the bits we lost here: + d = BigDigit::from(c) >> (bits - dbits); + } + } + + if dbits > 0 { + debug_assert!(dbits < big_digit::BITS); + data.push(d as BigDigit); + } + + biguint_from_tinyvec(data) +} + +// Read little-endian radix digits +fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint { + debug_assert!(!v.is_empty() && !radix.is_power_of_two()); + debug_assert!(v.iter().all(|&c| u32::from(c) < radix)); + + // Estimate how big the result will be, so we can pre-allocate it. 
+ // #[cfg(feature = "std")] + // let big_digits = { + // let radix_log2 = f64::from(radix).log2(); + // let bits = radix_log2 * v.len() as f64; + // (bits / big_digit::BITS as f64).ceil() + // }; + // #[cfg(not(feature = "std"))] + // let big_digits = { + // let radix_log2 = ilog2(radix.next_power_of_two()) as usize; + // let bits = radix_log2 * v.len(); + // (bits / big_digit::BITS as usize) + 1 + // }; + + let mut data = TinyVec::new(); + // let mut data = Vec::with_capacity(big_digits.to_usize().unwrap_or(0)); + + let (base, power) = get_radix_base(radix); + let radix = radix as BigDigit; + + let r = v.len() % power; + let i = if r == 0 { power } else { r }; + let (head, tail) = v.split_at(i); + + let first = head + .iter() + .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); + data.push(first); + + debug_assert!(tail.len() % power == 0); + for chunk in tail.chunks(power) { + if data.last() != Some(&0) { + data.push(0); + } + + let mut carry = 0; + for d in data.iter_mut() { + *d = mac_with_carry(0, *d, base, &mut carry); + } + debug_assert!(carry == 0); + + let n = chunk + .iter() + .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); + add2(&mut data, &[n]); + } + + biguint_from_tinyvec(data) +} + +pub(super) fn from_radix_be(buf: &[u8], radix: u32) -> Option> { + assert!( + 2 <= radix && radix <= 256, + "The radix must be within 2...256" + ); + + if buf.is_empty() { + return Some(BigUint::zero()); + } + + if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { + return None; + } + + let res = if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of multiplication + let bits = ilog2(radix); + let mut v = Vec::from(buf); + v.reverse(); + if big_digit::BITS % bits == 0 { + from_bitwise_digits_le(&v, bits) + } else { + from_inexact_bitwise_digits_le(&v, bits) + } + } else { + from_radix_digits_be(buf, radix) + }; + + Some(res) +} + +pub(super) fn from_radix_le(buf: &[u8], radix: u32) -> Option> { + assert!( + 2 <= radix 
&& radix <= 256, + "The radix must be within 2...256" + ); + + if buf.is_empty() { + return Some(BigUint::zero()); + } + + if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { + return None; + } + + let res = if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of multiplication + let bits = ilog2(radix); + if big_digit::BITS % bits == 0 { + from_bitwise_digits_le(buf, bits) + } else { + from_inexact_bitwise_digits_le(buf, bits) + } + } else { + let mut v = Vec::from(buf); + v.reverse(); + from_radix_digits_be(&v, radix) + }; + + Some(res) +} + +impl Num for BigUint { + type FromStrRadixErr = ParseBigIntError; + + /// Creates and initializes a `BigUint`. + fn from_str_radix(s: &str, radix: u32) -> Result, ParseBigIntError> { + assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); + let mut s = s; + if let Some(tail) = s.strip_prefix('+') { + if !tail.starts_with('+') { + s = tail + } + } + + if s.is_empty() { + return Err(ParseBigIntError::empty()); + } + + if s.starts_with('_') { + // Must lead with a real digit! 
+ return Err(ParseBigIntError::invalid()); + } + + // First normalize all characters to plain digit values + let mut v = Vec::with_capacity(s.len()); + for b in s.bytes() { + let d = match b { + b'0'..=b'9' => b - b'0', + b'a'..=b'z' => b - b'a' + 10, + b'A'..=b'Z' => b - b'A' + 10, + b'_' => continue, + _ => u8::MAX, + }; + if d < radix as u8 { + v.push(d); + } else { + return Err(ParseBigIntError::invalid()); + } + } + + let res = if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of multiplication + let bits = ilog2(radix); + v.reverse(); + if big_digit::BITS % bits == 0 { + from_bitwise_digits_le(&v, bits) + } else { + from_inexact_bitwise_digits_le(&v, bits) + } + } else { + from_radix_digits_be(&v, radix) + }; + Ok(res) + } +} + +fn high_bits_to_u64(v: &BigUint) -> u64 { + match v.data.len() { + 0 => 0, + 1 => { + // XXX Conversion is useless if already 64-bit. + #[allow(clippy::useless_conversion)] + let v0 = u64::from(v.data[0]); + v0 + } + _ => { + let mut bits = v.bits(); + let mut ret = 0u64; + let mut ret_bits = 0; + + for d in v.data.iter().rev() { + let digit_bits = (bits - 1) % u64::from(big_digit::BITS) + 1; + let bits_want = Ord::min(64 - ret_bits, digit_bits); + + if bits_want != 0 { + if bits_want != 64 { + ret <<= bits_want; + } + // XXX Conversion is useless if already 64-bit. + #[allow(clippy::useless_conversion)] + let d0 = u64::from(*d) >> (digit_bits - bits_want); + ret |= d0; + } + + // Implement round-to-odd: If any lower bits are 1, set LSB to 1 + // so that rounding again to floating point value using + // nearest-ties-to-even is correct. + // + // See: https://en.wikipedia.org/wiki/Rounding#Rounding_to_prepare_for_shorter_precision + + if digit_bits - bits_want != 0 { + // XXX Conversion is useless if already 64-bit. 
+ #[allow(clippy::useless_conversion)] + let masked = u64::from(*d) << (64 - (digit_bits - bits_want) as u32); + ret |= (masked != 0) as u64; + } + + ret_bits += bits_want; + bits -= bits_want; + } + + ret + } + } +} + +impl ToPrimitive for BigUint { + #[inline] + fn to_i64(&self) -> Option { + self.to_u64().as_ref().and_then(u64::to_i64) + } + + #[inline] + fn to_i128(&self) -> Option { + self.to_u128().as_ref().and_then(u128::to_i128) + } + + #[allow(clippy::useless_conversion)] + #[inline] + fn to_u64(&self) -> Option { + let mut ret: u64 = 0; + let mut bits = 0; + + for i in self.data.iter() { + if bits >= 64 { + return None; + } + + // XXX Conversion is useless if already 64-bit. + ret += u64::from(*i) << bits; + bits += big_digit::BITS; + } + + Some(ret) + } + + #[inline] + fn to_u128(&self) -> Option { + let mut ret: u128 = 0; + let mut bits = 0; + + for i in self.data.iter() { + if bits >= 128 { + return None; + } + + ret |= u128::from(*i) << bits; + bits += big_digit::BITS; + } + + Some(ret) + } + + #[inline] + fn to_f32(&self) -> Option { + let mantissa = high_bits_to_u64(self); + let exponent = self.bits() - u64::from(fls(mantissa)); + + if exponent > f32::MAX_EXP as u64 { + Some(f32::INFINITY) + } else { + Some((mantissa as f32) * 2.0f32.powi(exponent as i32)) + } + } + + #[inline] + fn to_f64(&self) -> Option { + let mantissa = high_bits_to_u64(self); + let exponent = self.bits() - u64::from(fls(mantissa)); + + if exponent > f64::MAX_EXP as u64 { + Some(f64::INFINITY) + } else { + Some((mantissa as f64) * 2.0f64.powi(exponent as i32)) + } + } +} + +macro_rules! 
impl_try_from_biguint { + ($T:ty, $to_ty:path) => { + impl TryFrom<&BigUint> for $T { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: &BigUint) -> Result<$T, TryFromBigIntError<()>> { + $to_ty(value).ok_or(TryFromBigIntError::new(())) + } + } + + impl TryFrom for $T { + type Error = TryFromBigIntError; + + #[inline] + fn try_from(value: BigUint) -> Result<$T, TryFromBigIntError> { + <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) + } + } + }; +} + +impl_try_from_biguint!(u8, ToPrimitive::to_u8); +impl_try_from_biguint!(u16, ToPrimitive::to_u16); +impl_try_from_biguint!(u32, ToPrimitive::to_u32); +impl_try_from_biguint!(u64, ToPrimitive::to_u64); +impl_try_from_biguint!(usize, ToPrimitive::to_usize); +impl_try_from_biguint!(u128, ToPrimitive::to_u128); + +impl_try_from_biguint!(i8, ToPrimitive::to_i8); +impl_try_from_biguint!(i16, ToPrimitive::to_i16); +impl_try_from_biguint!(i32, ToPrimitive::to_i32); +impl_try_from_biguint!(i64, ToPrimitive::to_i64); +impl_try_from_biguint!(isize, ToPrimitive::to_isize); +impl_try_from_biguint!(i128, ToPrimitive::to_i128); + +impl FromPrimitive for BigUint { + #[inline] + fn from_i64(n: i64) -> Option> { + if n >= 0 { + Some(BigUint::from(n as u64)) + } else { + None + } + } + + #[inline] + fn from_i128(n: i128) -> Option> { + if n >= 0 { + Some(BigUint::from(n as u128)) + } else { + None + } + } + + #[inline] + fn from_u64(n: u64) -> Option> { + Some(BigUint::from(n)) + } + + #[inline] + fn from_u128(n: u128) -> Option> { + Some(BigUint::from(n)) + } + + #[inline] + fn from_f64(mut n: f64) -> Option> { + // handle NAN, INFINITY, NEG_INFINITY + if !n.is_finite() { + return None; + } + + // match the rounding of casting from float to int + n = n.trunc(); + + // handle 0.x, -0.x + if n.is_zero() { + return Some(Self::zero()); + } + + let (mantissa, exponent, sign) = FloatCore::integer_decode(n); + + if sign == -1 { + return None; + } + + let mut ret = BigUint::from(mantissa); + match 
exponent.cmp(&0) { + Greater => ret <<= exponent as usize, + Equal => {} + Less => ret >>= (-exponent) as usize, + } + Some(ret) + } +} + +impl From for BigUint { + #[inline] + fn from(mut n: u64) -> Self { + let mut ret: BigUint = Self::zero(); + + while n != 0 { + ret.data.push(n as BigDigit); + // don't overflow if BITS is 64: + n = (n >> 1) >> (big_digit::BITS - 1); + } + + ret + } +} + +impl From for BigUint { + #[inline] + fn from(mut n: u128) -> Self { + let mut ret: BigUint = Self::zero(); + + while n != 0 { + ret.data.push(n as BigDigit); + n >>= big_digit::BITS; + } + + ret + } +} + +macro_rules! impl_biguint_from_uint { + ($T:ty) => { + impl From<$T> for BigUint { + #[inline] + fn from(n: $T) -> Self { + BigUint::from(n as u64) + } + } + }; +} + +impl_biguint_from_uint!(u8); +impl_biguint_from_uint!(u16); +impl_biguint_from_uint!(u32); +impl_biguint_from_uint!(usize); + +macro_rules! impl_biguint_try_from_int { + ($T:ty, $from_ty:path) => { + impl TryFrom<$T> for BigUint { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: $T) -> Result, TryFromBigIntError<()>> { + $from_ty(value).ok_or(TryFromBigIntError::new(())) + } + } + }; +} + +impl_biguint_try_from_int!(i8, FromPrimitive::from_i8); +impl_biguint_try_from_int!(i16, FromPrimitive::from_i16); +impl_biguint_try_from_int!(i32, FromPrimitive::from_i32); +impl_biguint_try_from_int!(i64, FromPrimitive::from_i64); +impl_biguint_try_from_int!(isize, FromPrimitive::from_isize); +impl_biguint_try_from_int!(i128, FromPrimitive::from_i128); + +impl ToBigUint for BigUint { + #[inline] + fn to_biguint(&self) -> Option { + Some(self.clone()) + } +} + +macro_rules! 
impl_to_biguint { + ($T:ty, $from_ty:path) => { + impl ToBigUint for $T { + #[inline] + fn to_biguint(&self) -> Option { + $from_ty(*self) + } + } + }; +} + +impl_to_biguint!(isize, FromPrimitive::from_isize); +impl_to_biguint!(i8, FromPrimitive::from_i8); +impl_to_biguint!(i16, FromPrimitive::from_i16); +impl_to_biguint!(i32, FromPrimitive::from_i32); +impl_to_biguint!(i64, FromPrimitive::from_i64); +impl_to_biguint!(i128, FromPrimitive::from_i128); + +impl_to_biguint!(usize, FromPrimitive::from_usize); +impl_to_biguint!(u8, FromPrimitive::from_u8); +impl_to_biguint!(u16, FromPrimitive::from_u16); +impl_to_biguint!(u32, FromPrimitive::from_u32); +impl_to_biguint!(u64, FromPrimitive::from_u64); +impl_to_biguint!(u128, FromPrimitive::from_u128); + +impl_to_biguint!(f32, FromPrimitive::from_f32); +impl_to_biguint!(f64, FromPrimitive::from_f64); + +impl From for BigUint { + fn from(x: bool) -> Self { + if x { + One::one() + } else { + Self::zero() + } + } +} + +// Extract bitwise digits that evenly divide BigDigit +pub(super) fn to_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { + debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0); + + let last_i = u.data.len() - 1; + let mask: BigDigit = (1 << bits) - 1; + let digits_per_big_digit = big_digit::BITS / bits; + let digits = Integer::div_ceil(&u.bits(), &u64::from(bits)) + .to_usize() + .unwrap_or(usize::MAX); + let mut res = Vec::with_capacity(digits); + + for mut r in u.data[..last_i].iter().cloned() { + for _ in 0..digits_per_big_digit { + res.push((r & mask) as u8); + r >>= bits; + } + } + + let mut r = u.data[last_i]; + while r != 0 { + res.push((r & mask) as u8); + r >>= bits; + } + + res +} + +// Extract bitwise digits that don't evenly divide BigDigit +fn to_inexact_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { + debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0); + + let mask: BigDigit = (1 << bits) - 1; + let digits = Integer::div_ceil(&u.bits(), &u64::from(bits)) 
+ .to_usize() + .unwrap_or(usize::MAX); + let mut res = Vec::with_capacity(digits); + + let mut r = 0; + let mut rbits = 0; + + for c in &u.data { + r |= *c << rbits; + rbits += big_digit::BITS; + + while rbits >= bits { + res.push((r & mask) as u8); + r >>= bits; + + // r had more bits than it could fit - grab the bits we lost + if rbits > big_digit::BITS { + r = *c >> (big_digit::BITS - (rbits - bits)); + } + + rbits -= bits; + } + } + + if rbits != 0 { + res.push(r as u8); + } + + while let Some(&0) = res.last() { + res.pop(); + } + + res +} + +// Extract little-endian radix digits +#[inline(always)] // forced inline to get const-prop for radix=10 +pub(super) fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec { + debug_assert!(!u.is_zero() && !radix.is_power_of_two()); + + #[cfg(feature = "std")] + let radix_digits = { + let radix_log2 = f64::from(radix).log2(); + ((u.bits() as f64) / radix_log2).ceil() + }; + #[cfg(not(feature = "std"))] + let radix_digits = { + let radix_log2 = ilog2(radix) as usize; + ((u.bits() as usize) / radix_log2) + 1 + }; + + // Estimate how big the result will be, so we can pre-allocate it. + let mut res = Vec::with_capacity(radix_digits.to_usize().unwrap_or(0)); + + let mut digits = u.clone(); + + // X86 DIV can quickly divide by a full digit, otherwise we choose a divisor + // that's suitable for `div_half` to avoid slow `DoubleBigDigit` division. + let (base, power) = if FAST_DIV_WIDE { + get_radix_base(radix) + } else { + get_half_radix_base(radix) + }; + let radix = radix as BigDigit; + + // For very large numbers, the O(n²) loop of repeated `div_rem_digit` dominates the + // performance. We can mitigate this by dividing into chunks of a larger base first. + // The threshold for this was chosen by anecdotal performance measurements to + // approximate where this starts to make a noticeable difference. 
+ if digits.data.len() >= 64 { + let mut big_base = BigUint::from(base); + let mut big_power = 1usize; + + // Choose a target base length near √n. + let target_len = digits.data.len().sqrt(); + while big_base.data.len() < target_len { + big_base = &big_base * &big_base; + big_power *= 2; + } + + // This outer loop will run approximately √n times. + while digits > big_base { + // This is still the dominating factor, with n digits divided by √n digits. + let (q, mut big_r) = digits.div_rem(&big_base); + digits = q; + + // This inner loop now has O(√n²)=O(n) behavior altogether. + for _ in 0..big_power { + let (q, mut r) = div_rem_digit(big_r, base); + big_r = q; + for _ in 0..power { + res.push((r % radix) as u8); + r /= radix; + } + } + } + } + + while digits.data.len() > 1 { + let (q, mut r) = div_rem_digit(digits, base); + for _ in 0..power { + res.push((r % radix) as u8); + r /= radix; + } + digits = q; + } + + let mut r = digits.data[0]; + while r != 0 { + res.push((r % radix) as u8); + r /= radix; + } + + res +} + +pub(super) fn to_radix_le(u: &BigUint, radix: u32) -> Vec { + if u.is_zero() { + vec![0] + } else if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of division + let bits = ilog2(radix); + if big_digit::BITS % bits == 0 { + to_bitwise_digits_le(u, bits) + } else { + to_inexact_bitwise_digits_le(u, bits) + } + } else if radix == 10 { + // 10 is so common that it's worth separating out for const-propagation. + // Optimizers can often turn constant division into a faster multiplication. + to_radix_digits_le(u, 10) + } else { + to_radix_digits_le(u, radix) + } +} + +pub(crate) fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec { + assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); + + if u.is_zero() { + return vec![b'0']; + } + + let mut res = to_radix_le(u, radix); + + // Now convert everything to ASCII digits. 
+ for r in &mut res { + debug_assert!(u32::from(*r) < radix); + if *r < 10 { + *r += b'0'; + } else { + *r += b'a' - 10; + } + } + res +} + +/// Returns the greatest power of the radix for the `BigDigit` bit size +#[inline] +fn get_radix_base(radix: u32) -> (BigDigit, usize) { + static BASES: [(BigDigit, usize); 257] = generate_radix_bases(big_digit::MAX); + debug_assert!(!radix.is_power_of_two()); + debug_assert!((3..256).contains(&radix)); + BASES[radix as usize] +} + +/// Returns the greatest power of the radix for half the `BigDigit` bit size +#[inline] +fn get_half_radix_base(radix: u32) -> (BigDigit, usize) { + static BASES: [(BigDigit, usize); 257] = generate_radix_bases(big_digit::HALF); + debug_assert!(!radix.is_power_of_two()); + debug_assert!((3..256).contains(&radix)); + BASES[radix as usize] +} + +/// Generate tables of the greatest power of each radix that is less that the given maximum. These +/// are returned from `get_radix_base` to batch the multiplication/division of radix conversions on +/// full `BigUint` values, operating on primitive integers as much as possible. +/// +/// e.g. BASES_16\[3\] = (59049, 10) // 3¹⁰ fits in u16, but 3¹¹ is too big +/// BASES_32\[3\] = (3486784401, 20) +/// BASES_64\[3\] = (12157665459056928801, 40) +/// +/// Powers of two are not included, just zeroed, as they're implemented with shifts. 
+const fn generate_radix_bases(max: BigDigit) -> [(BigDigit, usize); 257] { + let mut bases = [(0, 0); 257]; + + let mut radix: BigDigit = 3; + while radix < 256 { + if !radix.is_power_of_two() { + let mut power = 1; + let mut base = radix; + + while let Some(b) = base.checked_mul(radix) { + if b > max { + break; + } + base = b; + power += 1; + } + bases[radix as usize] = (base, power) + } + radix += 1; + } + + bases +} + +#[test] +fn test_radix_bases() { + for radix in 3u32..256 { + if !radix.is_power_of_two() { + let (base, power) = get_radix_base(radix); + let radix = BigDigit::from(radix); + let power = u32::try_from(power).unwrap(); + assert_eq!(base, radix.pow(power)); + assert!(radix.checked_pow(power + 1).is_none()); + } + } +} + +#[test] +fn test_half_radix_bases() { + for radix in 3u32..256 { + if !radix.is_power_of_two() { + let (base, power) = get_half_radix_base(radix); + let radix = BigDigit::from(radix); + let power = u32::try_from(power).unwrap(); + assert_eq!(base, radix.pow(power)); + assert!(radix.pow(power + 1) > big_digit::HALF); + } + } +} diff --git a/vendor/num-bigint-generic/src/biguint/division.rs b/vendor/num-bigint-generic/src/biguint/division.rs new file mode 100644 index 000000000..d76df2a45 --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/division.rs @@ -0,0 +1,718 @@ +use super::{addition::__add2, cmp_slice, BigUint}; + +use crate::{ + big_digit::{self, BigDigit, DoubleBigDigit}, + UsizePromotion, +}; + +use core::{ + cmp::Ordering::{Equal, Greater, Less}, + mem, + ops::{Div, DivAssign, Rem, RemAssign}, +}; +use num_integer::Integer; +use num_traits::{CheckedDiv, CheckedEuclid, Euclid, One, ToPrimitive, Zero}; + +pub(super) const FAST_DIV_WIDE: bool = cfg!(any(target_arch = "x86", target_arch = "x86_64")); + +/// Divide a two digit numerator by a one digit divisor, returns quotient and remainder: +/// +/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit. 
+/// This is _not_ true for an arbitrary numerator/denominator. +/// +/// (This function also matches what the x86 divide instruction does). +#[cfg(any(miri, not(any(target_arch = "x86", target_arch = "x86_64"))))] +#[inline] +fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { + debug_assert!(hi < divisor); + + let lhs = big_digit::to_doublebigdigit(hi, lo); + let rhs = DoubleBigDigit::from(divisor); + ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit) +} + +/// x86 and x86_64 can use a real `div` instruction. +#[cfg(all(not(miri), any(target_arch = "x86", target_arch = "x86_64")))] +#[inline] +fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { + // This debug assertion covers the potential #DE for divisor==0 or a quotient too large for one + // register, otherwise in release mode it will become a target-specific fault like SIGFPE. + // This should never occur with the inputs from our few `div_wide` callers. + debug_assert!(hi < divisor); + + // SAFETY: The `div` instruction only affects registers, reading the explicit operand as the + // divisor, and implicitly reading RDX:RAX or EDX:EAX as the dividend. The result is implicitly + // written back to RAX or EAX for the quotient and RDX or EDX for the remainder. No memory is + // used, and flags are not preserved. + unsafe { + let (div, rem); + + cfg_digit!( + macro_rules! div { + () => { + "div {0:e}" + }; + } + macro_rules! div { + () => { + "div {0:r}" + }; + } + ); + + core::arch::asm!( + div!(), + in(reg) divisor, + inout("dx") hi => rem, + inout("ax") lo => div, + options(pure, nomem, nostack), + ); + + (div, rem) + } +} + +/// For small divisors, we can divide without promoting to `DoubleBigDigit` by +/// using half-size pieces of digit, like long-division. 
+#[inline] +fn div_half(rem: BigDigit, digit: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { + use crate::big_digit::{HALF, HALF_BITS}; + + debug_assert!(rem < divisor && divisor <= HALF); + let (hi, rem) = ((rem << HALF_BITS) | (digit >> HALF_BITS)).div_rem(&divisor); + let (lo, rem) = ((rem << HALF_BITS) | (digit & HALF)).div_rem(&divisor); + ((hi << HALF_BITS) | lo, rem) +} + +#[inline] +pub(super) fn div_rem_digit( + mut a: BigUint, + b: BigDigit, +) -> (BigUint, BigDigit) { + if b == 0 { + panic!("attempt to divide by zero") + } + + let mut rem = 0; + + if !FAST_DIV_WIDE && b <= big_digit::HALF { + for d in a.data.iter_mut().rev() { + let (q, r) = div_half(rem, *d, b); + *d = q; + rem = r; + } + } else { + for d in a.data.iter_mut().rev() { + let (q, r) = div_wide(rem, *d, b); + *d = q; + rem = r; + } + } + + (a.normalized(), rem) +} + +#[inline] +fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit { + if b == 0 { + panic!("attempt to divide by zero") + } + + let mut rem = 0; + + if !FAST_DIV_WIDE && b <= big_digit::HALF { + for &digit in a.data.iter().rev() { + let (_, r) = div_half(rem, digit, b); + rem = r; + } + } else { + for &digit in a.data.iter().rev() { + let (_, r) = div_wide(rem, digit, b); + rem = r; + } + } + + rem +} + +/// Subtract a multiple. +/// a -= b * c +/// Returns a borrow (if a < b then borrow > 0). +fn sub_mul_digit_same_len(a: &mut [BigDigit], b: &[BigDigit], c: BigDigit) -> BigDigit { + debug_assert!(a.len() == b.len()); + + // carry is between -big_digit::MAX and 0, so to avoid overflow we store + // offset_carry = carry + big_digit::MAX + let mut offset_carry = big_digit::MAX; + + for (x, y) in a.iter_mut().zip(b) { + // We want to calculate sum = x - y * c + carry. + // sum >= -(big_digit::MAX * big_digit::MAX) - big_digit::MAX + // sum <= big_digit::MAX + // Offsetting sum by (big_digit::MAX << big_digit::BITS) puts it in DoubleBigDigit range. 
+ let offset_sum = big_digit::to_doublebigdigit(big_digit::MAX, *x) + - big_digit::MAX as DoubleBigDigit + + offset_carry as DoubleBigDigit + - *y as DoubleBigDigit * c as DoubleBigDigit; + + let (new_offset_carry, new_x) = big_digit::from_doublebigdigit(offset_sum); + offset_carry = new_offset_carry; + *x = new_x; + } + + // Return the borrow. + big_digit::MAX - offset_carry +} + +fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) { + if d.is_zero() { + panic!("attempt to divide by zero") + } + if u.is_zero() { + return (BigUint::zero(), BigUint::zero()); + } + + if d.data.len() == 1 { + if *d.data == [1] { + return (u, BigUint::zero()); + } + let (div, rem) = div_rem_digit(u, d.data[0]); + // reuse d + d.data.clear(); + d += rem; + return (div, d); + } + + // Required or the q_len calculation below can underflow: + match u.cmp(&d) { + Less => return (BigUint::zero(), u), + Equal => { + u.set_one(); + return (u, BigUint::zero()); + } + Greater => {} // Do nothing + } + + // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: + // + // First, normalize the arguments so the highest bit in the highest digit of the divisor is + // set: the main loop uses the highest digit of the divisor for generating guesses, so we + // want it to be the largest number we can efficiently divide by. 
+ // + let shift = d.data.last().unwrap().leading_zeros() as usize; + + if shift == 0 { + // no need to clone d + div_rem_core(u, &d.data) + } else { + let (q, r) = div_rem_core(u << shift, &(d << shift).data); + // renormalize the remainder + (q, r >> shift) + } +} + +pub(super) fn div_rem_ref( + u: &BigUint, + d: &BigUint, +) -> (BigUint, BigUint) { + if d.is_zero() { + panic!("attempt to divide by zero") + } + if u.is_zero() { + return (BigUint::zero(), BigUint::zero()); + } + + // if + assert!( + u.data.is_inline(), + "u.data.len={:?} u={:?}", + u.data.len(), + u + ); + + if d.data.len() == 1 { + if *d.data == [1] { + return (u.clone(), BigUint::zero()); + } + + let (div, rem) = div_rem_digit(u.clone(), d.data[0]); + return (div, rem.into()); + } + + // Required or the q_len calculation below can underflow: + match u.cmp(d) { + Less => return (BigUint::zero(), u.clone()), + Equal => return (One::one(), BigUint::zero()), + Greater => {} // Do nothing + } + + // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: + // + // First, normalize the arguments so the highest bit in the highest digit of the divisor is + // set: the main loop uses the highest digit of the divisor for generating guesses, so we + // want it to be the largest number we can efficiently divide by. + // + let shift = d.data.last().unwrap().leading_zeros() as usize; + + if shift == 0 { + // no need to clone d + div_rem_core(u.clone(), &d.data) + } else { + let (q, r) = div_rem_core(u << shift, &(d << shift).data); + // renormalize the remainder + (q, r >> shift) + } +} + +/// An implementation of the base division algorithm. +/// Knuth, TAOCP vol 2 section 4.3.1, algorithm D, with an improvement from exercises 19-21. 
+fn div_rem_core(mut a: BigUint, b: &[BigDigit]) -> (BigUint, BigUint) { + debug_assert!(a.data.len() >= b.len() && b.len() > 1); + debug_assert!(b.last().unwrap().leading_zeros() == 0); + + // The algorithm works by incrementally calculating "guesses", q0, for the next digit of the + // quotient. Once we have any number q0 such that (q0 << j) * b <= a, we can set + // + // q += q0 << j + // a -= (q0 << j) * b + // + // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder. + // + // q0, our guess, is calculated by dividing the last three digits of a by the last two digits of + // b - this will give us a guess that is close to the actual quotient, but is possibly greater. + // It can only be greater by 1 and only in rare cases, with probability at most + // 2^-(big_digit::BITS-1) for random a, see TAOCP 4.3.1 exercise 21. + // + // If the quotient turns out to be too large, we adjust it by 1: + // q -= 1 << j + // a += b << j + + // a0 stores an additional extra most significant digit of the dividend, not stored in a. + let mut a0 = 0; + + // [b1, b0] are the two most significant digits of the divisor. They never change. + let b0 = b[b.len() - 1]; + let b1 = b[b.len() - 2]; + + let q_len = a.data.len() - b.len() + 1; + let mut q = BigUint { + data: core::iter::repeat(0).take(q_len).collect(), + }; + + for j in (0..q_len).rev() { + debug_assert!(a.data.len() == b.len() + j); + + let a1 = *a.data.last().unwrap(); + let a2 = a.data[a.data.len() - 2]; + + // The first q0 estimate is [a1,a0] / b0. It will never be too small, it may be too large + // by at most 2. + let (mut q0, mut r) = if a0 < b0 { + let (q0, r) = div_wide(a0, a1, b0); + (q0, r as DoubleBigDigit) + } else { + debug_assert!(a0 == b0); + // Avoid overflowing q0, we know the quotient fits in BigDigit. + // [a1,a0] = b0 * (1< a0 { + // q0 is too large. We need to add back one multiple of b. 
+ q0 -= 1; + borrow -= __add2(&mut a.data[j..], b); + } + // The top digit of a, stored in a0, has now been zeroed. + debug_assert!(borrow == a0); + + q.data[j] = q0; + + // Pop off the next top digit of a. + a0 = a.data.pop().unwrap(); + } + + a.data.push(a0); + a.normalize(); + + debug_assert_eq!(cmp_slice(&a.data, b), Less); + + (q.normalized(), a) +} + +forward_val_ref_binop!(impl Div for BigUint, div); +forward_ref_val_binop!(impl Div for BigUint, div); +forward_val_assign!(impl DivAssign for BigUint, div_assign); + +impl Div> for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: BigUint) -> BigUint { + let (q, _) = div_rem(self, other); + q + } +} + +impl Div<&BigUint> for &BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: &BigUint) -> BigUint { + let (q, _) = self.div_rem(other); + q + } +} +impl DivAssign<&BigUint> for BigUint { + #[inline] + fn div_assign(&mut self, other: &BigUint) { + *self = &*self / other; + } +} + +promote_unsigned_scalars!(impl Div for BigUint, div); +promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign); +forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: u32) -> BigUint { + let (q, _) = div_rem_digit(self, other as BigDigit); + q + } +} +impl DivAssign for BigUint { + #[inline] + fn div_assign(&mut self, other: u32) { + *self = &*self / other; + } +} + +impl Div> for u32 { + type Output = BigUint; + + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self as BigDigit / other.data[0]), + _ => BigUint::zero(), + } + } +} + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: u64) -> BigUint { + let (q, _) = div_rem(self, 
From::from(other)); + q + } +} +impl DivAssign for BigUint { + #[inline] + fn div_assign(&mut self, other: u64) { + // a vec of size 0 does not allocate, so this is fairly cheap + let temp = mem::replace(self, Self::zero()); + *self = temp / other; + } +} + +impl Div> for u64 { + type Output = BigUint; + + cfg_digit!( + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / u64::from(other.data[0])), + 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), + _ => BigUint::zero(), + } + } + + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / other.data[0]), + _ => BigUint::zero(), + } + } + ); +} + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: u128) -> BigUint { + let (q, _) = div_rem(self, From::from(other)); + q + } +} + +impl DivAssign for BigUint { + #[inline] + fn div_assign(&mut self, other: u128) { + *self = &*self / other; + } +} + +impl Div> for u128 { + type Output = BigUint; + + cfg_digit!( + #[inline] + fn div(self, other: BigUint) -> BigUint { + use super::u32_to_u128; + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / u128::from(other.data[0])), + 2 => From::from( + self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])), + ), + 3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])), + 4 => From::from( + self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]), + ), + _ => BigUint::zero(), + } + } + + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / other.data[0] as u128), + 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), + _ => 
BigUint::zero(), + } + } + ); +} + +forward_val_ref_binop!(impl Rem for BigUint, rem); +forward_ref_val_binop!(impl Rem for BigUint, rem); +forward_val_assign!(impl RemAssign for BigUint, rem_assign); + +impl Rem> for BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: BigUint) -> BigUint { + if let Some(other) = other.to_u32() { + &self % other + } else { + let (_, r) = div_rem(self, other); + r + } + } +} + +impl Rem<&BigUint> for &BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: &BigUint) -> BigUint { + if let Some(other) = other.to_u32() { + self % other + } else { + let (_, r) = self.div_rem(other); + r + } + } +} +impl RemAssign<&BigUint> for BigUint { + #[inline] + fn rem_assign(&mut self, other: &BigUint) { + *self = &*self % other; + } +} + +promote_unsigned_scalars!(impl Rem for BigUint, rem); +promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign); +forward_all_scalar_binop_to_ref_val!(impl Rem for BigUint, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); + +impl Rem for &BigUint { + type Output = BigUint; + #[inline] + fn rem(self, other: u32) -> BigUint { + rem_digit(self, other as BigDigit).into() + } +} +impl RemAssign for BigUint { + #[inline] + fn rem_assign(&mut self, other: u32) { + *self = &*self % other; + } +} +impl Rem<&BigUint> for u32 { + type Output = BigUint; + #[inline] + fn rem(mut self, other: &BigUint) -> BigUint { + self %= other; + From::from(self) + } +} + +macro_rules! 
impl_rem_assign_scalar { + ($scalar:ty, $to_scalar:ident) => { + forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign); + impl RemAssign<&BigUint> for $scalar { + #[inline] + fn rem_assign(&mut self, other: &BigUint) { + *self = match other.$to_scalar() { + None => *self, + Some(0) => panic!("attempt to divide by zero"), + Some(v) => *self % v + }; + } + } + } +} + +// we can scalar %= BigUint for any scalar, including signed types +impl_rem_assign_scalar!(u128, to_u128); +impl_rem_assign_scalar!(usize, to_usize); +impl_rem_assign_scalar!(u64, to_u64); +impl_rem_assign_scalar!(u32, to_u32); +impl_rem_assign_scalar!(u16, to_u16); +impl_rem_assign_scalar!(u8, to_u8); +impl_rem_assign_scalar!(i128, to_i128); +impl_rem_assign_scalar!(isize, to_isize); +impl_rem_assign_scalar!(i64, to_i64); +impl_rem_assign_scalar!(i32, to_i32); +impl_rem_assign_scalar!(i16, to_i16); +impl_rem_assign_scalar!(i8, to_i8); + +impl Rem for BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: u64) -> BigUint { + let (_, r) = div_rem(self, From::from(other)); + r + } +} +impl RemAssign for BigUint { + #[inline] + fn rem_assign(&mut self, other: u64) { + *self = &*self % other; + } +} + +impl Rem> for u64 { + type Output = BigUint; + + #[inline] + fn rem(mut self, other: BigUint) -> BigUint { + self %= other; + From::from(self) + } +} + +impl Rem for BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: u128) -> BigUint { + let (_, r) = div_rem(self, From::from(other)); + r + } +} + +impl RemAssign for BigUint { + #[inline] + fn rem_assign(&mut self, other: u128) { + *self = &*self % other; + } +} + +impl Rem> for u128 { + type Output = BigUint; + + #[inline] + fn rem(mut self, other: BigUint) -> BigUint { + self %= other; + From::from(self) + } +} + +impl CheckedDiv for BigUint { + #[inline] + fn checked_div(&self, v: &BigUint) -> Option> { + if v.is_zero() { + return None; + } + Some(self.div(v)) + } +} + +impl CheckedEuclid for 
BigUint { + #[inline] + fn checked_div_euclid(&self, v: &BigUint) -> Option> { + if v.is_zero() { + return None; + } + Some(self.div_euclid(v)) + } + + #[inline] + fn checked_rem_euclid(&self, v: &BigUint) -> Option> { + if v.is_zero() { + return None; + } + Some(self.rem_euclid(v)) + } + + fn checked_div_rem_euclid(&self, v: &Self) -> Option<(Self, Self)> { + Some(self.div_rem_euclid(v)) + } +} + +impl Euclid for BigUint { + #[inline] + fn div_euclid(&self, v: &BigUint) -> BigUint { + // trivially same as regular division + self / v + } + + #[inline] + fn rem_euclid(&self, v: &BigUint) -> BigUint { + // trivially same as regular remainder + self % v + } + + fn div_rem_euclid(&self, v: &Self) -> (Self, Self) { + // trivially same as regular division and remainder + self.div_rem(v) + } +} diff --git a/vendor/num-bigint-generic/src/biguint/iter.rs b/vendor/num-bigint-generic/src/biguint/iter.rs new file mode 100644 index 000000000..e2f3aeb3e --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/iter.rs @@ -0,0 +1,361 @@ +use core::iter::FusedIterator; + +cfg_digit!( + /// An iterator of `u32` digits representation of a `BigUint` or `BigInt`, + /// ordered least significant digit first. + pub struct U32Digits<'a> { + it: core::slice::Iter<'a, u32>, + } + + /// An iterator of `u32` digits representation of a `BigUint` or `BigInt`, + /// ordered least significant digit first. 
+ pub struct U32Digits<'a> { + data: &'a [u64], + next_is_lo: bool, + last_hi_is_zero: bool, + } +); + +cfg_digit!( + const _: () = { + impl<'a> U32Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u32]) -> Self { + Self { it: data.iter() } + } + } + + impl Iterator for U32Digits<'_> { + type Item = u32; + #[inline] + fn next(&mut self) -> Option { + self.it.next().cloned() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.it.nth(n).cloned() + } + + #[inline] + fn last(self) -> Option { + self.it.last().cloned() + } + + #[inline] + fn count(self) -> usize { + self.it.count() + } + } + + impl DoubleEndedIterator for U32Digits<'_> { + fn next_back(&mut self) -> Option { + self.it.next_back().cloned() + } + } + + impl ExactSizeIterator for U32Digits<'_> { + #[inline] + fn len(&self) -> usize { + self.it.len() + } + } + }; + + const _: () = { + impl<'a> U32Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u64]) -> Self { + let last_hi_is_zero = data + .last() + .map(|&last| { + let last_hi = (last >> 32) as u32; + last_hi == 0 + }) + .unwrap_or(false); + U32Digits { + data, + next_is_lo: true, + last_hi_is_zero, + } + } + } + + impl Iterator for U32Digits<'_> { + type Item = u32; + #[inline] + fn next(&mut self) -> Option { + match self.data.split_first() { + Some((&first, data)) => { + let next_is_lo = self.next_is_lo; + self.next_is_lo = !next_is_lo; + if next_is_lo { + Some(first as u32) + } else { + self.data = data; + if data.is_empty() && self.last_hi_is_zero { + self.last_hi_is_zero = false; + None + } else { + Some((first >> 32) as u32) + } + } + } + None => None, + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } + + #[inline] + fn last(self) -> Option { + self.data.last().map(|&last| { + if self.last_hi_is_zero { + last as u32 + } else { + (last >> 32) as u32 + } + }) + } + + #[inline] + 
fn count(self) -> usize { + self.len() + } + } + + impl DoubleEndedIterator for U32Digits<'_> { + fn next_back(&mut self) -> Option { + match self.data.split_last() { + Some((&last, data)) => { + let last_is_lo = self.last_hi_is_zero; + self.last_hi_is_zero = !last_is_lo; + if last_is_lo { + self.data = data; + if data.is_empty() && !self.next_is_lo { + self.next_is_lo = true; + None + } else { + Some(last as u32) + } + } else { + Some((last >> 32) as u32) + } + } + None => None, + } + } + } + + impl ExactSizeIterator for U32Digits<'_> { + #[inline] + fn len(&self) -> usize { + self.data.len() * 2 + - usize::from(self.last_hi_is_zero) + - usize::from(!self.next_is_lo) + } + } + }; +); + +impl FusedIterator for U32Digits<'_> {} + +cfg_digit!( + /// An iterator of `u64` digits representation of a `BigUint` or `BigInt`, + /// ordered least significant digit first. + pub struct U64Digits<'a> { + it: core::slice::Chunks<'a, u32>, + } + + /// An iterator of `u64` digits representation of a `BigUint` or `BigInt`, + /// ordered least significant digit first. 
+ pub struct U64Digits<'a> { + it: core::slice::Iter<'a, u64>, + } +); + +cfg_digit!( + const _: () = { + impl<'a> U64Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u32]) -> Self { + U64Digits { it: data.chunks(2) } + } + } + + impl Iterator for U64Digits<'_> { + type Item = u64; + #[inline] + fn next(&mut self) -> Option { + self.it.next().map(super::u32_chunk_to_u64) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } + + #[inline] + fn last(self) -> Option { + self.it.last().map(super::u32_chunk_to_u64) + } + + #[inline] + fn count(self) -> usize { + self.len() + } + } + + impl DoubleEndedIterator for U64Digits<'_> { + fn next_back(&mut self) -> Option { + self.it.next_back().map(super::u32_chunk_to_u64) + } + } + }; + + const _: () = { + impl<'a> U64Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u64]) -> Self { + Self { it: data.iter() } + } + } + + impl Iterator for U64Digits<'_> { + type Item = u64; + #[inline] + fn next(&mut self) -> Option { + self.it.next().cloned() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.it.nth(n).cloned() + } + + #[inline] + fn last(self) -> Option { + self.it.last().cloned() + } + + #[inline] + fn count(self) -> usize { + self.it.count() + } + } + + impl DoubleEndedIterator for U64Digits<'_> { + fn next_back(&mut self) -> Option { + self.it.next_back().cloned() + } + } + }; +); + +impl ExactSizeIterator for U64Digits<'_> { + #[inline] + fn len(&self) -> usize { + self.it.len() + } +} + +impl FusedIterator for U64Digits<'_> {} + +#[test] +fn test_iter_u32_digits() { + let n: super::BigUint = super::BigUint::from(5u8); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n: super::BigUint = 
super::BigUint::from(112500000000u64); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next(), Some(830850304)); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(26)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} + +#[test] +fn test_iter_u64_digits() { + let n: super::BigUint = super::BigUint::from(5u8); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n: super::BigUint = super::BigUint::from(18_446_744_073_709_551_616u128); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next(), Some(0)); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} + +#[test] +fn test_iter_u32_digits_be() { + let n: super::BigUint = super::BigUint::from(5u8); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n: super::BigUint = super::BigUint::from(112500000000u64); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next(), Some(830850304)); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(26)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} + +#[test] +fn test_iter_u64_digits_be() { + let n: super::BigUint = super::BigUint::from(5u8); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next_back(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n: super::BigUint = super::BigUint::from(18_446_744_073_709_551_616u128); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next_back(), Some(1)); + assert_eq!(it.len(), 
1); + assert_eq!(it.next_back(), Some(0)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} diff --git a/vendor/num-bigint-generic/src/biguint/monty.rs b/vendor/num-bigint-generic/src/biguint/monty.rs new file mode 100644 index 000000000..0456e55d8 --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/monty.rs @@ -0,0 +1,237 @@ +use alloc::vec::Vec; +use core::{mem, ops::Shl}; +use num_traits::One; + +use crate::{ + big_digit::{self, BigDigit, DoubleBigDigit}, + biguint::BigUint, +}; + +struct MontyReducer { + n0inv: BigDigit, +} + +// k0 = -m**-1 mod 2**BITS. Algorithm from: Dumas, J.G. "On Newton–Raphson +// Iteration for Multiplicative Inverses Modulo Prime Powers". +fn inv_mod_alt(b: BigDigit) -> BigDigit { + assert_ne!(b & 1, 0); + + let mut k0 = BigDigit::wrapping_sub(2, b); + let mut t = b - 1; + let mut i = 1; + while i < big_digit::BITS { + t = t.wrapping_mul(t); + k0 = k0.wrapping_mul(t + 1); + + i <<= 1; + } + debug_assert_eq!(k0.wrapping_mul(b), 1); + k0.wrapping_neg() +} + +impl MontyReducer { + fn new(n: &BigUint) -> Self { + let n0inv = inv_mod_alt(n.data[0]); + MontyReducer { n0inv } + } +} + +/// Computes z mod m = x * y * 2 ** (-n*_W) mod m +/// assuming k = -1/m mod 2**_W +/// See Gueron, "Efficient Software Implementations of Modular Exponentiation". +/// +/// In the terminology of that paper, this is an "Almost Montgomery Multiplication": +/// x and y are required to satisfy 0 <= z < 2**(n*_W) and then the result +/// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m. +#[allow(clippy::many_single_char_names)] +fn montgomery( + x: &BigUint, + y: &BigUint, + m: &BigUint, + k: BigDigit, + n: usize, +) -> BigUint { + // This code assumes x, y, m are all the same length, n. + // (required by addMulVVW and the for loop). + // It also assumes that x, y are already reduced mod m, + // or else the result will not be properly reduced. 
+ assert!( + x.data.len() == n && y.data.len() == n && m.data.len() == n, + "{:?} {:?} {:?} {}", + x, + y, + m, + n + ); + + let mut z = BigUint::zero(); + z.data.resize(n * 2, 0); + + let mut c: BigDigit = 0; + for i in 0..n { + let c2 = add_mul_vvw(&mut z.data[i..n + i], &x.data, y.data[i]); + let t = z.data[i].wrapping_mul(k); + let c3 = add_mul_vvw(&mut z.data[i..n + i], &m.data, t); + let cx = c.wrapping_add(c2); + let cy = cx.wrapping_add(c3); + z.data[n + i] = cy; + if cx < c2 || cy < c3 { + c = 1; + } else { + c = 0; + } + } + + if c == 0 { + z.data = z.data[n..].iter().copied().collect(); + } else { + { + let (first, second) = z.data.split_at_mut(n); + sub_vv(first, second, &m.data); + } + z.data = z.data[..n].iter().copied().collect(); + } + + z +} + +#[inline(always)] +fn add_mul_vvw(z: &mut [BigDigit], x: &[BigDigit], y: BigDigit) -> BigDigit { + let mut c = 0; + for (zi, xi) in z.iter_mut().zip(x.iter()) { + let (z1, z0) = mul_add_www(*xi, y, *zi); + let (c_, zi_) = add_ww(z0, c, 0); + *zi = zi_; + c = c_ + z1; + } + + c +} + +/// The resulting carry c is either 0 or 1. 
+#[inline(always)] +fn sub_vv(z: &mut [BigDigit], x: &[BigDigit], y: &[BigDigit]) -> BigDigit { + let mut c = 0; + for (i, (xi, yi)) in x.iter().zip(y.iter()).enumerate().take(z.len()) { + let zi = xi.wrapping_sub(*yi).wrapping_sub(c); + z[i] = zi; + // see "Hacker's Delight", section 2-12 (overflow detection) + c = ((yi & !xi) | ((yi | !xi) & zi)) >> (big_digit::BITS - 1) + } + + c +} + +/// z1<<_W + z0 = x+y+c, with c == 0 or 1 +#[inline(always)] +fn add_ww(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) { + let yc = y.wrapping_add(c); + let z0 = x.wrapping_add(yc); + let z1 = if z0 < x || yc < y { 1 } else { 0 }; + + (z1, z0) +} + +/// z1 << _W + z0 = x * y + c +#[inline(always)] +fn mul_add_www(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) { + let z = x as DoubleBigDigit * y as DoubleBigDigit + c as DoubleBigDigit; + ((z >> big_digit::BITS) as BigDigit, z as BigDigit) +} + +/// Calculates x ** y mod m using a fixed, 4-bit window. +#[allow(clippy::many_single_char_names)] +pub(super) fn monty_modpow( + x: &BigUint, + y: &BigUint, + m: &BigUint, +) -> BigUint { + assert!(m.data[0] & 1 == 1); + let mr = MontyReducer::new(m); + let num_words = m.data.len(); + + let mut x = x.clone(); + + // We want the lengths of x and m to be equal. + // It is OK if x >= m as long as len(x) == len(m). + if x.data.len() > num_words { + x %= m; + // Note: now len(x) <= numWords, not guaranteed ==. 
+ } + if x.data.len() < num_words { + x.data.resize(num_words, 0); + } + + // rr = 2**(2*_W*len(m)) mod m + let mut rr = BigUint::one(); + rr = (rr.shl(2 * num_words as u64 * u64::from(big_digit::BITS))) % m; + if rr.data.len() < num_words { + rr.data.resize(num_words, 0); + } + // one = 1, with equal length to that of m + let mut one = BigUint::one(); + one.data.resize(num_words, 0); + + let n = 4; + // powers[i] contains x^i + let mut powers = Vec::with_capacity(1 << n); + powers.push(montgomery(&one, &rr, m, mr.n0inv, num_words)); + powers.push(montgomery(&x, &rr, m, mr.n0inv, num_words)); + for i in 2..1 << n { + let r = montgomery(&powers[i - 1], &powers[1], m, mr.n0inv, num_words); + powers.push(r); + } + + // initialize z = 1 (Montgomery 1) + let mut z = powers[0].clone(); + z.data.resize(num_words, 0); + let mut zz = BigUint::zero(); + zz.data.resize(num_words, 0); + + // same windowed exponent, but with Montgomery multiplications + for i in (0..y.data.len()).rev() { + let mut yi = y.data[i]; + let mut j = 0; + while j < big_digit::BITS { + if i != y.data.len() - 1 || j != 0 { + zz = montgomery(&z, &z, m, mr.n0inv, num_words); + z = montgomery(&zz, &zz, m, mr.n0inv, num_words); + zz = montgomery(&z, &z, m, mr.n0inv, num_words); + z = montgomery(&zz, &zz, m, mr.n0inv, num_words); + } + zz = montgomery( + &z, + &powers[(yi >> (big_digit::BITS - n)) as usize], + m, + mr.n0inv, + num_words, + ); + mem::swap(&mut z, &mut zz); + yi <<= n; + j += n; + } + } + + // convert to regular number + zz = montgomery(&z, &one, m, mr.n0inv, num_words); + + zz.normalize(); + // One last reduction, just in case. + // See golang.org/issue/13907. + if zz >= *m { + // Common case is m has high bit set; in that case, + // since zz is the same length as m, there can be just + // one multiple of m to remove. Just subtract. + // We think that the subtract should be sufficient in general, + // so do that unconditionally, but double-check, + // in case our beliefs are wrong. 
+ // The div is not expected to be reached. + zz -= m; + if zz >= *m { + zz %= m; + } + } + + zz.normalize(); + zz +} diff --git a/vendor/num-bigint-generic/src/biguint/multiplication.rs b/vendor/num-bigint-generic/src/biguint/multiplication.rs new file mode 100644 index 000000000..ed2f63952 --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/multiplication.rs @@ -0,0 +1,662 @@ +use super::{ + addition::{__add2, add2}, + biguint_from_tinyvec, cmp_slice, + subtraction::sub2, + BigUint, +}; + +use crate::{ + big_digit::{self, BigDigit, DoubleBigDigit}, + BigInt, + Sign::{self, Minus, NoSign, Plus}, + UsizePromotion, +}; + +use core::{ + cmp::Ordering, + iter::Product, + ops::{Mul, MulAssign}, +}; +use num_traits::{CheckedMul, FromPrimitive, One, Zero}; +use tinyvec::TinyVec; + +#[inline] +pub(super) fn mac_with_carry( + a: BigDigit, + b: BigDigit, + c: BigDigit, + acc: &mut DoubleBigDigit, +) -> BigDigit { + *acc += DoubleBigDigit::from(a); + *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c); + let lo = *acc as BigDigit; + *acc >>= big_digit::BITS; + lo +} + +#[inline] +fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { + *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b); + let lo = *acc as BigDigit; + *acc >>= big_digit::BITS; + lo +} + +/// Three argument multiply accumulate: +/// acc += b * c +fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) { + if c == 0 { + return; + } + + let mut carry = 0; + let (a_lo, a_hi) = acc.split_at_mut(b.len()); + + for (a, &b) in a_lo.iter_mut().zip(b) { + *a = mac_with_carry(*a, b, c, &mut carry); + } + + let (carry_hi, carry_lo) = big_digit::from_doublebigdigit(carry); + + let final_carry = if carry_hi == 0 { + __add2(a_hi, &[carry_lo]) + } else { + __add2(a_hi, &[carry_hi, carry_lo]) + }; + assert_eq!(final_carry, 0, "carry overflow during multiplication!"); +} + +fn bigint_from_slice(slice: &[BigDigit]) -> BigInt { + 
BigInt::from(biguint_from_tinyvec(slice.iter().copied().collect())) +} + +/// Three argument multiply accumulate: +/// acc += b * c +#[allow(clippy::many_single_char_names)] +fn mac3(mut acc: &mut [BigDigit], mut b: &[BigDigit], mut c: &[BigDigit]) { + // Least-significant zeros have no effect on the output. + if let Some(&0) = b.first() { + if let Some(nz) = b.iter().position(|&d| d != 0) { + b = &b[nz..]; + acc = &mut acc[nz..]; + } else { + return; + } + } + if let Some(&0) = c.first() { + if let Some(nz) = c.iter().position(|&d| d != 0) { + c = &c[nz..]; + acc = &mut acc[nz..]; + } else { + return; + } + } + + let acc = acc; + let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) }; + + // We use four algorithms for different input sizes. + // + // - For small inputs, long multiplication is fastest. + // - If y is at least least twice as long as x, split using Half-Karatsuba. + // - Next we use Karatsuba multiplication (Toom-2), which we have optimized + // to avoid unnecessary allocations for intermediate values. + // - For the largest inputs we use Toom-3, which better optimizes the + // number of operations, but uses more temporary allocations. + // + // The thresholds are somewhat arbitrary, chosen by evaluating the results + // of `cargo bench --bench bigint multiply`. + + if x.len() <= 32 { + // Long multiplication: + for (i, xi) in x.iter().enumerate() { + mac_digit(&mut acc[i..], y, *xi); + } + } else if x.len() * 2 <= y.len() { + // Karatsuba Multiplication for factors with significant length disparity. + // + // The Half-Karatsuba Multiplication Algorithm is a specialized case of + // the normal Karatsuba multiplication algorithm, designed for the scenario + // where y has at least twice as many base digits as x. + // + // In this case y (the longer input) is split into high2 and low2, + // at m2 (half the length of y) and x (the shorter input), + // is used directly without splitting. 
+ // + // The algorithm then proceeds as follows: + // + // 1. Compute the product z0 = x * low2. + // 2. Compute the product temp = x * high2. + // 3. Adjust the weight of temp by adding m2 (* NBASE ^ m2) + // 4. Add temp and z0 to obtain the final result. + // + // Proof: + // + // The algorithm can be derived from the original Karatsuba algorithm by + // simplifying the formula when the shorter factor x is not split into + // high and low parts, as shown below. + // + // Original Karatsuba formula: + // + // result = (z2 * NBASE ^ (m2 × 2)) + ((z1 - z2 - z0) * NBASE ^ m2) + z0 + // + // Substitutions: + // + // low1 = x + // high1 = 0 + // + // Applying substitutions: + // + // z0 = (low1 * low2) + // = (x * low2) + // + // z1 = ((low1 + high1) * (low2 + high2)) + // = ((x + 0) * (low2 + high2)) + // = (x * low2) + (x * high2) + // + // z2 = (high1 * high2) + // = (0 * high2) + // = 0 + // + // Simplified using the above substitutions: + // + // result = (z2 * NBASE ^ (m2 × 2)) + ((z1 - z2 - z0) * NBASE ^ m2) + z0 + // = (0 * NBASE ^ (m2 × 2)) + ((z1 - 0 - z0) * NBASE ^ m2) + z0 + // = ((z1 - z0) * NBASE ^ m2) + z0 + // = ((z1 - z0) * NBASE ^ m2) + z0 + // = (x * high2) * NBASE ^ m2 + z0 + let m2 = y.len() / 2; + let (low2, high2) = y.split_at(m2); + + // (x * high2) * NBASE ^ m2 + z0 + mac3::(acc, x, low2); + mac3::(&mut acc[m2..], x, high2); + } else if x.len() <= 256 { + // Karatsuba multiplication: + // + // The idea is that we break x and y up into two smaller numbers that each have about half + // as many digits, like so (note that multiplying by b is just a shift): + // + // x = x0 + x1 * b + // y = y0 + y1 * b + // + // With some algebra, we can compute x * y with three smaller products, where the inputs to + // each of the smaller products have only about half as many digits as x and y: + // + // x * y = (x0 + x1 * b) * (y0 + y1 * b) + // + // x * y = x0 * y0 + // + x0 * y1 * b + // + x1 * y0 * b + // + x1 * y1 * b^2 + // + // Let p0 = x0 * y0 and p2 = 
x1 * y1: + // + // x * y = p0 + // + (x0 * y1 + x1 * y0) * b + // + p2 * b^2 + // + // The real trick is that middle term: + // + // x0 * y1 + x1 * y0 + // + // = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2 + // + // = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2 + // + // Now we complete the square: + // + // = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2 + // + // = -((x1 - x0) * (y1 - y0)) + p0 + p2 + // + // Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula: + // + // x * y = p0 + // + (p0 + p2 - p1) * b + // + p2 * b^2 + // + // Where the three intermediate products are: + // + // p0 = x0 * y0 + // p1 = (x1 - x0) * (y1 - y0) + // p2 = x1 * y1 + // + // In doing the computation, we take great care to avoid unnecessary temporary variables + // (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a + // bit so we can use the same temporary variable for all the intermediate products: + // + // x * y = p2 * b^2 + p2 * b + // + p0 * b + p0 + // - p1 * b + // + // The other trick we use is instead of doing explicit shifts, we slice acc at the + // appropriate offset when doing the add. 
+ + // When x is smaller than y, it's significantly faster to pick b such that x is split in + // half, not y: + let b = x.len() / 2; + let (x0, x1) = x.split_at(b); + let (y0, y1) = y.split_at(b); + + // We reuse the same BigUint for all the intermediate multiplies and have to size p + // appropriately here: x1.len() >= x0.len and y1.len() >= y0.len(): + let len = x1.len() + y1.len() + 1; + let mut p = BigUint:: { + data: core::iter::repeat(0).take(len).collect(), + }; + + // p2 = x1 * y1 + mac3::(&mut p.data, x1, y1); + + // Not required, but the adds go faster if we drop any unneeded 0s from the end: + p.normalize(); + + add2(&mut acc[b..], &p.data); + add2(&mut acc[b * 2..], &p.data); + + // Zero out p before the next multiply: + p.data.truncate(0); + p.data.resize(len, 0); + + // p0 = x0 * y0 + mac3::(&mut p.data, x0, y0); + p.normalize(); + + add2(acc, &p.data); + add2(&mut acc[b..], &p.data); + + // p1 = (x1 - x0) * (y1 - y0) + // We do this one last, since it may be negative and acc can't ever be negative: + let (j0_sign, j0) = sub_sign::(x1, x0); + let (j1_sign, j1) = sub_sign::(y1, y0); + + match j0_sign * j1_sign { + Plus => { + p.data.truncate(0); + p.data.resize(len, 0); + + mac3::(&mut p.data, &j0.data, &j1.data); + p.normalize(); + + sub2(&mut acc[b..], &p.data); + } + Minus => { + mac3::(&mut acc[b..], &j0.data, &j1.data); + } + NoSign => (), + } + } else { + // Toom-3 multiplication: + // + // Toom-3 is like Karatsuba above, but dividing the inputs into three parts. + // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively. + // + // The general idea is to treat the large integers digits as + // polynomials of a certain degree and determine the coefficients/digits + // of the product of the two via interpolation of the polynomial product. 
+ let i = y.len() / 3 + 1; + + let x0_len = Ord::min(x.len(), i); + let x1_len = Ord::min(x.len() - x0_len, i); + + let y0_len = i; + let y1_len = Ord::min(y.len() - y0_len, i); + + // Break x and y into three parts, representating an order two polynomial. + // t is chosen to be the size of a digit so we can use faster shifts + // in place of multiplications. + // + // x(t) = x2*t^2 + x1*t + x0 + let x0 = bigint_from_slice(&x[..x0_len]); + let x1 = bigint_from_slice(&x[x0_len..x0_len + x1_len]); + let x2 = bigint_from_slice(&x[x0_len + x1_len..]); + + // y(t) = y2*t^2 + y1*t + y0 + let y0 = bigint_from_slice(&y[..y0_len]); + let y1 = bigint_from_slice(&y[y0_len..y0_len + y1_len]); + let y2 = bigint_from_slice(&y[y0_len + y1_len..]); + + // Let w(t) = x(t) * y(t) + // + // This gives us the following order-4 polynomial. + // + // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0 + // + // We need to find the coefficients w4, w3, w2, w1 and w0. Instead + // of simply multiplying the x and y in total, we can evaluate w + // at 5 points. An n-degree polynomial is uniquely identified by (n + 1) + // points. + // + // It is arbitrary as to what points we evaluate w at but we use the + // following. 
+ // + // w(t) at t = 0, 1, -1, -2 and inf + // + // The values for w(t) in terms of x(t)*y(t) at these points are: + // + // let a = w(0) = x0 * y0 + // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0) + // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0) + // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0) + // let e = w(inf) = x2 * y2 as t -> inf + + // x0 + x2, avoiding temporaries + let p = &x0 + &x2; + + // y0 + y2, avoiding temporaries + let q = &y0 + &y2; + + // x2 - x1 + x0, avoiding temporaries + let p2 = &p - &x1; + + // y2 - y1 + y0, avoiding temporaries + let q2 = &q - &y1; + + // w(0) + let r0 = &x0 * &y0; + + // w(inf) + let r4 = &x2 * &y2; + + // w(1) + let r1 = (p + x1) * (q + y1); + + // w(-1) + let r2 = &p2 * &q2; + + // w(-2) + let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0); + + // Evaluating these points gives us the following system of linear equations. + // + // 0 0 0 0 1 | a + // 1 1 1 1 1 | b + // 1 -1 1 -1 1 | c + // 16 -8 4 -2 1 | d + // 1 0 0 0 0 | e + // + // The solved equation (after gaussian elimination or similar) + // in terms of its coefficients: + // + // w0 = w(0) + // w1 = w(0)/2 + w(1)/3 - w(-1) + w(-2)/6 - 2*w(inf) + // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf) + // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(-2)/6 + 2*w(inf) + // w4 = w(inf) + // + // This particular sequence is given by Bodrato and is an interpolation + // of the above equations. + let mut comp3: BigInt = (r3 - &r1) / 3u32; + let mut comp1: BigInt = (r1 - &r2) >> 1; + let mut comp2: BigInt = r2 - &r0; + comp3 = ((&comp2 - comp3) >> 1) + (&r4 << 1); + comp2 += &comp1 - &r4; + comp1 -= &comp3; + + // Recomposition. The coefficients of the polynomial are now known. + // + // Evaluate at w(t) where t is our given base to get the result. 
+ // + // let bits = u64::from(big_digit::BITS) * i as u64; + // let result = r0 + // + (comp1 << bits) + // + (comp2 << (2 * bits)) + // + (comp3 << (3 * bits)) + // + (r4 << (4 * bits)); + // let result_pos = result.to_biguint().unwrap(); + // add2(&mut acc[..], &result_pos.data); + // + // But with less intermediate copying: + for (j, result) in [&r0, &comp1, &comp2, &comp3, &r4].iter().enumerate().rev() { + match result.sign() { + Plus => add2(&mut acc[i * j..], result.digits()), + Minus => sub2(&mut acc[i * j..], result.digits()), + NoSign => {} + } + } + } +} + +fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint { + let len = x.len() + y.len() + 1; + let mut prod = BigUint { + data: core::iter::repeat(0).take(len).collect(), + }; + + mac3::(&mut prod.data, x, y); + prod.normalized() +} + +fn scalar_mul(a: &mut BigUint, b: BigDigit) { + match b { + 0 => a.set_zero(), + 1 => {} + _ => { + if b.is_power_of_two() { + *a <<= b.trailing_zeros(); + } else { + let mut carry = 0; + for a in a.data.iter_mut() { + *a = mul_with_carry(*a, b, &mut carry); + } + if carry != 0 { + a.data.push(carry as BigDigit); + } + } + } + } +} + +fn sub_sign(mut a: &[BigDigit], mut b: &[BigDigit]) -> (Sign, BigUint) { + // Normalize: + if let Some(&0) = a.last() { + a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; + } + if let Some(&0) = b.last() { + b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; + } + + match cmp_slice(a, b) { + Ordering::Greater => { + let mut a: TinyVec<_> = a.iter().copied().collect(); + sub2(&mut a, b); + (Plus, biguint_from_tinyvec(a)) + } + Ordering::Less => { + let mut b: TinyVec<_> = b.iter().copied().collect(); + sub2(&mut b, a); + (Minus, biguint_from_tinyvec(b)) + } + Ordering::Equal => (NoSign, BigUint::zero()), + } +} + +impl Mul> for BigUint { + type Output = BigUint; + #[inline] + fn mul(self, other: BigUint) -> BigUint { + match (&*self.data, &*other.data) { + (&[], _) | (_, &[]) => BigUint::zero(), + (_, &[digit]) => 
self * digit, + (&[digit], _) => other * digit, + (x, y) => mul3(x, y), + } + } +} +impl Mul> for &BigUint { + type Output = BigUint; + #[inline] + fn mul(self, other: BigUint) -> BigUint { + match (&*self.data, &*other.data) { + (&[], _) | (_, &[]) => BigUint::zero(), + (_, &[digit]) => self * digit, + (&[digit], _) => other * digit, + (x, y) => mul3(x, y), + } + } +} +impl Mul<&BigUint> for BigUint { + type Output = BigUint; + #[inline] + fn mul(self, other: &BigUint) -> BigUint { + match (&*self.data, &*other.data) { + (&[], _) | (_, &[]) => BigUint::zero(), + (_, &[digit]) => self * digit, + (&[digit], _) => other * digit, + (x, y) => mul3(x, y), + } + } +} +impl Mul<&BigUint> for &BigUint { + type Output = BigUint; + #[inline] + fn mul(self, other: &BigUint) -> BigUint { + match (&*self.data, &*other.data) { + (&[], _) | (_, &[]) => BigUint::zero(), + (_, &[digit]) => self * digit, + (&[digit], _) => other * digit, + (x, y) => mul3(x, y), + } + } +} + +impl MulAssign> for BigUint { + #[inline] + fn mul_assign(&mut self, other: BigUint) { + match (&*self.data, &*other.data) { + (&[], _) => {} + (_, &[]) => self.set_zero(), + (_, &[digit]) => *self *= digit, + (&[digit], _) => *self = other * digit, + (x, y) => *self = mul3(x, y), + } + } +} +impl MulAssign<&BigUint> for BigUint { + #[inline] + fn mul_assign(&mut self, other: &BigUint) { + match (&*self.data, &*other.data) { + (&[], _) => {} + (_, &[]) => self.set_zero(), + (_, &[digit]) => *self *= digit, + (&[digit], _) => *self = other * digit, + (x, y) => *self = mul3(x, y), + } + } +} + +promote_unsigned_scalars!(impl Mul for BigUint, mul); +promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); + +impl Mul for BigUint { + type Output = BigUint; + + #[inline] 
+ fn mul(mut self, other: u32) -> BigUint { + self *= other; + self + } +} +impl MulAssign for BigUint { + #[inline] + fn mul_assign(&mut self, other: u32) { + scalar_mul(self, other as BigDigit); + } +} + +impl Mul for BigUint { + type Output = BigUint; + + #[inline] + fn mul(mut self, other: u64) -> BigUint { + self *= other; + self + } +} +impl MulAssign for BigUint { + cfg_digit!( + #[inline] + fn mul_assign(&mut self, other: u64) { + if let Some(other) = BigDigit::from_u64(other) { + scalar_mul(self, other); + } else { + let (hi, lo) = big_digit::from_doublebigdigit(other); + *self = mul3(&self.data, &[lo, hi]); + } + } + + #[inline] + fn mul_assign(&mut self, other: u64) { + scalar_mul(self, other); + } + ); +} + +impl Mul for BigUint { + type Output = BigUint; + + #[inline] + fn mul(mut self, other: u128) -> BigUint { + self *= other; + self + } +} + +impl MulAssign for BigUint { + cfg_digit!( + #[inline] + fn mul_assign(&mut self, other: u128) { + if let Some(other) = BigDigit::from_u128(other) { + scalar_mul(self, other); + } else { + *self = match super::u32_from_u128(other) { + (0, 0, c, d) => mul3(&self.data, &[d, c]), + (0, b, c, d) => mul3(&self.data, &[d, c, b]), + (a, b, c, d) => mul3(&self.data, &[d, c, b, a]), + }; + } + } + + #[inline] + fn mul_assign(&mut self, other: u128) { + if let Some(other) = BigDigit::from_u128(other) { + scalar_mul(self, other); + } else { + let (hi, lo) = big_digit::from_doublebigdigit(other); + *self = mul3(&self.data, &[lo, hi]); + } + } + ); +} + +impl CheckedMul for BigUint { + #[inline] + fn checked_mul(&self, v: &BigUint) -> Option> { + Some(self.mul(v)) + } +} + +impl_product_iter_type!(BigUint); + +#[test] +fn test_sub_sign() { + use crate::BigInt; + use num_traits::Num; + + fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt { + let (sign, val) = sub_sign(a, b); + BigInt::from_biguint(sign, val) + } + + let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap(); + let b = 
BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap(); + let a_i = BigInt::from(a.clone()); + let b_i = BigInt::from(b.clone()); + + assert_eq!(sub_sign_i(&a.data, &b.data), &a_i - &b_i); + assert_eq!(sub_sign_i(&b.data, &a.data), &b_i - &a_i); +} diff --git a/vendor/num-bigint-generic/src/biguint/power.rs b/vendor/num-bigint-generic/src/biguint/power.rs new file mode 100644 index 000000000..676543980 --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/power.rs @@ -0,0 +1,265 @@ +use super::{monty::monty_modpow, BigUint}; + +use crate::big_digit::{self, BigDigit}; + +use num_integer::Integer; +use num_traits::{One, Pow, ToPrimitive, Zero}; + +impl Pow<&BigUint> for BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &BigUint) -> BigUint { + if self.is_one() || exp.is_zero() { + BigUint::one() + } else if self.is_zero() { + Self::zero() + } else if let Some(exp) = exp.to_u64() { + self.pow(exp) + } else if let Some(exp) = exp.to_u128() { + self.pow(exp) + } else { + // At this point, `self >= 2` and `exp >= 2¹²⁸`. The smallest possible result given + // `2.pow(2¹²⁸)` would require far more memory than 64-bit targets can address! + panic!("memory overflow") + } + } +} + +impl Pow> for BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: BigUint) -> BigUint { + Pow::pow(self, &exp) + } +} + +impl Pow<&BigUint> for &BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &BigUint) -> BigUint { + if self.is_one() || exp.is_zero() { + BigUint::one() + } else if self.is_zero() { + BigUint::zero() + } else { + self.clone().pow(exp) + } + } +} + +impl Pow> for &BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: BigUint) -> BigUint { + Pow::pow(self, &exp) + } +} + +macro_rules! 
pow_impl { + ($T:ty) => { + impl Pow<$T> for BigUint { + type Output = BigUint; + + fn pow(self, mut exp: $T) -> BigUint { + if exp == 0 { + return BigUint::one(); + } + let mut base = self; + + while exp & 1 == 0 { + base = &base * &base; + exp >>= 1; + } + + if exp == 1 { + return base; + } + + let mut acc = base.clone(); + while exp > 1 { + exp >>= 1; + base = &base * &base; + if exp & 1 == 1 { + acc *= &base; + } + } + acc + } + } + + impl Pow<&$T> for BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &$T) -> BigUint { + Pow::pow(self, *exp) + } + } + + impl Pow<$T> for &BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: $T) -> BigUint { + if exp == 0 { + return BigUint::one(); + } + Pow::pow(self.clone(), exp) + } + } + + impl Pow<&$T> for &BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &$T) -> BigUint { + Pow::pow(self, *exp) + } + } + }; +} + +pow_impl!(u8); +pow_impl!(u16); +pow_impl!(u32); +pow_impl!(u64); +pow_impl!(usize); +pow_impl!(u128); + +pub(super) fn modpow( + x: &BigUint, + exponent: &BigUint, + modulus: &BigUint, +) -> BigUint { + assert!( + !modulus.is_zero(), + "attempt to calculate with zero modulus!" + ); + + if modulus.is_odd() { + // For an odd modulus, we can use Montgomery multiplication in base 2^32. + monty_modpow(x, exponent, modulus) + } else { + // Otherwise do basically the same as `num::pow`, but with a modulus. + plain_modpow(x, &exponent.data, modulus) + } +} + +fn plain_modpow( + base: &BigUint, + exp_data: &[BigDigit], + modulus: &BigUint, +) -> BigUint { + assert!( + !modulus.is_zero(), + "attempt to calculate with zero modulus!" 
+ ); + + let i = match exp_data.iter().position(|&r| r != 0) { + None => return BigUint::one(), + Some(i) => i, + }; + + let mut base = base % modulus; + for _ in 0..i { + for _ in 0..big_digit::BITS { + base = &base * &base % modulus; + } + } + + let mut r = exp_data[i]; + let mut b = 0u8; + while r.is_even() { + base = &base * &base % modulus; + r >>= 1; + b += 1; + } + + let mut exp_iter = exp_data[i + 1..].iter(); + if exp_iter.len() == 0 && r.is_one() { + return base; + } + + let mut acc = base.clone(); + r >>= 1; + b += 1; + + { + let mut unit = |exp_is_odd| { + base = &base * &base % modulus; + if exp_is_odd { + acc *= &base; + acc %= modulus; + } + }; + + if let Some(&last) = exp_iter.next_back() { + // consume exp_data[i] + for _ in b..big_digit::BITS { + unit(r.is_odd()); + r >>= 1; + } + + // consume all other digits before the last + for &r in exp_iter { + let mut r = r; + for _ in 0..big_digit::BITS { + unit(r.is_odd()); + r >>= 1; + } + } + r = last; + } + + debug_assert_ne!(r, 0); + while !r.is_zero() { + unit(r.is_odd()); + r >>= 1; + } + } + acc +} + +#[test] +fn test_plain_modpow() { + let two = &BigUint::<32>::from(2u32); + let modulus = BigUint::from(0x1100u32); + + let exp = vec![0, 0b1]; + assert_eq!( + two.pow(0b1_00000000_u32) % &modulus, + plain_modpow(two, &exp, &modulus) + ); + let exp = vec![0, 0b10]; + assert_eq!( + two.pow(0b10_00000000_u32) % &modulus, + plain_modpow(two, &exp, &modulus) + ); + let exp = vec![0, 0b110010]; + assert_eq!( + two.pow(0b110010_00000000_u32) % &modulus, + plain_modpow(two, &exp, &modulus) + ); + let exp = vec![0b1, 0b1]; + assert_eq!( + two.pow(0b1_00000001_u32) % &modulus, + plain_modpow(two, &exp, &modulus) + ); + let exp = vec![0b1100, 0, 0b1]; + assert_eq!( + two.pow(0b1_00000000_00001100_u32) % &modulus, + plain_modpow(two, &exp, &modulus) + ); +} + +#[test] +fn test_pow_biguint() { + let base = BigUint::<32>::from(5u8); + let exponent = BigUint::from(3u8); + + assert_eq!(BigUint::from(125u8), 
base.pow(exponent)); +} diff --git a/vendor/num-bigint-generic/src/biguint/serde.rs b/vendor/num-bigint-generic/src/biguint/serde.rs new file mode 100644 index 000000000..1ea61cb70 --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/serde.rs @@ -0,0 +1,123 @@ +#![cfg(feature = "serde")] +#![cfg_attr(docsrs, doc(cfg(feature = "serde")))] + +use super::{biguint_from_tinyvec, BigUint}; + +use alloc::vec::Vec; +use core::{cmp, fmt, mem}; +use serde::{ + de::{SeqAccess, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; + +// `cautious` is based on the function of the same name in `serde`, but specialized to `u32`: +// https://github.com/dtolnay/serde/blob/399ef081ecc36d2f165ff1f6debdcbf6a1dc7efb/serde/src/private/size_hint.rs#L11-L22 +fn cautious(hint: Option) -> usize { + const MAX_PREALLOC_BYTES: usize = 1024 * 1024; + + cmp::min( + hint.unwrap_or(0), + MAX_PREALLOC_BYTES / mem::size_of::(), + ) +} + +impl Serialize for BigUint { + cfg_digit!( + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Note: do not change the serialization format, or it may break forward + // and backward compatibility of serialized data! If we ever change the + // internal representation, we should still serialize in base-`u32`. 
+ let data: &[u32] = &self.data; + data.serialize(serializer) + } + + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use serde::ser::SerializeSeq; + + if let Some((&last, data)) = self.data.split_last() { + let last_lo = last as u32; + let last_hi = (last >> 32) as u32; + let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize; + let mut seq = serializer.serialize_seq(Some(u32_len))?; + for &x in data { + seq.serialize_element(&(x as u32))?; + seq.serialize_element(&((x >> 32) as u32))?; + } + seq.serialize_element(&last_lo)?; + if last_hi != 0 { + seq.serialize_element(&last_hi)?; + } + seq.end() + } else { + let data: &[u32] = &[]; + data.serialize(serializer) + } + } + ); +} + +impl<'de> Deserialize<'de> for BigUint { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_seq(U32Visitor) + } +} + +struct U32Visitor; + +impl<'de> Visitor<'de> for U32Visitor { + type Value = BigUint; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a sequence of unsigned 32-bit numbers") + } + + cfg_digit!( + fn visit_seq(self, mut seq: S) -> Result + where + S: SeqAccess<'de>, + { + let len = cautious(seq.size_hint()); + let mut data = tinyvec::TinyVec::new(); + + while let Some(value) = seq.next_element::()? { + data.push(value); + } + + Ok(biguint_from_tinyvec(data)) + } + + fn visit_seq(self, mut seq: S) -> Result + where + S: SeqAccess<'de>, + { + use crate::big_digit::BigDigit; + use num_integer::Integer; + + let u32_len = cautious(seq.size_hint()); + let len = Integer::div_ceil(&u32_len, &2); + let mut data = tinyvec::TinyVec::new(); + + while let Some(lo) = seq.next_element::()? { + let mut value = BigDigit::from(lo); + if let Some(hi) = seq.next_element::()? 
{ + value |= BigDigit::from(hi) << 32; + data.push(value); + } else { + data.push(value); + break; + } + } + + Ok(biguint_from_tinyvec(data)) + } + ); +} diff --git a/vendor/num-bigint-generic/src/biguint/shift.rs b/vendor/num-bigint-generic/src/biguint/shift.rs new file mode 100644 index 000000000..7184bedaf --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/shift.rs @@ -0,0 +1,174 @@ +use super::{biguint_from_tinyvec, BigUint}; + +use crate::big_digit; + +use alloc::borrow::Cow; +use core::{ + mem, + ops::{Shl, ShlAssign, Shr, ShrAssign}, +}; +use num_traits::{PrimInt, Zero}; +use tinyvec::TinyVec; + +#[inline] +fn biguint_shl(n: Cow<'_, BigUint>, shift: T) -> BigUint { + if shift < T::zero() { + panic!("attempt to shift left with negative"); + } + if n.is_zero() { + return n.into_owned(); + } + let bits = T::from(big_digit::BITS).unwrap(); + let digits = (shift / bits).to_usize().expect("capacity overflow"); + let shift = (shift % bits).to_u8().unwrap(); + biguint_shl2(n, digits, shift) +} + +fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint { + let mut data = match digits { + 0 => n.into_owned().data, + _ => { + let len = digits.saturating_add(n.data.len() + 1); + let mut data = TinyVec::with_capacity(len); + data.resize(digits, 0); + data.extend(n.data.iter().copied()); + data + } + }; + + if shift > 0 { + let mut carry = 0; + let carry_shift = big_digit::BITS - shift; + for elem in data[digits..].iter_mut() { + let new_carry = *elem >> carry_shift; + *elem = (*elem << shift) | carry; + carry = new_carry; + } + if carry != 0 { + data.push(carry); + } + } + + biguint_from_tinyvec(data) +} + +#[inline] +fn biguint_shr(n: &mut BigUint, shift: T) { + if shift < T::zero() { + panic!("attempt to shift right with negative"); + } + if n.is_zero() { + return; + } + let bits = T::from(big_digit::BITS).unwrap(); + let digits = (shift / bits).to_usize().unwrap_or(usize::MAX); + let shift = (shift % bits).to_u8().unwrap(); + biguint_shr2(n, 
digits, shift) +} + +fn biguint_shr2(n: &mut BigUint, digits: usize, shift: u8) { + if digits >= n.data.len() { + n.set_zero(); + return; + } + + let data = &mut n.data[digits..]; + + if shift > 0 { + let mut borrow = 0; + let borrow_shift = big_digit::BITS - shift; + for elem in data.iter_mut().rev() { + let new_borrow = *elem << borrow_shift; + *elem = (*elem >> shift) | borrow; + borrow = new_borrow; + } + } + + let len = data.len(); + n.data.copy_within(digits.., 0); + n.data.truncate(len); + n.normalize(); +} + +macro_rules! impl_shift { + (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => { + impl $Shx<&$rhs> for BigUint { + type Output = BigUint; + + #[inline] + fn $shx(self, rhs: &$rhs) -> BigUint { + $Shx::$shx(self, *rhs) + } + } + impl $Shx<&$rhs> for &BigUint { + type Output = BigUint; + + #[inline] + fn $shx(self, rhs: &$rhs) -> BigUint { + $Shx::$shx(self, *rhs) + } + } + impl $ShxAssign<&$rhs> for BigUint { + #[inline] + fn $shx_assign(&mut self, rhs: &$rhs) { + $ShxAssign::$shx_assign(self, *rhs); + } + } + }; + ($($rhs:ty),+) => {$( + impl Shl<$rhs> for BigUint { + type Output = BigUint; + + #[inline] + fn shl(self, rhs: $rhs) -> BigUint { + biguint_shl(Cow::Owned(self), rhs) + } + } + impl Shl<$rhs> for &BigUint { + type Output = BigUint; + + #[inline] + fn shl(self, rhs: $rhs) -> BigUint { + biguint_shl(Cow::Borrowed(self), rhs) + } + } + impl ShlAssign<$rhs> for BigUint { + #[inline] + fn shl_assign(&mut self, rhs: $rhs) { + let n = mem::replace(self, Self::zero()); + *self = n << rhs; + } + } + impl_shift! 
{ @ref Shl::shl, ShlAssign::shl_assign, $rhs } + + impl Shr<$rhs> for BigUint { + type Output = BigUint; + + #[inline] + fn shr(mut self, rhs: $rhs) -> BigUint { + biguint_shr(&mut self, rhs); + self + } + } + impl Shr<$rhs> for &BigUint { + type Output = BigUint; + + #[inline] + fn shr(self, rhs: $rhs) -> BigUint { + let mut this = self.clone(); + biguint_shr(&mut this, rhs); + this + } + } + impl ShrAssign<$rhs> for BigUint { + #[inline] + fn shr_assign(&mut self, rhs: $rhs) { + biguint_shr(self, rhs); + } + } + impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs } + )*}; +} + +impl_shift! { u8, u16, u32, u64, u128, usize } +impl_shift! { i8, i16, i32, i64, i128, isize } diff --git a/vendor/num-bigint-generic/src/biguint/subtraction.rs b/vendor/num-bigint-generic/src/biguint/subtraction.rs new file mode 100644 index 000000000..db7ab033f --- /dev/null +++ b/vendor/num-bigint-generic/src/biguint/subtraction.rs @@ -0,0 +1,316 @@ +use super::BigUint; + +use crate::{ + big_digit::{self, BigDigit}, + UsizePromotion, +}; + +use core::{ + cmp::Ordering::{Equal, Greater, Less}, + ops::{Sub, SubAssign}, +}; +use num_traits::CheckedSub; + +#[cfg(target_arch = "x86_64")] +use core::arch::x86_64 as arch; + +#[cfg(target_arch = "x86")] +use core::arch::x86 as arch; + +// Subtract with borrow: +#[cfg(target_arch = "x86_64")] +cfg_64!( + #[inline] + fn sbb(borrow: u8, a: u64, b: u64, out: &mut u64) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_subborrow_u64`. + // It's just unsafe for API consistency with other intrinsics. + unsafe { arch::_subborrow_u64(borrow, a, b, out) } + } +); + +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +cfg_32!( + #[inline] + fn sbb(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_subborrow_u32`. + // It's just unsafe for API consistency with other intrinsics. 
+ unsafe { arch::_subborrow_u32(borrow, a, b, out) } + } +); + +// fallback for environments where we don't have a subborrow intrinsic +// (copied from the standard library's `borrowing_sub`) +#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +#[inline] +fn sbb(borrow: u8, lhs: BigDigit, rhs: BigDigit, out: &mut BigDigit) -> u8 { + let (a, b) = lhs.overflowing_sub(rhs); + let (c, d) = a.overflowing_sub(borrow as BigDigit); + *out = c; + u8::from(b || d) +} + +pub(super) fn sub2(a: &mut [BigDigit], b: &[BigDigit]) { + let mut borrow = 0; + + let len = Ord::min(a.len(), b.len()); + let (a_lo, a_hi) = a.split_at_mut(len); + let (b_lo, b_hi) = b.split_at(len); + + for (a, b) in a_lo.iter_mut().zip(b_lo) { + borrow = sbb(borrow, *a, *b, a); + } + + if borrow != 0 { + for a in a_hi { + borrow = sbb(borrow, *a, 0, a); + if borrow == 0 { + break; + } + } + } + + // note: we're _required_ to fail on underflow + assert!( + borrow == 0 && b_hi.iter().all(|x| *x == 0), + "Cannot subtract b from a because b is larger than a." + ); +} + +// Only for the Sub impl. `a` and `b` must have same length. +#[inline] +fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> u8 { + debug_assert!(b.len() == a.len()); + + let mut borrow = 0; + + for (ai, bi) in a.iter().zip(b) { + borrow = sbb(borrow, *ai, *bi, bi); + } + + borrow +} + +fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) { + debug_assert!(b.len() >= a.len()); + + let len = Ord::min(a.len(), b.len()); + let (a_lo, a_hi) = a.split_at(len); + let (b_lo, b_hi) = b.split_at_mut(len); + + let borrow = __sub2rev(a_lo, b_lo); + + assert!(a_hi.is_empty()); + + // note: we're _required_ to fail on underflow + assert!( + borrow == 0 && b_hi.iter().all(|x| *x == 0), + "Cannot subtract b from a because b is larger than a." 
+ ); +} + +forward_val_val_binop!(impl Sub for BigUint, sub); +forward_ref_ref_binop!(impl Sub for BigUint, sub); +forward_val_assign!(impl SubAssign for BigUint, sub_assign); + +impl Sub<&BigUint> for BigUint { + type Output = BigUint; + + fn sub(mut self, other: &BigUint) -> BigUint { + self -= other; + self + } +} +impl SubAssign<&BigUint> for BigUint { + fn sub_assign(&mut self, other: &BigUint) { + sub2(&mut self.data[..], &other.data[..]); + self.normalize(); + } +} + +impl Sub> for &BigUint { + type Output = BigUint; + + fn sub(self, mut other: BigUint) -> BigUint { + let other_len = other.data.len(); + if other_len < self.data.len() { + let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data); + other.data.extend_from_slice(&self.data[other_len..]); + if lo_borrow != 0 { + sub2(&mut other.data[other_len..], &[1]) + } + } else { + sub2rev(&self.data[..], &mut other.data[..]); + } + other.normalized() + } +} + +promote_unsigned_scalars!(impl Sub for BigUint, sub); +promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign); +forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); + +impl Sub for BigUint { + type Output = BigUint; + + #[inline] + fn sub(mut self, other: u32) -> BigUint { + self -= other; + self + } +} + +impl SubAssign for BigUint { + fn sub_assign(&mut self, other: u32) { + sub2(&mut self.data[..], &[other as BigDigit]); + self.normalize(); + } +} + +impl Sub> for u32 { + type Output = BigUint; + + cfg_digit!( + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + if other.data.len() == 0 { + other.data.push(self); + } else { + sub2rev(&[self], &mut other.data[..]); + } + other.normalized() + } + + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + if other.data.is_empty() { + other.data.push(self as BigDigit); + } else { + sub2rev(&[self as BigDigit], &mut 
other.data[..]); + } + other.normalized() + } + ); +} + +impl Sub for BigUint { + type Output = BigUint; + + #[inline] + fn sub(mut self, other: u64) -> BigUint { + self -= other; + self + } +} + +impl SubAssign for BigUint { + cfg_digit!( + #[inline] + fn sub_assign(&mut self, other: u64) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + sub2(&mut self.data[..], &[lo, hi]); + self.normalize(); + } + + #[inline] + fn sub_assign(&mut self, other: u64) { + sub2(&mut self.data[..], &[other as BigDigit]); + self.normalize(); + } + ); +} + +impl Sub> for u64 { + type Output = BigUint; + + cfg_digit!( + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + while other.data.len() < 2 { + other.data.push(0); + } + + let (hi, lo) = big_digit::from_doublebigdigit(self); + sub2rev(&[lo, hi], &mut other.data[..]); + other.normalized() + } + + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + if other.data.is_empty() { + other.data.push(self); + } else { + sub2rev(&[self], &mut other.data[..]); + } + other.normalized() + } + ); +} + +impl Sub for BigUint { + type Output = BigUint; + + #[inline] + fn sub(mut self, other: u128) -> BigUint { + self -= other; + self + } +} + +impl SubAssign for BigUint { + cfg_digit!( + #[inline] + fn sub_assign(&mut self, other: u128) { + let (a, b, c, d) = super::u32_from_u128(other); + sub2(&mut self.data[..], &[d, c, b, a]); + self.normalize(); + } + + #[inline] + fn sub_assign(&mut self, other: u128) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + sub2(&mut self.data[..], &[lo, hi]); + self.normalize(); + } + ); +} + +impl Sub> for u128 { + type Output = BigUint; + + cfg_digit!( + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + while other.data.len() < 4 { + other.data.push(0); + } + + let (a, b, c, d) = super::u32_from_u128(self); + sub2rev(&[d, c, b, a], &mut other.data[..]); + other.normalized() + } + + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + while other.data.len() < 2 { 
+ other.data.push(0); + } + + let (hi, lo) = big_digit::from_doublebigdigit(self); + sub2rev(&[lo, hi], &mut other.data[..]); + other.normalized() + } + ); +} + +impl CheckedSub for BigUint { + #[inline] + fn checked_sub(&self, v: &BigUint) -> Option> { + match self.cmp(v) { + Less => None, + Equal => Some(Self::zero()), + Greater => Some(self.sub(v)), + } + } +} diff --git a/vendor/num-bigint-generic/src/lib.rs b/vendor/num-bigint-generic/src/lib.rs new file mode 100644 index 000000000..f74663cea --- /dev/null +++ b/vendor/num-bigint-generic/src/lib.rs @@ -0,0 +1,263 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Big Integer Types for Rust +//! +//! * A [`BigUint`] is unsigned and represented as a vector of digits. +//! * A [`BigInt`] is signed and is a combination of [`BigUint`] and [`Sign`]. +//! +//! Common numerical operations are overloaded, so we can treat them +//! the same way we treat other numbers. +//! +//! ## Example +//! +//! ```rust +//! # fn main() { +//! use num_bigint::BigUint; +//! use num_traits::One; +//! +//! // Calculate large fibonacci numbers. +//! fn fib(n: usize) -> BigUint { +//! let mut f0 = BigUint::ZERO; +//! let mut f1 = BigUint::one(); +//! for _ in 0..n { +//! let f2 = f0 + &f1; +//! f0 = f1; +//! f1 = f2; +//! } +//! f0 +//! } +//! +//! // This is a very large number. +//! println!("fib(1000) = {}", fib(1000)); +//! # } +//! ``` +//! +//! It's easy to generate large random numbers: +//! +//! ```rust,ignore +//! use num_bigint::{ToBigInt, RandBigInt}; +//! +//! let mut rng = rand::thread_rng(); +//! let a = rng.gen_bigint(1000); +//! +//! let low = -10000.to_bigint().unwrap(); +//! 
let high = 10000.to_bigint().unwrap(); +//! let b = rng.gen_bigint_range(&low, &high); +//! +//! // Probably an even larger number. +//! println!("{}", a * b); +//! ``` +//! +//! See the "Features" section for instructions for enabling random number generation. +//! +//! ## Features +//! +//! The `std` crate feature is enabled by default, which enables [`std::error::Error`] +//! implementations and some internal use of floating point approximations. This can be disabled by +//! depending on `num-bigint` with `default-features = false`. Either way, the `alloc` crate is +//! always required for heap allocation of the `BigInt`/`BigUint` digits. +//! +//! ### Random Generation +//! +//! `num-bigint` supports the generation of random big integers when the `rand` +//! feature is enabled. To enable it include rand as +//! +//! ```toml +//! rand = "0.8" +//! num-bigint = { version = "0.4", features = ["rand"] } +//! ``` +//! +//! Note that you must use the version of `rand` that `num-bigint` is compatible +//! with: `0.8`. +//! +//! ### Arbitrary Big Integers +//! +//! `num-bigint` supports `arbitrary` and `quickcheck` features to implement +//! `Arbitrary` traits from the `arbitrary` and `quickcheck` crates, respectively, for both `BigInt` and +//! `BigUint`. These are useful for fuzzing and other forms of randomized testing. +//! +//! ### Serialization +//! +//! The `serde` feature adds implementations of `Serialize` and +//! `Deserialize` from the `serde` crate for both `BigInt` and `BigUint`. Their serialized data is +//! generated portably, regardless of platform differences like the internal digit size. +//! +//! +//! ## Compatibility +//! +//! The `num-bigint` crate is tested for rustc 1.60 and greater. 
+ +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc(html_root_url = "https://docs.rs/num-bigint/0.4")] +#![warn(rust_2018_idioms)] +// #![no_std] + +#[macro_use] +extern crate alloc; + +// #[cfg(feature = "std")] +// use std; + +use core::fmt; + +#[macro_use] +mod macros; + +mod bigint; +mod bigrand; +mod biguint; + +// #[cfg(target_pointer_width = "32")] +// type UsizePromotion = u32; +// #[cfg(target_pointer_width = "64")] +type UsizePromotion = u64; + +// #[cfg(target_pointer_width = "32")] +// type IsizePromotion = i32; +// #[cfg(target_pointer_width = "64")] +type IsizePromotion = i64; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ParseBigIntError { + kind: BigIntErrorKind, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum BigIntErrorKind { + Empty, + InvalidDigit, +} + +impl ParseBigIntError { + fn __description(&self) -> &str { + use crate::BigIntErrorKind::*; + match self.kind { + Empty => "cannot parse integer from empty string", + InvalidDigit => "invalid digit found in string", + } + } + + fn empty() -> Self { + ParseBigIntError { + kind: BigIntErrorKind::Empty, + } + } + + fn invalid() -> Self { + ParseBigIntError { + kind: BigIntErrorKind::InvalidDigit, + } + } +} + +impl fmt::Display for ParseBigIntError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.__description().fmt(f) + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl std::error::Error for ParseBigIntError { + fn description(&self) -> &str { + self.__description() + } +} + +/// The error type returned when a checked conversion regarding big integer fails. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct TryFromBigIntError { + original: T, +} + +impl TryFromBigIntError { + fn new(original: T) -> Self { + TryFromBigIntError { original } + } + + fn __description(&self) -> &str { + "out of range conversion regarding big integer attempted" + } + + /// Extract the original value, if available. 
The value will be available + /// if the type before conversion was either [`BigInt`] or [`BigUint`]. + pub fn into_original(self) -> T { + self.original + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl std::error::Error for TryFromBigIntError +where + T: fmt::Debug, +{ + fn description(&self) -> &str { + self.__description() + } +} + +impl fmt::Display for TryFromBigIntError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.__description().fmt(f) + } +} + +pub use crate::biguint::{BigUint, ToBigUint, U32Digits, U64Digits, NLIMBS}; + +pub use crate::bigint::{BigInt, Sign, ToBigInt}; + +#[cfg(feature = "rand")] +#[cfg_attr(docsrs, doc(cfg(feature = "rand")))] +pub use crate::bigrand::{RandBigInt, RandomBits, UniformBigInt, UniformBigUint}; + +mod big_digit { + // A [`BigDigit`] is a [`BigUint`]'s composing element. + cfg_digit!( + pub(crate) type BigDigit = u32; + pub(crate) type BigDigit = u64; + ); + + // A [`DoubleBigDigit`] is the internal type used to do the computations. Its + // size is the double of the size of [`BigDigit`]. + cfg_digit!( + pub(crate) type DoubleBigDigit = u64; + pub(crate) type DoubleBigDigit = u128; + ); + + pub(crate) const BITS: u8 = BigDigit::BITS as u8; + pub(crate) const HALF_BITS: u8 = BITS / 2; + pub(crate) const HALF: BigDigit = (1 << HALF_BITS) - 1; + + pub(crate) const MAX: BigDigit = BigDigit::MAX; + const LO_MASK: DoubleBigDigit = MAX as DoubleBigDigit; + + #[inline] + fn get_hi(n: DoubleBigDigit) -> BigDigit { + (n >> BITS) as BigDigit + } + #[inline] + fn get_lo(n: DoubleBigDigit) -> BigDigit { + (n & LO_MASK) as BigDigit + } + + /// Split one [`DoubleBigDigit`] into two [`BigDigit`]s. + #[inline] + pub(crate) fn from_doublebigdigit(n: DoubleBigDigit) -> (BigDigit, BigDigit) { + (get_hi(n), get_lo(n)) + } + + /// Join two [`BigDigit`]s into one [`DoubleBigDigit`]. 
+ #[inline] + pub(crate) fn to_doublebigdigit(hi: BigDigit, lo: BigDigit) -> DoubleBigDigit { + DoubleBigDigit::from(lo) | (DoubleBigDigit::from(hi) << BITS) + } +} diff --git a/vendor/num-bigint-generic/src/macros.rs b/vendor/num-bigint-generic/src/macros.rs new file mode 100644 index 000000000..fba4133b2 --- /dev/null +++ b/vendor/num-bigint-generic/src/macros.rs @@ -0,0 +1,474 @@ +#![allow(unused_macros)] + +macro_rules! cfg_32 { + ($($any:tt)+) => { + #[cfg(not(target_pointer_width = "64"))] $($any)+ + } +} + +macro_rules! cfg_32_or_test { + ($($any:tt)+) => { + #[cfg(any(not(target_pointer_width = "64"), test))] $($any)+ + } +} + +macro_rules! cfg_64 { + ($($any:tt)+) => { + $($any)+ + // #[cfg(target_pointer_width = "64")] $($any)+ + } +} + +macro_rules! cfg_digit { + ($item32:item $item64:item) => { + // cfg_32!($item32); + cfg_64!($item64); + }; +} + +macro_rules! cfg_digit_expr { + ($expr32:expr, $expr64:expr) => { + // cfg_32!($expr32); + cfg_64!($expr64); + }; +} + +macro_rules! forward_val_val_binop { + (impl $imp:ident for $res:ty, $method:ident) => { + impl $imp<$res> for $res { + type Output = $res; + + #[inline] + fn $method(self, other: $res) -> $res { + // forward to val-ref + $imp::$method(self, &other) + } + } + }; +} + +macro_rules! forward_val_val_binop_commutative { + (impl $imp:ident for $res:ty, $method:ident) => { + impl $imp<$res> for $res { + type Output = $res; + + #[inline] + fn $method(self, other: $res) -> $res { + // forward to val-ref, with the larger capacity as val + if self.capacity() >= other.capacity() { + $imp::$method(self, &other) + } else { + $imp::$method(other, &self) + } + } + } + }; +} + +macro_rules! forward_ref_val_binop { + (impl $imp:ident for $res:ty, $method:ident) => { + impl $imp<$res> for &$res { + type Output = $res; + + #[inline] + fn $method(self, other: $res) -> $res { + // forward to ref-ref + $imp::$method(self, &other) + } + } + }; +} + +macro_rules! 
forward_ref_val_binop_commutative { + (impl $imp:ident for $res:ty, $method:ident) => { + impl $imp<$res> for &$res { + type Output = $res; + + #[inline] + fn $method(self, other: $res) -> $res { + // reverse, forward to val-ref + $imp::$method(other, self) + } + } + }; +} + +macro_rules! forward_val_ref_binop { + (impl $imp:ident for $res:ty, $method:ident) => { + impl $imp<&$res> for $res { + type Output = $res; + + #[inline] + fn $method(self, other: &$res) -> $res { + // forward to ref-ref + $imp::$method(&self, other) + } + } + }; +} + +macro_rules! forward_ref_ref_binop { + (impl $imp:ident for $res:ty, $method:ident) => { + impl $imp<&$res> for &$res { + type Output = $res; + + #[inline] + fn $method(self, other: &$res) -> $res { + // forward to val-ref + $imp::$method(self.clone(), other) + } + } + }; +} + +macro_rules! forward_ref_ref_binop_commutative { + (impl $imp:ident for $res:ty, $method:ident) => { + impl $imp<&$res> for &$res { + type Output = $res; + + #[inline] + fn $method(self, other: &$res) -> $res { + // forward to val-ref, choosing the larger to clone + if self.len() >= other.len() { + $imp::$method(self.clone(), other) + } else { + $imp::$method(other.clone(), self) + } + } + } + }; +} + +macro_rules! forward_val_assign { + (impl $imp:ident for $res:ty, $method:ident) => { + impl $imp<$res> for $res { + #[inline] + fn $method(&mut self, other: $res) { + self.$method(&other); + } + } + }; +} + +macro_rules! forward_val_assign_scalar { + (impl $imp:ident for $res:ty, $scalar:ty, $method:ident) => { + impl $imp<$res> for $scalar { + #[inline] + fn $method(&mut self, other: $res) { + self.$method(&other); + } + } + }; +} + +/// use this if val_val_binop is already implemented and the reversed order is required +macro_rules! 
forward_scalar_val_val_binop_commutative { + (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => { + impl $imp<$res> for $scalar { + type Output = $res; + + #[inline] + fn $method(self, other: $res) -> $res { + $imp::$method(other, self) + } + } + }; +} + +// Forward scalar to ref-val, when reusing storage is not helpful +macro_rules! forward_scalar_val_val_binop_to_ref_val { + (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => { + impl $imp<$scalar> for $res { + type Output = $res; + + #[inline] + fn $method(self, other: $scalar) -> $res { + $imp::$method(&self, other) + } + } + + impl $imp<$res> for $scalar { + type Output = $res; + + #[inline] + fn $method(self, other: $res) -> $res { + $imp::$method(self, &other) + } + } + }; +} + +macro_rules! forward_scalar_ref_ref_binop_to_ref_val { + (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => { + impl $imp<&$scalar> for &$res { + type Output = $res; + + #[inline] + fn $method(self, other: &$scalar) -> $res { + $imp::$method(self, *other) + } + } + + impl $imp<&$res> for &$scalar { + type Output = $res; + + #[inline] + fn $method(self, other: &$res) -> $res { + $imp::$method(*self, other) + } + } + }; +} + +macro_rules! forward_scalar_val_ref_binop_to_ref_val { + (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => { + impl $imp<&$scalar> for $res { + type Output = $res; + + #[inline] + fn $method(self, other: &$scalar) -> $res { + $imp::$method(&self, *other) + } + } + + impl $imp<$res> for &$scalar { + type Output = $res; + + #[inline] + fn $method(self, other: $res) -> $res { + $imp::$method(*self, &other) + } + } + }; +} + +macro_rules! 
forward_scalar_val_ref_binop_to_val_val { + (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => { + impl $imp<&$scalar> for $res { + type Output = $res; + + #[inline] + fn $method(self, other: &$scalar) -> $res { + $imp::$method(self, *other) + } + } + + impl $imp<$res> for &$scalar { + type Output = $res; + + #[inline] + fn $method(self, other: $res) -> $res { + $imp::$method(*self, other) + } + } + }; +} + +macro_rules! forward_scalar_ref_val_binop_to_val_val { + (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => { + impl $imp<$scalar> for &$res { + type Output = $res; + + #[inline] + fn $method(self, other: $scalar) -> $res { + $imp::$method(self.clone(), other) + } + } + + impl $imp<&$res> for $scalar { + type Output = $res; + + #[inline] + fn $method(self, other: &$res) -> $res { + $imp::$method(self, other.clone()) + } + } + }; +} + +macro_rules! forward_scalar_ref_ref_binop_to_val_val { + (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => { + impl $imp<&$scalar> for &$res { + type Output = $res; + + #[inline] + fn $method(self, other: &$scalar) -> $res { + $imp::$method(self.clone(), *other) + } + } + + impl $imp<&$res> for &$scalar { + type Output = $res; + + #[inline] + fn $method(self, other: &$res) -> $res { + $imp::$method(*self, other.clone()) + } + } + }; +} + +macro_rules! promote_scalars { + (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => { + $( + forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method); + + impl $imp<$scalar> for $res { + type Output = $res; + + #[allow(clippy::cast_lossless)] + #[inline] + fn $method(self, other: $scalar) -> $res { + $imp::$method(self, other as $promo) + } + } + + impl $imp<$res> for $scalar { + type Output = $res; + + #[allow(clippy::cast_lossless)] + #[inline] + fn $method(self, other: $res) -> $res { + $imp::$method(self as $promo, other) + } + } + )* + } +} +macro_rules! 
promote_scalars_assign { + (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => { + $( + impl $imp<$scalar> for $res { + #[allow(clippy::cast_lossless)] + #[inline] + fn $method(&mut self, other: $scalar) { + self.$method(other as $promo); + } + } + )* + } +} + +macro_rules! promote_unsigned_scalars { + (impl $imp:ident for $res:ty, $method:ident) => { + promote_scalars!(impl $imp for $res, $method, u8, u16); + promote_scalars!(impl $imp for $res, $method, usize); + } +} + +macro_rules! promote_unsigned_scalars_assign { + (impl $imp:ident for $res:ty, $method:ident) => { + promote_scalars_assign!(impl $imp for $res, $method, u8, u16); + promote_scalars_assign!(impl $imp for $res, $method, usize); + } +} + +macro_rules! promote_signed_scalars { + (impl $imp:ident for $res:ty, $method:ident) => { + promote_scalars!(impl $imp for $res, $method, i8, i16); + promote_scalars!(impl $imp for $res, $method, isize); + } +} + +macro_rules! promote_signed_scalars_assign { + (impl $imp:ident for $res:ty, $method:ident) => { + promote_scalars_assign!(impl $imp for $res, $method, i8, i16); + promote_scalars_assign!(impl $imp for $res, $method, isize); + } +} + +// Forward everything to ref-ref, when reusing storage is not helpful +macro_rules! forward_all_binop_to_ref_ref { + (impl $imp:ident for $res:ty, $method:ident) => { + forward_val_val_binop!(impl $imp for $res, $method); + forward_val_ref_binop!(impl $imp for $res, $method); + forward_ref_val_binop!(impl $imp for $res, $method); + }; +} + +// Forward everything to val-ref, so LHS storage can be reused +macro_rules! forward_all_binop_to_val_ref { + (impl $imp:ident for $res:ty, $method:ident) => { + forward_val_val_binop!(impl $imp for $res, $method); + forward_ref_val_binop!(impl $imp for $res, $method); + forward_ref_ref_binop!(impl $imp for $res, $method); + }; +} + +// Forward everything to val-ref, commutatively, so either LHS or RHS storage can be reused +macro_rules! 
forward_all_binop_to_val_ref_commutative { + (impl $imp:ident for $res:ty, $method:ident) => { + forward_val_val_binop_commutative!(impl $imp for $res, $method); + forward_ref_val_binop_commutative!(impl $imp for $res, $method); + forward_ref_ref_binop_commutative!(impl $imp for $res, $method); + }; +} + +macro_rules! forward_all_scalar_binop_to_ref_val { + (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => { + forward_scalar_val_val_binop_to_ref_val!(impl $imp<$scalar> for $res, $method); + forward_scalar_val_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method); + forward_scalar_ref_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method); + } +} + +macro_rules! forward_all_scalar_binop_to_val_val { + (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => { + forward_scalar_val_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method); + forward_scalar_ref_val_binop_to_val_val!(impl $imp<$scalar> for $res, $method); + forward_scalar_ref_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method); + } +} + +macro_rules! forward_all_scalar_binop_to_val_val_commutative { + (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => { + forward_scalar_val_val_binop_commutative!(impl $imp<$scalar> for $res, $method); + forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method); + } +} + +macro_rules! promote_all_scalars { + (impl $imp:ident for $res:ty, $method:ident) => { + promote_unsigned_scalars!(impl $imp for $res, $method); + promote_signed_scalars!(impl $imp for $res, $method); + } +} + +macro_rules! promote_all_scalars_assign { + (impl $imp:ident for $res:ty, $method:ident) => { + promote_unsigned_scalars_assign!(impl $imp for $res, $method); + promote_signed_scalars_assign!(impl $imp for $res, $method); + } +} + +macro_rules! 
impl_sum_iter_type { + ($res:ty) => { + impl Sum for $res + where + $res: Add, + { + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::zero(), <$res>::add) + } + } + }; +} + +macro_rules! impl_product_iter_type { + ($res:ty) => { + impl Product for $res + where + $res: Mul, + { + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(One::one(), <$res>::mul) + } + } + }; +} diff --git a/vendor/num-bigint-generic/tests/bigint.rs b/vendor/num-bigint-generic/tests/bigint.rs new file mode 100644 index 000000000..5a4bb338a --- /dev/null +++ b/vendor/num-bigint-generic/tests/bigint.rs @@ -0,0 +1,1478 @@ +type BigInt = num_bigint_generic::BigInt; +type BigUint = num_bigint_generic::BigUint; +use num_bigint_generic::{ + Sign::{Minus, NoSign, Plus}, + ToBigInt, +}; + +use std::{ + cmp::Ordering::{Equal, Greater, Less}, + collections::hash_map::RandomState, + f32, f64, + hash::{BuildHasher, Hash, Hasher}, + iter::repeat, + ops::Neg, +}; + +use num_integer::Integer; +use num_traits::{ + pow, Euclid, FromBytes, FromPrimitive, Num, One, Pow, Signed, ToBytes, ToPrimitive, Zero, +}; + +mod consts; +use crate::consts::*; + +#[macro_use] +mod macros; + +#[test] +fn test_from_bytes_be() { + fn check(s: &str, result: &str) { + assert_eq!( + BigInt::from_bytes_be(Plus, s.as_bytes()), + BigInt::parse_bytes(result.as_bytes(), 10).unwrap() + ); + } + check("A", "65"); + check("AA", "16705"); + check("AB", "16706"); + check("Hello world!", "22405534230753963835153736737"); + assert_eq!(BigInt::from_bytes_be(Plus, &[]), BigInt::zero()); + assert_eq!(BigInt::from_bytes_be(Minus, &[]), BigInt::zero()); +} + +#[test] +fn test_to_bytes_be() { + fn check(s: &str, result: &str) { + let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap(); + let (sign, v) = b.to_bytes_be(); + assert_eq!((Plus, s.as_bytes()), (sign, &*v)); + } + check("A", "65"); + check("AA", "16705"); + check("AB", "16706"); + check("Hello world!", "22405534230753963835153736737"); + let b: 
BigInt = Zero::zero(); + assert_eq!(b.to_bytes_be(), (NoSign, vec![0])); + + // Test with leading/trailing zero bytes and a full BigDigit of value 0 + let b = BigInt::from_str_radix("00010000000000000200", 16).unwrap(); + assert_eq!(b.to_bytes_be(), (Plus, vec![1, 0, 0, 0, 0, 0, 0, 2, 0])); +} + +#[test] +fn test_from_bytes_le() { + fn check(s: &str, result: &str) { + assert_eq!( + BigInt::from_bytes_le(Plus, s.as_bytes()), + BigInt::parse_bytes(result.as_bytes(), 10).unwrap() + ); + } + check("A", "65"); + check("AA", "16705"); + check("BA", "16706"); + check("!dlrow olleH", "22405534230753963835153736737"); + assert_eq!(BigInt::from_bytes_le(Plus, &[]), BigInt::zero()); + assert_eq!(BigInt::from_bytes_le(Minus, &[]), BigInt::zero()); +} + +#[test] +fn test_to_bytes_le() { + fn check(s: &str, result: &str) { + let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap(); + let (sign, v) = b.to_bytes_le(); + assert_eq!((Plus, s.as_bytes()), (sign, &*v)); + } + check("A", "65"); + check("AA", "16705"); + check("BA", "16706"); + check("!dlrow olleH", "22405534230753963835153736737"); + let b: BigInt = Zero::zero(); + assert_eq!(b.to_bytes_le(), (NoSign, vec![0])); + + // Test with leading/trailing zero bytes and a full BigDigit of value 0 + let b = BigInt::from_str_radix("00010000000000000200", 16).unwrap(); + assert_eq!(b.to_bytes_le(), (Plus, vec![0, 2, 0, 0, 0, 0, 0, 0, 1])); +} + +#[test] +fn test_to_signed_bytes_le() { + fn check(s: &str, result: Vec) { + let b = BigInt::parse_bytes(s.as_bytes(), 10).unwrap(); + assert_eq!(b.to_signed_bytes_le(), result); + assert_eq!(::to_le_bytes(&b), result); + } + + check("0", vec![0]); + check("32767", vec![0xff, 0x7f]); + check("-1", vec![0xff]); + check("16777216", vec![0, 0, 0, 1]); + check("-100", vec![156]); + check("-8388608", vec![0, 0, 0x80]); + check("-192", vec![0x40, 0xff]); + check("128", vec![0x80, 0]) +} + +#[test] +fn test_from_signed_bytes_le() { + fn check(s: &[u8], result: &str) { + let b = 
BigInt::parse_bytes(result.as_bytes(), 10).unwrap(); + assert_eq!(BigInt::from_signed_bytes_le(s), b); + assert_eq!(::from_le_bytes(s), b); + } + + check(&[], "0"); + check(&[0], "0"); + check(&[0; 10], "0"); + check(&[0xff, 0x7f], "32767"); + check(&[0xff], "-1"); + check(&[0, 0, 0, 1], "16777216"); + check(&[156], "-100"); + check(&[0, 0, 0x80], "-8388608"); + check(&[0xff; 10], "-1"); + check(&[0x40, 0xff], "-192"); +} + +#[test] +fn test_to_signed_bytes_be() { + fn check(s: &str, result: Vec) { + let b = BigInt::parse_bytes(s.as_bytes(), 10).unwrap(); + assert_eq!(b.to_signed_bytes_be(), result); + assert_eq!(::to_be_bytes(&b), result); + } + + check("0", vec![0]); + check("32767", vec![0x7f, 0xff]); + check("-1", vec![255]); + check("16777216", vec![1, 0, 0, 0]); + check("-100", vec![156]); + check("-8388608", vec![128, 0, 0]); + check("-192", vec![0xff, 0x40]); + check("128", vec![0, 0x80]); +} + +#[test] +fn test_from_signed_bytes_be() { + fn check(s: &[u8], result: &str) { + let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap(); + assert_eq!(BigInt::from_signed_bytes_be(s), b); + assert_eq!(::from_be_bytes(s), b); + } + + check(&[], "0"); + check(&[0], "0"); + check(&[0; 10], "0"); + check(&[127, 255], "32767"); + check(&[255], "-1"); + check(&[1, 0, 0, 0], "16777216"); + check(&[156], "-100"); + check(&[128, 0, 0], "-8388608"); + check(&[255; 10], "-1"); + check(&[0xff, 0x40], "-192"); +} + +#[test] +fn test_signed_bytes_be_round_trip() { + for i in -0x1FFFF..0x20000 { + let n = BigInt::from(i); + assert_eq!(n, BigInt::from_signed_bytes_be(&n.to_signed_bytes_be())); + } +} + +#[test] +fn test_signed_bytes_le_round_trip() { + for i in -0x1FFFF..0x20000 { + let n = BigInt::from(i); + assert_eq!(n, BigInt::from_signed_bytes_le(&n.to_signed_bytes_le())); + } +} + +#[test] +fn test_cmp() { + let vs: [&[u32]; 4] = [&[2_u32], &[1, 1], &[2, 1], &[1, 1, 1]]; + let mut nums = Vec::new(); + for s in vs.iter().rev() { + nums.push(BigInt::from_slice(Minus, s)); + 
} + nums.push(Zero::zero()); + nums.extend(vs.iter().map(|s| BigInt::from_slice(Plus, s))); + + for (i, ni) in nums.iter().enumerate() { + for (j0, nj) in nums[i..].iter().enumerate() { + let j = i + j0; + if i == j { + assert_eq!(ni.cmp(nj), Equal); + assert_eq!(nj.cmp(ni), Equal); + assert_eq!(ni, nj); + assert!(ni == nj); + assert!(ni <= nj); + assert!(ni >= nj); + assert!(ni >= nj); + assert!(ni <= nj); + } else { + assert_eq!(ni.cmp(nj), Less); + assert_eq!(nj.cmp(ni), Greater); + + assert!(ni != nj); + assert!(ni != nj); + + assert!(ni <= nj); + assert!(ni < nj); + assert!(ni < nj); + assert!(ni <= nj); + + assert!(nj > ni); + assert!(nj >= ni); + assert!(nj >= ni); + assert!(nj > ni); + } + } + } +} + +fn hash(x: &T) -> u64 { + let mut hasher = ::Hasher::new(); + x.hash(&mut hasher); + hasher.finish() +} + +#[test] +fn test_hash() { + let a = BigInt::new(NoSign, vec![]); + let b = BigInt::new(NoSign, vec![0]); + let c = BigInt::new(Plus, vec![1]); + let d = BigInt::new(Plus, vec![1, 0, 0, 0, 0, 0]); + let e = BigInt::new(Plus, vec![0, 0, 0, 0, 0, 1]); + let f = BigInt::new(Minus, vec![1]); + assert!(hash(&a) == hash(&b)); + assert!(hash(&b) != hash(&c)); + assert!(hash(&c) == hash(&d)); + assert!(hash(&d) != hash(&e)); + assert!(hash(&c) != hash(&f)); +} + +#[test] +fn test_convert_i64() { + fn check(b1: BigInt, i: i64) { + let b2: BigInt = FromPrimitive::from_i64(i).unwrap(); + assert!(b1 == b2); + assert!(b1.to_i64().unwrap() == i); + } + + check(Zero::zero(), 0); + check(One::one(), 1); + check(i64::MIN.to_bigint().unwrap(), i64::MIN); + check(i64::MAX.to_bigint().unwrap(), i64::MAX); + + assert_eq!((i64::MAX as u64 + 1).to_bigint().unwrap().to_i64(), None); + + assert_eq!( + BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i64(), + None + ); + + assert_eq!( + BigInt::from_biguint(Minus, BigUint::new(vec![1, 0, 0, 1 << 31])).to_i64(), + None + ); + + assert_eq!( + BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i64(), + 
None + ); +} + +#[test] +fn test_convert_i128() { + fn check(b1: BigInt, i: i128) { + let b2: BigInt = FromPrimitive::from_i128(i).unwrap(); + assert!(b1 == b2); + assert!(b1.to_i128().unwrap() == i); + } + + check(Zero::zero(), 0); + check(One::one(), 1); + check(i128::MIN.to_bigint().unwrap(), i128::MIN); + check(i128::MAX.to_bigint().unwrap(), i128::MAX); + + assert_eq!((i128::MAX as u128 + 1).to_bigint().unwrap().to_i128(), None); + + assert_eq!( + BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i128(), + None + ); + + assert_eq!( + BigInt::from_biguint(Minus, BigUint::new(vec![1, 0, 0, 1 << 31])).to_i128(), + None + ); + + assert_eq!( + BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i128(), + None + ); +} + +#[test] +fn test_convert_u64() { + fn check(b1: BigInt, u: u64) { + let b2: BigInt = FromPrimitive::from_u64(u).unwrap(); + assert!(b1 == b2); + assert!(b1.to_u64().unwrap() == u); + } + + check(Zero::zero(), 0); + check(One::one(), 1); + check(u64::MIN.to_bigint().unwrap(), u64::MIN); + check(u64::MAX.to_bigint().unwrap(), u64::MAX); + + assert_eq!( + BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u64(), + None + ); + + let max_value: BigUint = FromPrimitive::from_u64(u64::MAX).unwrap(); + assert_eq!(BigInt::from_biguint(Minus, max_value).to_u64(), None); + assert_eq!( + BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u64(), + None + ); +} + +#[test] +fn test_convert_u128() { + fn check(b1: BigInt, u: u128) { + let b2: BigInt = FromPrimitive::from_u128(u).unwrap(); + assert!(b1 == b2); + assert!(b1.to_u128().unwrap() == u); + } + + check(Zero::zero(), 0); + check(One::one(), 1); + check(u128::MIN.to_bigint().unwrap(), u128::MIN); + check(u128::MAX.to_bigint().unwrap(), u128::MAX); + + assert_eq!( + BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u128(), + None + ); + + let max_value: BigUint = FromPrimitive::from_u128(u128::MAX).unwrap(); + 
assert_eq!(BigInt::from_biguint(Minus, max_value).to_u128(), None); + assert_eq!( + BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u128(), + None + ); +} + +#[test] +#[allow(clippy::float_cmp)] +fn test_convert_f32() { + fn check(b1: &BigInt, f: f32) { + let b2 = BigInt::from_f32(f).unwrap(); + assert_eq!(b1, &b2); + assert_eq!(b1.to_f32().unwrap(), f); + let neg_b1 = -b1; + let neg_b2 = BigInt::from_f32(-f).unwrap(); + assert_eq!(neg_b1, neg_b2); + assert_eq!(neg_b1.to_f32().unwrap(), -f); + } + + check(&BigInt::zero(), 0.0); + check(&BigInt::one(), 1.0); + check(&BigInt::from(u16::MAX), pow(2.0_f32, 16) - 1.0); + check(&BigInt::from(1u64 << 32), pow(2.0_f32, 32)); + check(&BigInt::from_slice(Plus, &[0, 0, 1]), pow(2.0_f32, 64)); + check( + &((BigInt::one() << 100) + (BigInt::one() << 123)), + pow(2.0_f32, 100) + pow(2.0_f32, 123), + ); + check(&(BigInt::one() << 127), pow(2.0_f32, 127)); + check(&(BigInt::from((1u64 << 24) - 1) << (128 - 24)), f32::MAX); + + // keeping all 24 digits with the bits at different offsets to the BigDigits + let x: u32 = 0b00000000101111011111011011011101; + let mut f = x as f32; + let mut b = BigInt::from(x); + for _ in 0..64 { + check(&b, f); + f *= 2.0; + b <<= 1; + } + + // this number when rounded to f64 then f32 isn't the same as when rounded straight to f32 + let mut n: i64 = 0b0000000000111111111111111111111111011111111111111111111111111111; + assert!((n as f64) as f32 != n as f32); + assert_eq!(BigInt::from(n).to_f32(), Some(n as f32)); + n = -n; + assert!((n as f64) as f32 != n as f32); + assert_eq!(BigInt::from(n).to_f32(), Some(n as f32)); + + // test rounding up with the bits at different offsets to the BigDigits + let mut f = ((1u64 << 25) - 1) as f32; + let mut b = BigInt::from(1u64 << 25); + for _ in 0..64 { + assert_eq!(b.to_f32(), Some(f)); + f *= 2.0; + b <<= 1; + } + + // test correct ties-to-even rounding + let weird: i128 = (1i128 << 100) + (1i128 << (100 - f32::MANTISSA_DIGITS)); + 
assert_ne!(weird as f32, (weird + 1) as f32); + + assert_eq!(BigInt::from(weird).to_f32(), Some(weird as f32)); + assert_eq!(BigInt::from(weird + 1).to_f32(), Some((weird + 1) as f32)); + + // rounding + assert_eq!( + BigInt::from_f32(-f32::consts::PI), + Some(BigInt::from(-3i32)) + ); + assert_eq!(BigInt::from_f32(-f32::consts::E), Some(BigInt::from(-2i32))); + assert_eq!(BigInt::from_f32(-0.99999), Some(BigInt::zero())); + assert_eq!(BigInt::from_f32(-0.5), Some(BigInt::zero())); + assert_eq!(BigInt::from_f32(-0.0), Some(BigInt::zero())); + assert_eq!( + BigInt::from_f32(f32::MIN_POSITIVE / 2.0), + Some(BigInt::zero()) + ); + assert_eq!(BigInt::from_f32(f32::MIN_POSITIVE), Some(BigInt::zero())); + assert_eq!(BigInt::from_f32(0.5), Some(BigInt::zero())); + assert_eq!(BigInt::from_f32(0.99999), Some(BigInt::zero())); + assert_eq!(BigInt::from_f32(f32::consts::E), Some(BigInt::from(2u32))); + assert_eq!(BigInt::from_f32(f32::consts::PI), Some(BigInt::from(3u32))); + + // special float values + assert_eq!(BigInt::from_f32(f32::NAN), None); + assert_eq!(BigInt::from_f32(f32::INFINITY), None); + assert_eq!(BigInt::from_f32(f32::NEG_INFINITY), None); + + // largest BigInt that will round to a finite f32 value + let big_num = (BigInt::one() << 128u8) - 1u8 - (BigInt::one() << (128u8 - 25)); + assert_eq!(big_num.to_f32(), Some(f32::MAX)); + assert_eq!((&big_num + 1u8).to_f32(), Some(f32::INFINITY)); + assert_eq!((-&big_num).to_f32(), Some(f32::MIN)); + assert_eq!(((-&big_num) - 1u8).to_f32(), Some(f32::NEG_INFINITY)); + + assert_eq!( + ((BigInt::one() << 128u8) - 1u8).to_f32(), + Some(f32::INFINITY) + ); + assert_eq!((BigInt::one() << 128u8).to_f32(), Some(f32::INFINITY)); + assert_eq!( + (-((BigInt::one() << 128u8) - 1u8)).to_f32(), + Some(f32::NEG_INFINITY) + ); + assert_eq!( + (-(BigInt::one() << 128u8)).to_f32(), + Some(f32::NEG_INFINITY) + ); +} + +#[test] +#[allow(clippy::float_cmp)] +fn test_convert_f64() { + fn check(b1: &BigInt, f: f64) { + let b2 = 
BigInt::from_f64(f).unwrap(); + assert_eq!(b1, &b2); + assert_eq!(b1.to_f64().unwrap(), f); + let neg_b1 = -b1; + let neg_b2 = BigInt::from_f64(-f).unwrap(); + assert_eq!(neg_b1, neg_b2); + assert_eq!(neg_b1.to_f64().unwrap(), -f); + } + + check(&BigInt::zero(), 0.0); + check(&BigInt::one(), 1.0); + check(&BigInt::from(u32::MAX), pow(2.0_f64, 32) - 1.0); + check(&BigInt::from(1u64 << 32), pow(2.0_f64, 32)); + check(&BigInt::from_slice(Plus, &[0, 0, 1]), pow(2.0_f64, 64)); + check( + &((BigInt::one() << 100) + (BigInt::one() << 152)), + pow(2.0_f64, 100) + pow(2.0_f64, 152), + ); + check(&(BigInt::one() << 1023), pow(2.0_f64, 1023)); + check(&(BigInt::from((1u64 << 53) - 1) << (1024 - 53)), f64::MAX); + + // keeping all 53 digits with the bits at different offsets to the BigDigits + let x: u64 = 0b0000000000011110111110110111111101110111101111011111011011011101; + let mut f = x as f64; + let mut b = BigInt::from(x); + for _ in 0..128 { + check(&b, f); + f *= 2.0; + b <<= 1; + } + + // test rounding up with the bits at different offsets to the BigDigits + let mut f = ((1u64 << 54) - 1) as f64; + let mut b = BigInt::from(1u64 << 54); + for _ in 0..128 { + assert_eq!(b.to_f64(), Some(f)); + f *= 2.0; + b <<= 1; + } + + // test correct ties-to-even rounding + let weird: i128 = (1i128 << 100) + (1i128 << (100 - f64::MANTISSA_DIGITS)); + assert_ne!(weird as f64, (weird + 1) as f64); + + assert_eq!(BigInt::from(weird).to_f64(), Some(weird as f64)); + assert_eq!(BigInt::from(weird + 1).to_f64(), Some((weird + 1) as f64)); + + // rounding + assert_eq!( + BigInt::from_f64(-f64::consts::PI), + Some(BigInt::from(-3i32)) + ); + assert_eq!(BigInt::from_f64(-f64::consts::E), Some(BigInt::from(-2i32))); + assert_eq!(BigInt::from_f64(-0.99999), Some(BigInt::zero())); + assert_eq!(BigInt::from_f64(-0.5), Some(BigInt::zero())); + assert_eq!(BigInt::from_f64(-0.0), Some(BigInt::zero())); + assert_eq!( + BigInt::from_f64(f64::MIN_POSITIVE / 2.0), + Some(BigInt::zero()) + ); + 
assert_eq!(BigInt::from_f64(f64::MIN_POSITIVE), Some(BigInt::zero())); + assert_eq!(BigInt::from_f64(0.5), Some(BigInt::zero())); + assert_eq!(BigInt::from_f64(0.99999), Some(BigInt::zero())); + assert_eq!(BigInt::from_f64(f64::consts::E), Some(BigInt::from(2u32))); + assert_eq!(BigInt::from_f64(f64::consts::PI), Some(BigInt::from(3u32))); + + // special float values + assert_eq!(BigInt::from_f64(f64::NAN), None); + assert_eq!(BigInt::from_f64(f64::INFINITY), None); + assert_eq!(BigInt::from_f64(f64::NEG_INFINITY), None); + + // largest BigInt that will round to a finite f64 value + let big_num = (BigInt::one() << 1024u16) - 1u8 - (BigInt::one() << (1024u16 - 54)); + assert_eq!(big_num.to_f64(), Some(f64::MAX)); + assert_eq!((&big_num + 1u8).to_f64(), Some(f64::INFINITY)); + assert_eq!((-&big_num).to_f64(), Some(f64::MIN)); + assert_eq!(((-&big_num) - 1u8).to_f64(), Some(f64::NEG_INFINITY)); + + assert_eq!( + ((BigInt::one() << 1024u16) - 1u8).to_f64(), + Some(f64::INFINITY) + ); + assert_eq!((BigInt::one() << 1024u16).to_f64(), Some(f64::INFINITY)); + assert_eq!( + (-((BigInt::one() << 1024u16) - 1u8)).to_f64(), + Some(f64::NEG_INFINITY) + ); + assert_eq!( + (-(BigInt::one() << 1024u16)).to_f64(), + Some(f64::NEG_INFINITY) + ); +} + +#[test] +fn test_convert_to_biguint() { + fn check(n: BigInt, ans_1: BigUint) { + assert_eq!(n.to_biguint().unwrap(), ans_1); + assert_eq!(n.to_biguint().unwrap().to_bigint().unwrap(), n); + } + let zero: BigInt = Zero::zero(); + let unsigned_zero: BigUint = Zero::zero(); + let positive = BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3])); + let negative = -&positive; + + check(zero, unsigned_zero); + check(positive, BigUint::new(vec![1, 2, 3])); + + assert_eq!(negative.to_biguint(), None); +} + +#[test] +fn test_convert_from_uint() { + macro_rules! 
check { + ($ty:ident, $max:expr) => { + assert_eq!(BigInt::from($ty::zero()), BigInt::zero()); + assert_eq!(BigInt::from($ty::one()), BigInt::one()); + assert_eq!(BigInt::from($ty::MAX - $ty::one()), $max - BigInt::one()); + assert_eq!(BigInt::from($ty::MAX), $max); + }; + } + + check!(u8, BigInt::from_slice(Plus, &[u8::MAX as u32])); + check!(u16, BigInt::from_slice(Plus, &[u16::MAX as u32])); + check!(u32, BigInt::from_slice(Plus, &[u32::MAX])); + check!(u64, BigInt::from_slice(Plus, &[u32::MAX, u32::MAX])); + check!( + u128, + BigInt::from_slice(Plus, &[u32::MAX, u32::MAX, u32::MAX, u32::MAX]) + ); + check!(usize, BigInt::from(usize::MAX as u64)); +} + +#[test] +fn test_convert_from_int() { + macro_rules! check { + ($ty:ident, $min:expr, $max:expr) => { + assert_eq!(BigInt::from($ty::MIN), $min); + assert_eq!(BigInt::from($ty::MIN + $ty::one()), $min + BigInt::one()); + assert_eq!(BigInt::from(-$ty::one()), -BigInt::one()); + assert_eq!(BigInt::from($ty::zero()), BigInt::zero()); + assert_eq!(BigInt::from($ty::one()), BigInt::one()); + assert_eq!(BigInt::from($ty::MAX - $ty::one()), $max - BigInt::one()); + assert_eq!(BigInt::from($ty::MAX), $max); + }; + } + + check!( + i8, + BigInt::from_slice(Minus, &[1 << 7]), + BigInt::from_slice(Plus, &[i8::MAX as u32]) + ); + check!( + i16, + BigInt::from_slice(Minus, &[1 << 15]), + BigInt::from_slice(Plus, &[i16::MAX as u32]) + ); + check!( + i32, + BigInt::from_slice(Minus, &[1 << 31]), + BigInt::from_slice(Plus, &[i32::MAX as u32]) + ); + check!( + i64, + BigInt::from_slice(Minus, &[0, 1 << 31]), + BigInt::from_slice(Plus, &[u32::MAX, i32::MAX as u32]) + ); + check!( + i128, + BigInt::from_slice(Minus, &[0, 0, 0, 1 << 31]), + BigInt::from_slice(Plus, &[u32::MAX, u32::MAX, u32::MAX, i32::MAX as u32]) + ); + check!( + isize, + BigInt::from(isize::MIN as i64), + BigInt::from(isize::MAX as i64) + ); +} + +#[test] +fn test_convert_from_biguint() { + assert_eq!(BigInt::from(BigUint::zero()), BigInt::zero()); + 
assert_eq!(BigInt::from(BigUint::one()), BigInt::one()); + assert_eq!( + BigInt::from(BigUint::from_slice(&[1, 2, 3])), + BigInt::from_slice(Plus, &[1, 2, 3]) + ); +} + +#[test] +fn test_add() { + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + let (na, nb, nc) = (-&a, -&b, -&c); + + assert_op!(a + b == c); + assert_op!(b + a == c); + assert_op!(c + na == b); + assert_op!(c + nb == a); + assert_op!(a + nc == nb); + assert_op!(b + nc == na); + assert_op!(na + nb == nc); + assert_op!(a + na == BigInt::zero()); + + assert_assign_op!(a += b == c); + assert_assign_op!(b += a == c); + assert_assign_op!(c += na == b); + assert_assign_op!(c += nb == a); + assert_assign_op!(a += nc == nb); + assert_assign_op!(b += nc == na); + assert_assign_op!(na += nb == nc); + assert_assign_op!(a += na == BigInt::zero()); + } +} + +#[test] +fn test_sub() { + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + let (na, nb, nc) = (-&a, -&b, -&c); + + assert_op!(c - a == b); + assert_op!(c - b == a); + assert_op!(nb - a == nc); + assert_op!(na - b == nc); + assert_op!(b - na == c); + assert_op!(a - nb == c); + assert_op!(nc - na == nb); + assert_op!(a - a == BigInt::zero()); + + assert_assign_op!(c -= a == b); + assert_assign_op!(c -= b == a); + assert_assign_op!(nb -= a == nc); + assert_assign_op!(na -= b == nc); + assert_assign_op!(b -= na == c); + assert_assign_op!(a -= nb == c); + assert_assign_op!(nc -= na == nb); + assert_assign_op!(a -= a == BigInt::zero()); + } +} + +#[test] +fn test_mul() { + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + 
let (na, nb, nc) = (-&a, -&b, -&c); + + assert_op!(a * b == c); + assert_op!(b * a == c); + assert_op!(na * nb == c); + + assert_op!(na * b == nc); + assert_op!(nb * a == nc); + + assert_assign_op!(a *= b == c); + assert_assign_op!(b *= a == c); + assert_assign_op!(na *= nb == c); + + assert_assign_op!(na *= b == nc); + assert_assign_op!(nb *= a == nc); + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + let d = BigInt::from_slice(Plus, d_vec); + + assert!(a == &b * &c + &d); + assert!(a == &c * &b + &d); + } +} + +#[test] +fn test_div_mod_floor() { + fn check_sub(a: &BigInt, b: &BigInt, ans_d: &BigInt, ans_m: &BigInt) { + let (d, m) = a.div_mod_floor(b); + assert_eq!(d, a.div_floor(b)); + assert_eq!(m, a.mod_floor(b)); + if !m.is_zero() { + assert_eq!(m.sign(), b.sign()); + } + assert!(m.abs() <= b.abs()); + assert!(*a == b * &d + &m); + assert!(d == *ans_d); + assert!(m == *ans_m); + } + + fn check(a: &BigInt, b: &BigInt, d: &BigInt, m: &BigInt) { + if m.is_zero() { + check_sub(a, b, d, m); + check_sub(a, &b.neg(), &d.neg(), m); + check_sub(&a.neg(), b, &d.neg(), m); + check_sub(&a.neg(), &b.neg(), d, m); + } else { + let one: BigInt = One::one(); + check_sub(a, b, d, m); + check_sub(a, &b.neg(), &(d.neg() - &one), &(m - b)); + check_sub(&a.neg(), b, &(d.neg() - &one), &(b - m)); + check_sub(&a.neg(), &b.neg(), d, &m.neg()); + } + } + + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + + if !a.is_zero() { + check(&c, &a, &b, &Zero::zero()); + } + if !b.is_zero() { + check(&c, &b, &a, &Zero::zero()); + } + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = 
BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + let d = BigInt::from_slice(Plus, d_vec); + + if !b.is_zero() { + check(&a, &b, &c, &d); + } + } +} + +#[test] +fn test_div_rem() { + fn check_sub(a: &BigInt, b: &BigInt, ans_q: &BigInt, ans_r: &BigInt) { + let (q, r) = a.div_rem(b); + if !r.is_zero() { + assert_eq!(r.sign(), a.sign()); + } + assert!(r.abs() <= b.abs()); + assert!(*a == b * &q + &r); + assert!(q == *ans_q); + assert!(r == *ans_r); + + let (a, b, ans_q, ans_r) = (a.clone(), b.clone(), ans_q.clone(), ans_r.clone()); + assert_op!(a / b == ans_q); + assert_op!(a % b == ans_r); + assert_assign_op!(a /= b == ans_q); + assert_assign_op!(a %= b == ans_r); + } + + fn check(a: &BigInt, b: &BigInt, q: &BigInt, r: &BigInt) { + check_sub(a, b, q, r); + check_sub(a, &b.neg(), &q.neg(), r); + check_sub(&a.neg(), b, &q.neg(), &r.neg()); + check_sub(&a.neg(), &b.neg(), q, &r.neg()); + } + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + + if !a.is_zero() { + check(&c, &a, &b, &Zero::zero()); + } + if !b.is_zero() { + check(&c, &b, &a, &Zero::zero()); + } + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + let d = BigInt::from_slice(Plus, d_vec); + + if !b.is_zero() { + check(&a, &b, &c, &d); + } + } +} + +#[test] +fn test_div_ceil() { + fn check_sub(a: &BigInt, b: &BigInt, ans_d: &BigInt) { + assert_eq!(a.div_ceil(b), *ans_d); + } + + fn check(a: &BigInt, b: &BigInt, d: &BigInt, m: &BigInt) { + if m.is_zero() { + check_sub(a, b, d); + check_sub(a, &b.neg(), &d.neg()); + check_sub(&a.neg(), b, &d.neg()); + check_sub(&a.neg(), &b.neg(), d); + } else { + check_sub(a, b, &(d + 1)); + check_sub(a, &b.neg(), &d.neg()); + 
check_sub(&a.neg(), b, &d.neg()); + check_sub(&a.neg(), &b.neg(), &(d + 1)); + } + } + + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + + if !a.is_zero() { + check(&c, &a, &b, &Zero::zero()); + } + if !b.is_zero() { + check(&c, &b, &a, &Zero::zero()); + } + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + let d = BigInt::from_slice(Plus, d_vec); + + if !b.is_zero() { + check(&a, &b, &c, &d); + } + } +} + +#[test] +fn test_div_rem_euclid() { + fn check_sub(a: &BigInt, b: &BigInt, ans_d: &BigInt, ans_m: &BigInt) { + eprintln!("{} {} {} {}", a, b, ans_d, ans_m); + assert_eq!(a.div_euclid(b), *ans_d); + assert_eq!(a.rem_euclid(b), *ans_m); + assert!(*ans_m >= BigInt::zero()); + assert!(*ans_m < b.abs()); + } + + fn check(a: &BigInt, b: &BigInt, d: &BigInt, m: &BigInt) { + if m.is_zero() { + check_sub(a, b, d, m); + check_sub(a, &b.neg(), &d.neg(), m); + check_sub(&a.neg(), b, &d.neg(), m); + check_sub(&a.neg(), &b.neg(), d, m); + } else { + let one: BigInt = One::one(); + check_sub(a, b, d, m); + check_sub(a, &b.neg(), &d.neg(), m); + check_sub(&a.neg(), b, &(d + &one).neg(), &(b - m)); + check_sub(&a.neg(), &b.neg(), &(d + &one), &(b.abs() - m)); + } + } + + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + + if !a.is_zero() { + check(&c, &a, &b, &Zero::zero()); + } + if !b.is_zero() { + check(&c, &b, &a, &Zero::zero()); + } + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c 
= BigInt::from_slice(Plus, c_vec); + let d = BigInt::from_slice(Plus, d_vec); + + if !b.is_zero() { + check(&a, &b, &c, &d); + } + } +} + +#[test] +fn test_checked_add() { + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + + assert!(a.checked_add(&b).unwrap() == c); + assert!(b.checked_add(&a).unwrap() == c); + assert!(c.checked_add(&(-&a)).unwrap() == b); + assert!(c.checked_add(&(-&b)).unwrap() == a); + assert!(a.checked_add(&(-&c)).unwrap() == (-&b)); + assert!(b.checked_add(&(-&c)).unwrap() == (-&a)); + assert!((-&a).checked_add(&(-&b)).unwrap() == (-&c)); + assert!(a.checked_add(&(-&a)).unwrap() == BigInt::zero()); + } +} + +#[test] +fn test_checked_sub() { + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + + assert!(c.checked_sub(&a).unwrap() == b); + assert!(c.checked_sub(&b).unwrap() == a); + assert!((-&b).checked_sub(&a).unwrap() == (-&c)); + assert!((-&a).checked_sub(&b).unwrap() == (-&c)); + assert!(b.checked_sub(&(-&a)).unwrap() == c); + assert!(a.checked_sub(&(-&b)).unwrap() == c); + assert!((-&c).checked_sub(&(-&a)).unwrap() == (-&b)); + assert!(a.checked_sub(&a).unwrap() == BigInt::zero()); + } +} + +#[test] +fn test_checked_mul() { + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + + assert!(a.checked_mul(&b).unwrap() == c); + assert!(b.checked_mul(&a).unwrap() == c); + + assert!((-&a).checked_mul(&b).unwrap() == -&c); + assert!((-&b).checked_mul(&a).unwrap() == -&c); + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = 
BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + let d = BigInt::from_slice(Plus, d_vec); + + assert!(a == b.checked_mul(&c).unwrap() + &d); + assert!(a == c.checked_mul(&b).unwrap() + &d); + } +} +#[test] +fn test_checked_div() { + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + + if !a.is_zero() { + assert!(c.checked_div(&a).unwrap() == b); + assert!((-&c).checked_div(&(-&a)).unwrap() == b); + assert!((-&c).checked_div(&a).unwrap() == -&b); + } + if !b.is_zero() { + assert!(c.checked_div(&b).unwrap() == a); + assert!((-&c).checked_div(&(-&b)).unwrap() == a); + assert!((-&c).checked_div(&b).unwrap() == -&a); + } + + assert!(c.checked_div(&Zero::zero()).is_none()); + assert!((-&c).checked_div(&Zero::zero()).is_none()); + } +} + +#[test] +fn test_gcd() { + fn check(a: isize, b: isize, c: isize) { + let big_a: BigInt = FromPrimitive::from_isize(a).unwrap(); + let big_b: BigInt = FromPrimitive::from_isize(b).unwrap(); + let big_c: BigInt = FromPrimitive::from_isize(c).unwrap(); + + assert_eq!(big_a.gcd(&big_b), big_c); + assert_eq!(big_a.extended_gcd(&big_b).gcd, big_c); + assert_eq!(big_a.gcd_lcm(&big_b).0, big_c); + assert_eq!(big_a.extended_gcd_lcm(&big_b).0.gcd, big_c); + } + + check(10, 2, 2); + check(10, 3, 1); + check(0, 3, 3); + check(3, 3, 3); + check(56, 42, 14); + check(3, -3, 3); + check(-6, 3, 3); + check(-4, -2, 2); +} + +#[test] +fn test_lcm() { + fn check(a: isize, b: isize, c: isize) { + let big_a: BigInt = FromPrimitive::from_isize(a).unwrap(); + let big_b: BigInt = FromPrimitive::from_isize(b).unwrap(); + let big_c: BigInt = FromPrimitive::from_isize(c).unwrap(); + + assert_eq!(big_a.lcm(&big_b), big_c); + assert_eq!(big_a.gcd_lcm(&big_b).1, big_c); + assert_eq!(big_a.extended_gcd_lcm(&big_b).1, big_c); + } + + check(0, 0, 0); + check(1, 0, 0); + check(0, 1, 0); + check(1, 
1, 1); + check(-1, 1, 1); + check(1, -1, 1); + check(-1, -1, 1); + check(8, 9, 72); + check(11, 5, 55); +} + +#[test] +fn test_is_multiple_of() { + assert!(BigInt::from(0).is_multiple_of(&BigInt::from(0))); + assert!(BigInt::from(6).is_multiple_of(&BigInt::from(6))); + assert!(BigInt::from(6).is_multiple_of(&BigInt::from(3))); + assert!(BigInt::from(6).is_multiple_of(&BigInt::from(1))); + + assert!(!BigInt::from(42).is_multiple_of(&BigInt::from(5))); + assert!(!BigInt::from(5).is_multiple_of(&BigInt::from(3))); + assert!(!BigInt::from(42).is_multiple_of(&BigInt::from(0))); +} + +#[test] +fn test_next_multiple_of() { + assert_eq!( + BigInt::from(16).next_multiple_of(&BigInt::from(8)), + BigInt::from(16) + ); + assert_eq!( + BigInt::from(23).next_multiple_of(&BigInt::from(8)), + BigInt::from(24) + ); + assert_eq!( + BigInt::from(16).next_multiple_of(&BigInt::from(-8)), + BigInt::from(16) + ); + assert_eq!( + BigInt::from(23).next_multiple_of(&BigInt::from(-8)), + BigInt::from(16) + ); + assert_eq!( + BigInt::from(-16).next_multiple_of(&BigInt::from(8)), + BigInt::from(-16) + ); + assert_eq!( + BigInt::from(-23).next_multiple_of(&BigInt::from(8)), + BigInt::from(-16) + ); + assert_eq!( + BigInt::from(-16).next_multiple_of(&BigInt::from(-8)), + BigInt::from(-16) + ); + assert_eq!( + BigInt::from(-23).next_multiple_of(&BigInt::from(-8)), + BigInt::from(-24) + ); +} + +#[test] +fn test_prev_multiple_of() { + assert_eq!( + BigInt::from(16).prev_multiple_of(&BigInt::from(8)), + BigInt::from(16) + ); + assert_eq!( + BigInt::from(23).prev_multiple_of(&BigInt::from(8)), + BigInt::from(16) + ); + assert_eq!( + BigInt::from(16).prev_multiple_of(&BigInt::from(-8)), + BigInt::from(16) + ); + assert_eq!( + BigInt::from(23).prev_multiple_of(&BigInt::from(-8)), + BigInt::from(24) + ); + assert_eq!( + BigInt::from(-16).prev_multiple_of(&BigInt::from(8)), + BigInt::from(-16) + ); + assert_eq!( + BigInt::from(-23).prev_multiple_of(&BigInt::from(8)), + BigInt::from(-24) + ); + 
assert_eq!( + BigInt::from(-16).prev_multiple_of(&BigInt::from(-8)), + BigInt::from(-16) + ); + assert_eq!( + BigInt::from(-23).prev_multiple_of(&BigInt::from(-8)), + BigInt::from(-16) + ); +} + +#[test] +fn test_abs_sub() { + let zero: BigInt = Zero::zero(); + let one: BigInt = One::one(); + assert_eq!((-&one).abs_sub(&one), zero); + let one: BigInt = One::one(); + let zero: BigInt = Zero::zero(); + assert_eq!(one.abs_sub(&one), zero); + let one: BigInt = One::one(); + let zero: BigInt = Zero::zero(); + assert_eq!(one.abs_sub(&zero), one); + let one: BigInt = One::one(); + let two: BigInt = FromPrimitive::from_isize(2).unwrap(); + assert_eq!(one.abs_sub(&-&one), two); +} + +#[test] +fn test_from_str_radix() { + fn check(s: &str, ans: Option<isize>) { + let ans = ans.map(|n| { + let x: BigInt = FromPrimitive::from_isize(n).unwrap(); + x + }); + assert_eq!(BigInt::from_str_radix(s, 10).ok(), ans); + } + check("10", Some(10)); + check("1", Some(1)); + check("0", Some(0)); + check("-1", Some(-1)); + check("-10", Some(-10)); + check("+10", Some(10)); + check("--7", None); + check("++5", None); + check("+-9", None); + check("-+3", None); + check("Z", None); + check("_", None); + + // issue 10522, this hit an edge case that caused it to + // attempt to allocate a vector of size (-1u) == huge. 
+ let x: BigInt = format!("1{}", "0".repeat(36)).parse().unwrap(); + let _y = x.to_string(); +} + +#[test] +fn test_lower_hex() { + let a = BigInt::parse_bytes(b"A", 16).unwrap(); + let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap(); + + assert_eq!(format!("{:x}", a), "a"); + assert_eq!(format!("{:x}", hello), "-48656c6c6f20776f726c6421"); + assert_eq!(format!("{:♥>+#8x}", a), "♥♥♥♥+0xa"); +} + +#[test] +fn test_upper_hex() { + let a = BigInt::parse_bytes(b"A", 16).unwrap(); + let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap(); + + assert_eq!(format!("{:X}", a), "A"); + assert_eq!(format!("{:X}", hello), "-48656C6C6F20776F726C6421"); + assert_eq!(format!("{:♥>+#8X}", a), "♥♥♥♥+0xA"); +} + +#[test] +fn test_binary() { + let a = BigInt::parse_bytes(b"A", 16).unwrap(); + let hello = BigInt::parse_bytes(b"-224055342307539", 10).unwrap(); + + assert_eq!(format!("{:b}", a), "1010"); + assert_eq!( + format!("{:b}", hello), + "-110010111100011011110011000101101001100011010011" + ); + assert_eq!(format!("{:♥>+#8b}", a), "♥+0b1010"); +} + +#[test] +fn test_octal() { + let a = BigInt::parse_bytes(b"A", 16).unwrap(); + let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap(); + + assert_eq!(format!("{:o}", a), "12"); + assert_eq!(format!("{:o}", hello), "-22062554330674403566756233062041"); + assert_eq!(format!("{:♥>+#8o}", a), "♥♥♥+0o12"); +} + +#[test] +fn test_display() { + let a = BigInt::parse_bytes(b"A", 16).unwrap(); + let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap(); + + assert_eq!(format!("{}", a), "10"); + assert_eq!(format!("{}", hello), "-22405534230753963835153736737"); + assert_eq!(format!("{:♥>+#8}", a), "♥♥♥♥♥+10"); +} + +#[test] +fn test_neg() { + assert!(-BigInt::new(Plus, vec![1, 1, 1]) == BigInt::new(Minus, vec![1, 1, 1])); + assert!(-BigInt::new(Minus, vec![1, 1, 1]) == BigInt::new(Plus, vec![1, 1, 1])); + let zero: BigInt = Zero::zero(); + 
assert_eq!(-&zero, zero); +} + +#[test] +fn test_negative_shr() { + assert_eq!(BigInt::from(-1) >> 1, BigInt::from(-1)); + assert_eq!(BigInt::from(-2) >> 1, BigInt::from(-1)); + assert_eq!(BigInt::from(-3) >> 1, BigInt::from(-2)); + assert_eq!(BigInt::from(-3) >> 2, BigInt::from(-1)); +} + +#[test] +fn test_iter_sum() { + let result: BigInt = FromPrimitive::from_isize(-1234567).unwrap(); + let data: Vec<BigInt> = vec![ + FromPrimitive::from_i32(-1000000).unwrap(), + FromPrimitive::from_i32(-200000).unwrap(), + FromPrimitive::from_i32(-30000).unwrap(), + FromPrimitive::from_i32(-4000).unwrap(), + FromPrimitive::from_i32(-500).unwrap(), + FromPrimitive::from_i32(-60).unwrap(), + FromPrimitive::from_i32(-7).unwrap(), + ]; + + assert_eq!(result, data.iter().sum::<BigInt>()); + assert_eq!(result, data.into_iter().sum::<BigInt>()); +} + +#[test] +fn test_iter_product() { + let data: Vec<BigInt> = vec![ + FromPrimitive::from_i32(1001).unwrap(), + FromPrimitive::from_i32(-1002).unwrap(), + FromPrimitive::from_i32(1003).unwrap(), + FromPrimitive::from_i32(-1004).unwrap(), + FromPrimitive::from_i32(1005).unwrap(), + ]; + let result = data.first().unwrap() + * data.get(1).unwrap() + * data.get(2).unwrap() + * data.get(3).unwrap() + * data.get(4).unwrap(); + + assert_eq!(result, data.iter().product::<BigInt>()); + assert_eq!(result, data.into_iter().product::<BigInt>()); +} + +#[test] +fn test_iter_sum_generic() { + let result: BigInt = FromPrimitive::from_isize(-1234567).unwrap(); + let data = vec![-1000000, -200000, -30000, -4000, -500, -60, -7]; + + assert_eq!(result, data.iter().sum::<BigInt>()); + assert_eq!(result, data.into_iter().sum::<BigInt>()); +} + +#[test] +fn test_iter_product_generic() { + let data = vec![1001, -1002, 1003, -1004, 1005]; + let result = data[0].to_bigint().unwrap() + * data[1].to_bigint().unwrap() + * data[2].to_bigint().unwrap() + * data[3].to_bigint().unwrap() + * data[4].to_bigint().unwrap(); + + assert_eq!(result, data.iter().product::<BigInt>()); + assert_eq!(result, data.into_iter().product::<BigInt>()); +} + +#[test] 
+fn test_pow() { + let one = BigInt::from(1i32); + let two = BigInt::from(2i32); + let four = BigInt::from(4i32); + let eight = BigInt::from(8i32); + let minus_two = BigInt::from(-2i32); + macro_rules! check { + ($t:ty) => { + assert_eq!(Pow::pow(&two, 0 as $t), one); + assert_eq!(Pow::pow(&two, 1 as $t), two); + assert_eq!(Pow::pow(&two, 2 as $t), four); + assert_eq!(Pow::pow(&two, 3 as $t), eight); + assert_eq!(Pow::pow(&two, &(3 as $t)), eight); + assert_eq!(Pow::pow(&minus_two, 0 as $t), one, "-2^0"); + assert_eq!(Pow::pow(&minus_two, 1 as $t), minus_two, "-2^1"); + assert_eq!(Pow::pow(&minus_two, 2 as $t), four, "-2^2"); + assert_eq!(Pow::pow(&minus_two, 3 as $t), -&eight, "-2^3"); + }; + } + check!(u8); + check!(u16); + check!(u32); + check!(u64); + check!(usize); + + let pow_1e10000 = BigInt::from(10u32).pow(10_000_u32); + let manual_1e10000 = repeat(10u32).take(10_000).product::<BigInt>(); + assert!(manual_1e10000 == pow_1e10000); +} + +#[test] +fn test_bit() { + // 12 = (1100)_2 + assert!(!BigInt::from(0b1100u8).bit(0)); + assert!(!BigInt::from(0b1100u8).bit(1)); + assert!(BigInt::from(0b1100u8).bit(2)); + assert!(BigInt::from(0b1100u8).bit(3)); + assert!(!BigInt::from(0b1100u8).bit(4)); + assert!(!BigInt::from(0b1100u8).bit(200)); + assert!(!BigInt::from(0b1100u8).bit(u64::MAX)); + // -12 = (...110100)_2 + assert!(!BigInt::from(-12i8).bit(0)); + assert!(!BigInt::from(-12i8).bit(1)); + assert!(BigInt::from(-12i8).bit(2)); + assert!(!BigInt::from(-12i8).bit(3)); + assert!(BigInt::from(-12i8).bit(4)); + assert!(BigInt::from(-12i8).bit(200)); + assert!(BigInt::from(-12i8).bit(u64::MAX)); +} + +#[test] +fn test_set_bit() { + let mut x: BigInt; + + // zero + x = BigInt::zero(); + x.set_bit(200, true); + assert_eq!(x, BigInt::one() << 200); + x = BigInt::zero(); + x.set_bit(200, false); + assert_eq!(x, BigInt::zero()); + + // positive numbers + x = BigInt::from_biguint(Plus, BigUint::one() << 200); + x.set_bit(10, true); + x.set_bit(200, false); + assert_eq!(x, 
BigInt::one() << 10); + x.set_bit(10, false); + x.set_bit(5, false); + assert_eq!(x, BigInt::zero()); + + // negative numbers + x = BigInt::from(-12i8); + x.set_bit(200, true); + assert_eq!(x, BigInt::from(-12i8)); + x.set_bit(200, false); + assert_eq!( + x, + BigInt::from_biguint(Minus, BigUint::from(12u8) | (BigUint::one() << 200)) + ); + x.set_bit(6, false); + assert_eq!( + x, + BigInt::from_biguint(Minus, BigUint::from(76u8) | (BigUint::one() << 200)) + ); + x.set_bit(6, true); + assert_eq!( + x, + BigInt::from_biguint(Minus, BigUint::from(12u8) | (BigUint::one() << 200)) + ); + x.set_bit(200, true); + assert_eq!(x, BigInt::from(-12i8)); + + x = BigInt::from_biguint(Minus, BigUint::one() << 30); + x.set_bit(10, true); + assert_eq!( + x, + BigInt::from_biguint(Minus, (BigUint::one() << 30) - (BigUint::one() << 10)) + ); + + x = BigInt::from_biguint(Minus, BigUint::one() << 200); + x.set_bit(40, true); + assert_eq!( + x, + BigInt::from_biguint(Minus, (BigUint::one() << 200) - (BigUint::one() << 40)) + ); + + x = BigInt::from_biguint(Minus, (BigUint::one() << 200) | (BigUint::one() << 100)); + x.set_bit(100, false); + assert_eq!( + x, + BigInt::from_biguint(Minus, (BigUint::one() << 200) | (BigUint::one() << 101)) + ); + + x = BigInt::from_biguint(Minus, (BigUint::one() << 63) | (BigUint::one() << 62)); + x.set_bit(62, false); + assert_eq!(x, BigInt::from_biguint(Minus, BigUint::one() << 64)); + + x = BigInt::from_biguint(Minus, (BigUint::one() << 200) - BigUint::one()); + x.set_bit(0, false); + assert_eq!(x, BigInt::from_biguint(Minus, BigUint::one() << 200)); +} diff --git a/vendor/num-bigint-generic/tests/bigint_bitwise.rs b/vendor/num-bigint-generic/tests/bigint_bitwise.rs new file mode 100644 index 000000000..6cfa5ac51 --- /dev/null +++ b/vendor/num-bigint-generic/tests/bigint_bitwise.rs @@ -0,0 +1,178 @@ +type BigInt = num_bigint_generic::BigInt; +use num_bigint_generic::{Sign, ToBigInt}; +use num_traits::ToPrimitive; + +enum ValueVec { + N, + P(&'static 
[u32]), + M(&'static [u32]), +} + +use crate::ValueVec::*; + +impl ToBigInt for ValueVec { + fn to_bigint(&self) -> Option { + match self { + N => Some(BigInt::from_slice(Sign::NoSign, &[])), + P(s) => Some(BigInt::from_slice(Sign::Plus, s)), + M(s) => Some(BigInt::from_slice(Sign::Minus, s)), + } + } +} + +// a, !a +const NOT_VALUES: &[(ValueVec, ValueVec)] = &[ + (N, M(&[1])), + (P(&[1]), M(&[2])), + (P(&[2]), M(&[3])), + (P(&[!0 - 2]), M(&[!0 - 1])), + (P(&[!0 - 1]), M(&[!0])), + (P(&[!0]), M(&[0, 1])), + (P(&[0, 1]), M(&[1, 1])), + (P(&[1, 1]), M(&[2, 1])), +]; + +// a, b, a & b, a | b, a ^ b +const BITWISE_VALUES: &[(ValueVec, ValueVec, ValueVec, ValueVec, ValueVec)] = &[ + (N, N, N, N, N), + (N, P(&[1]), N, P(&[1]), P(&[1])), + (N, P(&[!0]), N, P(&[!0]), P(&[!0])), + (N, P(&[0, 1]), N, P(&[0, 1]), P(&[0, 1])), + (N, M(&[1]), N, M(&[1]), M(&[1])), + (N, M(&[!0]), N, M(&[!0]), M(&[!0])), + (N, M(&[0, 1]), N, M(&[0, 1]), M(&[0, 1])), + (P(&[1]), P(&[!0]), P(&[1]), P(&[!0]), P(&[!0 - 1])), + (P(&[!0]), P(&[!0]), P(&[!0]), P(&[!0]), N), + (P(&[!0]), P(&[1, 1]), P(&[1]), P(&[!0, 1]), P(&[!0 - 1, 1])), + (P(&[1]), M(&[!0]), P(&[1]), M(&[!0]), M(&[0, 1])), + (P(&[!0]), M(&[1]), P(&[!0]), M(&[1]), M(&[0, 1])), + (P(&[!0]), M(&[!0]), P(&[1]), M(&[1]), M(&[2])), + (P(&[!0]), M(&[1, 1]), P(&[!0]), M(&[1, 1]), M(&[0, 2])), + (P(&[1, 1]), M(&[!0]), P(&[1, 1]), M(&[!0]), M(&[0, 2])), + (M(&[1]), M(&[!0]), M(&[!0]), M(&[1]), P(&[!0 - 1])), + (M(&[!0]), M(&[!0]), M(&[!0]), M(&[!0]), N), + (M(&[!0]), M(&[1, 1]), M(&[!0, 1]), M(&[1]), P(&[!0 - 1, 1])), +]; + +const I32_MIN: i64 = i32::MIN as i64; +const I32_MAX: i64 = i32::MAX as i64; +const U32_MAX: i64 = u32::MAX as i64; + +// some corner cases +const I64_VALUES: &[i64] = &[ + i64::MIN, + i64::MIN + 1, + i64::MIN + 2, + i64::MIN + 3, + -U32_MAX - 3, + -U32_MAX - 2, + -U32_MAX - 1, + -U32_MAX, + -U32_MAX + 1, + -U32_MAX + 2, + -U32_MAX + 3, + I32_MIN - 3, + I32_MIN - 2, + I32_MIN - 1, + I32_MIN, + I32_MIN + 1, + I32_MIN + 2, + 
I32_MIN + 3, + -3, + -2, + -1, + 0, + 1, + 2, + 3, + I32_MAX - 3, + I32_MAX - 2, + I32_MAX - 1, + I32_MAX, + I32_MAX + 1, + I32_MAX + 2, + I32_MAX + 3, + U32_MAX - 3, + U32_MAX - 2, + U32_MAX - 1, + U32_MAX, + U32_MAX + 1, + U32_MAX + 2, + U32_MAX + 3, + i64::MAX - 3, + i64::MAX - 2, + i64::MAX - 1, + i64::MAX, +]; + +#[test] +fn test_not() { + for (a, not) in NOT_VALUES.iter() { + let a = a.to_bigint().unwrap(); + let not = not.to_bigint().unwrap(); + + // sanity check for tests that fit in i64 + if let (Some(prim_a), Some(prim_not)) = (a.to_i64(), not.to_i64()) { + assert_eq!(!prim_a, prim_not); + } + + assert_eq!(!a.clone(), not, "!{:x}", a); + assert_eq!(!not.clone(), a, "!{:x}", not); + } +} + +#[test] +fn test_not_i64() { + for &prim_a in I64_VALUES.iter() { + let a = prim_a.to_bigint().unwrap(); + let not = (!prim_a).to_bigint().unwrap(); + assert_eq!(!a.clone(), not, "!{:x}", a); + } +} + +#[test] +fn test_bitwise() { + for (a, b, and, or, xor) in BITWISE_VALUES.iter() { + let a = a.to_bigint().unwrap(); + let b = b.to_bigint().unwrap(); + let and = and.to_bigint().unwrap(); + let or = or.to_bigint().unwrap(); + let xor = xor.to_bigint().unwrap(); + + // sanity check for tests that fit in i64 + if let (Some(prim_a), Some(prim_b)) = (a.to_i64(), b.to_i64()) { + if let Some(prim_and) = and.to_i64() { + assert_eq!(prim_a & prim_b, prim_and); + } + if let Some(prim_or) = or.to_i64() { + assert_eq!(prim_a | prim_b, prim_or); + } + if let Some(prim_xor) = xor.to_i64() { + assert_eq!(prim_a ^ prim_b, prim_xor); + } + } + + assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b); + assert_eq!(b.clone() & &a, and, "{:x} & {:x}", b, a); + assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b); + assert_eq!(b.clone() | &a, or, "{:x} | {:x}", b, a); + assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b); + assert_eq!(b.clone() ^ &a, xor, "{:x} ^ {:x}", b, a); + } +} + +#[test] +fn test_bitwise_i64() { + for &prim_a in I64_VALUES.iter() { + let a = 
prim_a.to_bigint().unwrap(); + for &prim_b in I64_VALUES.iter() { + let b = prim_b.to_bigint().unwrap(); + let and = (prim_a & prim_b).to_bigint().unwrap(); + let or = (prim_a | prim_b).to_bigint().unwrap(); + let xor = (prim_a ^ prim_b).to_bigint().unwrap(); + assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b); + assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b); + assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b); + } + } +} diff --git a/vendor/num-bigint-generic/tests/bigint_scalar.rs b/vendor/num-bigint-generic/tests/bigint_scalar.rs new file mode 100644 index 000000000..99d403168 --- /dev/null +++ b/vendor/num-bigint-generic/tests/bigint_scalar.rs @@ -0,0 +1,156 @@ +type BigInt = num_bigint_generic::BigInt; +use num_bigint_generic::Sign::Plus; +use num_traits::{One, Signed, ToPrimitive, Zero}; + +use std::{ops::Neg, panic::catch_unwind}; + +mod consts; +use crate::consts::*; + +#[macro_use] +mod macros; + +#[test] +fn test_scalar_add() { + fn check(x: &BigInt, y: &BigInt, z: &BigInt) { + let (x, y, z) = (x.clone(), y.clone(), z.clone()); + assert_signed_scalar_op!(x + y == z); + assert_signed_scalar_assign_op!(x += y == z); + } + + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + let (na, nb, nc) = (-&a, -&b, -&c); + + check(&a, &b, &c); + check(&b, &a, &c); + check(&c, &na, &b); + check(&c, &nb, &a); + check(&a, &nc, &nb); + check(&b, &nc, &na); + check(&na, &nb, &nc); + check(&a, &na, &Zero::zero()); + } +} + +#[test] +fn test_scalar_sub() { + fn check(x: &BigInt, y: &BigInt, z: &BigInt) { + let (x, y, z) = (x.clone(), y.clone(), z.clone()); + assert_signed_scalar_op!(x - y == z); + assert_signed_scalar_assign_op!(x -= y == z); + } + + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = 
BigInt::from_slice(Plus, c_vec); + let (na, nb, nc) = (-&a, -&b, -&c); + + check(&c, &a, &b); + check(&c, &b, &a); + check(&nb, &a, &nc); + check(&na, &b, &nc); + check(&b, &na, &c); + check(&a, &nb, &c); + check(&nc, &na, &nb); + check(&a, &a, &Zero::zero()); + } +} + +#[test] +fn test_scalar_mul() { + fn check(x: &BigInt, y: &BigInt, z: &BigInt) { + let (x, y, z) = (x.clone(), y.clone(), z.clone()); + assert_signed_scalar_op!(x * y == z); + assert_signed_scalar_assign_op!(x *= y == z); + } + + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = BigInt::from_slice(Plus, c_vec); + let (na, nb, nc) = (-&a, -&b, -&c); + + check(&a, &b, &c); + check(&b, &a, &c); + check(&na, &nb, &c); + + check(&na, &b, &nc); + check(&nb, &a, &nc); + } +} + +#[test] +fn test_scalar_div_rem() { + fn check_sub(a: &BigInt, b: u32, ans_q: &BigInt, ans_r: &BigInt) { + let (q, r) = (a / b, a % b); + if !r.is_zero() { + assert_eq!(r.sign(), a.sign()); + } + assert!(r.abs() <= BigInt::from(b)); + assert!(*a == b * &q + &r); + assert!(q == *ans_q); + assert!(r == *ans_r); + + let b = BigInt::from(b); + let (a, ans_q, ans_r) = (a.clone(), ans_q.clone(), ans_r.clone()); + assert_signed_scalar_op!(a / b == ans_q); + assert_signed_scalar_op!(a % b == ans_r); + assert_signed_scalar_assign_op!(a /= b == ans_q); + assert_signed_scalar_assign_op!(a %= b == ans_r); + + let nb = -b; + assert_signed_scalar_op!(a / nb == -ans_q.clone()); + assert_signed_scalar_op!(a % nb == ans_r); + assert_signed_scalar_assign_op!(a /= nb == -ans_q.clone()); + assert_signed_scalar_assign_op!(a %= nb == ans_r); + } + + fn check(a: &BigInt, b: u32, q: &BigInt, r: &BigInt) { + check_sub(a, b, q, r); + check_sub(&a.neg(), b, &q.neg(), &r.neg()); + } + + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let b = BigInt::from_slice(Plus, b_vec); + let c = 
BigInt::from_slice(Plus, c_vec); + + if a_vec.len() == 1 && a_vec[0] != 0 { + let a = a_vec[0]; + check(&c, a, &b, &Zero::zero()); + } + + if b_vec.len() == 1 && b_vec[0] != 0 { + let b = b_vec[0]; + check(&c, b, &a, &Zero::zero()); + } + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigInt::from_slice(Plus, a_vec); + let c = BigInt::from_slice(Plus, c_vec); + let d = BigInt::from_slice(Plus, d_vec); + + if b_vec.len() == 1 && b_vec[0] != 0 { + let b = b_vec[0]; + check(&a, b, &c, &d); + } + } +} + +#[test] +fn test_scalar_div_rem_zero() { + catch_unwind(|| BigInt::zero() / 0u32).unwrap_err(); + catch_unwind(|| BigInt::zero() % 0u32).unwrap_err(); + catch_unwind(|| BigInt::one() / 0u32).unwrap_err(); + catch_unwind(|| BigInt::one() % 0u32).unwrap_err(); +} diff --git a/vendor/num-bigint-generic/tests/biguint.rs b/vendor/num-bigint-generic/tests/biguint.rs new file mode 100644 index 000000000..eb6e59666 --- /dev/null +++ b/vendor/num-bigint-generic/tests/biguint.rs @@ -0,0 +1,1917 @@ +type BigInt = num_bigint_generic::BigInt; +type BigUint = num_bigint_generic::BigUint; +use num_bigint_generic::{Sign::Plus, ToBigInt, ToBigUint}; +use num_integer::Integer; + +use std::{ + cmp::Ordering::{Equal, Greater, Less}, + collections::hash_map::RandomState, + f32, f64, + hash::{BuildHasher, Hash, Hasher}, + iter::repeat, + str::FromStr, +}; + +use num_traits::{ + pow, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Euclid, FromBytes, FromPrimitive, Num, + One, Pow, ToBytes, ToPrimitive, Zero, +}; + +mod consts; +use crate::consts::*; + +#[macro_use] +mod macros; + +#[test] +fn test_from_bytes_be() { + fn check(s: &str, result: &str) { + let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap(); + assert_eq!(BigUint::from_bytes_be(s.as_bytes()), b); + assert_eq!(::from_be_bytes(s.as_bytes()), b); + } + check("A", "65"); + check("AA", "16705"); + check("AB", "16706"); + check("Hello world!", "22405534230753963835153736737"); 
+ assert_eq!(BigUint::from_bytes_be(&[]), BigUint::zero()); +} + +#[test] +fn test_to_bytes_be() { + fn check(s: &str, result: &str) { + let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap(); + assert_eq!(b.to_bytes_be(), s.as_bytes()); + assert_eq!(::to_be_bytes(&b), s.as_bytes()); + } + check("A", "65"); + check("AA", "16705"); + check("AB", "16706"); + check("Hello world!", "22405534230753963835153736737"); + let b: BigUint = Zero::zero(); + assert_eq!(b.to_bytes_be(), [0]); + + // Test with leading/trailing zero bytes and a full BigDigit of value 0 + let b = BigUint::from_str_radix("00010000000000000200", 16).unwrap(); + assert_eq!(b.to_bytes_be(), [1, 0, 0, 0, 0, 0, 0, 2, 0]); +} + +#[test] +fn test_from_bytes_le() { + fn check(s: &str, result: &str) { + let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap(); + assert_eq!(BigUint::from_bytes_le(s.as_bytes()), b); + assert_eq!(::from_le_bytes(s.as_bytes()), b); + } + check("A", "65"); + check("AA", "16705"); + check("BA", "16706"); + check("!dlrow olleH", "22405534230753963835153736737"); + assert_eq!(BigUint::from_bytes_le(&[]), BigUint::zero()); +} + +#[test] +fn test_to_bytes_le() { + fn check(s: &str, result: &str) { + let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap(); + assert_eq!(b.to_bytes_le(), s.as_bytes()); + assert_eq!(::to_le_bytes(&b), s.as_bytes()); + } + check("A", "65"); + check("AA", "16705"); + check("BA", "16706"); + check("!dlrow olleH", "22405534230753963835153736737"); + let b: BigUint = Zero::zero(); + assert_eq!(b.to_bytes_le(), [0]); + + // Test with leading/trailing zero bytes and a full BigDigit of value 0 + let b = BigUint::from_str_radix("00010000000000000200", 16).unwrap(); + assert_eq!(b.to_bytes_le(), [0, 2, 0, 0, 0, 0, 0, 0, 1]); +} + +#[test] +fn test_cmp() { + let data: [&[_]; 7] = [&[], &[1], &[2], &[!0], &[0, 1], &[2, 1], &[1, 1, 1]]; + let data: Vec = data.iter().map(|v| BigUint::from_slice(v)).collect(); + for (i, ni) in data.iter().enumerate() { + 
for (j0, nj) in data[i..].iter().enumerate() { + let j = j0 + i; + if i == j { + assert_eq!(ni.cmp(nj), Equal); + assert_eq!(nj.cmp(ni), Equal); + assert_eq!(ni, nj); + assert!(ni == nj); + assert!(ni <= nj); + assert!(ni >= nj); + assert!(ni >= nj); + assert!(ni <= nj); + } else { + assert_eq!(ni.cmp(nj), Less); + assert_eq!(nj.cmp(ni), Greater); + + assert!(ni != nj); + assert!(ni != nj); + + assert!(ni <= nj); + assert!(ni < nj); + assert!(ni < nj); + assert!(ni <= nj); + + assert!(nj > ni); + assert!(nj >= ni); + assert!(nj >= ni); + assert!(nj > ni); + } + } + } +} + +fn hash(x: &T) -> u64 { + let mut hasher = ::Hasher::new(); + x.hash(&mut hasher); + hasher.finish() +} + +#[test] +fn test_hash() { + use crate::hash; + + let a = BigUint::new(vec![]); + let b = BigUint::new(vec![0]); + let c = BigUint::new(vec![1]); + let d = BigUint::new(vec![1, 0, 0, 0, 0, 0]); + let e = BigUint::new(vec![0, 0, 0, 0, 0, 1]); + assert!(hash(&a) == hash(&b)); + assert!(hash(&b) != hash(&c)); + assert!(hash(&c) == hash(&d)); + assert!(hash(&d) != hash(&e)); +} + +// LEFT, RIGHT, AND, OR, XOR +type BitTestTuple = ( + &'static [u32], + &'static [u32], + &'static [u32], + &'static [u32], + &'static [u32], +); +const BIT_TESTS: &[BitTestTuple] = &[ + (&[], &[], &[], &[], &[]), + (&[1, 0, 1], &[1, 1], &[1], &[1, 1, 1], &[0, 1, 1]), + (&[1, 0, 1], &[0, 1, 1], &[0, 0, 1], &[1, 1, 1], &[1, 1]), + ( + &[268, 482, 17], + &[964, 54], + &[260, 34], + &[972, 502, 17], + &[712, 468, 17], + ), +]; + +#[test] +fn test_bitand() { + for elm in BIT_TESTS { + let (a_vec, b_vec, c_vec, _, _) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + assert_op!(a & b == c); + assert_op!(b & a == c); + assert_assign_op!(a &= b == c); + assert_assign_op!(b &= a == c); + } +} + +#[test] +fn test_bitor() { + for elm in BIT_TESTS { + let (a_vec, b_vec, _, c_vec, _) = *elm; + let a = BigUint::from_slice(a_vec); + let b = 
BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + assert_op!(a | b == c); + assert_op!(b | a == c); + assert_assign_op!(a |= b == c); + assert_assign_op!(b |= a == c); + } +} + +#[test] +fn test_bitxor() { + for elm in BIT_TESTS { + let (a_vec, b_vec, _, _, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + assert_op!(a ^ b == c); + assert_op!(b ^ a == c); + assert_op!(a ^ c == b); + assert_op!(c ^ a == b); + assert_op!(b ^ c == a); + assert_op!(c ^ b == a); + assert_assign_op!(a ^= b == c); + assert_assign_op!(b ^= a == c); + assert_assign_op!(a ^= c == b); + assert_assign_op!(c ^= a == b); + assert_assign_op!(b ^= c == a); + assert_assign_op!(c ^= b == a); + } +} + +#[test] +fn test_shl() { + fn check(s: &str, shift: usize, ans: &str) { + let opt_biguint = BigUint::from_str_radix(s, 16).ok(); + let mut bu_assign = opt_biguint.unwrap(); + let bu = (bu_assign.clone() << shift).to_str_radix(16); + assert_eq!(bu, ans); + bu_assign <<= shift; + assert_eq!(bu_assign.to_str_radix(16), ans); + } + + check("0", 3, "0"); + check("1", 3, "8"); + + check( + "1\ + 0000\ + 0000\ + 0000\ + 0001\ + 0000\ + 0000\ + 0000\ + 0001", + 3, + "8\ + 0000\ + 0000\ + 0000\ + 0008\ + 0000\ + 0000\ + 0000\ + 0008", + ); + check( + "1\ + 0000\ + 0001\ + 0000\ + 0001", + 2, + "4\ + 0000\ + 0004\ + 0000\ + 0004", + ); + check( + "1\ + 0001\ + 0001", + 1, + "2\ + 0002\ + 0002", + ); + + check( + "\ + 4000\ + 0000\ + 0000\ + 0000", + 3, + "2\ + 0000\ + 0000\ + 0000\ + 0000", + ); + check( + "4000\ + 0000", + 2, + "1\ + 0000\ + 0000", + ); + check( + "4000", + 2, + "1\ + 0000", + ); + + check( + "4000\ + 0000\ + 0000\ + 0000", + 67, + "2\ + 0000\ + 0000\ + 0000\ + 0000\ + 0000\ + 0000\ + 0000\ + 0000", + ); + check( + "4000\ + 0000", + 35, + "2\ + 0000\ + 0000\ + 0000\ + 0000", + ); + check( + "4000", + 19, + "2\ + 0000\ + 0000", + ); + + check( + "fedc\ + ba98\ + 7654\ + 3210\ + fedc\ + ba98\ + 
7654\ + 3210", + 4, + "f\ + edcb\ + a987\ + 6543\ + 210f\ + edcb\ + a987\ + 6543\ + 2100", + ); + check( + "88887777666655554444333322221111", + 16, + "888877776666555544443333222211110000", + ); +} + +#[test] +fn test_shr() { + fn check(s: &str, shift: usize, ans: &str) { + let opt_biguint = BigUint::from_str_radix(s, 16).ok(); + let mut bu_assign = opt_biguint.unwrap(); + let bu = (bu_assign.clone() >> shift).to_str_radix(16); + assert_eq!(bu, ans); + bu_assign >>= shift; + assert_eq!(bu_assign.to_str_radix(16), ans); + } + + check("0", 3, "0"); + check("f", 3, "1"); + + check( + "1\ + 0000\ + 0000\ + 0000\ + 0001\ + 0000\ + 0000\ + 0000\ + 0001", + 3, + "2000\ + 0000\ + 0000\ + 0000\ + 2000\ + 0000\ + 0000\ + 0000", + ); + check( + "1\ + 0000\ + 0001\ + 0000\ + 0001", + 2, + "4000\ + 0000\ + 4000\ + 0000", + ); + check( + "1\ + 0001\ + 0001", + 1, + "8000\ + 8000", + ); + + check( + "2\ + 0000\ + 0000\ + 0000\ + 0001\ + 0000\ + 0000\ + 0000\ + 0001", + 67, + "4000\ + 0000\ + 0000\ + 0000", + ); + check( + "2\ + 0000\ + 0001\ + 0000\ + 0001", + 35, + "4000\ + 0000", + ); + check( + "2\ + 0001\ + 0001", + 19, + "4000", + ); + + check( + "1\ + 0000\ + 0000\ + 0000\ + 0000", + 1, + "8000\ + 0000\ + 0000\ + 0000", + ); + check( + "1\ + 0000\ + 0000", + 1, + "8000\ + 0000", + ); + check( + "1\ + 0000", + 1, + "8000", + ); + check( + "f\ + edcb\ + a987\ + 6543\ + 210f\ + edcb\ + a987\ + 6543\ + 2100", + 4, + "fedc\ + ba98\ + 7654\ + 3210\ + fedc\ + ba98\ + 7654\ + 3210", + ); + + check( + "888877776666555544443333222211110000", + 16, + "88887777666655554444333322221111", + ); +} + +// `DoubleBigDigit` size dependent +#[test] +fn test_convert_i64() { + fn check(b1: BigUint, i: i64) { + let b2: BigUint = FromPrimitive::from_i64(i).unwrap(); + assert_eq!(b1, b2); + assert_eq!(b1.to_i64().unwrap(), i); + } + + check(Zero::zero(), 0); + check(One::one(), 1); + check(i64::MAX.to_biguint().unwrap(), i64::MAX); + + check(BigUint::new(vec![]), 0); + check(BigUint::new(vec![1]), 
1); + check(BigUint::new(vec![N1]), (1 << 32) - 1); + check(BigUint::new(vec![0, 1]), 1 << 32); + check(BigUint::new(vec![N1, N1 >> 1]), i64::MAX); + + assert_eq!(i64::MIN.to_biguint(), None); + assert_eq!(BigUint::new(vec![N1, N1]).to_i64(), None); + assert_eq!(BigUint::new(vec![0, 0, 1]).to_i64(), None); + assert_eq!(BigUint::new(vec![N1, N1, N1]).to_i64(), None); +} + +#[test] +fn test_convert_i128() { + fn check(b1: BigUint, i: i128) { + let b2: BigUint = FromPrimitive::from_i128(i).unwrap(); + assert_eq!(b1, b2); + assert_eq!(b1.to_i128().unwrap(), i); + } + + check(Zero::zero(), 0); + check(One::one(), 1); + check(i128::MAX.to_biguint().unwrap(), i128::MAX); + + check(BigUint::new(vec![]), 0); + check(BigUint::new(vec![1]), 1); + check(BigUint::new(vec![N1]), (1 << 32) - 1); + check(BigUint::new(vec![0, 1]), 1 << 32); + check(BigUint::new(vec![N1, N1, N1, N1 >> 1]), i128::MAX); + + assert_eq!(i128::MIN.to_biguint(), None); + assert_eq!(BigUint::new(vec![N1, N1, N1, N1]).to_i128(), None); + assert_eq!(BigUint::new(vec![0, 0, 0, 0, 1]).to_i128(), None); + assert_eq!(BigUint::new(vec![N1, N1, N1, N1, N1]).to_i128(), None); +} + +// `DoubleBigDigit` size dependent +#[test] +fn test_convert_u64() { + fn check(b1: BigUint, u: u64) { + let b2: BigUint = FromPrimitive::from_u64(u).unwrap(); + assert_eq!(b1, b2); + assert_eq!(b1.to_u64().unwrap(), u); + } + + check(Zero::zero(), 0); + check(One::one(), 1); + check(u64::MIN.to_biguint().unwrap(), u64::MIN); + check(u64::MAX.to_biguint().unwrap(), u64::MAX); + + check(BigUint::new(vec![]), 0); + check(BigUint::new(vec![1]), 1); + check(BigUint::new(vec![N1]), (1 << 32) - 1); + check(BigUint::new(vec![0, 1]), 1 << 32); + check(BigUint::new(vec![N1, N1]), u64::MAX); + + assert_eq!(BigUint::new(vec![0, 0, 1]).to_u64(), None); + assert_eq!(BigUint::new(vec![N1, N1, N1]).to_u64(), None); +} + +#[test] +fn test_convert_u128() { + fn check(b1: BigUint, u: u128) { + let b2: BigUint = FromPrimitive::from_u128(u).unwrap(); + 
assert_eq!(b1, b2); + assert_eq!(b1.to_u128().unwrap(), u); + } + + check(Zero::zero(), 0); + check(One::one(), 1); + check(u128::MIN.to_biguint().unwrap(), u128::MIN); + check(u128::MAX.to_biguint().unwrap(), u128::MAX); + + check(BigUint::new(vec![]), 0); + check(BigUint::new(vec![1]), 1); + check(BigUint::new(vec![N1]), (1 << 32) - 1); + check(BigUint::new(vec![0, 1]), 1 << 32); + check(BigUint::new(vec![N1, N1, N1, N1]), u128::MAX); + + assert_eq!(BigUint::new(vec![0, 0, 0, 0, 1]).to_u128(), None); + assert_eq!(BigUint::new(vec![N1, N1, N1, N1, N1]).to_u128(), None); +} + +#[test] +#[allow(clippy::float_cmp)] +fn test_convert_f32() { + fn check(b1: &BigUint, f: f32) { + let b2 = BigUint::from_f32(f).unwrap(); + assert_eq!(b1, &b2); + assert_eq!(b1.to_f32().unwrap(), f); + } + + check(&BigUint::zero(), 0.0); + check(&BigUint::one(), 1.0); + check(&BigUint::from(u16::MAX), pow(2.0_f32, 16) - 1.0); + check(&BigUint::from(1u64 << 32), pow(2.0_f32, 32)); + check(&BigUint::from_slice(&[0, 0, 1]), pow(2.0_f32, 64)); + check( + &((BigUint::one() << 100) + (BigUint::one() << 123)), + pow(2.0_f32, 100) + pow(2.0_f32, 123), + ); + check(&(BigUint::one() << 127), pow(2.0_f32, 127)); + check(&(BigUint::from((1u64 << 24) - 1) << (128 - 24)), f32::MAX); + + // keeping all 24 digits with the bits at different offsets to the BigDigits + let x: u32 = 0b00000000101111011111011011011101; + let mut f = x as f32; + let mut b = BigUint::from(x); + for _ in 0..64 { + check(&b, f); + f *= 2.0; + b <<= 1; + } + + // this number when rounded to f64 then f32 isn't the same as when rounded straight to f32 + let n: u64 = 0b0000000000111111111111111111111111011111111111111111111111111111; + assert!((n as f64) as f32 != n as f32); + assert_eq!(BigUint::from(n).to_f32(), Some(n as f32)); + + // test rounding up with the bits at different offsets to the BigDigits + let mut f = ((1u64 << 25) - 1) as f32; + let mut b = BigUint::from(1u64 << 25); + for _ in 0..64 { + assert_eq!(b.to_f32(), 
Some(f)); + f *= 2.0; + b <<= 1; + } + + // test correct ties-to-even rounding + let weird: i128 = (1i128 << 100) + (1i128 << (100 - f32::MANTISSA_DIGITS)); + assert_ne!(weird as f32, (weird + 1) as f32); + + assert_eq!(BigInt::from(weird).to_f32(), Some(weird as f32)); + assert_eq!(BigInt::from(weird + 1).to_f32(), Some((weird + 1) as f32)); + + // rounding + assert_eq!(BigUint::from_f32(-1.0), None); + assert_eq!(BigUint::from_f32(-0.99999), Some(BigUint::zero())); + assert_eq!(BigUint::from_f32(-0.5), Some(BigUint::zero())); + assert_eq!(BigUint::from_f32(-0.0), Some(BigUint::zero())); + assert_eq!( + BigUint::from_f32(f32::MIN_POSITIVE / 2.0), + Some(BigUint::zero()) + ); + assert_eq!(BigUint::from_f32(f32::MIN_POSITIVE), Some(BigUint::zero())); + assert_eq!(BigUint::from_f32(0.5), Some(BigUint::zero())); + assert_eq!(BigUint::from_f32(0.99999), Some(BigUint::zero())); + assert_eq!(BigUint::from_f32(f32::consts::E), Some(BigUint::from(2u32))); + assert_eq!( + BigUint::from_f32(f32::consts::PI), + Some(BigUint::from(3u32)) + ); + + // special float values + assert_eq!(BigUint::from_f32(f32::NAN), None); + assert_eq!(BigUint::from_f32(f32::INFINITY), None); + assert_eq!(BigUint::from_f32(f32::NEG_INFINITY), None); + assert_eq!(BigUint::from_f32(f32::MIN), None); + + // largest BigUint that will round to a finite f32 value + let big_num = (BigUint::one() << 128u8) - 1u8 - (BigUint::one() << (128u8 - 25)); + assert_eq!(big_num.to_f32(), Some(f32::MAX)); + assert_eq!((big_num + 1u8).to_f32(), Some(f32::INFINITY)); + + assert_eq!( + ((BigUint::one() << 128u8) - 1u8).to_f32(), + Some(f32::INFINITY) + ); + assert_eq!((BigUint::one() << 128u8).to_f32(), Some(f32::INFINITY)); +} + +#[test] +#[allow(clippy::float_cmp)] +fn test_convert_f64() { + fn check(b1: &BigUint, f: f64) { + let b2 = BigUint::from_f64(f).unwrap(); + assert_eq!(b1, &b2); + assert_eq!(b1.to_f64().unwrap(), f); + } + + check(&BigUint::zero(), 0.0); + check(&BigUint::one(), 1.0); + 
check(&BigUint::from(u32::MAX), pow(2.0_f64, 32) - 1.0); + check(&BigUint::from(1u64 << 32), pow(2.0_f64, 32)); + check(&BigUint::from_slice(&[0, 0, 1]), pow(2.0_f64, 64)); + check( + &((BigUint::one() << 100) + (BigUint::one() << 152)), + pow(2.0_f64, 100) + pow(2.0_f64, 152), + ); + check(&(BigUint::one() << 1023), pow(2.0_f64, 1023)); + check(&(BigUint::from((1u64 << 53) - 1) << (1024 - 53)), f64::MAX); + + // keeping all 53 digits with the bits at different offsets to the BigDigits + let x: u64 = 0b0000000000011110111110110111111101110111101111011111011011011101; + let mut f = x as f64; + let mut b = BigUint::from(x); + for _ in 0..128 { + check(&b, f); + f *= 2.0; + b <<= 1; + } + + // test rounding up with the bits at different offsets to the BigDigits + let mut f = ((1u64 << 54) - 1) as f64; + let mut b = BigUint::from(1u64 << 54); + for _ in 0..128 { + assert_eq!(b.to_f64(), Some(f)); + f *= 2.0; + b <<= 1; + } + + // test correct ties-to-even rounding + let weird: i128 = (1i128 << 100) + (1i128 << (100 - f64::MANTISSA_DIGITS)); + assert_ne!(weird as f64, (weird + 1) as f64); + + assert_eq!(BigInt::from(weird).to_f64(), Some(weird as f64)); + assert_eq!(BigInt::from(weird + 1).to_f64(), Some((weird + 1) as f64)); + + // rounding + assert_eq!(BigUint::from_f64(-1.0), None); + assert_eq!(BigUint::from_f64(-0.99999), Some(BigUint::zero())); + assert_eq!(BigUint::from_f64(-0.5), Some(BigUint::zero())); + assert_eq!(BigUint::from_f64(-0.0), Some(BigUint::zero())); + assert_eq!( + BigUint::from_f64(f64::MIN_POSITIVE / 2.0), + Some(BigUint::zero()) + ); + assert_eq!(BigUint::from_f64(f64::MIN_POSITIVE), Some(BigUint::zero())); + assert_eq!(BigUint::from_f64(0.5), Some(BigUint::zero())); + assert_eq!(BigUint::from_f64(0.99999), Some(BigUint::zero())); + assert_eq!(BigUint::from_f64(f64::consts::E), Some(BigUint::from(2u32))); + assert_eq!( + BigUint::from_f64(f64::consts::PI), + Some(BigUint::from(3u32)) + ); + + // special float values + 
assert_eq!(BigUint::from_f64(f64::NAN), None); + assert_eq!(BigUint::from_f64(f64::INFINITY), None); + assert_eq!(BigUint::from_f64(f64::NEG_INFINITY), None); + assert_eq!(BigUint::from_f64(f64::MIN), None); + + // largest BigUint that will round to a finite f64 value + let big_num = (BigUint::one() << 1024u16) - 1u8 - (BigUint::one() << (1024u16 - 54)); + assert_eq!(big_num.to_f64(), Some(f64::MAX)); + assert_eq!((big_num + 1u8).to_f64(), Some(f64::INFINITY)); + + assert_eq!( + ((BigUint::one() << 1024u16) - 1u8).to_f64(), + Some(f64::INFINITY) + ); + assert_eq!((BigUint::one() << 1024u16).to_f64(), Some(f64::INFINITY)); +} + +#[test] +fn test_convert_to_bigint() { + fn check(n: BigUint, ans: BigInt) { + assert_eq!(n.to_bigint().unwrap(), ans); + assert_eq!(n.to_bigint().unwrap().to_biguint().unwrap(), n); + } + check(Zero::zero(), Zero::zero()); + check( + BigUint::new(vec![1, 2, 3]), + BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3])), + ); +} + +#[test] +fn test_convert_from_uint() { + macro_rules! 
check { + ($ty:ident, $max:expr) => { + assert_eq!(BigUint::from($ty::zero()), BigUint::zero()); + assert_eq!(BigUint::from($ty::one()), BigUint::one()); + assert_eq!(BigUint::from($ty::MAX - $ty::one()), $max - BigUint::one()); + assert_eq!(BigUint::from($ty::MAX), $max); + }; + } + + check!(u8, BigUint::from_slice(&[u8::MAX as u32])); + check!(u16, BigUint::from_slice(&[u16::MAX as u32])); + check!(u32, BigUint::from_slice(&[u32::MAX])); + check!(u64, BigUint::from_slice(&[u32::MAX, u32::MAX])); + check!( + u128, + BigUint::from_slice(&[u32::MAX, u32::MAX, u32::MAX, u32::MAX]) + ); + check!(usize, BigUint::from(usize::MAX as u64)); +} + +#[test] +fn test_add() { + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + assert_op!(a + b == c); + assert_op!(b + a == c); + assert_assign_op!(a += b == c); + assert_assign_op!(b += a == c); + } +} + +#[test] +fn test_sub() { + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + assert_op!(c - a == b); + assert_op!(c - b == a); + assert_assign_op!(c -= a == b); + assert_assign_op!(c -= b == a); + } +} + +#[test] +#[should_panic] +fn test_sub_fail_on_underflow() { + let (a, b): (BigUint, BigUint) = (Zero::zero(), One::one()); + let _ = a - b; +} + +#[test] +fn test_mul() { + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + assert_op!(a * b == c); + assert_op!(b * a == c); + assert_assign_op!(a *= b == c); + assert_assign_op!(b *= a == c); + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = 
BigUint::from_slice(c_vec); + let d = BigUint::from_slice(d_vec); + + assert!(a == &b * &c + &d); + assert!(a == &c * &b + &d); + } +} + +#[test] +fn test_div_rem() { + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + if !a.is_zero() { + assert_op!(c / a == b); + assert_op!(c % a == BigUint::zero()); + assert_assign_op!(c /= a == b); + assert_assign_op!(c %= a == BigUint::zero()); + assert_eq!(c.div_rem(&a), (b.clone(), BigUint::zero())); + } + if !b.is_zero() { + assert_op!(c / b == a); + assert_op!(c % b == BigUint::zero()); + assert_assign_op!(c /= b == a); + assert_assign_op!(c %= b == BigUint::zero()); + assert_eq!(c.div_rem(&b), (a.clone(), BigUint::zero())); + } + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + let d = BigUint::from_slice(d_vec); + + if !b.is_zero() { + assert_op!(a / b == c); + assert_op!(a % b == d); + assert_assign_op!(a /= b == c); + assert_assign_op!(a %= b == d); + assert!(a.div_rem(&b) == (c, d)); + } + } +} + +#[test] +fn test_div_rem_big_multiple() { + let a = BigUint::from(3u32).pow(100u32); + let a2 = &a * &a; + + let (div, rem) = a2.div_rem(&a); + assert_eq!(div, a); + assert!(rem.is_zero()); + + let (div, rem) = (&a2 - 1u32).div_rem(&a); + assert_eq!(div, &a - 1u32); + assert_eq!(rem, &a - 1u32); +} + +#[test] +fn test_div_ceil() { + fn check(a: &BigUint, b: &BigUint, d: &BigUint, m: &BigUint) { + if m.is_zero() { + assert_eq!(a.div_ceil(b), *d); + } else { + assert_eq!(a.div_ceil(b), d + 1u32); + } + } + + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + if !a.is_zero() { + check(&c, &a, &b, 
&Zero::zero()); + } + if !b.is_zero() { + check(&c, &b, &a, &Zero::zero()); + } + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + let d = BigUint::from_slice(d_vec); + + if !b.is_zero() { + check(&a, &b, &c, &d); + } + } +} + +#[test] +fn test_div_rem_euclid() { + fn check(a: &BigUint, b: &BigUint, d: &BigUint, m: &BigUint) { + assert_eq!(a.div_euclid(b), *d); + assert_eq!(a.rem_euclid(b), *m); + } + + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + if !a.is_zero() { + check(&c, &a, &b, &Zero::zero()); + } + if !b.is_zero() { + check(&c, &b, &a, &Zero::zero()); + } + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + let d = BigUint::from_slice(d_vec); + + if !b.is_zero() { + check(&a, &b, &c, &d); + } + } +} + +#[test] +fn test_checked_add() { + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + assert!(a.checked_add(&b).unwrap() == c); + assert!(b.checked_add(&a).unwrap() == c); + } +} + +#[test] +fn test_checked_sub() { + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + assert!(c.checked_sub(&a).unwrap() == b); + assert!(c.checked_sub(&b).unwrap() == a); + + if a > c { + assert!(a.checked_sub(&c).is_none()); + } + if b > c { + assert!(b.checked_sub(&c).is_none()); + } + } +} + +#[test] +fn test_checked_mul() { + for elm in MUL_TRIPLES.iter() { + let (a_vec, 
b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + assert!(a.checked_mul(&b).unwrap() == c); + assert!(b.checked_mul(&a).unwrap() == c); + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + let d = BigUint::from_slice(d_vec); + + assert!(a == b.checked_mul(&c).unwrap() + &d); + assert!(a == c.checked_mul(&b).unwrap() + &d); + } +} + +#[test] +fn test_mul_overflow() { + // Test for issue #187 - overflow due to mac3 incorrectly sizing temporary + let s = "5311379928167670986895882065524686273295931177270319231994441382\ + 0040355986085224273916250223263671004753755210595137000079652876\ + 0829212940754539968588340162273730474622005920097370111"; + let a: BigUint = s.parse().unwrap(); + let b = a.clone(); + let _ = a.checked_mul(&b); +} + +#[test] +fn test_mul_overflow_2() { + // Try a bunch of sizes that are right on the edge of multiplication length + // overflow, where (x * x).data.len() == 2 * x.data.len() + 1. 
+ for i in 1u8..20 { + let bits = 1u32 << i; + let x = (BigUint::one() << bits) - 1u32; + let x2 = (BigUint::one() << (2 * bits)) - &x - &x - 1u32; + assert_eq!(&x * &x, x2); + } +} + +#[test] +fn test_checked_div() { + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + if !a.is_zero() { + assert!(c.checked_div(&a).unwrap() == b); + } + if !b.is_zero() { + assert!(c.checked_div(&b).unwrap() == a); + } + + assert!(c.checked_div(&Zero::zero()).is_none()); + } +} + +#[test] +fn test_gcd() { + fn check(a: usize, b: usize, c: usize) { + let big_a: BigUint = FromPrimitive::from_usize(a).unwrap(); + let big_b: BigUint = FromPrimitive::from_usize(b).unwrap(); + let big_c: BigUint = FromPrimitive::from_usize(c).unwrap(); + + assert_eq!(big_a.gcd(&big_b), big_c); + assert_eq!(big_a.gcd_lcm(&big_b).0, big_c); + } + + check(10, 2, 2); + check(10, 3, 1); + check(0, 3, 3); + check(3, 3, 3); + check(56, 42, 14); +} + +#[test] +fn test_lcm() { + fn check(a: usize, b: usize, c: usize) { + let big_a: BigUint = FromPrimitive::from_usize(a).unwrap(); + let big_b: BigUint = FromPrimitive::from_usize(b).unwrap(); + let big_c: BigUint = FromPrimitive::from_usize(c).unwrap(); + + assert_eq!(big_a.lcm(&big_b), big_c); + assert_eq!(big_a.gcd_lcm(&big_b).1, big_c); + } + + check(0, 0, 0); + check(1, 0, 0); + check(0, 1, 0); + check(1, 1, 1); + check(8, 9, 72); + check(11, 5, 55); + check(99, 17, 1683); +} + +#[test] +fn test_is_multiple_of() { + assert!(BigUint::from(0u32).is_multiple_of(&BigUint::from(0u32))); + assert!(BigUint::from(6u32).is_multiple_of(&BigUint::from(6u32))); + assert!(BigUint::from(6u32).is_multiple_of(&BigUint::from(3u32))); + assert!(BigUint::from(6u32).is_multiple_of(&BigUint::from(1u32))); + + assert!(!BigUint::from(42u32).is_multiple_of(&BigUint::from(5u32))); + assert!(!BigUint::from(5u32).is_multiple_of(&BigUint::from(3u32))); + 
assert!(!BigUint::from(42u32).is_multiple_of(&BigUint::from(0u32))); +} + +#[test] +fn test_next_multiple_of() { + assert_eq!( + BigUint::from(16u32).next_multiple_of(&BigUint::from(8u32)), + BigUint::from(16u32) + ); + assert_eq!( + BigUint::from(23u32).next_multiple_of(&BigUint::from(8u32)), + BigUint::from(24u32) + ); +} + +#[test] +fn test_prev_multiple_of() { + assert_eq!( + BigUint::from(16u32).prev_multiple_of(&BigUint::from(8u32)), + BigUint::from(16u32) + ); + assert_eq!( + BigUint::from(23u32).prev_multiple_of(&BigUint::from(8u32)), + BigUint::from(16u32) + ); +} + +#[test] +fn test_is_even() { + let one: BigUint = FromStr::from_str("1").unwrap(); + let two: BigUint = FromStr::from_str("2").unwrap(); + let thousand: BigUint = FromStr::from_str("1000").unwrap(); + let big: BigUint = FromStr::from_str("1000000000000000000000").unwrap(); + let bigger: BigUint = FromStr::from_str("1000000000000000000001").unwrap(); + assert!(one.is_odd()); + assert!(two.is_even()); + assert!(thousand.is_even()); + assert!(big.is_even()); + assert!(bigger.is_odd()); + assert!((&one << 64u8).is_even()); + assert!(((&one << 64u8) + one).is_odd()); +} + +fn to_str_pairs() -> Vec<(BigUint, Vec<(u32, String)>)> { + let bits = 32; + vec![ + ( + Zero::zero(), + vec![(2, "0".to_string()), (3, "0".to_string())], + ), + ( + BigUint::from_slice(&[0xff]), + vec![ + (2, "11111111".to_string()), + (3, "100110".to_string()), + (4, "3333".to_string()), + (5, "2010".to_string()), + (6, "1103".to_string()), + (7, "513".to_string()), + (8, "377".to_string()), + (9, "313".to_string()), + (10, "255".to_string()), + (11, "212".to_string()), + (12, "193".to_string()), + (13, "168".to_string()), + (14, "143".to_string()), + (15, "120".to_string()), + (16, "ff".to_string()), + ], + ), + ( + BigUint::from_slice(&[0xfff]), + vec![ + (2, "111111111111".to_string()), + (4, "333333".to_string()), + (16, "fff".to_string()), + ], + ), + ( + BigUint::from_slice(&[1, 2]), + vec![ + (2, format!("10{}1", 
"0".repeat(bits - 1))), + (4, format!("2{}1", "0".repeat(bits / 2 - 1))), + ( + 10, + match bits { + 64 => "36893488147419103233".to_string(), + 32 => "8589934593".to_string(), + 16 => "131073".to_string(), + _ => panic!(), + }, + ), + (16, format!("2{}1", "0".repeat(bits / 4 - 1))), + ], + ), + ( + BigUint::from_slice(&[1, 2, 3]), + vec![ + ( + 2, + format!("11{}10{}1", "0".repeat(bits - 2), "0".repeat(bits - 1)), + ), + ( + 4, + format!( + "3{}2{}1", + "0".repeat(bits / 2 - 1), + "0".repeat(bits / 2 - 1) + ), + ), + ( + 8, + match bits { + 64 => "14000000000000000000004000000000000000000001".to_string(), + 32 => "6000000000100000000001".to_string(), + 16 => "140000400001".to_string(), + _ => panic!(), + }, + ), + ( + 10, + match bits { + 64 => "1020847100762815390427017310442723737601".to_string(), + 32 => "55340232229718589441".to_string(), + 16 => "12885032961".to_string(), + _ => panic!(), + }, + ), + ( + 16, + format!( + "3{}2{}1", + "0".repeat(bits / 4 - 1), + "0".repeat(bits / 4 - 1) + ), + ), + ], + ), + ] +} + +#[test] +fn test_to_str_radix() { + let r = to_str_pairs(); + for num_pair in r.iter() { + let (n, rs) = num_pair; + for str_pair in rs.iter() { + let (radix, str) = str_pair; + assert_eq!(n.to_str_radix(*radix), *str); + } + } +} + +#[test] +fn test_from_and_to_radix() { + const GROUND_TRUTH: &[(&[u8], u32, &[u8])] = &[ + (b"0", 42, &[0]), + ( + b"ffffeeffbb", + 2, + &[ + 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + ], + ), + ( + b"ffffeeffbb", + 3, + &[ + 2, 2, 1, 1, 2, 1, 1, 2, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 1, 0, 0, 2, 2, 0, 1, + ], + ), + ( + b"ffffeeffbb", + 4, + &[3, 2, 3, 2, 3, 3, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], + ), + ( + b"ffffeeffbb", + 5, + &[0, 4, 3, 3, 1, 4, 2, 4, 1, 4, 4, 2, 3, 0, 0, 1, 2, 1], + ), + ( + b"ffffeeffbb", + 6, + &[5, 5, 4, 5, 5, 0, 0, 1, 2, 5, 3, 0, 1, 0, 2, 2], + ), + ( + b"ffffeeffbb", + 7, + &[4, 2, 3, 6, 0, 1, 6, 1, 6, 2, 0, 
3, 2, 4, 1], + ), + ( + b"ffffeeffbb", + 8, + &[3, 7, 6, 7, 7, 5, 3, 7, 7, 7, 7, 7, 7, 1], + ), + (b"ffffeeffbb", 9, &[8, 4, 5, 7, 0, 0, 3, 2, 0, 3, 0, 8, 3]), + (b"ffffeeffbb", 10, &[5, 9, 5, 3, 1, 5, 0, 1, 5, 9, 9, 0, 1]), + (b"ffffeeffbb", 11, &[10, 7, 6, 5, 2, 0, 3, 3, 3, 4, 9, 3]), + (b"ffffeeffbb", 12, &[11, 8, 5, 10, 1, 10, 3, 1, 1, 9, 5, 1]), + (b"ffffeeffbb", 13, &[0, 5, 7, 4, 6, 5, 6, 11, 8, 12, 7]), + (b"ffffeeffbb", 14, &[11, 4, 4, 11, 8, 4, 6, 0, 3, 11, 3]), + (b"ffffeeffbb", 15, &[5, 11, 13, 2, 1, 10, 2, 0, 9, 13, 1]), + (b"ffffeeffbb", 16, &[11, 11, 15, 15, 14, 14, 15, 15, 15, 15]), + (b"ffffeeffbb", 17, &[0, 2, 14, 12, 2, 14, 8, 10, 4, 9]), + (b"ffffeeffbb", 18, &[17, 15, 5, 13, 10, 16, 16, 13, 9, 5]), + (b"ffffeeffbb", 19, &[14, 13, 2, 8, 9, 0, 1, 14, 7, 3]), + (b"ffffeeffbb", 20, &[15, 19, 3, 14, 0, 17, 19, 18, 2, 2]), + (b"ffffeeffbb", 21, &[11, 5, 4, 13, 5, 18, 9, 1, 8, 1]), + (b"ffffeeffbb", 22, &[21, 3, 7, 21, 15, 12, 17, 0, 20]), + (b"ffffeeffbb", 23, &[21, 21, 6, 9, 10, 7, 21, 0, 14]), + (b"ffffeeffbb", 24, &[11, 10, 19, 14, 22, 11, 17, 23, 9]), + (b"ffffeeffbb", 25, &[20, 18, 21, 22, 21, 14, 3, 5, 7]), + (b"ffffeeffbb", 26, &[13, 15, 24, 11, 17, 6, 23, 6, 5]), + (b"ffffeeffbb", 27, &[17, 16, 7, 0, 21, 0, 3, 24, 3]), + (b"ffffeeffbb", 28, &[11, 16, 11, 15, 14, 18, 13, 25, 2]), + (b"ffffeeffbb", 29, &[6, 8, 7, 19, 14, 13, 21, 5, 2]), + (b"ffffeeffbb", 30, &[5, 13, 18, 11, 10, 7, 8, 20, 1]), + (b"ffffeeffbb", 31, &[22, 26, 15, 19, 8, 27, 29, 8, 1]), + (b"ffffeeffbb", 32, &[27, 29, 31, 29, 30, 31, 31, 31]), + (b"ffffeeffbb", 33, &[32, 20, 27, 12, 1, 12, 26, 25]), + (b"ffffeeffbb", 34, &[17, 9, 16, 33, 13, 25, 31, 20]), + (b"ffffeeffbb", 35, &[25, 32, 2, 25, 11, 4, 3, 17]), + (b"ffffeeffbb", 36, &[35, 34, 5, 6, 32, 3, 1, 14]), + (b"ffffeeffbb", 37, &[16, 21, 18, 4, 33, 19, 21, 11]), + (b"ffffeeffbb", 38, &[33, 25, 19, 29, 20, 6, 23, 9]), + (b"ffffeeffbb", 39, &[26, 27, 29, 23, 16, 18, 0, 8]), + (b"ffffeeffbb", 40, &[35, 39, 30, 11, 16, 17, 28, 
6]), + (b"ffffeeffbb", 41, &[36, 30, 9, 18, 12, 19, 26, 5]), + (b"ffffeeffbb", 42, &[11, 34, 37, 27, 1, 13, 32, 4]), + (b"ffffeeffbb", 43, &[3, 24, 11, 2, 10, 40, 1, 4]), + (b"ffffeeffbb", 44, &[43, 12, 40, 32, 3, 23, 19, 3]), + (b"ffffeeffbb", 45, &[35, 38, 44, 18, 22, 18, 42, 2]), + (b"ffffeeffbb", 46, &[21, 45, 18, 41, 17, 2, 24, 2]), + (b"ffffeeffbb", 47, &[37, 37, 11, 12, 6, 0, 8, 2]), + (b"ffffeeffbb", 48, &[11, 41, 40, 43, 5, 43, 41, 1]), + (b"ffffeeffbb", 49, &[18, 45, 7, 13, 20, 21, 30, 1]), + (b"ffffeeffbb", 50, &[45, 21, 5, 34, 21, 18, 20, 1]), + (b"ffffeeffbb", 51, &[17, 6, 26, 22, 38, 24, 11, 1]), + (b"ffffeeffbb", 52, &[39, 33, 38, 30, 46, 31, 3, 1]), + (b"ffffeeffbb", 53, &[31, 7, 44, 23, 9, 32, 49]), + (b"ffffeeffbb", 54, &[17, 35, 8, 37, 31, 18, 44]), + (b"ffffeeffbb", 55, &[10, 52, 9, 48, 36, 39, 39]), + (b"ffffeeffbb", 56, &[11, 50, 51, 22, 25, 36, 35]), + (b"ffffeeffbb", 57, &[14, 55, 12, 43, 20, 3, 32]), + (b"ffffeeffbb", 58, &[35, 18, 45, 56, 9, 51, 28]), + (b"ffffeeffbb", 59, &[51, 28, 20, 26, 55, 3, 26]), + (b"ffffeeffbb", 60, &[35, 6, 27, 46, 58, 33, 23]), + (b"ffffeeffbb", 61, &[58, 7, 6, 54, 49, 20, 21]), + (b"ffffeeffbb", 62, &[53, 59, 3, 14, 10, 22, 19]), + (b"ffffeeffbb", 63, &[53, 50, 23, 4, 56, 36, 17]), + (b"ffffeeffbb", 64, &[59, 62, 47, 59, 63, 63, 15]), + (b"ffffeeffbb", 65, &[0, 53, 39, 4, 40, 37, 14]), + (b"ffffeeffbb", 66, &[65, 59, 39, 1, 64, 19, 13]), + (b"ffffeeffbb", 67, &[35, 14, 19, 16, 25, 10, 12]), + (b"ffffeeffbb", 68, &[51, 38, 63, 50, 15, 8, 11]), + (b"ffffeeffbb", 69, &[44, 45, 18, 58, 68, 12, 10]), + (b"ffffeeffbb", 70, &[25, 51, 0, 60, 13, 24, 9]), + (b"ffffeeffbb", 71, &[54, 30, 9, 65, 28, 41, 8]), + (b"ffffeeffbb", 72, &[35, 35, 55, 54, 17, 64, 7]), + (b"ffffeeffbb", 73, &[34, 4, 48, 40, 27, 19, 7]), + (b"ffffeeffbb", 74, &[53, 47, 4, 56, 36, 51, 6]), + (b"ffffeeffbb", 75, &[20, 56, 10, 72, 24, 13, 6]), + (b"ffffeeffbb", 76, &[71, 31, 52, 60, 48, 53, 5]), + (b"ffffeeffbb", 77, &[32, 73, 14, 63, 15, 21, 5]), + 
(b"ffffeeffbb", 78, &[65, 13, 17, 32, 64, 68, 4]), + (b"ffffeeffbb", 79, &[37, 56, 2, 56, 25, 41, 4]), + (b"ffffeeffbb", 80, &[75, 59, 37, 41, 43, 15, 4]), + (b"ffffeeffbb", 81, &[44, 68, 0, 21, 27, 72, 3]), + (b"ffffeeffbb", 82, &[77, 35, 2, 74, 46, 50, 3]), + (b"ffffeeffbb", 83, &[52, 51, 19, 76, 10, 30, 3]), + (b"ffffeeffbb", 84, &[11, 80, 19, 19, 76, 10, 3]), + (b"ffffeeffbb", 85, &[0, 82, 20, 14, 68, 77, 2]), + (b"ffffeeffbb", 86, &[3, 12, 78, 37, 62, 61, 2]), + (b"ffffeeffbb", 87, &[35, 12, 20, 8, 52, 46, 2]), + (b"ffffeeffbb", 88, &[43, 6, 54, 42, 30, 32, 2]), + (b"ffffeeffbb", 89, &[49, 52, 85, 21, 80, 18, 2]), + (b"ffffeeffbb", 90, &[35, 64, 78, 24, 18, 6, 2]), + (b"ffffeeffbb", 91, &[39, 17, 83, 63, 17, 85, 1]), + (b"ffffeeffbb", 92, &[67, 22, 85, 79, 75, 74, 1]), + (b"ffffeeffbb", 93, &[53, 60, 39, 29, 4, 65, 1]), + (b"ffffeeffbb", 94, &[37, 89, 2, 72, 76, 55, 1]), + (b"ffffeeffbb", 95, &[90, 74, 89, 9, 9, 47, 1]), + (b"ffffeeffbb", 96, &[59, 20, 46, 35, 81, 38, 1]), + (b"ffffeeffbb", 97, &[94, 87, 60, 71, 3, 31, 1]), + (b"ffffeeffbb", 98, &[67, 22, 63, 50, 62, 23, 1]), + (b"ffffeeffbb", 99, &[98, 6, 69, 12, 61, 16, 1]), + (b"ffffeeffbb", 100, &[95, 35, 51, 10, 95, 9, 1]), + (b"ffffeeffbb", 101, &[87, 27, 7, 8, 62, 3, 1]), + (b"ffffeeffbb", 102, &[17, 3, 32, 79, 59, 99]), + (b"ffffeeffbb", 103, &[30, 22, 90, 0, 87, 94]), + (b"ffffeeffbb", 104, &[91, 68, 87, 68, 38, 90]), + (b"ffffeeffbb", 105, &[95, 80, 54, 73, 15, 86]), + (b"ffffeeffbb", 106, &[31, 30, 24, 16, 17, 82]), + (b"ffffeeffbb", 107, &[51, 50, 10, 12, 42, 78]), + (b"ffffeeffbb", 108, &[71, 71, 96, 78, 89, 74]), + (b"ffffeeffbb", 109, &[33, 18, 93, 22, 50, 71]), + (b"ffffeeffbb", 110, &[65, 53, 57, 88, 29, 68]), + (b"ffffeeffbb", 111, &[53, 93, 67, 90, 27, 65]), + (b"ffffeeffbb", 112, &[11, 109, 96, 65, 43, 62]), + (b"ffffeeffbb", 113, &[27, 23, 106, 56, 76, 59]), + (b"ffffeeffbb", 114, &[71, 84, 31, 112, 11, 57]), + (b"ffffeeffbb", 115, &[90, 22, 1, 56, 76, 54]), + (b"ffffeeffbb", 116, &[35, 
38, 98, 57, 40, 52]), + (b"ffffeeffbb", 117, &[26, 113, 115, 62, 17, 50]), + (b"ffffeeffbb", 118, &[51, 14, 5, 18, 7, 48]), + (b"ffffeeffbb", 119, &[102, 31, 110, 108, 8, 46]), + (b"ffffeeffbb", 120, &[35, 93, 96, 50, 22, 44]), + (b"ffffeeffbb", 121, &[87, 61, 2, 36, 47, 42]), + (b"ffffeeffbb", 122, &[119, 64, 1, 22, 83, 40]), + (b"ffffeeffbb", 123, &[77, 119, 32, 90, 6, 39]), + (b"ffffeeffbb", 124, &[115, 122, 31, 79, 62, 37]), + (b"ffffeeffbb", 125, &[95, 108, 47, 74, 3, 36]), + (b"ffffeeffbb", 126, &[53, 25, 116, 39, 78, 34]), + (b"ffffeeffbb", 127, &[22, 23, 125, 67, 35, 33]), + (b"ffffeeffbb", 128, &[59, 127, 59, 127, 127, 31]), + (b"ffffeeffbb", 129, &[89, 36, 1, 59, 100, 30]), + (b"ffffeeffbb", 130, &[65, 91, 123, 89, 79, 29]), + (b"ffffeeffbb", 131, &[58, 72, 39, 63, 65, 28]), + (b"ffffeeffbb", 132, &[131, 62, 92, 82, 57, 27]), + (b"ffffeeffbb", 133, &[109, 31, 51, 123, 55, 26]), + (b"ffffeeffbb", 134, &[35, 74, 21, 27, 60, 25]), + (b"ffffeeffbb", 135, &[125, 132, 49, 37, 70, 24]), + (b"ffffeeffbb", 136, &[51, 121, 117, 133, 85, 23]), + (b"ffffeeffbb", 137, &[113, 60, 135, 22, 107, 22]), + (b"ffffeeffbb", 138, &[113, 91, 73, 93, 133, 21]), + (b"ffffeeffbb", 139, &[114, 75, 102, 51, 26, 21]), + (b"ffffeeffbb", 140, &[95, 25, 35, 16, 62, 20]), + (b"ffffeeffbb", 141, &[131, 137, 16, 110, 102, 19]), + (b"ffffeeffbb", 142, &[125, 121, 108, 34, 6, 19]), + (b"ffffeeffbb", 143, &[65, 78, 138, 55, 55, 18]), + (b"ffffeeffbb", 144, &[107, 125, 121, 15, 109, 17]), + (b"ffffeeffbb", 145, &[35, 13, 122, 42, 22, 17]), + (b"ffffeeffbb", 146, &[107, 38, 103, 123, 83, 16]), + (b"ffffeeffbb", 147, &[116, 96, 71, 98, 2, 16]), + (b"ffffeeffbb", 148, &[127, 23, 75, 99, 71, 15]), + (b"ffffeeffbb", 149, &[136, 110, 53, 114, 144, 14]), + (b"ffffeeffbb", 150, &[95, 140, 133, 130, 71, 14]), + (b"ffffeeffbb", 151, &[15, 50, 29, 137, 0, 14]), + (b"ffffeeffbb", 152, &[147, 15, 89, 121, 83, 13]), + (b"ffffeeffbb", 153, &[17, 87, 93, 72, 17, 13]), + (b"ffffeeffbb", 154, &[109, 113, 3, 
133, 106, 12]), + (b"ffffeeffbb", 155, &[115, 141, 120, 139, 44, 12]), + (b"ffffeeffbb", 156, &[143, 45, 4, 82, 140, 11]), + (b"ffffeeffbb", 157, &[149, 92, 15, 106, 82, 11]), + (b"ffffeeffbb", 158, &[37, 107, 79, 46, 26, 11]), + (b"ffffeeffbb", 159, &[137, 37, 146, 51, 130, 10]), + (b"ffffeeffbb", 160, &[155, 69, 29, 115, 77, 10]), + (b"ffffeeffbb", 161, &[67, 98, 46, 68, 26, 10]), + (b"ffffeeffbb", 162, &[125, 155, 60, 63, 138, 9]), + (b"ffffeeffbb", 163, &[96, 43, 118, 93, 90, 9]), + (b"ffffeeffbb", 164, &[159, 99, 123, 152, 43, 9]), + (b"ffffeeffbb", 165, &[65, 17, 1, 69, 163, 8]), + (b"ffffeeffbb", 166, &[135, 108, 25, 165, 119, 8]), + (b"ffffeeffbb", 167, &[165, 116, 164, 103, 77, 8]), + (b"ffffeeffbb", 168, &[11, 166, 67, 44, 36, 8]), + (b"ffffeeffbb", 169, &[65, 59, 71, 149, 164, 7]), + (b"ffffeeffbb", 170, &[85, 83, 26, 76, 126, 7]), + (b"ffffeeffbb", 171, &[71, 132, 140, 157, 88, 7]), + (b"ffffeeffbb", 172, &[3, 6, 127, 47, 52, 7]), + (b"ffffeeffbb", 173, &[122, 66, 53, 83, 16, 7]), + (b"ffffeeffbb", 174, &[35, 6, 5, 88, 155, 6]), + (b"ffffeeffbb", 175, &[95, 20, 84, 56, 122, 6]), + (b"ffffeeffbb", 176, &[43, 91, 57, 159, 89, 6]), + (b"ffffeeffbb", 177, &[110, 127, 54, 40, 58, 6]), + (b"ffffeeffbb", 178, &[49, 115, 43, 47, 27, 6]), + (b"ffffeeffbb", 179, &[130, 91, 4, 178, 175, 5]), + (b"ffffeeffbb", 180, &[35, 122, 109, 70, 147, 5]), + (b"ffffeeffbb", 181, &[94, 94, 4, 79, 119, 5]), + (b"ffffeeffbb", 182, &[39, 54, 66, 19, 92, 5]), + (b"ffffeeffbb", 183, &[119, 2, 143, 69, 65, 5]), + (b"ffffeeffbb", 184, &[67, 57, 90, 44, 39, 5]), + (b"ffffeeffbb", 185, &[90, 63, 141, 123, 13, 5]), + (b"ffffeeffbb", 186, &[53, 123, 172, 119, 174, 4]), + (b"ffffeeffbb", 187, &[153, 21, 68, 28, 151, 4]), + (b"ffffeeffbb", 188, &[131, 138, 94, 32, 128, 4]), + (b"ffffeeffbb", 189, &[179, 121, 156, 130, 105, 4]), + (b"ffffeeffbb", 190, &[185, 179, 164, 131, 83, 4]), + (b"ffffeeffbb", 191, &[118, 123, 37, 31, 62, 4]), + (b"ffffeeffbb", 192, &[59, 106, 83, 16, 41, 4]), + 
(b"ffffeeffbb", 193, &[57, 37, 47, 86, 20, 4]), + (b"ffffeeffbb", 194, &[191, 140, 63, 45, 0, 4]), + (b"ffffeeffbb", 195, &[65, 169, 83, 84, 175, 3]), + (b"ffffeeffbb", 196, &[67, 158, 64, 6, 157, 3]), + (b"ffffeeffbb", 197, &[121, 26, 167, 3, 139, 3]), + (b"ffffeeffbb", 198, &[197, 151, 165, 75, 121, 3]), + (b"ffffeeffbb", 199, &[55, 175, 36, 22, 104, 3]), + (b"ffffeeffbb", 200, &[195, 167, 162, 38, 87, 3]), + (b"ffffeeffbb", 201, &[35, 27, 136, 124, 70, 3]), + (b"ffffeeffbb", 202, &[87, 64, 153, 76, 54, 3]), + (b"ffffeeffbb", 203, &[151, 191, 14, 94, 38, 3]), + (b"ffffeeffbb", 204, &[119, 103, 135, 175, 22, 3]), + (b"ffffeeffbb", 205, &[200, 79, 123, 115, 7, 3]), + (b"ffffeeffbb", 206, &[133, 165, 202, 115, 198, 2]), + (b"ffffeeffbb", 207, &[44, 153, 193, 175, 184, 2]), + (b"ffffeeffbb", 208, &[91, 190, 125, 86, 171, 2]), + (b"ffffeeffbb", 209, &[109, 151, 34, 53, 158, 2]), + (b"ffffeeffbb", 210, &[95, 40, 171, 74, 145, 2]), + (b"ffffeeffbb", 211, &[84, 195, 162, 150, 132, 2]), + (b"ffffeeffbb", 212, &[31, 15, 59, 68, 120, 2]), + (b"ffffeeffbb", 213, &[125, 57, 127, 36, 108, 2]), + (b"ffffeeffbb", 214, &[51, 132, 2, 55, 96, 2]), + (b"ffffeeffbb", 215, &[175, 133, 177, 122, 84, 2]), + (b"ffffeeffbb", 216, &[179, 35, 78, 23, 73, 2]), + (b"ffffeeffbb", 217, &[53, 101, 208, 186, 61, 2]), + (b"ffffeeffbb", 218, &[33, 9, 214, 179, 50, 2]), + (b"ffffeeffbb", 219, &[107, 147, 175, 217, 39, 2]), + (b"ffffeeffbb", 220, &[175, 81, 179, 79, 29, 2]), + (b"ffffeeffbb", 221, &[0, 76, 95, 204, 18, 2]), + (b"ffffeeffbb", 222, &[53, 213, 16, 150, 8, 2]), + (b"ffffeeffbb", 223, &[158, 161, 42, 136, 221, 1]), + (b"ffffeeffbb", 224, &[123, 54, 52, 162, 212, 1]), + (b"ffffeeffbb", 225, &[170, 43, 151, 2, 204, 1]), + (b"ffffeeffbb", 226, &[27, 68, 224, 105, 195, 1]), + (b"ffffeeffbb", 227, &[45, 69, 157, 20, 187, 1]), + (b"ffffeeffbb", 228, &[71, 213, 64, 199, 178, 1]), + (b"ffffeeffbb", 229, &[129, 203, 66, 186, 170, 1]), + (b"ffffeeffbb", 230, &[205, 183, 57, 208, 162, 1]), + 
(b"ffffeeffbb", 231, &[32, 50, 164, 33, 155, 1]), + (b"ffffeeffbb", 232, &[35, 135, 53, 123, 147, 1]), + (b"ffffeeffbb", 233, &[209, 47, 89, 13, 140, 1]), + (b"ffffeeffbb", 234, &[143, 56, 175, 168, 132, 1]), + (b"ffffeeffbb", 235, &[225, 157, 216, 121, 125, 1]), + (b"ffffeeffbb", 236, &[51, 66, 119, 105, 118, 1]), + (b"ffffeeffbb", 237, &[116, 150, 26, 119, 111, 1]), + (b"ffffeeffbb", 238, &[221, 15, 87, 162, 104, 1]), + (b"ffffeeffbb", 239, &[234, 155, 214, 234, 97, 1]), + (b"ffffeeffbb", 240, &[155, 46, 84, 96, 91, 1]), + (b"ffffeeffbb", 241, &[187, 48, 90, 225, 84, 1]), + (b"ffffeeffbb", 242, &[87, 212, 151, 140, 78, 1]), + (b"ffffeeffbb", 243, &[206, 22, 189, 81, 72, 1]), + (b"ffffeeffbb", 244, &[119, 93, 122, 48, 66, 1]), + (b"ffffeeffbb", 245, &[165, 224, 117, 40, 60, 1]), + (b"ffffeeffbb", 246, &[77, 121, 100, 57, 54, 1]), + (b"ffffeeffbb", 247, &[52, 128, 242, 98, 48, 1]), + (b"ffffeeffbb", 248, &[115, 247, 224, 164, 42, 1]), + (b"ffffeeffbb", 249, &[218, 127, 223, 5, 37, 1]), + (b"ffffeeffbb", 250, &[95, 54, 168, 118, 31, 1]), + (b"ffffeeffbb", 251, &[121, 204, 240, 3, 26, 1]), + (b"ffffeeffbb", 252, &[179, 138, 123, 162, 20, 1]), + (b"ffffeeffbb", 253, &[21, 50, 1, 91, 15, 1]), + (b"ffffeeffbb", 254, &[149, 11, 63, 40, 10, 1]), + (b"ffffeeffbb", 255, &[170, 225, 247, 9, 5, 1]), + (b"ffffeeffbb", 256, &[187, 255, 238, 255, 255]), + ]; + + for &(bigint, radix, inbaseradix_le) in GROUND_TRUTH.iter() { + let bigint = BigUint::parse_bytes(bigint, 16).unwrap(); + // to_radix_le + assert_eq!(bigint.to_radix_le(radix), inbaseradix_le); + // to_radix_be + let mut inbase_be = bigint.to_radix_be(radix); + inbase_be.reverse(); // now le + assert_eq!(inbase_be, inbaseradix_le); + // from_radix_le + assert_eq!( + BigUint::from_radix_le(inbaseradix_le, radix).unwrap(), + bigint + ); + // from_radix_be + let mut inbaseradix_be = Vec::from(inbaseradix_le); + inbaseradix_be.reverse(); + assert_eq!( + BigUint::from_radix_be(&inbaseradix_be, radix).unwrap(), + bigint + ); + 
} + + assert!(BigUint::from_radix_le(&[10, 100, 10], 50).is_none()); + assert_eq!(BigUint::from_radix_le(&[], 2), Some(BigUint::zero())); + assert_eq!(BigUint::from_radix_be(&[], 2), Some(BigUint::zero())); +} + +#[test] +fn test_from_str_radix() { + let r = to_str_pairs(); + for num_pair in r.iter() { + let (n, rs) = num_pair; + for str_pair in rs.iter() { + let (radix, str) = str_pair; + assert_eq!(n, &BigUint::from_str_radix(str, *radix).unwrap()); + } + } + + let zed = BigUint::from_str_radix("Z", 10).ok(); + assert_eq!(zed, None); + let blank = BigUint::from_str_radix("_", 2).ok(); + assert_eq!(blank, None); + let blank_one = BigUint::from_str_radix("_1", 2).ok(); + assert_eq!(blank_one, None); + let plus_one = BigUint::from_str_radix("+1", 10).ok(); + assert_eq!(plus_one, Some(BigUint::from_slice(&[1]))); + let plus_plus_one = BigUint::from_str_radix("++1", 10).ok(); + assert_eq!(plus_plus_one, None); + let minus_one = BigUint::from_str_radix("-1", 10).ok(); + assert_eq!(minus_one, None); + let zero_plus_two = BigUint::from_str_radix("0+2", 10).ok(); + assert_eq!(zero_plus_two, None); + let three = BigUint::from_str_radix("1_1", 2).ok(); + assert_eq!(three, Some(BigUint::from_slice(&[3]))); + let ff = BigUint::from_str_radix("1111_1111", 2).ok(); + assert_eq!(ff, Some(BigUint::from_slice(&[0xff]))); +} + +#[test] +fn test_all_str_radix() { + let n = BigUint::new((0..10).collect()); + for radix in 2..37 { + let s = n.to_str_radix(radix); + let x = BigUint::from_str_radix(&s, radix); + assert_eq!(x.unwrap(), n); + + let s = s.to_ascii_uppercase(); + let x = BigUint::from_str_radix(&s, radix); + assert_eq!(x.unwrap(), n); + } +} + +#[test] +fn test_big_str() { + for n in 2..=20_u32 { + let x: BigUint = BigUint::from(n).pow(10_000_u32); + let s = x.to_string(); + let y: BigUint = s.parse().unwrap(); + assert_eq!(x, y); + } +} + +#[test] +fn test_lower_hex() { + let a = BigUint::parse_bytes(b"A", 16).unwrap(); + let hello = 
BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap(); + + assert_eq!(format!("{:x}", a), "a"); + assert_eq!(format!("{:x}", hello), "48656c6c6f20776f726c6421"); + assert_eq!(format!("{:♥>+#8x}", a), "♥♥♥♥+0xa"); +} + +#[test] +fn test_upper_hex() { + let a = BigUint::parse_bytes(b"A", 16).unwrap(); + let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap(); + + assert_eq!(format!("{:X}", a), "A"); + assert_eq!(format!("{:X}", hello), "48656C6C6F20776F726C6421"); + assert_eq!(format!("{:♥>+#8X}", a), "♥♥♥♥+0xA"); +} + +#[test] +fn test_binary() { + let a = BigUint::parse_bytes(b"A", 16).unwrap(); + let hello = BigUint::parse_bytes(b"224055342307539", 10).unwrap(); + + assert_eq!(format!("{:b}", a), "1010"); + assert_eq!( + format!("{:b}", hello), + "110010111100011011110011000101101001100011010011" + ); + assert_eq!(format!("{:♥>+#8b}", a), "♥+0b1010"); +} + +#[test] +fn test_octal() { + let a = BigUint::parse_bytes(b"A", 16).unwrap(); + let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap(); + + assert_eq!(format!("{:o}", a), "12"); + assert_eq!(format!("{:o}", hello), "22062554330674403566756233062041"); + assert_eq!(format!("{:♥>+#8o}", a), "♥♥♥+0o12"); +} + +#[test] +fn test_display() { + let a = BigUint::parse_bytes(b"A", 16).unwrap(); + let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap(); + + assert_eq!(format!("{}", a), "10"); + assert_eq!(format!("{}", hello), "22405534230753963835153736737"); + assert_eq!(format!("{:♥>+#8}", a), "♥♥♥♥♥+10"); +} + +#[test] +fn test_factor() { + fn factor(n: usize) -> BigUint { + let mut f: BigUint = One::one(); + for i in 2..=n { + // FIXME(#5992): assignment operator overloads + // f *= FromPrimitive::from_usize(i); + let bu: BigUint = FromPrimitive::from_usize(i).unwrap(); + f *= bu; + } + f + } + + fn check(n: usize, s: &str) { + let n = factor(n); + let ans = BigUint::from_str_radix(s, 10).unwrap(); + assert_eq!(n, ans); + } + + 
check(3, "6");
+    check(10, "3628800");
+    check(20, "2432902008176640000");
+    check(30, "265252859812191058636308480000000");
+}
+
+#[test]
+fn test_bits() {
+    assert_eq!(BigUint::new(vec![0, 0, 0, 0]).bits(), 0);
+    let n: BigUint = FromPrimitive::from_usize(0).unwrap();
+    assert_eq!(n.bits(), 0);
+    let n: BigUint = FromPrimitive::from_usize(1).unwrap();
+    assert_eq!(n.bits(), 1);
+    let n: BigUint = FromPrimitive::from_usize(3).unwrap();
+    assert_eq!(n.bits(), 2);
+    let n: BigUint = BigUint::from_str_radix("4000000000", 16).unwrap();
+    assert_eq!(n.bits(), 39);
+    let one: BigUint = One::one();
+    assert_eq!((one << 426u16).bits(), 427);
+}
+
+#[test]
+fn test_iter_sum() {
+    let result: BigUint = FromPrimitive::from_isize(1234567).unwrap();
+    let data: Vec<BigUint> = vec![
+        FromPrimitive::from_u32(1000000).unwrap(),
+        FromPrimitive::from_u32(200000).unwrap(),
+        FromPrimitive::from_u32(30000).unwrap(),
+        FromPrimitive::from_u32(4000).unwrap(),
+        FromPrimitive::from_u32(500).unwrap(),
+        FromPrimitive::from_u32(60).unwrap(),
+        FromPrimitive::from_u32(7).unwrap(),
+    ];
+
+    assert_eq!(result, data.iter().sum::<BigUint>());
+    assert_eq!(result, data.into_iter().sum::<BigUint>());
+}
+
+#[test]
+fn test_iter_product() {
+    let data: Vec<BigUint> = vec![
+        FromPrimitive::from_u32(1001).unwrap(),
+        FromPrimitive::from_u32(1002).unwrap(),
+        FromPrimitive::from_u32(1003).unwrap(),
+        FromPrimitive::from_u32(1004).unwrap(),
+        FromPrimitive::from_u32(1005).unwrap(),
+    ];
+    let result = data.first().unwrap()
+        * data.get(1).unwrap()
+        * data.get(2).unwrap()
+        * data.get(3).unwrap()
+        * data.get(4).unwrap();
+
+    assert_eq!(result, data.iter().product::<BigUint>());
+    assert_eq!(result, data.into_iter().product::<BigUint>());
+}
+
+#[test]
+fn test_iter_sum_generic() {
+    let result: BigUint = FromPrimitive::from_isize(1234567).unwrap();
+    let data = vec![1000000_u32, 200000, 30000, 4000, 500, 60, 7];
+
+    assert_eq!(result, data.iter().sum::<BigUint>());
+    assert_eq!(result, data.into_iter().sum::<BigUint>());
+}
+
+#[test]
+fn test_iter_product_generic()
{
+    let data = vec![1001_u32, 1002, 1003, 1004, 1005];
+    let result = data[0].to_biguint().unwrap()
+        * data[1].to_biguint().unwrap()
+        * data[2].to_biguint().unwrap()
+        * data[3].to_biguint().unwrap()
+        * data[4].to_biguint().unwrap();
+
+    assert_eq!(result, data.iter().product::<BigUint>());
+    assert_eq!(result, data.into_iter().product::<BigUint>());
+}
+
+#[test]
+fn test_pow() {
+    let one = BigUint::from(1u32);
+    let two = BigUint::from(2u32);
+    let four = BigUint::from(4u32);
+    let eight = BigUint::from(8u32);
+    let tentwentyfour = BigUint::from(1024u32);
+    let twentyfourtyeight = BigUint::from(2048u32);
+    macro_rules! check {
+        ($t:ty) => {
+            assert_eq!(Pow::pow(&two, 0 as $t), one);
+            assert_eq!(Pow::pow(&two, 1 as $t), two);
+            assert_eq!(Pow::pow(&two, 2 as $t), four);
+            assert_eq!(Pow::pow(&two, 3 as $t), eight);
+            assert_eq!(Pow::pow(&two, 10 as $t), tentwentyfour);
+            assert_eq!(Pow::pow(&two, 11 as $t), twentyfourtyeight);
+            assert_eq!(Pow::pow(&two, &(11 as $t)), twentyfourtyeight);
+        };
+    }
+    check!(u8);
+    check!(u16);
+    check!(u32);
+    check!(u64);
+    check!(u128);
+    check!(usize);
+
+    let pow_1e10000 = BigUint::from(10u32).pow(10_000_u32);
+    let manual_1e10000 = repeat(10u32).take(10_000).product::<BigUint>();
+    assert!(manual_1e10000 == pow_1e10000);
+}
+
+#[test]
+fn test_trailing_zeros() {
+    assert!(BigUint::from(0u8).trailing_zeros().is_none());
+    assert_eq!(BigUint::from(1u8).trailing_zeros().unwrap(), 0);
+    assert_eq!(BigUint::from(2u8).trailing_zeros().unwrap(), 1);
+    let x: BigUint = BigUint::one() << 128;
+    assert_eq!(x.trailing_zeros().unwrap(), 128);
+}
+
+#[test]
+fn test_trailing_ones() {
+    assert_eq!(BigUint::from(0u8).trailing_ones(), 0);
+    assert_eq!(BigUint::from(1u8).trailing_ones(), 1);
+    assert_eq!(BigUint::from(2u8).trailing_ones(), 0);
+    assert_eq!(BigUint::from(3u8).trailing_ones(), 2);
+    let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8);
+    assert_eq!(x.trailing_ones(), 2);
+    let x: BigUint = (BigUint::one() << 128) - BigUint::one();
assert_eq!(x.trailing_ones(), 128); +} + +#[test] +fn test_count_ones() { + assert_eq!(BigUint::from(0u8).count_ones(), 0); + assert_eq!(BigUint::from(1u8).count_ones(), 1); + assert_eq!(BigUint::from(2u8).count_ones(), 1); + assert_eq!(BigUint::from(3u8).count_ones(), 2); + let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8); + assert_eq!(x.count_ones(), 4); +} + +#[test] +fn test_bit() { + assert!(!BigUint::from(0u8).bit(0)); + assert!(!BigUint::from(0u8).bit(100)); + assert!(!BigUint::from(42u8).bit(4)); + assert!(BigUint::from(42u8).bit(5)); + let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8); + assert!(x.bit(129)); + assert!(!x.bit(130)); +} + +#[test] +fn test_set_bit() { + let mut x = BigUint::from(3u8); + x.set_bit(128, true); + x.set_bit(129, true); + assert_eq!(x, (BigUint::from(3u8) << 128) | BigUint::from(3u8)); + x.set_bit(0, false); + x.set_bit(128, false); + x.set_bit(130, false); + assert_eq!(x, (BigUint::from(2u8) << 128) | BigUint::from(2u8)); + x.set_bit(129, false); + x.set_bit(1, false); + assert_eq!(x, BigUint::zero()); +} diff --git a/vendor/num-bigint-generic/tests/biguint_scalar.rs b/vendor/num-bigint-generic/tests/biguint_scalar.rs new file mode 100644 index 000000000..1aa24f6f0 --- /dev/null +++ b/vendor/num-bigint-generic/tests/biguint_scalar.rs @@ -0,0 +1,123 @@ +type BigUint = num_bigint_generic::BigUint; +use num_traits::{One, ToPrimitive, Zero}; + +use std::panic::catch_unwind; + +mod consts; +use crate::consts::*; + +#[macro_use] +mod macros; + +#[test] +fn test_scalar_add() { + fn check(x: &BigUint, y: &BigUint, z: &BigUint) { + let (x, y, z) = (x.clone(), y.clone(), z.clone()); + assert_unsigned_scalar_op!(x + y == z); + assert_unsigned_scalar_assign_op!(x += y == z); + } + + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + check(&a, &b, &c); + check(&b, &a, &c); + } 
+} + +#[test] +fn test_scalar_sub() { + fn check(x: &BigUint, y: &BigUint, z: &BigUint) { + let (x, y, z) = (x.clone(), y.clone(), z.clone()); + assert_unsigned_scalar_op!(x - y == z); + assert_unsigned_scalar_assign_op!(x -= y == z); + } + + for elm in SUM_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + check(&c, &a, &b); + check(&c, &b, &a); + } +} + +#[test] +fn test_scalar_mul() { + fn check(x: &BigUint, y: &BigUint, z: &BigUint) { + let (x, y, z) = (x.clone(), y.clone(), z.clone()); + assert_unsigned_scalar_op!(x * y == z); + assert_unsigned_scalar_assign_op!(x *= y == z); + } + + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + check(&a, &b, &c); + check(&b, &a, &c); + } +} + +#[test] +fn test_scalar_rem_noncommutative() { + assert_eq!(5u8 % BigUint::from(7u8), BigUint::from(5u8)); + assert_eq!(BigUint::from(5u8) % 7u8, BigUint::from(5u8)); +} + +#[test] +fn test_scalar_div_rem() { + fn check(x: &BigUint, y: &BigUint, z: &BigUint, r: &BigUint) { + let (x, y, z, r) = (x.clone(), y.clone(), z.clone(), r.clone()); + assert_unsigned_scalar_op!(x / y == z); + assert_unsigned_scalar_op!(x % y == r); + assert_unsigned_scalar_assign_op!(x /= y == z); + assert_unsigned_scalar_assign_op!(x %= y == r); + } + + for elm in MUL_TRIPLES.iter() { + let (a_vec, b_vec, c_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + + if !a.is_zero() { + check(&c, &a, &b, &Zero::zero()); + } + + if !b.is_zero() { + check(&c, &b, &a, &Zero::zero()); + } + } + + for elm in DIV_REM_QUADRUPLES.iter() { + let (a_vec, b_vec, c_vec, d_vec) = *elm; + let a = BigUint::from_slice(a_vec); + let b = BigUint::from_slice(b_vec); + let c = BigUint::from_slice(c_vec); + 
let d = BigUint::from_slice(d_vec); + + if !b.is_zero() { + check(&a, &b, &c, &d); + assert_unsigned_scalar_op!(a / b == c); + assert_unsigned_scalar_op!(a % b == d); + assert_unsigned_scalar_assign_op!(a /= b == c); + assert_unsigned_scalar_assign_op!(a %= b == d); + } + } +} + +#[test] +fn test_scalar_div_rem_zero() { + catch_unwind(|| BigUint::zero() / 0u32).unwrap_err(); + catch_unwind(|| BigUint::zero() % 0u32).unwrap_err(); + catch_unwind(|| BigUint::one() / 0u32).unwrap_err(); + catch_unwind(|| BigUint::one() % 0u32).unwrap_err(); +} diff --git a/vendor/num-bigint-generic/tests/consts/mod.rs b/vendor/num-bigint-generic/tests/consts/mod.rs new file mode 100644 index 000000000..a100e596e --- /dev/null +++ b/vendor/num-bigint-generic/tests/consts/mod.rs @@ -0,0 +1,57 @@ +#![allow(unused)] + +pub const N1: u32 = -1i32 as u32; +pub const N2: u32 = -2i32 as u32; + +pub const SUM_TRIPLES: &[(&[u32], &[u32], &[u32])] = &[ + (&[], &[], &[]), + (&[], &[1], &[1]), + (&[1], &[1], &[2]), + (&[1], &[1, 1], &[2, 1]), + (&[1], &[N1], &[0, 1]), + (&[1], &[N1, N1], &[0, 0, 1]), + (&[N1, N1], &[N1, N1], &[N2, N1, 1]), + (&[1, 1, 1], &[N1, N1], &[0, 1, 2]), + (&[2, 2, 1], &[N1, N2], &[1, 1, 2]), + (&[1, 2, 2, 1], &[N1, N2], &[0, 1, 3, 1]), +]; + +pub const M: u32 = u32::MAX; +pub const MUL_TRIPLES: &[(&[u32], &[u32], &[u32])] = &[ + (&[], &[], &[]), + (&[], &[1], &[]), + (&[2], &[], &[]), + (&[1], &[1], &[1]), + (&[2], &[3], &[6]), + (&[1], &[1, 1, 1], &[1, 1, 1]), + (&[1, 2, 3], &[3], &[3, 6, 9]), + (&[1, 1, 1], &[N1], &[N1, N1, N1]), + (&[1, 2, 3], &[N1], &[N1, N2, N2, 2]), + (&[1, 2, 3, 4], &[N1], &[N1, N2, N2, N2, 3]), + (&[N1], &[N1], &[1, N2]), + (&[N1, N1], &[N1], &[1, N1, N2]), + (&[N1, N1, N1], &[N1], &[1, N1, N1, N2]), + (&[N1, N1, N1, N1], &[N1], &[1, N1, N1, N1, N2]), + (&[M / 2 + 1], &[2], &[0, 1]), + (&[0, M / 2 + 1], &[2], &[0, 0, 1]), + (&[1, 2], &[1, 2, 3], &[1, 4, 7, 6]), + (&[N1, N1], &[N1, N1, N1], &[1, 0, N1, N2, N1]), + (&[N1, N1, N1], &[N1, N1, N1, N1], 
&[1, 0, 0, N1, N2, N1, N1]), + (&[0, 0, 1], &[1, 2, 3], &[0, 0, 1, 2, 3]), + (&[0, 0, 1], &[0, 0, 0, 1], &[0, 0, 0, 0, 0, 1]), +]; + +type DivRemQuadruple = ( + &'static [u32], + &'static [u32], + &'static [u32], + &'static [u32], +); +pub const DIV_REM_QUADRUPLES: &[DivRemQuadruple] = &[ + (&[1], &[2], &[], &[1]), + (&[3], &[2], &[1], &[1]), + (&[1, 1], &[2], &[M / 2 + 1], &[1]), + (&[1, 1, 1], &[2], &[M / 2 + 1, M / 2 + 1], &[1]), + (&[0, 1], &[N1], &[1], &[1]), + (&[N1, N1], &[N2], &[2, 1], &[3]), +]; diff --git a/vendor/num-bigint-generic/tests/fuzzed.rs b/vendor/num-bigint-generic/tests/fuzzed.rs new file mode 100644 index 000000000..4801190fc --- /dev/null +++ b/vendor/num-bigint-generic/tests/fuzzed.rs @@ -0,0 +1,185 @@ +//! Check buggy inputs that were found by fuzzing + +type BigUint = num_bigint_generic::BigUint; +use num_traits::Num; + +#[test] +fn fuzzed_mul_1() { + let hex1 = "\ + cd6839ee857cf791a40494c2e522846eefbca9eca9912fdc1feed4561dbde75c75f1ddca2325ebb1\ + b9cd6eae07308578e58e57f4ddd7dc239b4fd347b883e37d87232a8e5d5a8690c8dba69c97fe8ac4\ + 58add18be7e460e03c9d1ae8223db53d20681a4027ffc17d1e43b764791c4db5ff7add849da7e378\ + ac8d9be0e8b517c490da3c0f944b6a52a0c5dc5217c71da8eec35d2c3110d8b041d2b52f3e2a8904\ + abcaaca517a8f2ef6cd26ceadd39a1cf9f770bc08f55f5a230cd81961348bb18534245430699de77\ + d93b805153cffd05dfd0f2cfc2332888cec9c5abf3ece9b4d7886ad94c784bf74fce12853b2a9a75\ + b62a845151a703446cc20300eafe7332330e992ae88817cd6ccef8877b66a7252300a4664d7074da\ + 181cd9fd502ea1cd71c0b02db3c009fe970a7d226382cdba5b5576c5c0341694681c7adc4ca2d059\ + d9a6b300957a2235a4eb6689b71d34dcc4037b520eabd2c8b66604bb662fe2bcf533ba8d242dbc91\ + f04c1795b9f0fee800d197d8c6e998248b15855a9602b76cb3f94b148d8f71f7d6225b79d63a8e20\ + 8ec8f0fa56a1c381b6c09bad9886056aec17fc92b9bb0f8625fd3444e40cccc2ede768ddb23c66ad\ + 59a680a26a26d519d02e4d46ce93cce9e9dd86702bdd376abae0959a0e8e418aa507a63fafb8f422\ + 83b03dc26f371c5e261a8f90f3ac9e2a6bcc7f0a39c3f73043b5aa5a950d4e945e9f68b2c2e593e3\ + 
b995be174714c1967b71f579043f89bfce37437af9388828a3ba0465c88954110cae6d38b638e094\ + 13c15c9faddd6fb63623fd50e06d00c4d5954e787158b3e4eea7e9fae8b189fa8a204b23ac2f7bbc\ + b601189c0df2075977c2424336024ba3594172bea87f0f92beb20276ce8510c8ef2a4cd5ede87e7e\ + 38b3fa49d66fbcd322be686a349c24919f4000000000000000000000000000000000000000000000\ + 000000000000000000000000000000000"; + let hex2 = "\ + 40000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000007"; + + // Result produced independently in Python + let hex_result = "\ + 335a0e7ba15f3de469012530b948a11bbbef2a7b2a644bf707fbb515876f79d71d7c777288c97aec\ + 6e735bab81cc215e396395fd3775f708e6d3f4d1ee20f8df61c8caa39756a1a43236e9a725ffa2b1\ + 
162b7462f9f918380f2746ba088f6d4f481a069009fff05f4790edd91e47136d7fdeb7612769f8de\ + 2b2366f83a2d45f124368f03e512da94a831771485f1c76a3bb0d74b0c44362c1074ad4bcf8aa241\ + 2af2ab2945ea3cbbdb349b3ab74e6873e7ddc2f023d57d688c33606584d22ec614d09150c1a6779d\ + f64ee01454f3ff4177f43cb3f08cca2233b2716afcfb3a6d35e21ab6531e12fdd3f384a14ecaa69d\ + 6d8aa1145469c0d11b3080c03abf9ccc8cc3a64aba2205f35b33be21ded9a9c948c02919935c1d36\ + 8607367f540ba8735c702c0b6cf0027fa5c29f4898e0b36e96d55db1700d05a51a071eb71328b416\ + 7669acc0255e888d693ad9a26dc74d373100ded483aaf4b22d99812ed98bf8af3d4ceea3490b6f24\ + 7c1305e56e7c3fba003465f631ba660922c56156a580addb2cfe52c52363dc7df58896de758ea388\ + 23b23c3e95a870e06db026eb6621815abb05ff24ae6ec3e1897f4d1139033330bb79da376c8f19ab\ + 5669a0289a89b546740b9351b3a4f33a7a77619c0af74ddaaeb8256683a39062a941e98febee3d08\ + a0ec0f709bcdc7178986a3e43ceb278a9af31fc28e70fdcc10ed6a96a54353a517a7da2cb0b964f8\ + ee656f85d1c530659edc7d5e410fe26ff38dd0debe4e220a28ee811972225504432b9b4e2d8e3825\ + 04f05727eb775bed8d88ff54381b40313565539e1c562cf93ba9fa7eba2c627ea28812c8eb0bdeef\ + 2d804627037c81d65df09090cd8092e8d6505cafaa1fc3e4afac809db3a144323bca93358117f935\ + 13d3695771180f461cf38bb995b531c9e072f84f04df87ce5ad0315387399d1086f60971dc149e06\ + c23253a64e46e467b210e704f93f2ec6f60b9b386eb1f629e48d79adf57e018e4827f5cb5e6cc0ba\ + d3573ea621a84bbc58efaff4abe2d8b7c117fe4a6bd3da03bf4fc61ff9fc5c0ea04f97384cb7df43\ + 265cf3a65ff5f7a46d0e0fe8426569063ea671cf9e87578c355775ecd1ccc2f44ab329bf20b28ab8\ + 83a59ea48bf9c0fa6c0c936cad5c415243eb59b76f559e8b1a86fd1daa46cfe4d52e351546f0a082\ + 394aafeb291eb6a3ae4f661bbda78467b3ab7a63f1e4baebf1174a13c32ea281a49e2a3937fb299e\ + 393b9116def94e15066cf5265f6566302c5bb8a69df9a8cbb45fce9203f5047ecc1e1331f6a8c9f5\ + ed31466c9e1c44d13fea4045f621496bf0b893a0187f563f68416c9e0ed8c75c061873b274f38ee5\ + 041656ef77826fcdc401cc72095c185f3e66b2c37cfcca211fcb4f332ab46a19dbfd4027fd9214a5\ + 
181596f85805bb26ed706328ffcd96a57a1a1303f8ebd10d8fdeec1dc6daf08054db99e2e3e77e96\ + d85e6c588bff4441bf2baa25ec74a7e803141d6cab09ec6de23c5999548153de0fdfa6cebd738d84\ + 70e70fd3b4b1441cefa60a9a65650ead11330c83eb1c24173665e3caca83358bbdce0eacf199d1b0\ + 510a81c6930ab9ecf6a9b85328f2977947945bc251d9f7a87a135d260e965bdce354470b3a131832\ + a2f1914b1d601db64f1dbcc43ea382d85cd08bb91c7a161ec87bc14c7758c4fc8cfb8e240c8a4988\ + 5dc10e0dfb7afbed3622fb0561d715254b196ceb42869765dc5cdac5d9c6e20df9b54c6228fa07ac\ + 44619e3372464fcfd67a10117770ca23369b796d0336de113fa5a3757e8a2819d9815b75738cebd8\ + 04dd0e29c5f334dae77044fffb5ac000000000000000000000000000000000000000000000000000\ + 000000000000000000000000000"; + + let bn1 = &BigUint::from_str_radix(hex1, 16).unwrap(); + let bn2 = &BigUint::from_str_radix(hex2, 16).unwrap(); + let result = BigUint::from_str_radix(hex_result, 16).unwrap(); + + assert_eq!(bn1 * bn2, result); + assert_eq!(bn2 * bn1, result); +} + +#[test] +fn fuzzed_mul_2() { + let hex_a = "\ + 812cff04ff812cff04ff8180ff80ffff11ff80ff2cff04ff812cff04ff812cff04ff81232cff047d\ + ff04ff812cff04ff812cff04ff812cff047f812cff04ff8180ff2cff04ff04ff8180ff2cff04ff04\ + ff812cbf04ff8180ff2cff04ff812cff0401010000000000000000ffff1a80ffc006c70084ffff80\ + ffc0064006000084ffff72ffc020ffffffffffff06d709000000dbffffffc799999999b999999999\ + 99999999000084ffff72ffc02006e1ffffffc70900ffffff00f312ff80ebffffff6f505f6c2e6712\ + 108970ffff5f6c6f6727020000000000007400000000000000000000a50000000000000000000000\ + 000000000000000000000000ff812cff04ff812cff2c04ff812cff8180ff2cff04ff04ff818b8b8b\ + 8b8b8b8b8b8b8b8b8b8b8b8b8b06c70084ffff80ffc006c700847fff80ffc006c700ffff12c70084\ + ffff80ffc0060000000000000056ff00c789bfff80ffc006c70084ffff80ffc006c700ffff840100\ + 00000000001289ffc08b8b8b8b8b8b8b2c"; + let hex_b = "\ + 7ed300fb007ed300fb007e7f00db00fb007ed3007ed300fb007edcd300fb8200fb007ed300fb007e\ + d300fb007ed300fb007ed300fbfeffffffffffffa8fb007e7f00d300fb00fb007ed340fb007e7f00\ + 
00fb007ed300fb007ed300fb007e7f00d300fb00fb007e7f00d300fb007efb007e7f00d300fb007e\ + d300fb007e7f0097d300fb00bf007ed300fb007ed300fb00fb00fb00fbffffffffffffffffffff00\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 000000000000df9b3900ff908fa08d9e968c9a0000e7fffb7fff0000003fd9004c90d8f600de7f00\ + 3fdf9b3900ff908fa08d9e968cf9b9ff0000ed38ff7b00007f003ff9ffffffffffffffa900ff3876\ + 000078003ff938ff7b00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d300\ + fb00fb007e7f00d300fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb017e\ + d300fb007ed300fb007edcd300fb8200fb007e0000e580"; + let hex_c = "\ + 7b00387ffff938ff7b80007f003ff9b9ff00fdec38ff7b00007f003ff9ffffffffffffffa900ff38\ + 76000078003ff938ff7b00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d3\ + 00fb00fb007e7f00d300fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb01\ + 7ed300fb007ed300fb007edcd300fb8200fb007e000000ee7f003f0000007b00387ffff938ff7b80\ + 007f003ff9b9ff00fdec38ff7b00007f003ff9ffffffffffffffa900ff3876000078003ff938ff7b\ + 00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d300fb00fb007e7f00d300\ + fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb017ed300fb007ed300fb00\ + 7edcd300fb8200fb007e000000ee7f003f000000000000000000000000000000002a000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000df9b3900ff908fa08d9e968c9a0000e7fffb7fff0000003fd9004c90d8\ + f600de7f003fdf9b3900ff908fa08d9e968c9a0000e7fffa7fff0000004005004c90d8f600de908f\ + dcd300fb8200fb007e0000e57f003ff938ff7b00007f003d7ed300fb007ed300fb007ed300fb007e\ + 
fa00fb007ed300fbf9ffffffffffffffa900ff387600007f003ff938ff7b00007f003ff938fd0000\ + 7bfeffffffffffffed76003f74747474747474d300fc"; + + // Result produced independently in Python + let hex_result = "\ + 1ebf6415da7ac71a689cd450727b7a361402a1626e0b6cd057e0e2a77d4cb722c1b7d0cbd73a7c07\ + d756813fe97d73d5905c4a26404c7162769ba2dbc1e2742855a1db803e2d2c2fddc77c0598cc70fe\ + 066fd4b81cae3e23c55b4795de63acacd1343cf5ad5e715e6919d140c01bab1af1a737ebbf8a7775\ + 7602acd611f555ee2d5be56cc14b97c248009cd77490a3dfd6762bae25459a544e369eb4b0cc952a\ + 8e6a551ff35a4a7a6e5f5b0b72495c4baadf3a26b9d5d97402ad60fa2324e93adc96ca159b62d147\ + 5695f26ff27da100a76e2d273420572e61b4dfbd97e826d9d946f85b87434523f6aa7ce43c443285\ + 33f5b5adf32574167b1e9ea3bf6254d6afacf865894907de196285169cfcc1c0fcf438873d13f7e8\ + 654acc27c1abb00bec2729e34c994ff2152f60406f75db3ab616541795d9db8ca0b381148de7875f\ + e7a8191407abc390718003698ca28498948caf1dbc3f02593dd85fa929ebae86cfe783d7be473e98\ + 0060d9ec60843661cb4cb9b8ddb24bb710f93700b22530501b5ea26c5c94c7370fe0ccbafe0ce7e4\ + cd4f071d0cf0ac151c85a5b132ecaa75793abfb4a6ee33fddd2aa2f5cf2a8eb19c75322792c0d8dc\ + 1efb2dcd8ae2b49dd57b84898f531c7f745464f637716151831db56b3e293f587dc95a5e12edfe6b\ + 8458033dddf3556da55bef55ba3c3769def0c0f0c86786aca8313dc0ce09118760721eb545d69b46\ + cdb89d377f2c80e67b572da0f75760c2849288a8457c18c6f0b58244b7f95a7567ce23756f1fe359\ + 64f7e84fbe28b188157519dd99b8798b076e21984d15c37f41da1309e0fbc539e8b9b09fed36a908\ + 28c94f72e7b755c187e58db6bfef0c02309086626ad0fe2efd2ff1467b3de11e057687865f4f85e7\ + 0a39bcbc4674dcaded9b04562afe08eb92fbd96ea4a99aa4f9347a075d4421f070ce3a33225f5af1\ + 9c27ec5d1720e659ca7fff9686f46b01d76d7de64c738671aaec57ee5582ef7956206fb37c6a36f8\ + 8f226ce2124a7f9894a0e9a7aa02001746e6def35699d7adc84a7dcf513ff3da20fd849950f41a5d\ + bb02c91666697156d69ebbe2ef26732b6595d1b6d014a60006d2d3c7055ff9b531779195b8dcd7d9\ + 426e776cbc9041735384568ba4adbf7eeea7e0e6cbb47b70335a7ed12a68904eecd334921e4ae6d9\ + 
c983af20d73215c39573963f03bc87082450cc1c70250e1e8eaa318acaf044a072891fc60324d134\ + 6c0a1d02cceb4d4806e536d6017bf6bc125c41694ded38766fea51bfbf7a008ca0b3eb1168766486\ + 8aa8469b3e6787a5d5bad6cd67c24005a5cbaa10b63d1b4d05ac42a8b31263052a1260b5900be628\ + 4dcab4eb0cf5cda815412ced7bd78f87c00ac3581f41a04352a4a186805a5c9e37b14561a5fc97d2\ + 52ca4654fe3d82f42080c21483789cc4b4cbb568f79844f7a317aa2a6555774da26c6f027d3cb0ee\ + 9276c6dc4f285fc3b4b9a3cd51c8815cebf110e73c80a9b842cc3b7c80af13f702662b10e868eb61\ + 947000b390cd2f3a0899f6f1bab86acf767062f5526507790645ae13b9701ba96b3f873047c9d3b8\ + 5e8a5d904a01fbfe10e63495b6021e7cc082aa66679e4d92b3e4e2d62490b44f7e250584cedff0e7\ + 072a870ddaa9687a1eae11afc874d83065fb98dbc3cfd90f39517ff3015c71a8c0ab36a6483c7b87\ + f41b2c832fa9428fe95ffba4e49cc553d9e2d33a540958da51588e5120fef6497bfaa96a4dcfc024\ + 8170c57f78e9ab9546efbbaf8e9ad6a993493577edd3d29ce8fd9a2e9eb4363b5b472a4ecb2065eb\ + 38f876a841af1f227a703248955c8978329dffcd8e065d8da4d42504796ff7abc62832ed86c4f8d0\ + 0f55cd567fb9d42524be57ebdacef730c3f94c0372f86fa1b0114f8620f553e4329b2a586fcfeedc\ + af47934909090e14a1f1204e6f1681fb2df05356381e6340f4feaf0787e06218b0b0d8df51acb0bc\ + f98546f33273adf260da959d6fc4a04872122af6508d124abb963c14c30e7c07fee368324921fe33\ + 9ae89490c5d6cdae0c356bb6921de95ea13b54e23800"; + + let a = &BigUint::from_str_radix(hex_a, 16).unwrap(); + let b = &BigUint::from_str_radix(hex_b, 16).unwrap(); + let c = &BigUint::from_str_radix(hex_c, 16).unwrap(); + let result = BigUint::from_str_radix(hex_result, 16).unwrap(); + + assert_eq!(a * b * c, result); + assert_eq!(a * c * b, result); + assert_eq!(b * a * c, result); + assert_eq!(b * c * a, result); + assert_eq!(c * a * b, result); + assert_eq!(c * b * a, result); +} diff --git a/vendor/num-bigint-generic/tests/macros/mod.rs b/vendor/num-bigint-generic/tests/macros/mod.rs new file mode 100644 index 000000000..b14cd577d --- /dev/null +++ b/vendor/num-bigint-generic/tests/macros/mod.rs @@ -0,0 +1,78 @@ 
+#![allow(unused)] + +/// Assert that an op works for all val/ref combinations +macro_rules! assert_op { + ($left:ident $op:tt $right:ident == $expected:expr) => { + assert_eq!((&$left) $op (&$right), $expected); + assert_eq!((&$left) $op $right.clone(), $expected); + assert_eq!($left.clone() $op (&$right), $expected); + assert_eq!($left.clone() $op $right.clone(), $expected); + }; +} + +/// Assert that an assign-op works for all val/ref combinations +macro_rules! assert_assign_op { + ($left:ident $op:tt $right:ident == $expected:expr) => {{ + let mut left = $left.clone(); + assert_eq!({ left $op &$right; left}, $expected); + + let mut left = $left.clone(); + assert_eq!({ left $op $right.clone(); left}, $expected); + }}; +} + +/// Assert that an op works for scalar left or right +macro_rules! assert_scalar_op { + (($($to:ident),*) $left:ident $op:tt $right:ident == $expected:expr) => { + $( + if let Some(left) = $left.$to() { + assert_op!(left $op $right == $expected); + } + if let Some(right) = $right.$to() { + assert_op!($left $op right == $expected); + } + )* + }; +} + +macro_rules! assert_unsigned_scalar_op { + ($left:ident $op:tt $right:ident == $expected:expr) => { + assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128) + $left $op $right == $expected); + }; +} + +macro_rules! assert_signed_scalar_op { + ($left:ident $op:tt $right:ident == $expected:expr) => { + assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128, + to_i8, to_i16, to_i32, to_i64, to_isize, to_i128) + $left $op $right == $expected); + }; +} + +/// Assert that an op works for scalar right +macro_rules! assert_scalar_assign_op { + (($($to:ident),*) $left:ident $op:tt $right:ident == $expected:expr) => { + $( + if let Some(right) = $right.$to() { + let mut left = $left.clone(); + assert_eq!({ left $op right; left}, $expected); + } + )* + }; +} + +macro_rules! 
assert_unsigned_scalar_assign_op { + ($left:ident $op:tt $right:ident == $expected:expr) => { + assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128) + $left $op $right == $expected); + }; +} + +macro_rules! assert_signed_scalar_assign_op { + ($left:ident $op:tt $right:ident == $expected:expr) => { + assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128, + to_i8, to_i16, to_i32, to_i64, to_isize, to_i128) + $left $op $right == $expected); + }; +} diff --git a/vendor/num-bigint-generic/tests/modpow.rs b/vendor/num-bigint-generic/tests/modpow.rs new file mode 100644 index 000000000..ca2ee23ed --- /dev/null +++ b/vendor/num-bigint-generic/tests/modpow.rs @@ -0,0 +1,181 @@ +static BIG_B: &str = "\ + efac3c0a_0de55551_fee0bfe4_67fa017a_1a898fa1_6ca57cb1\ + ca9e3248_cacc09a9_b99d6abc_38418d0f_82ae4238_d9a68832\ + aadec7c1_ac5fed48_7a56a71b_67ac59d5_afb28022_20d9592d\ + 247c4efc_abbd9b75_586088ee_1dc00dc4_232a8e15_6e8191dd\ + 675b6ae0_c80f5164_752940bc_284b7cee_885c1e10_e495345b\ + 8fbe9cfd_e5233fe1_19459d0b_d64be53c_27de5a02_a829976b\ + 33096862_82dad291_bd38b6a9_be396646_ddaf8039_a2573c39\ + 1b14e8bc_2cb53e48_298c047e_d9879e9c_5a521076_f0e27df3\ + 990e1659_d3d8205b_6443ebc0_9918ebee_6764f668_9f2b2be3\ + b59cbc76_d76d0dfc_d737c3ec_0ccf9c00_ad0554bf_17e776ad\ + b4edf9cc_6ce540be_76229093_5c53893b"; + +static BIG_E: &str = "\ + be0e6ea6_08746133_e0fbc1bf_82dba91e_e2b56231_a81888d2\ + a833a1fc_f7ff002a_3c486a13_4f420bf3_a5435be9_1a5c8391\ + 774d6e6c_085d8357_b0c97d4d_2bb33f7c_34c68059_f78d2541\ + eacc8832_426f1816_d3be001e_b69f9242_51c7708e_e10efe98\ + 449c9a4a_b55a0f23_9d797410_515da00d_3ea07970_4478a2ca\ + c3d5043c_bd9be1b4_6dce479d_4302d344_84a939e6_0ab5ada7\ + 12ae34b2_30cc473c_9f8ee69d_2cac5970_29f5bf18_bc8203e4\ + f3e895a2_13c94f1e_24c73d77_e517e801_53661fdd_a2ce9e47\ + a73dd7f8_2f2adb1e_3f136bf7_8ae5f3b8_08730de1_a4eff678\ + e77a06d0_19a522eb_cbefba2a_9caf7736_b157c5c6_2d192591\ + 17946850_2ddb1822_117b68a0_32f7db88"; + +// 
This modulus is the prime from the 2048-bit MODP DH group: +// https://tools.ietf.org/html/rfc3526#section-3 +static BIG_M: &str = "\ + FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\ + 29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\ + EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\ + E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\ + EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\ + C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\ + 83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\ + 670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\ + E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\ + DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\ + 15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF"; + +static BIG_R: &str = "\ + a1468311_6e56edc9_7a98228b_5e924776_0dd7836e_caabac13\ + eda5373b_4752aa65_a1454850_40dc770e_30aa8675_6be7d3a8\ + 9d3085e4_da5155cf_b451ef62_54d0da61_cf2b2c87_f495e096\ + 055309f7_77802bbb_37271ba8_1313f1b5_075c75d1_024b6c77\ + fdb56f17_b05bce61_e527ebfd_2ee86860_e9907066_edd526e7\ + 93d289bf_6726b293_41b0de24_eff82424_8dfd374b_4ec59542\ + 35ced2b2_6b195c90_10042ffb_8f58ce21_bc10ec42_64fda779\ + d352d234_3d4eaea6_a86111ad_a37e9555_43ca78ce_2885bed7\ + 5a30d182_f1cf6834_dc5b6e27_1a41ac34_a2e91e11_33363ff0\ + f88a7b04_900227c9_f6e6d06b_7856b4bb_4e354d61_060db6c8\ + 109c4735_6e7db425_7b5d74c7_0b709508"; + +mod biguint { + type BigUint = num_bigint_generic::BigUint; + use num_integer::Integer; + use num_traits::Num; + + fn check_modpow<T: Into<BigUint>>(b: T, e: T, m: T, r: T) { + let b: BigUint = b.into(); + let e: BigUint = e.into(); + let m: BigUint = m.into(); + let r: BigUint = r.into(); + + assert_eq!(b.modpow(&e, &m), r); + + let even_m = &m << 1; + let even_modpow = b.modpow(&e, &even_m); + assert!(even_modpow < even_m); + assert_eq!(even_modpow.mod_floor(&m), r); + } + + #[test] + fn test_modpow_single() { + check_modpow::<u32>(1, 0, 11, 1); + check_modpow::<u32>(0, 15, 11, 0); + check_modpow::<u32>(3, 7, 11, 9); + check_modpow::<u32>(5, 
117, 19, 1); + check_modpow::<u32>(20, 1, 2, 0); + check_modpow::<u32>(20, 1, 3, 2); + } + + #[test] + fn test_modpow_small() { + for b in 0u64..11 { + for e in 0u64..11 { + for m in 1..11 { + check_modpow::<u64>(b, e, m, b.pow(e as u32) % m); + } + } + } + } + + #[test] + fn test_modpow_big() { + let b: BigUint = BigUint::from_str_radix(super::BIG_B, 16).unwrap(); + let e: BigUint = BigUint::from_str_radix(super::BIG_E, 16).unwrap(); + let m: BigUint = BigUint::from_str_radix(super::BIG_M, 16).unwrap(); + let r: BigUint = BigUint::from_str_radix(super::BIG_R, 16).unwrap(); + + assert_eq!(b.modpow(&e, &m), r); + + let even_m = &m << 1; + let even_modpow = b.modpow(&e, &even_m); + assert!(even_modpow < even_m); + assert_eq!(even_modpow % m, r); + } +} + +mod bigint { + type BigInt = num_bigint_generic::BigInt; + use num_integer::Integer; + use num_traits::{Num, One, Signed}; + + fn check_modpow<T: Into<BigInt>>(b: T, e: T, m: T, r: T) { + fn check(b: &BigInt, e: &BigInt, m: &BigInt, r: &BigInt) { + assert_eq!(&b.modpow(e, m), r, "{} ** {} (mod {}) != {}", b, e, m, r); + + let even_m = m << 1u8; + let even_modpow = b.modpow(e, m); + assert!(even_modpow.abs() < even_m.abs()); + assert_eq!(&even_modpow.mod_floor(m), r); + + // the sign of the result follows the modulus like `mod_floor`, not `rem` + assert_eq!(b.modpow(&BigInt::one(), m), b.mod_floor(m)); + } + + let b: BigInt = b.into(); + let e: BigInt = e.into(); + let m: BigInt = m.into(); + let r: BigInt = r.into(); + + let neg_b_r = if e.is_odd() { + (-&r).mod_floor(&m) + } else { + r.clone() + }; + let neg_m_r = r.mod_floor(&-&m); + let neg_bm_r = neg_b_r.mod_floor(&-&m); + + check(&b, &e, &m, &r); + check(&-&b, &e, &m, &neg_b_r); + check(&b, &e, &-&m, &neg_m_r); + check(&-b, &e, &-&m, &neg_bm_r); + } + + #[test] + fn test_modpow() { + check_modpow(1, 0, 11, 1); + check_modpow(0, 15, 11, 0); + check_modpow(3, 7, 11, 9); + check_modpow(5, 117, 19, 1); + check_modpow(-20, 1, 2, 0); + check_modpow(-20, 1, 3, 1); + } + + #[test] + fn 
test_modpow_small() { + for b in -10i64..11 { + for e in 0i64..11 { + for m in -10..11 { + if m == 0 { + continue; + } + check_modpow(b, e, m, b.pow(e as u32).mod_floor(&m)); + } + } + } + } + + #[test] + fn test_modpow_big() { + let b = BigInt::from_str_radix(super::BIG_B, 16).unwrap(); + let e = BigInt::from_str_radix(super::BIG_E, 16).unwrap(); + let m = BigInt::from_str_radix(super::BIG_M, 16).unwrap(); + let r = BigInt::from_str_radix(super::BIG_R, 16).unwrap(); + + check_modpow(b, e, m, r); + } +} diff --git a/vendor/num-bigint-generic/tests/roots.rs b/vendor/num-bigint-generic/tests/roots.rs new file mode 100644 index 000000000..e5aa73a08 --- /dev/null +++ b/vendor/num-bigint-generic/tests/roots.rs @@ -0,0 +1,159 @@ +mod biguint { + type BigUint = num_bigint_generic::BigUint; + use num_traits::{One, Zero}; + + fn check<T: Into<BigUint>>(x: T, n: u32) { + let x: BigUint = x.into(); + let root = x.nth_root(n); + println!("check {}.nth_root({}) = {}", x, n, root); + + if n == 2 { + assert_eq!(root, x.sqrt()) + } else if n == 3 { + assert_eq!(root, x.cbrt()) + } + + let lo = root.pow(n); + assert!(lo <= x); + assert_eq!(lo.nth_root(n), root); + if !lo.is_zero() { + assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32); + } + + let hi = (&root + 1u32).pow(n); + assert!(hi > x); + assert_eq!(hi.nth_root(n), &root + 1u32); + assert_eq!((&hi - 1u32).nth_root(n), root); + } + + #[test] + fn test_sqrt() { + check(99u32, 2); + check(100u32, 2); + check(120u32, 2); + } + + #[test] + fn test_cbrt() { + check(8u32, 3); + check(26u32, 3); + } + + #[test] + fn test_nth_root() { + check(0u32, 1); + check(10u32, 1); + check(100u32, 4); + } + + #[test] + #[should_panic] + fn test_nth_root_n_is_zero() { + check(4u32, 0); + } + + #[test] + fn test_nth_root_big() { + let x = BigUint::from(123_456_789_u32); + let expected = BigUint::from(6u32); + + assert_eq!(x.nth_root(10), expected); + check(x, 10); + } + + #[test] + fn test_nth_root_googol() { + let googol = BigUint::from(10u32).pow(100u32); + 
// perfect divisors of 100 + for &n in &[2, 4, 5, 10, 20, 25, 50, 100] { + let expected = BigUint::from(10u32).pow(100u32 / n); + assert_eq!(googol.nth_root(n), expected); + check(googol.clone(), n); + } + } + + #[test] + fn test_nth_root_twos() { + const EXP: u32 = 12; + const LOG2: usize = 1 << EXP; + let x = BigUint::one() << LOG2; + + // the perfect divisors are just powers of two + for exp in 1..=EXP { + let n = 2u32.pow(exp); + let expected = BigUint::one() << (LOG2 / n as usize); + assert_eq!(x.nth_root(n), expected); + check(x.clone(), n); + } + + // degenerate cases should return quickly + assert!(x.nth_root(x.bits() as u32).is_one()); + assert!(x.nth_root(i32::MAX as u32).is_one()); + assert!(x.nth_root(u32::MAX).is_one()); + } + + #[test] + fn test_roots_rand1() { + // A random input that found regressions + let s = "575981506858479247661989091587544744717244516135539456183849\ + 986593934723426343633698413178771587697273822147578889823552\ + 182702908597782734558103025298880194023243541613924361007059\ + 353344183590348785832467726433749431093350684849462759540710\ + 026019022227591412417064179299354183441181373862905039254106\ + 4781867"; + let x: BigUint = s.parse().unwrap(); + + check(x.clone(), 2); + check(x.clone(), 3); + check(x.clone(), 10); + check(x, 100); + } +} + +mod bigint { + type BigInt = num_bigint_generic::BigInt; + use num_traits::Signed; + + fn check(x: i64, n: u32) { + let big_x = BigInt::from(x); + let res = big_x.nth_root(n); + + if n == 2 { + assert_eq!(&res, &big_x.sqrt()) + } else if n == 3 { + assert_eq!(&res, &big_x.cbrt()) + } + + if big_x.is_negative() { + assert!(res.pow(n) >= big_x); + assert!((res - 1u32).pow(n) < big_x); + } else { + assert!(res.pow(n) <= big_x); + assert!((res + 1u32).pow(n) > big_x); + } + } + + #[test] + fn test_nth_root() { + check(-100, 3); + } + + #[test] + #[should_panic] + fn test_nth_root_x_neg_n_even() { + check(-100, 4); + } + + #[test] + #[should_panic] + fn test_sqrt_x_neg() { + check(-4, 
2); + } + + #[test] + fn test_cbrt() { + check(8, 3); + check(-8, 3); + } +} diff --git a/vendor/num-rational-generic/.github/workflows/ci.yaml b/vendor/num-rational-generic/.github/workflows/ci.yaml new file mode 100644 index 000000000..96aabaee2 --- /dev/null +++ b/vendor/num-rational-generic/.github/workflows/ci.yaml @@ -0,0 +1,64 @@ +name: CI +on: merge_group + +jobs: + + test: + name: Test + runs-on: ubuntu-latest + strategy: + matrix: + rust: [ + 1.60.0, # MSRV + stable, + beta, + nightly + ] + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + if: startsWith(matrix.rust, '1') + with: + path: ~/.cargo/registry/index + key: cargo-${{ matrix.rust }}-git-index + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - run: cargo build + - run: ./ci/test_full.sh + + # try a target that doesn't have std at all, but does have alloc + no_std: + name: No Std (stable) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: thumbv6m-none-eabi + - run: cargo build --target thumbv6m-none-eabi --no-default-features --features "num-bigint serde" + + fmt: + name: Format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@1.62.0 + with: + components: rustfmt + - run: cargo fmt --all --check + + # One job that "summarizes" the success state of this pipeline. This can then be added to branch + # protection, rather than having to add each job separately. + success: + name: Success + runs-on: ubuntu-latest + needs: [test, no_std, fmt] + # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency + # failed" as success. So we have to do some contortions to ensure the job fails if any of its + # dependencies fails. + if: always() # make sure this is never "skipped" + steps: + # Manually check the status of all dependencies. `if: failure()` does not work. 
+ - name: check if any dependency failed + run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/vendor/num-rational-generic/.github/workflows/master.yaml b/vendor/num-rational-generic/.github/workflows/master.yaml new file mode 100644 index 000000000..034e37936 --- /dev/null +++ b/vendor/num-rational-generic/.github/workflows/master.yaml @@ -0,0 +1,28 @@ +name: master +on: + push: + branches: + - master + schedule: + - cron: '0 0 * * 0' # 00:00 Sunday + +jobs: + + test: + name: Test + runs-on: ubuntu-latest + strategy: + matrix: + rust: [1.60.0, stable] + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + if: startsWith(matrix.rust, '1') + with: + path: ~/.cargo/registry/index + key: cargo-${{ matrix.rust }}-git-index + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - run: cargo build + - run: ./ci/test_full.sh diff --git a/vendor/num-rational-generic/.github/workflows/pr.yaml b/vendor/num-rational-generic/.github/workflows/pr.yaml new file mode 100644 index 000000000..baac64847 --- /dev/null +++ b/vendor/num-rational-generic/.github/workflows/pr.yaml @@ -0,0 +1,49 @@ +name: PR +on: + pull_request: + +jobs: + + test: + name: Test + runs-on: ubuntu-latest + strategy: + matrix: + rust: [1.60.0, stable] + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + if: startsWith(matrix.rust, '1') + with: + path: ~/.cargo/registry/index + key: cargo-${{ matrix.rust }}-git-index + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - run: cargo build + - run: ./ci/test_full.sh + + fmt: + name: Format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@1.62.0 + with: + components: rustfmt + - run: cargo fmt --all --check + + # One job that "summarizes" the success state of this pipeline. This can then be added to branch + # protection, rather than having to add each job separately. 
+ success: + name: Success + runs-on: ubuntu-latest + needs: [test, fmt] + # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency + # failed" as success. So we have to do some contortions to ensure the job fails if any of its + # dependencies fails. + if: always() # make sure this is never "skipped" + steps: + # Manually check the status of all dependencies. `if: failure()` does not work. + - name: check if any dependency failed + run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/vendor/num-rational-generic/.gitignore b/vendor/num-rational-generic/.gitignore new file mode 100644 index 000000000..fa8d85ac5 --- /dev/null +++ b/vendor/num-rational-generic/.gitignore @@ -0,0 +1,2 @@ +Cargo.lock +target diff --git a/vendor/num-rational-generic/Cargo.toml b/vendor/num-rational-generic/Cargo.toml new file mode 100644 index 000000000..0d6492668 --- /dev/null +++ b/vendor/num-rational-generic/Cargo.toml @@ -0,0 +1,48 @@ +[package] +authors = ["The Rust Project Developers"] +description = "Rational numbers implementation for Rust" +documentation = "https://docs.rs/num-rational" +homepage = "https://github.com/rust-num/num-rational" +keywords = ["mathematics", "numerics", "fractions"] +categories = ["algorithms", "data-structures", "science", "no-std"] +license = "MIT OR Apache-2.0" +name = "num-rational-generic" +repository = "https://github.com/rust-num/num-rational" +version = "0.4.2" +readme = "README.md" +exclude = ["/ci/*", "/.github/*"] +edition = "2021" +rust-version = "1.60" + +[package.metadata.docs.rs] +features = ["std", "num-bigint-std", "serde"] + +[dependencies] + +[dependencies.num-bigint-generic] +default-features = false +optional = true +path = "../num-bigint-generic" +version = "0.4.0" + +[dependencies.num-integer] +default-features = false +features = ["i128"] +version = "0.1.42" + +[dependencies.num-traits] +default-features = false +features = ["i128"] +version = "0.2.18" + 
+[dependencies.serde] +default-features = false +optional = true +version = "1.0.0" + +[features] +default = ["num-bigint-generic", "std"] +std = ["num-bigint-generic?/std", "num-integer/std", "num-traits/std"] +num-bigint-generic-std = ["num-bigint-generic/std"] +num-bigint-generic = ["dep:num-bigint-generic"] +serde = ["dep:serde"] diff --git a/vendor/num-rational-generic/LICENSE-APACHE b/vendor/num-rational-generic/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/vendor/num-rational-generic/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/num-rational-generic/LICENSE-MIT b/vendor/num-rational-generic/LICENSE-MIT new file mode 100644 index 000000000..39d4bdb5a --- /dev/null +++ b/vendor/num-rational-generic/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/num-rational-generic/README.md b/vendor/num-rational-generic/README.md new file mode 100644 index 000000000..c1fbc6f3a --- /dev/null +++ b/vendor/num-rational-generic/README.md @@ -0,0 +1,51 @@ +# num-rational + +[![crate](https://img.shields.io/crates/v/num-rational.svg)](https://crates.io/crates/num-rational) +[![documentation](https://docs.rs/num-rational/badge.svg)](https://docs.rs/num-rational) +[![minimum rustc 1.60](https://img.shields.io/badge/rustc-1.60+-red.svg)](https://rust-lang.github.io/rfcs/2495-min-rust-version.html) +[![build status](https://github.com/rust-num/num-rational/workflows/master/badge.svg)](https://github.com/rust-num/num-rational/actions) + +Generic `Rational` numbers (aka fractions) for Rust. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +num-rational = "0.4" +``` + +## Features + +This crate can be used without the standard library (`#![no_std]`) by disabling +the default `std` feature. Use this in `Cargo.toml`: + +```toml +[dependencies.num-rational] +version = "0.4" +default-features = false +``` + +## Releases + +Release notes are available in [RELEASES.md](RELEASES.md). + +## Compatibility + +The `num-rational` crate is tested for rustc 1.60 and greater. + +## License + +Licensed under either of + +- [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) +- [MIT license](http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
diff --git a/vendor/num-rational-generic/RELEASES.md b/vendor/num-rational-generic/RELEASES.md new file mode 100644 index 000000000..128f70a88 --- /dev/null +++ b/vendor/num-rational-generic/RELEASES.md @@ -0,0 +1,177 @@ +# Release 0.4.2 (2024-05-07) + +- [Upgrade to 2021 edition, **MSRV 1.60**][126] +- [Add `Ratio::approximate_float_unsigned` to convert `FloatCore` types to + unsigned][109] +- [Add `const ZERO` and `ONE`, and implement `num_traits::ConstZero` and + `ConstOne`][128] +- [Add `Ratio::into_raw` to deconstruct the numerator and denominator][129] + +**Contributors**: @cuviper, @Enyium, @flavioroth, @waywardmonkeys + +[109]: https://github.com/rust-num/num-rational/pull/109 +[126]: https://github.com/rust-num/num-rational/pull/126 +[128]: https://github.com/rust-num/num-rational/pull/128 +[129]: https://github.com/rust-num/num-rational/pull/129 + +# Release 0.4.1 (2022-06-23) + +- [Fewer `clone` calls are used when reducing a new `Ratio`][98]. +- [Conversions to floating point are better at avoiding underflow][104]. +- [`Ratio` now implements `Default`][105], returning a zero value. + +**Contributors**: @cuviper, @lemmih, @MattX + +[98]: https://github.com/rust-num/num-rational/pull/98 +[104]: https://github.com/rust-num/num-rational/pull/104 +[105]: https://github.com/rust-num/num-rational/pull/105 + +# Release 0.4.0 (2021-03-05) + +- The optional `num-bigint` dependency is now 0.4. +- [The `Rational` alias for `Ratio` is now deprecated][92]. It is + recommended to use specific type sizes for numeric computation, like + `Rational32` and `Rational64`. + +**Contributors**: @cuviper, @vks + +[92]: https://github.com/rust-num/num-rational/pull/92 + +# Release 0.3.2 (2020-11-06) + +- [Fix always rebuilding with --remap-path-prefix][88] + +**Contributors**: @Nemo157 + +[88]: https://github.com/rust-num/num-rational/pull/88 + +# Release 0.3.1 (2020-10-29) + +- [Handle to_f64() with raw division by zero][83]. +- [Better document panic behaviour][84]. 
+- Clarify the license specification as "MIT OR Apache-2.0". + +**Contributors**: @cuviper, @zetok + +[83]: https://github.com/rust-num/num-rational/pull/83 +[84]: https://github.com/rust-num/num-rational/pull/84 + +# Release 0.3.0 (2020-06-13) + +### Enhancements + +- [`Ratio` now implements `ToPrimitive`][52]. +- [`Ratio` now implements additional formatting traits][56]: + - `Binary`, `Octal`, `LowerHex`, `UpperHex`, `LowerExp`, `UpperExp` +- [The `Pow` implementations have been expanded][70]. + - `Pow` and `Pow` are now implemented. + - `Pow<_> for &Ratio` now uses `&T: Pow`. + - The inherent `pow` method now uses `&T: Pow`. + +### Breaking Changes + +- [`num-rational` now requires Rust 1.31 or greater][66]. + - The "i128" opt-in feature was removed, now always available. +- [The "num-bigint-std" feature replaces "bigint" with `std` enabled][80]. + - The "num-bigint" feature without `std` uses `alloc` on Rust 1.36+. + +**Contributors**: @cuviper, @MattX, @maxbla + +[52]: https://github.com/rust-num/num-rational/pull/52 +[56]: https://github.com/rust-num/num-rational/pull/56 +[66]: https://github.com/rust-num/num-rational/pull/66 +[70]: https://github.com/rust-num/num-rational/pull/70 +[80]: https://github.com/rust-num/num-rational/pull/80 + +# Release 0.2.4 (2020-03-17) + +- [Fixed `CheckedDiv` when both dividend and divisor are 0][74]. +- [Fixed `CheckedDiv` with `min_value()` numerators][76]. + +[74]: https://github.com/rust-num/num-rational/pull/74 +[76]: https://github.com/rust-num/num-rational/pull/76 + +# Release 0.2.3 (2020-01-09) + +- [`Ratio` now performs earlier reductions to avoid overflow with `+-*/%` + operators][42]. +- [`Ratio::{new_raw, numer, denom}` are now `const fn` for Rust 1.31 and + later][48]. +- [Updated the `autocfg` build dependency to 1.0][63]. 
+ +**Contributors**: @cuviper, @dingelish, @jimbo1qaz, @maxbla + +[42]: https://github.com/rust-num/num-rational/pull/42 +[48]: https://github.com/rust-num/num-rational/pull/48 +[63]: https://github.com/rust-num/num-rational/pull/63 + +# Release 0.2.2 (2019-06-10) + +- [`Ratio` now implements `Zero::set_zero` and `One::set_one`][47]. + +**Contributors**: @cuviper, @ignatenkobrain, @vks + +[47]: https://github.com/rust-num/num-rational/pull/47 + +# Release 0.2.1 (2018-06-22) + +- Maintenance release to fix `html_root_url`. + +# Release 0.2.0 (2018-06-19) + +### Enhancements + +- [`Ratio` now implements `One::is_one` and the `Inv` trait][19]. +- [`Ratio` now implements `Sum` and `Product`][25]. +- [`Ratio` now supports `i128` and `u128` components][29] with Rust 1.26+. +- [`Ratio` now implements the `Pow` trait][21]. + +### Breaking Changes + +- [`num-rational` now requires rustc 1.15 or greater][18]. +- [There is now a `std` feature][23], enabled by default, along with the + implication that building _without_ this feature makes this a `#![no_std]` + crate. A few methods now require `FloatCore` instead of `Float`. +- [The `serde` dependency has been updated to 1.0][24], and `rustc-serialize` is + no longer supported by `num-rational`. +- The optional `num-bigint` dependency has been updated to 0.2, and should be + enabled using the `bigint-std` feature. In the future, it may be possible to + use the `bigint` feature with `no_std`. 
+ +**Contributors**: @clarcharr, @cuviper, @Emerentius, @robomancer-or, @vks + +[18]: https://github.com/rust-num/num-rational/pull/18 +[19]: https://github.com/rust-num/num-rational/pull/19 +[21]: https://github.com/rust-num/num-rational/pull/21 +[23]: https://github.com/rust-num/num-rational/pull/23 +[24]: https://github.com/rust-num/num-rational/pull/24 +[25]: https://github.com/rust-num/num-rational/pull/25 +[29]: https://github.com/rust-num/num-rational/pull/29 + +# Release 0.1.42 (2018-02-08) + +- Maintenance release to update dependencies. + +# Release 0.1.41 (2018-01-26) + +- [num-rational now has its own source repository][num-356] at + [rust-num/num-rational][home]. +- [`Ratio` now implements `CheckedAdd`, `CheckedSub`, `CheckedMul`, and + `CheckedDiv`][11]. +- [`Ratio` now implements `AddAssign`, `SubAssign`, `MulAssign`, `DivAssign`, + and `RemAssign`][12] with either `Ratio` or an integer on the right side. The + non-assignment operators now also accept integers as an operand. +- [`Ratio` operators now make fewer `clone()` calls][14]. + +Thanks to @c410-f3r, @cuviper, and @psimonyi for their contributions! + +[home]: https://github.com/rust-num/num-rational +[num-356]: https://github.com/rust-num/num/pull/356 +[11]: https://github.com/rust-num/num-rational/pull/11 +[12]: https://github.com/rust-num/num-rational/pull/12 +[14]: https://github.com/rust-num/num-rational/pull/14 + +# Prior releases + +No prior release notes were kept. Thanks all the same to the many contributors +that have made this crate what it is! 
diff --git a/vendor/num-rational-generic/benchmarks/Cargo.toml b/vendor/num-rational-generic/benchmarks/Cargo.toml new file mode 100644 index 000000000..e890e3095 --- /dev/null +++ b/vendor/num-rational-generic/benchmarks/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "benchmarks" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] + +[dependencies.num-rational] +default-features = false +features = ["num-bigint"] +path = "../.." + +[dependencies.num-bigint] +default-features = false +version = "0.4.0" + +[dependencies.rand] +default-features = false +version = "0.8" diff --git a/vendor/num-rational-generic/benchmarks/src/main.rs b/vendor/num-rational-generic/benchmarks/src/main.rs new file mode 100644 index 000000000..37e7a42d6 --- /dev/null +++ b/vendor/num-rational-generic/benchmarks/src/main.rs @@ -0,0 +1,32 @@ +#![feature(test)] + +extern crate test; + +use num_bigint::BigInt; +use num_rational::{BigRational, Ratio}; +use test::Bencher; + +mod rng; +use rng::get_rng; + +#[bench] +fn alloc_ratio_bigint_bench(b: &mut Bencher) { + use rand::RngCore; + let mut rng = get_rng(); + b.iter(|| { + let a = BigInt::from(rng.next_u64()); + let b = BigInt::from(rng.next_u64()); + BigRational::new(a, b) + }); +} + +#[bench] +fn alloc_ratio_u64_bench(b: &mut Bencher) { + use rand::RngCore; + let mut rng = get_rng(); + b.iter(|| { + let a = rng.next_u64(); + let b = rng.next_u64(); + Ratio::new(a, b) + }); +} diff --git a/vendor/num-rational-generic/benchmarks/src/rng.rs b/vendor/num-rational-generic/benchmarks/src/rng.rs new file mode 100644 index 000000000..33e4f0fad --- /dev/null +++ b/vendor/num-rational-generic/benchmarks/src/rng.rs @@ -0,0 +1,38 @@ +use rand::RngCore; + +pub(crate) fn get_rng() -> impl RngCore { + XorShiftStar { + a: 0x0123_4567_89AB_CDEF, + } +} + +/// Simple `Rng` for benchmarking without additional dependencies +struct XorShiftStar { + a: u64, +} + 
+impl RngCore for XorShiftStar { + fn next_u32(&mut self) -> u32 { + self.next_u64() as u32 + } + + fn next_u64(&mut self) -> u64 { + // https://en.wikipedia.org/wiki/Xorshift#xorshift* + self.a ^= self.a >> 12; + self.a ^= self.a << 25; + self.a ^= self.a >> 27; + self.a.wrapping_mul(0x2545_F491_4F6C_DD1D) + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + for chunk in dest.chunks_mut(8) { + let bytes = self.next_u64().to_le_bytes(); + let slice = &bytes[..chunk.len()]; + chunk.copy_from_slice(slice) + } + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> { + Ok(self.fill_bytes(dest)) + } +} diff --git a/vendor/num-rational-generic/ci/benchmarks/Cargo.toml b/vendor/num-rational-generic/ci/benchmarks/Cargo.toml new file mode 100644 index 000000000..e890e3095 --- /dev/null +++ b/vendor/num-rational-generic/ci/benchmarks/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "benchmarks" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] + +[dependencies.num-rational] +default-features = false +features = ["num-bigint"] +path = "../.." 
+ +[dependencies.num-bigint] +default-features = false +version = "0.4.0" + +[dependencies.rand] +default-features = false +version = "0.8" diff --git a/vendor/num-rational-generic/ci/benchmarks/src/main.rs b/vendor/num-rational-generic/ci/benchmarks/src/main.rs new file mode 100644 index 000000000..23cef118d --- /dev/null +++ b/vendor/num-rational-generic/ci/benchmarks/src/main.rs @@ -0,0 +1,32 @@ +#![feature(test)] + +extern crate test; + +use num_bigint_generic::BigInt; +use num_rational_generic::{BigRational, Ratio}; +use test::Bencher; + +mod rng; +use rng::get_rng; + +#[bench] +fn alloc_ratio_bigint_bench(b: &mut Bencher) { + use rand::RngCore; + let mut rng = get_rng(); + b.iter(|| { + let a = BigInt::from(rng.next_u64()); + let b = BigInt::from(rng.next_u64()); + BigRational::new(a, b) + }); +} + +#[bench] +fn alloc_ratio_u64_bench(b: &mut Bencher) { + use rand::RngCore; + let mut rng = get_rng(); + b.iter(|| { + let a = rng.next_u64(); + let b = rng.next_u64(); + Ratio::new(a, b) + }); +} diff --git a/vendor/num-rational-generic/ci/benchmarks/src/rng.rs b/vendor/num-rational-generic/ci/benchmarks/src/rng.rs new file mode 100644 index 000000000..33e4f0fad --- /dev/null +++ b/vendor/num-rational-generic/ci/benchmarks/src/rng.rs @@ -0,0 +1,38 @@ +use rand::RngCore; + +pub(crate) fn get_rng() -> impl RngCore { + XorShiftStar { + a: 0x0123_4567_89AB_CDEF, + } +} + +/// Simple `Rng` for benchmarking without additional dependencies +struct XorShiftStar { + a: u64, +} + +impl RngCore for XorShiftStar { + fn next_u32(&mut self) -> u32 { + self.next_u64() as u32 + } + + fn next_u64(&mut self) -> u64 { + // https://en.wikipedia.org/wiki/Xorshift#xorshift* + self.a ^= self.a >> 12; + self.a ^= self.a << 25; + self.a ^= self.a >> 27; + self.a.wrapping_mul(0x2545_F491_4F6C_DD1D) + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + for chunk in dest.chunks_mut(8) { + let bytes = self.next_u64().to_le_bytes(); + let slice = &bytes[..chunk.len()]; + 
chunk.copy_from_slice(slice) + } + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> { + Ok(self.fill_bytes(dest)) + } +} diff --git a/vendor/num-rational-generic/ci/rustup.sh b/vendor/num-rational-generic/ci/rustup.sh new file mode 100755 index 000000000..144042bc0 --- /dev/null +++ b/vendor/num-rational-generic/ci/rustup.sh @@ -0,0 +1,10 @@ +#!/bin/sh +# Use rustup to locally run the same suite of tests as .github/workflows/ +# (You should first install/update all of the versions below.) + +set -ex + +ci=$(dirname "$0") +for version in 1.60.0 stable beta nightly; do + rustup run "$version" "$ci/test_full.sh" +done diff --git a/vendor/num-rational-generic/ci/test_full.sh b/vendor/num-rational-generic/ci/test_full.sh new file mode 100755 index 000000000..dbb963a83 --- /dev/null +++ b/vendor/num-rational-generic/ci/test_full.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +set -e + +CRATE=num-rational +MSRV=1.60 + +get_rust_version() { + local array=("$(rustc --version)"); + echo "${array[1]}"; + return 0; +} +RUST_VERSION=$(get_rust_version) + +check_version() { + IFS=. read -ra rust <<< "$RUST_VERSION" + IFS=. read -ra want <<< "$1" + [[ "${rust[0]}" -gt "${want[0]}" || + ( "${rust[0]}" -eq "${want[0]}" && + "${rust[1]}" -ge "${want[1]}" ) + ]] +} + +echo "Testing $CRATE on rustc $RUST_VERSION" +if ! 
check_version $MSRV ; then + echo "The minimum for $CRATE is rustc $MSRV" + exit 1 +fi + +STD_FEATURES=(num-bigint-std serde) +NO_STD_FEATURES=(num-bigint serde) +echo "Testing supported features: ${STD_FEATURES[*]}" +echo " no_std supported features: ${NO_STD_FEATURES[*]}" + +set -x + +# test the default with std +cargo build +cargo test + +# test each isolated feature with std +for feature in "${STD_FEATURES[@]}"; do + cargo build --no-default-features --features="std $feature" + cargo test --no-default-features --features="std $feature" +done + +# test all supported features with std +cargo build --no-default-features --features="std ${STD_FEATURES[*]}" +cargo test --no-default-features --features="std ${STD_FEATURES[*]}" + + +# test minimal `no_std` +cargo build --no-default-features +cargo test --no-default-features + +# test each isolated feature without std +for feature in "${NO_STD_FEATURES[@]}"; do + cargo build --no-default-features --features="$feature" + cargo test --no-default-features --features="$feature" +done + +# test all supported features without std +cargo build --no-default-features --features="${NO_STD_FEATURES[*]}" +cargo test --no-default-features --features="${NO_STD_FEATURES[*]}" + +# make sure benchmarks can be built and sanity-tested +if rustc --version | grep -q nightly; then + cargo test --manifest-path ci/benchmarks/Cargo.toml +fi diff --git a/vendor/num-rational-generic/lib.rs b/vendor/num-rational-generic/lib.rs new file mode 100644 index 000000000..f838f7f6e --- /dev/null +++ b/vendor/num-rational-generic/lib.rs @@ -0,0 +1,3148 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Rational numbers +//! +//! 
## Compatibility +//! +//! The `num-rational` crate is tested for rustc 1.60 and greater. + +#![doc(html_root_url = "https://docs.rs/num-rational/0.4")] +#![no_std] +// Ratio ops often use other "suspicious" ops +#![allow(clippy::suspicious_arithmetic_impl)] +#![allow(clippy::suspicious_op_assign_impl)] + +#[cfg(feature = "std")] +#[macro_use] +extern crate std; + +use core::cmp; +use core::fmt; +use core::fmt::{Binary, Display, Formatter, LowerExp, LowerHex, Octal, UpperExp, UpperHex}; +use core::hash::{Hash, Hasher}; +use core::ops::{Add, Div, Mul, Neg, Rem, ShlAssign, Sub}; +use core::str::FromStr; +#[cfg(feature = "std")] +use std::error::Error; + +#[cfg(feature = "num-bigint")] +use num_bigint::{BigInt, BigUint, Sign, ToBigInt}; + +use num_integer::Integer; +use num_traits::float::FloatCore; +use num_traits::{ + Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, ConstOne, ConstZero, FromPrimitive, + Inv, Num, NumCast, One, Pow, Signed, ToPrimitive, Unsigned, Zero, +}; + +mod pow; + +/// Represents the ratio between two numbers. +#[derive(Copy, Clone, Debug)] +#[allow(missing_docs)] +pub struct Ratio { + /// Numerator. + numer: T, + /// Denominator. + denom: T, +} + +#[cfg(feature = "num-bigint")] +impl Ratio> { + pub fn to_nlimbs(&self) -> Ratio> { + let Self { numer, denom } = self; + Ratio::new(numer.to_nlimbs(), denom.to_nlimbs()) + } +} + +/// Alias for a `Ratio` of machine-sized integers. +#[deprecated( + since = "0.4.0", + note = "it's better to use a specific size, like `Rational32` or `Rational64`" +)] +pub type Rational = Ratio; +/// Alias for a `Ratio` of 32-bit-sized integers. +pub type Rational32 = Ratio; +/// Alias for a `Ratio` of 64-bit-sized integers. +pub type Rational64 = Ratio; + +#[cfg(feature = "num-bigint")] +/// Alias for arbitrary precision rationals. +pub type BigRational = Ratio>; + +/// These method are `const`. +impl Ratio { + /// Creates a `Ratio` without checking for `denom == 0` or reducing. 
+ /// + /// **There are several methods that will panic if used on a `Ratio` with + /// `denom == 0`.** + #[inline] + pub const fn new_raw(numer: T, denom: T) -> Ratio { + Ratio { numer, denom } + } + + /// Deconstructs a `Ratio` into its numerator and denominator. + #[inline] + pub fn into_raw(self) -> (T, T) { + (self.numer, self.denom) + } + + /// Gets an immutable reference to the numerator. + #[inline] + pub const fn numer(&self) -> &T { + &self.numer + } + + /// Gets an immutable reference to the denominator. + #[inline] + pub const fn denom(&self) -> &T { + &self.denom + } +} + +impl Ratio { + /// Creates a new `Ratio`. + /// + /// **Panics if `denom` is zero.** + #[inline] + pub fn new(numer: T, denom: T) -> Ratio { + let mut ret = Ratio::new_raw(numer, denom); + ret.reduce(); + ret + } + + /// Creates a `Ratio` representing the integer `t`. + #[inline] + pub fn from_integer(t: T) -> Ratio { + Ratio::new_raw(t, One::one()) + } + + /// Converts to an integer, rounding towards zero. + #[inline] + pub fn to_integer(&self) -> T { + self.trunc().numer + } + + /// Returns true if the rational number is an integer (denominator is 1). + #[inline] + pub fn is_integer(&self) -> bool { + self.denom.is_one() + } + + /// Puts self into lowest terms, with `denom` > 0. + /// + /// **Panics if `denom` is zero.** + fn reduce(&mut self) { + if self.denom.is_zero() { + panic!("denominator == 0"); + } + if self.numer.is_zero() { + self.denom.set_one(); + return; + } + if self.numer == self.denom { + self.set_one(); + return; + } + let g: T = self.numer.gcd(&self.denom); + + // FIXME(#5992): assignment operator overloads + // T: Clone + Integer != T: Clone + NumAssign + + #[inline] + fn replace_with(x: &mut T, f: impl FnOnce(T) -> T) { + let y = core::mem::replace(x, T::zero()); + *x = f(y); + } + + // self.numer /= g; + replace_with(&mut self.numer, |x| x / g.clone()); + + // self.denom /= g; + replace_with(&mut self.denom, |x| x / g); + + // keep denom positive! 
+ if self.denom < T::zero() { + replace_with(&mut self.numer, |x| T::zero() - x); + replace_with(&mut self.denom, |x| T::zero() - x); + } + } + + /// Returns a reduced copy of self. + /// + /// In general, it is not necessary to use this method, as the only + /// method of procuring a non-reduced fraction is through `new_raw`. + /// + /// **Panics if `denom` is zero.** + pub fn reduced(&self) -> Ratio { + let mut ret = self.clone(); + ret.reduce(); + ret + } + + /// Returns the reciprocal. + /// + /// **Panics if the `Ratio` is zero.** + #[inline] + pub fn recip(&self) -> Ratio { + self.clone().into_recip() + } + + #[inline] + fn into_recip(self) -> Ratio { + match self.numer.cmp(&T::zero()) { + cmp::Ordering::Equal => panic!("division by zero"), + cmp::Ordering::Greater => Ratio::new_raw(self.denom, self.numer), + cmp::Ordering::Less => Ratio::new_raw(T::zero() - self.denom, T::zero() - self.numer), + } + } + + /// Rounds towards minus infinity. + #[inline] + pub fn floor(&self) -> Ratio { + if *self < Zero::zero() { + let one: T = One::one(); + Ratio::from_integer( + (self.numer.clone() - self.denom.clone() + one) / self.denom.clone(), + ) + } else { + Ratio::from_integer(self.numer.clone() / self.denom.clone()) + } + } + + /// Rounds towards plus infinity. + #[inline] + pub fn ceil(&self) -> Ratio { + if *self < Zero::zero() { + Ratio::from_integer(self.numer.clone() / self.denom.clone()) + } else { + let one: T = One::one(); + Ratio::from_integer( + (self.numer.clone() + self.denom.clone() - one) / self.denom.clone(), + ) + } + } + + /// Rounds to the nearest integer. Rounds half-way cases away from zero. 
+ #[inline] + pub fn round(&self) -> Ratio { + let zero: Ratio = Zero::zero(); + let one: T = One::one(); + let two: T = one.clone() + one.clone(); + + // Find unsigned fractional part of rational number + let mut fractional = self.fract(); + if fractional < zero { + fractional = zero - fractional + }; + + // The algorithm compares the unsigned fractional part with 1/2, that + // is, a/b >= 1/2, or a >= b/2. For odd denominators, we use + // a >= (b/2)+1. This avoids overflow issues. + let half_or_larger = if fractional.denom.is_even() { + fractional.numer >= fractional.denom / two + } else { + fractional.numer >= (fractional.denom / two) + one + }; + + if half_or_larger { + let one: Ratio = One::one(); + if *self >= Zero::zero() { + self.trunc() + one + } else { + self.trunc() - one + } + } else { + self.trunc() + } + } + + /// Rounds towards zero. + #[inline] + pub fn trunc(&self) -> Ratio { + Ratio::from_integer(self.numer.clone() / self.denom.clone()) + } + + /// Returns the fractional part of a number, with division rounded towards zero. + /// + /// Satisfies `self == self.trunc() + self.fract()`. + #[inline] + pub fn fract(&self) -> Ratio { + Ratio::new_raw(self.numer.clone() % self.denom.clone(), self.denom.clone()) + } + + /// Raises the `Ratio` to the power of an exponent. + #[inline] + pub fn pow(&self, expon: i32) -> Ratio + where + for<'a> &'a T: Pow, + { + Pow::pow(self, expon) + } +} + +#[cfg(feature = "num-bigint")] +impl Ratio { + /// Converts a float into a rational number. 
+ pub fn from_float(f: T) -> Option { + if !f.is_finite() { + return None; + } + let (mantissa, exponent, sign) = f.integer_decode(); + let bigint_sign = if sign == 1 { Sign::Plus } else { Sign::Minus }; + if exponent < 0 { + let one: BigInt = One::one(); + let denom: BigInt = one << ((-exponent) as usize); + let numer: BigUint = FromPrimitive::from_u64(mantissa).unwrap(); + Some(Ratio::new(BigInt::from_biguint(bigint_sign, numer), denom)) + } else { + let mut numer: BigUint = FromPrimitive::from_u64(mantissa).unwrap(); + numer <<= exponent as usize; + Some(Ratio::from_integer(BigInt::from_biguint( + bigint_sign, + numer, + ))) + } + } +} + +impl Default for Ratio { + /// Returns zero + fn default() -> Self { + Ratio::zero() + } +} + +// From integer +impl From for Ratio +where + T: Clone + Integer, +{ + fn from(x: T) -> Ratio { + Ratio::from_integer(x) + } +} + +// From pair (through the `new` constructor) +impl From<(T, T)> for Ratio +where + T: Clone + Integer, +{ + fn from(pair: (T, T)) -> Ratio { + Ratio::new(pair.0, pair.1) + } +} + +// Comparisons + +// Mathematically, comparing a/b and c/d is the same as comparing a*d and b*c, but it's very easy +// for those multiplications to overflow fixed-size integers, so we need to take care. + +impl Ord for Ratio { + #[inline] + fn cmp(&self, other: &Self) -> cmp::Ordering { + // With equal denominators, the numerators can be directly compared + if self.denom == other.denom { + let ord = self.numer.cmp(&other.numer); + return if self.denom < T::zero() { + ord.reverse() + } else { + ord + }; + } + + // With equal numerators, the denominators can be inversely compared + if self.numer == other.numer { + if self.numer.is_zero() { + return cmp::Ordering::Equal; + } + let ord = self.denom.cmp(&other.denom); + return if self.numer < T::zero() { + ord + } else { + ord.reverse() + }; + } + + // Unfortunately, we don't have CheckedMul to try. 
That could sometimes avoid all the + // division below, or even always avoid it for BigInt and BigUint. + // FIXME- future breaking change to add Checked* to Integer? + + // Compare as floored integers and remainders + let (self_int, self_rem) = self.numer.div_mod_floor(&self.denom); + let (other_int, other_rem) = other.numer.div_mod_floor(&other.denom); + match self_int.cmp(&other_int) { + cmp::Ordering::Greater => cmp::Ordering::Greater, + cmp::Ordering::Less => cmp::Ordering::Less, + cmp::Ordering::Equal => { + match (self_rem.is_zero(), other_rem.is_zero()) { + (true, true) => cmp::Ordering::Equal, + (true, false) => cmp::Ordering::Less, + (false, true) => cmp::Ordering::Greater, + (false, false) => { + // Compare the reciprocals of the remaining fractions in reverse + let self_recip = Ratio::new_raw(self.denom.clone(), self_rem); + let other_recip = Ratio::new_raw(other.denom.clone(), other_rem); + self_recip.cmp(&other_recip).reverse() + } + } + } + } + } +} + +impl PartialOrd for Ratio { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for Ratio { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == cmp::Ordering::Equal + } +} + +impl Eq for Ratio {} + +// NB: We can't just `#[derive(Hash)]`, because it needs to agree +// with `Eq` even for non-reduced ratios. 
impl<T: Clone + Integer + Hash> Hash for Ratio<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        recurse(&self.numer, &self.denom, state);

        // Hash the Euclidean continued-fraction expansion so that equal
        // (possibly non-reduced) ratios hash identically.
        fn recurse<T: Integer + Hash, H: Hasher>(numer: &T, denom: &T, state: &mut H) {
            if !denom.is_zero() {
                let (int, rem) = numer.div_mod_floor(denom);
                int.hash(state);
                recurse(denom, &rem, state);
            } else {
                denom.hash(state);
            }
        }
    }
}

mod iter_sum_product {
    use crate::Ratio;
    use core::iter::{Product, Sum};
    use num_integer::Integer;
    use num_traits::{One, Zero};

    impl<T: Integer + Clone> Sum for Ratio<T> {
        fn sum<I>(iter: I) -> Self
        where
            I: Iterator<Item = Ratio<T>>,
        {
            iter.fold(Self::zero(), |sum, num| sum + num)
        }
    }

    impl<'a, T: Integer + Clone> Sum<&'a Ratio<T>> for Ratio<T> {
        fn sum<I>(iter: I) -> Self
        where
            I: Iterator<Item = &'a Ratio<T>>,
        {
            iter.fold(Self::zero(), |sum, num| sum + num)
        }
    }

    impl<T: Integer + Clone> Product for Ratio<T> {
        fn product<I>(iter: I) -> Self
        where
            I: Iterator<Item = Ratio<T>>,
        {
            iter.fold(Self::one(), |prod, num| prod * num)
        }
    }

    impl<'a, T: Integer + Clone> Product<&'a Ratio<T>> for Ratio<T> {
        fn product<I>(iter: I) -> Self
        where
            I: Iterator<Item = &'a Ratio<T>>,
        {
            iter.fold(Self::one(), |prod, num| prod * num)
        }
    }
}

mod opassign {
    use core::ops::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign};

    use crate::Ratio;
    use num_integer::Integer;
    use num_traits::NumAssign;

    impl<T: Clone + Integer + NumAssign> AddAssign for Ratio<T> {
        fn add_assign(&mut self, other: Ratio<T>) {
            if self.denom == other.denom {
                self.numer += other.numer
            } else {
                let lcm = self.denom.lcm(&other.denom);
                let lhs_numer = self.numer.clone() * (lcm.clone() / self.denom.clone());
                let rhs_numer = other.numer * (lcm.clone() / other.denom);
                self.numer = lhs_numer + rhs_numer;
                self.denom = lcm;
            }
            self.reduce();
        }
    }

    // (a/b) / (c/d) = (a/gcd_ac)*(d/gcd_bd) / ((c/gcd_ac)*(b/gcd_bd))
    impl<T: Clone + Integer + NumAssign> DivAssign for Ratio<T> {
        fn div_assign(&mut self, other: Ratio<T>) {
            let gcd_ac = self.numer.gcd(&other.numer);
            let gcd_bd = self.denom.gcd(&other.denom);
            self.numer /= gcd_ac.clone();
            self.numer *= other.denom / gcd_bd.clone();
            self.denom /= gcd_bd;
/= gcd_bd; + self.denom *= other.numer / gcd_ac; + self.reduce(); // TODO: remove this line. see #8. + } + } + + // a/b * c/d = (a/gcd_ad)*(c/gcd_bc) / ((d/gcd_ad)*(b/gcd_bc)) + impl MulAssign for Ratio { + fn mul_assign(&mut self, other: Ratio) { + let gcd_ad = self.numer.gcd(&other.denom); + let gcd_bc = self.denom.gcd(&other.numer); + self.numer /= gcd_ad.clone(); + self.numer *= other.numer / gcd_bc.clone(); + self.denom /= gcd_bc; + self.denom *= other.denom / gcd_ad; + self.reduce(); // TODO: remove this line. see #8. + } + } + + impl RemAssign for Ratio { + fn rem_assign(&mut self, other: Ratio) { + if self.denom == other.denom { + self.numer %= other.numer + } else { + let lcm = self.denom.lcm(&other.denom); + let lhs_numer = self.numer.clone() * (lcm.clone() / self.denom.clone()); + let rhs_numer = other.numer * (lcm.clone() / other.denom); + self.numer = lhs_numer % rhs_numer; + self.denom = lcm; + } + self.reduce(); + } + } + + impl SubAssign for Ratio { + fn sub_assign(&mut self, other: Ratio) { + if self.denom == other.denom { + self.numer -= other.numer + } else { + let lcm = self.denom.lcm(&other.denom); + let lhs_numer = self.numer.clone() * (lcm.clone() / self.denom.clone()); + let rhs_numer = other.numer * (lcm.clone() / other.denom); + self.numer = lhs_numer - rhs_numer; + self.denom = lcm; + } + self.reduce(); + } + } + + // a/b + c/1 = (a*1 + b*c) / (b*1) = (a + b*c) / b + impl AddAssign for Ratio { + fn add_assign(&mut self, other: T) { + self.numer += self.denom.clone() * other; + self.reduce(); + } + } + + impl DivAssign for Ratio { + fn div_assign(&mut self, other: T) { + let gcd = self.numer.gcd(&other); + self.numer /= gcd.clone(); + self.denom *= other / gcd; + self.reduce(); // TODO: remove this line. see #8. + } + } + + impl MulAssign for Ratio { + fn mul_assign(&mut self, other: T) { + let gcd = self.denom.gcd(&other); + self.denom /= gcd.clone(); + self.numer *= other / gcd; + self.reduce(); // TODO: remove this line. see #8. 
+ } + } + + // a/b % c/1 = (a*1 % b*c) / (b*1) = (a % b*c) / b + impl RemAssign for Ratio { + fn rem_assign(&mut self, other: T) { + self.numer %= self.denom.clone() * other; + self.reduce(); + } + } + + // a/b - c/1 = (a*1 - b*c) / (b*1) = (a - b*c) / b + impl SubAssign for Ratio { + fn sub_assign(&mut self, other: T) { + self.numer -= self.denom.clone() * other; + self.reduce(); + } + } + + macro_rules! forward_op_assign { + (impl $imp:ident, $method:ident) => { + impl<'a, T: Clone + Integer + NumAssign> $imp<&'a Ratio> for Ratio { + #[inline] + fn $method(&mut self, other: &Ratio) { + self.$method(other.clone()) + } + } + impl<'a, T: Clone + Integer + NumAssign> $imp<&'a T> for Ratio { + #[inline] + fn $method(&mut self, other: &T) { + self.$method(other.clone()) + } + } + }; + } + + forward_op_assign!(impl AddAssign, add_assign); + forward_op_assign!(impl DivAssign, div_assign); + forward_op_assign!(impl MulAssign, mul_assign); + forward_op_assign!(impl RemAssign, rem_assign); + forward_op_assign!(impl SubAssign, sub_assign); +} + +macro_rules! forward_ref_ref_binop { + (impl $imp:ident, $method:ident) => { + impl<'a, 'b, T: Clone + Integer> $imp<&'b Ratio> for &'a Ratio { + type Output = Ratio; + + #[inline] + fn $method(self, other: &'b Ratio) -> Ratio { + self.clone().$method(other.clone()) + } + } + impl<'a, 'b, T: Clone + Integer> $imp<&'b T> for &'a Ratio { + type Output = Ratio; + + #[inline] + fn $method(self, other: &'b T) -> Ratio { + self.clone().$method(other.clone()) + } + } + }; +} + +macro_rules! forward_ref_val_binop { + (impl $imp:ident, $method:ident) => { + impl<'a, T> $imp> for &'a Ratio + where + T: Clone + Integer, + { + type Output = Ratio; + + #[inline] + fn $method(self, other: Ratio) -> Ratio { + self.clone().$method(other) + } + } + impl<'a, T> $imp for &'a Ratio + where + T: Clone + Integer, + { + type Output = Ratio; + + #[inline] + fn $method(self, other: T) -> Ratio { + self.clone().$method(other) + } + } + }; +} + +macro_rules! 
forward_val_ref_binop { + (impl $imp:ident, $method:ident) => { + impl<'a, T> $imp<&'a Ratio> for Ratio + where + T: Clone + Integer, + { + type Output = Ratio; + + #[inline] + fn $method(self, other: &Ratio) -> Ratio { + self.$method(other.clone()) + } + } + impl<'a, T> $imp<&'a T> for Ratio + where + T: Clone + Integer, + { + type Output = Ratio; + + #[inline] + fn $method(self, other: &T) -> Ratio { + self.$method(other.clone()) + } + } + }; +} + +macro_rules! forward_all_binop { + (impl $imp:ident, $method:ident) => { + forward_ref_ref_binop!(impl $imp, $method); + forward_ref_val_binop!(impl $imp, $method); + forward_val_ref_binop!(impl $imp, $method); + }; +} + +// Arithmetic +forward_all_binop!(impl Mul, mul); +// a/b * c/d = (a/gcd_ad)*(c/gcd_bc) / ((d/gcd_ad)*(b/gcd_bc)) +impl Mul> for Ratio +where + T: Clone + Integer, +{ + type Output = Ratio; + #[inline] + fn mul(self, rhs: Ratio) -> Ratio { + let gcd_ad = self.numer.gcd(&rhs.denom); + let gcd_bc = self.denom.gcd(&rhs.numer); + Ratio::new( + self.numer / gcd_ad.clone() * (rhs.numer / gcd_bc.clone()), + self.denom / gcd_bc * (rhs.denom / gcd_ad), + ) + } +} +// a/b * c/1 = (a*c) / (b*1) = (a*c) / b +impl Mul for Ratio +where + T: Clone + Integer, +{ + type Output = Ratio; + #[inline] + fn mul(self, rhs: T) -> Ratio { + let gcd = self.denom.gcd(&rhs); + Ratio::new(self.numer * (rhs / gcd.clone()), self.denom / gcd) + } +} + +forward_all_binop!(impl Div, div); +// (a/b) / (c/d) = (a/gcd_ac)*(d/gcd_bd) / ((c/gcd_ac)*(b/gcd_bd)) +impl Div> for Ratio +where + T: Clone + Integer, +{ + type Output = Ratio; + + #[inline] + fn div(self, rhs: Ratio) -> Ratio { + let gcd_ac = self.numer.gcd(&rhs.numer); + let gcd_bd = self.denom.gcd(&rhs.denom); + Ratio::new( + self.numer / gcd_ac.clone() * (rhs.denom / gcd_bd.clone()), + self.denom / gcd_bd * (rhs.numer / gcd_ac), + ) + } +} +// (a/b) / (c/1) = (a*1) / (b*c) = a / (b*c) +impl Div for Ratio +where + T: Clone + Integer, +{ + type Output = Ratio; + + #[inline] + fn 
div(self, rhs: T) -> Ratio { + let gcd = self.numer.gcd(&rhs); + Ratio::new(self.numer / gcd.clone(), self.denom * (rhs / gcd)) + } +} + +macro_rules! arith_impl { + (impl $imp:ident, $method:ident) => { + forward_all_binop!(impl $imp, $method); + // Abstracts a/b `op` c/d = (a*lcm/b `op` c*lcm/d)/lcm where lcm = lcm(b,d) + impl $imp> for Ratio { + type Output = Ratio; + #[inline] + fn $method(self, rhs: Ratio) -> Ratio { + if self.denom == rhs.denom { + return Ratio::new(self.numer.$method(rhs.numer), rhs.denom); + } + let lcm = self.denom.lcm(&rhs.denom); + let lhs_numer = self.numer * (lcm.clone() / self.denom); + let rhs_numer = rhs.numer * (lcm.clone() / rhs.denom); + Ratio::new(lhs_numer.$method(rhs_numer), lcm) + } + } + // Abstracts the a/b `op` c/1 = (a*1 `op` b*c) / (b*1) = (a `op` b*c) / b pattern + impl $imp for Ratio { + type Output = Ratio; + #[inline] + fn $method(self, rhs: T) -> Ratio { + Ratio::new(self.numer.$method(self.denom.clone() * rhs), self.denom) + } + } + }; +} + +arith_impl!(impl Add, add); +arith_impl!(impl Sub, sub); +arith_impl!(impl Rem, rem); + +// a/b * c/d = (a*c)/(b*d) +impl CheckedMul for Ratio +where + T: Clone + Integer + CheckedMul, +{ + #[inline] + fn checked_mul(&self, rhs: &Ratio) -> Option> { + let gcd_ad = self.numer.gcd(&rhs.denom); + let gcd_bc = self.denom.gcd(&rhs.numer); + Some(Ratio::new( + (self.numer.clone() / gcd_ad.clone()) + .checked_mul(&(rhs.numer.clone() / gcd_bc.clone()))?, + (self.denom.clone() / gcd_bc).checked_mul(&(rhs.denom.clone() / gcd_ad))?, + )) + } +} + +// (a/b) / (c/d) = (a*d)/(b*c) +impl CheckedDiv for Ratio +where + T: Clone + Integer + CheckedMul, +{ + #[inline] + fn checked_div(&self, rhs: &Ratio) -> Option> { + if rhs.is_zero() { + return None; + } + let (numer, denom) = if self.denom == rhs.denom { + (self.numer.clone(), rhs.numer.clone()) + } else if self.numer == rhs.numer { + (rhs.denom.clone(), self.denom.clone()) + } else { + let gcd_ac = self.numer.gcd(&rhs.numer); + let gcd_bd = 
self.denom.gcd(&rhs.denom); + ( + (self.numer.clone() / gcd_ac.clone()) + .checked_mul(&(rhs.denom.clone() / gcd_bd.clone()))?, + (self.denom.clone() / gcd_bd).checked_mul(&(rhs.numer.clone() / gcd_ac))?, + ) + }; + // Manual `reduce()`, avoiding sharp edges + if denom.is_zero() { + None + } else if numer.is_zero() { + Some(Self::zero()) + } else if numer == denom { + Some(Self::one()) + } else { + let g = numer.gcd(&denom); + let numer = numer / g.clone(); + let denom = denom / g; + let raw = if denom < T::zero() { + // We need to keep denom positive, but 2's-complement MIN may + // overflow negation -- instead we can check multiplying -1. + let n1 = T::zero() - T::one(); + Ratio::new_raw(numer.checked_mul(&n1)?, denom.checked_mul(&n1)?) + } else { + Ratio::new_raw(numer, denom) + }; + Some(raw) + } + } +} + +// As arith_impl! but for Checked{Add,Sub} traits +macro_rules! checked_arith_impl { + (impl $imp:ident, $method:ident) => { + impl $imp for Ratio { + #[inline] + fn $method(&self, rhs: &Ratio) -> Option> { + let gcd = self.denom.clone().gcd(&rhs.denom); + let lcm = (self.denom.clone() / gcd.clone()).checked_mul(&rhs.denom)?; + let lhs_numer = (lcm.clone() / self.denom.clone()).checked_mul(&self.numer)?; + let rhs_numer = (lcm.clone() / rhs.denom.clone()).checked_mul(&rhs.numer)?; + Some(Ratio::new(lhs_numer.$method(&rhs_numer)?, lcm)) + } + } + }; +} + +// a/b + c/d = (lcm/b*a + lcm/d*c)/lcm, where lcm = lcm(b,d) +checked_arith_impl!(impl CheckedAdd, checked_add); + +// a/b - c/d = (lcm/b*a - lcm/d*c)/lcm, where lcm = lcm(b,d) +checked_arith_impl!(impl CheckedSub, checked_sub); + +impl Neg for Ratio +where + T: Clone + Integer + Neg, +{ + type Output = Ratio; + + #[inline] + fn neg(self) -> Ratio { + Ratio::new_raw(-self.numer, self.denom) + } +} + +impl<'a, T> Neg for &'a Ratio +where + T: Clone + Integer + Neg, +{ + type Output = Ratio; + + #[inline] + fn neg(self) -> Ratio { + -self.clone() + } +} + +impl Inv for Ratio +where + T: Clone + Integer, +{ + 
type Output = Ratio; + + #[inline] + fn inv(self) -> Ratio { + self.recip() + } +} + +impl<'a, T> Inv for &'a Ratio +where + T: Clone + Integer, +{ + type Output = Ratio; + + #[inline] + fn inv(self) -> Ratio { + self.recip() + } +} + +// Constants +impl Ratio { + /// A constant `Ratio` 0/1. + pub const ZERO: Self = Self::new_raw(T::ZERO, T::ONE); +} + +impl ConstZero for Ratio { + const ZERO: Self = Self::ZERO; +} + +impl Zero for Ratio { + #[inline] + fn zero() -> Ratio { + Ratio::new_raw(Zero::zero(), One::one()) + } + + #[inline] + fn is_zero(&self) -> bool { + self.numer.is_zero() + } + + #[inline] + fn set_zero(&mut self) { + self.numer.set_zero(); + self.denom.set_one(); + } +} + +impl Ratio { + /// A constant `Ratio` 1/1. + pub const ONE: Self = Self::new_raw(T::ONE, T::ONE); +} + +impl ConstOne for Ratio { + const ONE: Self = Self::ONE; +} + +impl One for Ratio { + #[inline] + fn one() -> Ratio { + Ratio::new_raw(One::one(), One::one()) + } + + #[inline] + fn is_one(&self) -> bool { + self.numer == self.denom + } + + #[inline] + fn set_one(&mut self) { + self.numer.set_one(); + self.denom.set_one(); + } +} + +impl Num for Ratio { + type FromStrRadixErr = ParseRatioError; + + /// Parses `numer/denom` where the numbers are in base `radix`. 
+ fn from_str_radix(s: &str, radix: u32) -> Result, ParseRatioError> { + if s.splitn(2, '/').count() == 2 { + let mut parts = s.splitn(2, '/').map(|ss| { + T::from_str_radix(ss, radix).map_err(|_| ParseRatioError { + kind: RatioErrorKind::ParseError, + }) + }); + let numer: T = parts.next().unwrap()?; + let denom: T = parts.next().unwrap()?; + if denom.is_zero() { + Err(ParseRatioError { + kind: RatioErrorKind::ZeroDenominator, + }) + } else { + Ok(Ratio::new(numer, denom)) + } + } else { + Err(ParseRatioError { + kind: RatioErrorKind::ParseError, + }) + } + } +} + +impl Signed for Ratio { + #[inline] + fn abs(&self) -> Ratio { + if self.is_negative() { + -self.clone() + } else { + self.clone() + } + } + + #[inline] + fn abs_sub(&self, other: &Ratio) -> Ratio { + if *self <= *other { + Zero::zero() + } else { + self - other + } + } + + #[inline] + fn signum(&self) -> Ratio { + if self.is_positive() { + Self::one() + } else if self.is_zero() { + Self::zero() + } else { + -Self::one() + } + } + + #[inline] + fn is_positive(&self) -> bool { + (self.numer.is_positive() && self.denom.is_positive()) + || (self.numer.is_negative() && self.denom.is_negative()) + } + + #[inline] + fn is_negative(&self) -> bool { + (self.numer.is_negative() && self.denom.is_positive()) + || (self.numer.is_positive() && self.denom.is_negative()) + } +} + +// String conversions +macro_rules! 
macro_rules! impl_formatting {
    ($fmt_trait:ident, $prefix:expr, $fmt_str:expr, $fmt_alt:expr) => {
        impl<T: $fmt_trait + Clone + Integer> $fmt_trait for Ratio<T> {
            #[cfg(feature = "std")]
            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
                let pre_pad = if self.denom.is_one() {
                    format!($fmt_str, self.numer)
                } else {
                    if f.alternate() {
                        format!(concat!($fmt_str, "/", $fmt_alt), self.numer, self.denom)
                    } else {
                        format!(concat!($fmt_str, "/", $fmt_str), self.numer, self.denom)
                    }
                };
                if let Some(pre_pad) = pre_pad.strip_prefix("-") {
                    f.pad_integral(false, $prefix, pre_pad)
                } else {
                    f.pad_integral(true, $prefix, &pre_pad)
                }
            }
            #[cfg(not(feature = "std"))]
            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
                let plus = if f.sign_plus() && self.numer >= T::zero() {
                    "+"
                } else {
                    ""
                };
                if self.denom.is_one() {
                    if f.alternate() {
                        write!(f, concat!("{}", $fmt_alt), plus, self.numer)
                    } else {
                        write!(f, concat!("{}", $fmt_str), plus, self.numer)
                    }
                } else {
                    if f.alternate() {
                        write!(
                            f,
                            concat!("{}", $fmt_alt, "/", $fmt_alt),
                            plus, self.numer, self.denom
                        )
                    } else {
                        write!(
                            f,
                            concat!("{}", $fmt_str, "/", $fmt_str),
                            plus, self.numer, self.denom
                        )
                    }
                }
            }
        }
    };
}

impl_formatting!(Display, "", "{}", "{:#}");
impl_formatting!(Octal, "0o", "{:o}", "{:#o}");
impl_formatting!(Binary, "0b", "{:b}", "{:#b}");
impl_formatting!(LowerHex, "0x", "{:x}", "{:#x}");
impl_formatting!(UpperHex, "0x", "{:X}", "{:#X}");
impl_formatting!(LowerExp, "", "{:e}", "{:#e}");
impl_formatting!(UpperExp, "", "{:E}", "{:#E}");

impl<T: FromStr + Clone + Integer> FromStr for Ratio<T> {
    type Err = ParseRatioError;

    /// Parses `numer/denom` or just `numer`.
+ fn from_str(s: &str) -> Result, ParseRatioError> { + let mut split = s.splitn(2, '/'); + + let n = split.next().ok_or(ParseRatioError { + kind: RatioErrorKind::ParseError, + })?; + let num = FromStr::from_str(n).map_err(|_| ParseRatioError { + kind: RatioErrorKind::ParseError, + })?; + + let d = split.next().unwrap_or("1"); + let den = FromStr::from_str(d).map_err(|_| ParseRatioError { + kind: RatioErrorKind::ParseError, + })?; + + if Zero::is_zero(&den) { + Err(ParseRatioError { + kind: RatioErrorKind::ZeroDenominator, + }) + } else { + Ok(Ratio::new(num, den)) + } + } +} + +impl From> for (T, T) { + fn from(val: Ratio) -> Self { + (val.numer, val.denom) + } +} + +#[cfg(feature = "serde")] +impl serde::Serialize for Ratio +where + T: serde::Serialize + Clone + Integer + PartialOrd, +{ + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + (self.numer(), self.denom()).serialize(serializer) + } +} + +#[cfg(feature = "serde")] +impl<'de, T> serde::Deserialize<'de> for Ratio +where + T: serde::Deserialize<'de> + Clone + Integer + PartialOrd, +{ + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + use serde::de::Unexpected; + let (numer, denom): (T, T) = serde::Deserialize::deserialize(deserializer)?; + if denom.is_zero() { + Err(Error::invalid_value( + Unexpected::Signed(0), + &"a ratio with non-zero denominator", + )) + } else { + Ok(Ratio::new_raw(numer, denom)) + } + } +} + +// FIXME: Bubble up specific errors +#[derive(Copy, Clone, Debug, PartialEq)] +pub struct ParseRatioError { + kind: RatioErrorKind, +} + +#[derive(Copy, Clone, Debug, PartialEq)] +enum RatioErrorKind { + ParseError, + ZeroDenominator, +} + +impl fmt::Display for ParseRatioError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.kind.description().fmt(f) + } +} + +#[cfg(feature = "std")] +impl Error for ParseRatioError { + #[allow(deprecated)] + fn description(&self) -> &str { + 
self.kind.description() + } +} + +impl RatioErrorKind { + fn description(&self) -> &'static str { + match *self { + RatioErrorKind::ParseError => "failed to parse integer", + RatioErrorKind::ZeroDenominator => "zero value denominator", + } + } +} + +#[cfg(feature = "num-bigint")] +impl FromPrimitive for Ratio { + fn from_i64(n: i64) -> Option { + Some(Ratio::from_integer(n.into())) + } + + fn from_i128(n: i128) -> Option { + Some(Ratio::from_integer(n.into())) + } + + fn from_u64(n: u64) -> Option { + Some(Ratio::from_integer(n.into())) + } + + fn from_u128(n: u128) -> Option { + Some(Ratio::from_integer(n.into())) + } + + fn from_f32(n: f32) -> Option { + Ratio::from_float(n) + } + + fn from_f64(n: f64) -> Option { + Ratio::from_float(n) + } +} + +macro_rules! from_primitive_integer { + ($typ:ty, $approx:ident) => { + impl FromPrimitive for Ratio<$typ> { + fn from_i64(n: i64) -> Option { + <$typ as FromPrimitive>::from_i64(n).map(Ratio::from_integer) + } + + fn from_i128(n: i128) -> Option { + <$typ as FromPrimitive>::from_i128(n).map(Ratio::from_integer) + } + + fn from_u64(n: u64) -> Option { + <$typ as FromPrimitive>::from_u64(n).map(Ratio::from_integer) + } + + fn from_u128(n: u128) -> Option { + <$typ as FromPrimitive>::from_u128(n).map(Ratio::from_integer) + } + + fn from_f32(n: f32) -> Option { + $approx(n, 10e-20, 30) + } + + fn from_f64(n: f64) -> Option { + $approx(n, 10e-20, 30) + } + } + }; +} + +from_primitive_integer!(i8, approximate_float); +from_primitive_integer!(i16, approximate_float); +from_primitive_integer!(i32, approximate_float); +from_primitive_integer!(i64, approximate_float); +from_primitive_integer!(i128, approximate_float); +from_primitive_integer!(isize, approximate_float); + +from_primitive_integer!(u8, approximate_float_unsigned); +from_primitive_integer!(u16, approximate_float_unsigned); +from_primitive_integer!(u32, approximate_float_unsigned); +from_primitive_integer!(u64, approximate_float_unsigned); 
from_primitive_integer!(u128, approximate_float_unsigned);
from_primitive_integer!(usize, approximate_float_unsigned);

impl<T: Integer + Signed + Bounded + NumCast + Clone> Ratio<T> {
    /// Returns a `Ratio` approximating the float `f`, if one exists.
    pub fn approximate_float<F: FloatCore + NumCast>(f: F) -> Option<Ratio<T>> {
        // 1/10e-20 < 1/2**32 which seems like a good default, and 30 seems
        // to work well. Might want to choose something based on the types in the future, e.g.
        // T::max().recip() and T::bits() or something similar.
        let epsilon = <F as NumCast>::from(10e-20).expect("Can't convert 10e-20");
        approximate_float(f, epsilon, 30)
    }
}

impl<T: Integer + Bounded + NumCast + Clone> Ratio<T> {
    /// Returns a `Ratio` approximating the non-negative float `f`, if one exists.
    pub fn approximate_float_unsigned<F: FloatCore + NumCast>(f: F) -> Option<Ratio<T>> {
        // 1/10e-20 < 1/2**32 which seems like a good default, and 30 seems
        // to work well. Might want to choose something based on the types in the future, e.g.
        // T::max().recip() and T::bits() or something similar.
        let epsilon = <F as NumCast>::from(10e-20).expect("Can't convert 10e-20");
        approximate_float_unsigned(f, epsilon, 30)
    }
}

fn approximate_float<T, F>(val: F, max_error: F, max_iterations: usize) -> Option<Ratio<T>>
where
    T: Integer + Signed + Bounded + NumCast + Clone,
    F: FloatCore + NumCast,
{
    let negative = val.is_sign_negative();
    let abs_val = val.abs();

    let r = approximate_float_unsigned(abs_val, max_error, max_iterations)?;

    // Make negative again if needed
    Some(if negative { r.neg() } else { r })
}

// No Unsigned constraint because this also works on positive integers and is called
// like that, see above
fn approximate_float_unsigned<T, F>(val: F, max_error: F, max_iterations: usize) -> Option<Ratio<T>>
where
    T: Integer + Bounded + NumCast + Clone,
    F: FloatCore + NumCast,
{
    // Continued fractions algorithm
    // https://web.archive.org/web/20200629111319/http://mathforum.org:80/dr.math/faq/faq.fractions.html#decfrac

    if val < F::zero() || val.is_nan() {
        return None;
    }

    let mut q = val;
    let mut n0 = T::zero();
    let mut d0 = T::one();
    let mut n1 = T::one();
    let mut d1 = T::zero();

    let t_max = T::max_value();
    let t_max_f = <F as NumCast>::from(t_max.clone())?;
> T::MAX + let epsilon = t_max_f.recip(); + + // Overflow + if q > t_max_f { + return None; + } + + for _ in 0..max_iterations { + let a = match ::from(q) { + None => break, + Some(a) => a, + }; + + let a_f = match ::from(a.clone()) { + None => break, + Some(a_f) => a_f, + }; + let f = q - a_f; + + // Prevent overflow + if !a.is_zero() + && (n1 > t_max.clone() / a.clone() + || d1 > t_max.clone() / a.clone() + || a.clone() * n1.clone() > t_max.clone() - n0.clone() + || a.clone() * d1.clone() > t_max.clone() - d0.clone()) + { + break; + } + + let n = a.clone() * n1.clone() + n0.clone(); + let d = a.clone() * d1.clone() + d0.clone(); + + n0 = n1; + d0 = d1; + n1 = n.clone(); + d1 = d.clone(); + + // Simplify fraction. Doing so here instead of at the end + // allows us to get closer to the target value without overflows + let g = Integer::gcd(&n1, &d1); + if !g.is_zero() { + n1 = n1 / g.clone(); + d1 = d1 / g.clone(); + } + + // Close enough? + let (n_f, d_f) = match (::from(n), ::from(d)) { + (Some(n_f), Some(d_f)) => (n_f, d_f), + _ => break, + }; + if (n_f / d_f - val).abs() < max_error { + break; + } + + // Prevent division by ~0 + if f < epsilon { + break; + } + q = f.recip(); + } + + // Overflow + if d1.is_zero() { + return None; + } + + Some(Ratio::new(n1, d1)) +} + +#[cfg(not(feature = "num-bigint"))] +macro_rules! 
macro_rules! to_primitive_small {
    ($($type_name:ty)*) => ($(
        impl ToPrimitive for Ratio<$type_name> {
            fn to_i64(&self) -> Option<i64> {
                self.to_integer().to_i64()
            }

            fn to_i128(&self) -> Option<i128> {
                self.to_integer().to_i128()
            }

            fn to_u64(&self) -> Option<u64> {
                self.to_integer().to_u64()
            }

            fn to_u128(&self) -> Option<u128> {
                self.to_integer().to_u128()
            }

            fn to_f64(&self) -> Option<f64> {
                let float = self.numer.to_f64().unwrap() / self.denom.to_f64().unwrap();
                if float.is_nan() {
                    None
                } else {
                    Some(float)
                }
            }
        }
    )*)
}

#[cfg(not(feature = "num-bigint"))]
to_primitive_small!(u8 i8 u16 i16 u32 i32);

#[cfg(all(target_pointer_width = "32", not(feature = "num-bigint")))]
to_primitive_small!(usize isize);

#[cfg(not(feature = "num-bigint"))]
macro_rules! to_primitive_64 {
    ($($type_name:ty)*) => ($(
        impl ToPrimitive for Ratio<$type_name> {
            fn to_i64(&self) -> Option<i64> {
                self.to_integer().to_i64()
            }

            fn to_i128(&self) -> Option<i128> {
                self.to_integer().to_i128()
            }

            fn to_u64(&self) -> Option<u64> {
                self.to_integer().to_u64()
            }

            fn to_u128(&self) -> Option<u128> {
                self.to_integer().to_u128()
            }

            fn to_f64(&self) -> Option<f64> {
                let float = ratio_to_f64(
                    self.numer as i128,
                    self.denom as i128
                );
                if float.is_nan() {
                    None
                } else {
                    Some(float)
                }
            }
        }
    )*)
}

#[cfg(not(feature = "num-bigint"))]
to_primitive_64!(u64 i64);

#[cfg(all(target_pointer_width = "64", not(feature = "num-bigint")))]
to_primitive_64!(usize isize);

#[cfg(feature = "num-bigint")]
impl ToPrimitive for Ratio<BigInt> {
    fn to_i64(&self) -> Option<i64> {
        self.to_integer().to_i64()
    }

    fn to_i128(&self) -> Option<i128> {
        self.to_integer().to_i128()
    }

    fn to_u64(&self) -> Option<u64> {
        self.to_integer().to_u64()
    }

    fn to_u128(&self) -> Option<u128> {
        self.to_integer().to_u128()
    }

    fn to_f64(&self) -> Option<f64> {
        let float = match (self.numer.to_i64(), self.denom.to_i64()) {
            // NOTE(review): the `BigInt<32>` limb count is taken from the
            // surviving `BigInt<32>` annotations below — confirm against the
            // vendored num-bigint-generic API.
            (Some(numer), Some(denom)) => ratio_to_f64(
                <BigInt<32>>::from(numer),
>::from(denom), + ), + _ => { + let numer: BigInt<32> = self.numer.to_bigint()?; + let denom: BigInt<32> = self.denom.to_bigint()?; + ratio_to_f64(numer, denom) + } + }; + if float.is_nan() { + None + } else { + Some(float) + } + } +} + +trait Bits { + fn bits(&self) -> u64; +} + +#[cfg(feature = "num-bigint")] +impl Bits for BigInt { + fn bits(&self) -> u64 { + self.bits() + } +} + +#[cfg(feature = "num-bigint")] +impl Bits for BigInt<32> { + fn bits(&self) -> u64 { + self.bits() + } +} + +impl Bits for i128 { + fn bits(&self) -> u64 { + (128 - self.wrapping_abs().leading_zeros()).into() + } +} + +/// Converts a ratio of `T` to an f64. +/// +/// In addition to stated trait bounds, `T` must be able to hold numbers 56 bits larger than +/// the largest of `numer` and `denom`. This is automatically true if `T` is `BigInt`. +fn ratio_to_f64 + ToPrimitive>( + numer: T, + denom: T, +) -> f64 { + use core::f64::{INFINITY, MANTISSA_DIGITS, MAX_EXP, MIN_EXP, RADIX}; + + assert_eq!( + RADIX, 2, + "only floating point implementations with radix 2 are supported" + ); + + // Inclusive upper and lower bounds to the range of exactly-representable ints in an f64. + const MAX_EXACT_INT: i64 = 1i64 << MANTISSA_DIGITS; + const MIN_EXACT_INT: i64 = -MAX_EXACT_INT; + + let flo_sign = numer.signum().to_f64().unwrap() / denom.signum().to_f64().unwrap(); + if !flo_sign.is_normal() { + return flo_sign; + } + + // Fast track: both sides can losslessly be converted to f64s. In this case, letting the + // FPU do the job is faster and easier. In any other case, converting to f64s may lead + // to an inexact result: https://stackoverflow.com/questions/56641441/. + if let (Some(n), Some(d)) = (numer.to_i64(), denom.to_i64()) { + let exact = MIN_EXACT_INT..=MAX_EXACT_INT; + if exact.contains(&n) && exact.contains(&d) { + return n.to_f64().unwrap() / d.to_f64().unwrap(); + } + } + + // Otherwise, the goal is to obtain a quotient with at least 55 bits. 
53 of these bits will + // be used as the mantissa of the resulting float, and the remaining two are for rounding. + // There's an error of up to 1 on the number of resulting bits, so we may get either 55 or + // 56 bits. + let mut numer = numer.abs(); + let mut denom = denom.abs(); + let (is_diff_positive, absolute_diff) = match numer.bits().checked_sub(denom.bits()) { + Some(diff) => (true, diff), + None => (false, denom.bits() - numer.bits()), + }; + + // Filter out overflows and underflows. After this step, the signed difference fits in an + // isize. + if is_diff_positive && absolute_diff > MAX_EXP as u64 { + return INFINITY * flo_sign; + } + if !is_diff_positive && absolute_diff > -MIN_EXP as u64 + MANTISSA_DIGITS as u64 + 1 { + return 0.0 * flo_sign; + } + let diff = if is_diff_positive { + absolute_diff.to_isize().unwrap() + } else { + -absolute_diff.to_isize().unwrap() + }; + + // Shift is chosen so that the quotient will have 55 or 56 bits. The exception is if the + // quotient is going to be subnormal, in which case it may have fewer bits. + let shift: isize = diff.max(MIN_EXP as isize) - MANTISSA_DIGITS as isize - 2; + if shift >= 0 { + denom <<= shift as usize + } else { + numer <<= -shift as usize + }; + + let (quotient, remainder) = numer.div_rem(&denom); + + // This is guaranteed to fit since we've set up quotient to be at most 56 bits. + let mut quotient = quotient.to_u64().unwrap(); + let n_rounding_bits = { + let quotient_bits = 64 - quotient.leading_zeros() as isize; + let subnormal_bits = MIN_EXP as isize - shift; + quotient_bits.max(subnormal_bits) - MANTISSA_DIGITS as isize + } as usize; + debug_assert!(n_rounding_bits == 2 || n_rounding_bits == 3); + let rounding_bit_mask = (1u64 << n_rounding_bits) - 1; + + // Round to 53 bits with round-to-even. For rounding, we need to take into account both + // our rounding bits and the division's remainder. 
+ let ls_bit = quotient & (1u64 << n_rounding_bits) != 0; + let ms_rounding_bit = quotient & (1u64 << (n_rounding_bits - 1)) != 0; + let ls_rounding_bits = quotient & (rounding_bit_mask >> 1) != 0; + if ms_rounding_bit && (ls_bit || ls_rounding_bits || !remainder.is_zero()) { + quotient += 1u64 << n_rounding_bits; + } + quotient &= !rounding_bit_mask; + + // The quotient is guaranteed to be exactly representable as it's now 53 bits + 2 or 3 + // trailing zeros, so there is no risk of a rounding error here. + let q_float = quotient as f64 * flo_sign; + ldexp(q_float, shift as i32) +} + +/// Multiply `x` by 2 to the power of `exp`. Returns an accurate result even if `2^exp` is not +/// representable. +fn ldexp(x: f64, exp: i32) -> f64 { + use core::f64::{INFINITY, MANTISSA_DIGITS, MAX_EXP, RADIX}; + + assert_eq!( + RADIX, 2, + "only floating point implementations with radix 2 are supported" + ); + + const EXPONENT_MASK: u64 = 0x7ff << 52; + const MAX_UNSIGNED_EXPONENT: i32 = 0x7fe; + const MIN_SUBNORMAL_POWER: i32 = MANTISSA_DIGITS as i32; + + if x.is_zero() || x.is_infinite() || x.is_nan() { + return x; + } + + // Filter out obvious over / underflows to make sure the resulting exponent fits in an isize. + if exp > 3 * MAX_EXP { + return INFINITY * x.signum(); + } else if exp < -3 * MAX_EXP { + return 0.0 * x.signum(); + } + + // curr_exp is the x's *biased* exponent, and is in the [-54, MAX_UNSIGNED_EXPONENT] range. + let (bits, curr_exp) = if !x.is_normal() { + // If x is subnormal, we make it normal by multiplying by 2^53. This causes no loss of + // precision or rounding. + let normal_x = x * 2f64.powi(MIN_SUBNORMAL_POWER); + let bits = normal_x.to_bits(); + // This cast is safe because the exponent is at most 0x7fe, which fits in an i32. 
+ ( + bits, + ((bits & EXPONENT_MASK) >> 52) as i32 - MIN_SUBNORMAL_POWER, + ) + } else { + let bits = x.to_bits(); + let curr_exp = (bits & EXPONENT_MASK) >> 52; + // This cast is safe because the exponent is at most 0x7fe, which fits in an i32. + (bits, curr_exp as i32) + }; + + // The addition can't overflow because exponent is between 0 and 0x7fe, and exp is between + // -2*MAX_EXP and 2*MAX_EXP. + let new_exp = curr_exp + exp; + + if new_exp > MAX_UNSIGNED_EXPONENT { + INFINITY * x.signum() + } else if new_exp > 0 { + // Normal case: exponent is not too large nor subnormal. + let new_bits = (bits & !EXPONENT_MASK) | ((new_exp as u64) << 52); + f64::from_bits(new_bits) + } else if new_exp >= -(MANTISSA_DIGITS as i32) { + // Result is subnormal but may not be zero. + // In this case, we increase the exponent by 54 to make it normal, then multiply the end + // result by 2^-53. This results in a single multiplication with no prior rounding error, + // so there is no risk of double rounding. + let new_exp = new_exp + MIN_SUBNORMAL_POWER; + debug_assert!(new_exp >= 0); + let new_bits = (bits & !EXPONENT_MASK) | ((new_exp as u64) << 52); + f64::from_bits(new_bits) * 2f64.powi(-MIN_SUBNORMAL_POWER) + } else { + // Result is zero. 
+ return 0.0 * x.signum(); + } +} + +#[cfg(test)] +#[cfg(feature = "std")] +fn hash(x: &T) -> u64 { + use std::collections::hash_map::RandomState; + use std::hash::BuildHasher; + let mut hasher = ::Hasher::new(); + x.hash(&mut hasher); + hasher.finish() +} + +#[cfg(test)] +mod test { + use super::ldexp; + #[cfg(feature = "num-bigint")] + use super::{BigInt, BigRational}; + use super::{Ratio, Rational64}; + + use core::f64; + use core::i32; + use core::i64; + use core::str::FromStr; + use num_integer::Integer; + use num_traits::ToPrimitive; + use num_traits::{FromPrimitive, One, Pow, Signed, Zero}; + + pub const _0: Rational64 = Ratio { numer: 0, denom: 1 }; + pub const _1: Rational64 = Ratio { numer: 1, denom: 1 }; + pub const _2: Rational64 = Ratio { numer: 2, denom: 1 }; + pub const _NEG2: Rational64 = Ratio { + numer: -2, + denom: 1, + }; + pub const _8: Rational64 = Ratio { numer: 8, denom: 1 }; + pub const _15: Rational64 = Ratio { + numer: 15, + denom: 1, + }; + pub const _16: Rational64 = Ratio { + numer: 16, + denom: 1, + }; + + pub const _1_2: Rational64 = Ratio { numer: 1, denom: 2 }; + pub const _1_8: Rational64 = Ratio { numer: 1, denom: 8 }; + pub const _1_15: Rational64 = Ratio { + numer: 1, + denom: 15, + }; + pub const _1_16: Rational64 = Ratio { + numer: 1, + denom: 16, + }; + pub const _3_2: Rational64 = Ratio { numer: 3, denom: 2 }; + pub const _5_2: Rational64 = Ratio { numer: 5, denom: 2 }; + pub const _NEG1_2: Rational64 = Ratio { + numer: -1, + denom: 2, + }; + pub const _1_NEG2: Rational64 = Ratio { + numer: 1, + denom: -2, + }; + pub const _NEG1_NEG2: Rational64 = Ratio { + numer: -1, + denom: -2, + }; + pub const _1_3: Rational64 = Ratio { numer: 1, denom: 3 }; + pub const _NEG1_3: Rational64 = Ratio { + numer: -1, + denom: 3, + }; + pub const _2_3: Rational64 = Ratio { numer: 2, denom: 3 }; + pub const _NEG2_3: Rational64 = Ratio { + numer: -2, + denom: 3, + }; + pub const _MIN: Rational64 = Ratio { + numer: i64::MIN, + denom: 1, + }; + 
pub const _MIN_P1: Rational64 = Ratio { + numer: i64::MIN + 1, + denom: 1, + }; + pub const _MAX: Rational64 = Ratio { + numer: i64::MAX, + denom: 1, + }; + pub const _MAX_M1: Rational64 = Ratio { + numer: i64::MAX - 1, + denom: 1, + }; + pub const _BILLION: Rational64 = Ratio { + numer: 1_000_000_000, + denom: 1, + }; + + #[cfg(feature = "num-bigint")] + pub fn to_big(n: Rational64) -> BigRational { + Ratio::new( + FromPrimitive::from_i64(n.numer).unwrap(), + FromPrimitive::from_i64(n.denom).unwrap(), + ) + } + #[cfg(not(feature = "num-bigint"))] + pub fn to_big(n: Rational64) -> Rational64 { + Ratio::new( + FromPrimitive::from_i64(n.numer).unwrap(), + FromPrimitive::from_i64(n.denom).unwrap(), + ) + } + + #[test] + fn test_test_constants() { + // check our constants are what Ratio::new etc. would make. + assert_eq!(_0, Zero::zero()); + assert_eq!(_1, One::one()); + assert_eq!(_2, Ratio::from_integer(2)); + assert_eq!(_1_2, Ratio::new(1, 2)); + assert_eq!(_3_2, Ratio::new(3, 2)); + assert_eq!(_NEG1_2, Ratio::new(-1, 2)); + assert_eq!(_2, From::from(2)); + } + + #[test] + fn test_new_reduce() { + assert_eq!(Ratio::new(2, 2), One::one()); + assert_eq!(Ratio::new(0, i32::MIN), Zero::zero()); + assert_eq!(Ratio::new(i32::MIN, i32::MIN), One::one()); + } + #[test] + #[should_panic] + fn test_new_zero() { + let _a = Ratio::new(1, 0); + } + + #[test] + fn test_approximate_float() { + assert_eq!(Ratio::from_f32(0.5f32), Some(Ratio::new(1i64, 2))); + assert_eq!(Ratio::from_f64(0.5f64), Some(Ratio::new(1i32, 2))); + assert_eq!(Ratio::from_f32(5f32), Some(Ratio::new(5i64, 1))); + assert_eq!(Ratio::from_f64(5f64), Some(Ratio::new(5i32, 1))); + assert_eq!(Ratio::from_f32(29.97f32), Some(Ratio::new(2997i64, 100))); + assert_eq!(Ratio::from_f32(-29.97f32), Some(Ratio::new(-2997i64, 100))); + + assert_eq!(Ratio::::from_f32(63.5f32), Some(Ratio::new(127i8, 2))); + assert_eq!(Ratio::::from_f32(126.5f32), Some(Ratio::new(126i8, 1))); + assert_eq!(Ratio::::from_f32(127.0f32), 
Some(Ratio::new(127i8, 1))); + assert_eq!(Ratio::::from_f32(127.5f32), None); + assert_eq!(Ratio::::from_f32(-63.5f32), Some(Ratio::new(-127i8, 2))); + assert_eq!( + Ratio::::from_f32(-126.5f32), + Some(Ratio::new(-126i8, 1)) + ); + assert_eq!( + Ratio::::from_f32(-127.0f32), + Some(Ratio::new(-127i8, 1)) + ); + assert_eq!(Ratio::::from_f32(-127.5f32), None); + + assert_eq!(Ratio::::from_f32(-127f32), None); + assert_eq!(Ratio::::from_f32(127f32), Some(Ratio::new(127u8, 1))); + assert_eq!(Ratio::::from_f32(127.5f32), Some(Ratio::new(255u8, 2))); + assert_eq!(Ratio::::from_f32(256f32), None); + + assert_eq!(Ratio::::from_f64(-10e200), None); + assert_eq!(Ratio::::from_f64(10e200), None); + assert_eq!(Ratio::::from_f64(f64::INFINITY), None); + assert_eq!(Ratio::::from_f64(f64::NEG_INFINITY), None); + assert_eq!(Ratio::::from_f64(f64::NAN), None); + assert_eq!( + Ratio::::from_f64(f64::EPSILON), + Some(Ratio::new(1, 4503599627370496)) + ); + assert_eq!(Ratio::::from_f64(0.0), Some(Ratio::new(0, 1))); + assert_eq!(Ratio::::from_f64(-0.0), Some(Ratio::new(0, 1))); + } + + #[test] + #[allow(clippy::eq_op)] + fn test_cmp() { + assert!(_0 == _0 && _1 == _1); + assert!(_0 != _1 && _1 != _0); + assert!(_0 < _1 && !(_1 < _0)); + assert!(_1 > _0 && !(_0 > _1)); + + assert!(_0 <= _0 && _1 <= _1); + assert!(_0 <= _1 && !(_1 <= _0)); + + assert!(_0 >= _0 && _1 >= _1); + assert!(_1 >= _0 && !(_0 >= _1)); + + let _0_2: Rational64 = Ratio::new_raw(0, 2); + assert_eq!(_0, _0_2); + } + + #[test] + fn test_cmp_overflow() { + use core::cmp::Ordering; + + // issue #7 example: + let big = Ratio::new(128u8, 1); + let small = big.recip(); + assert!(big > small); + + // try a few that are closer together + // (some matching numer, some matching denom, some neither) + let ratios = [ + Ratio::new(125_i8, 127_i8), + Ratio::new(63_i8, 64_i8), + Ratio::new(124_i8, 125_i8), + Ratio::new(125_i8, 126_i8), + Ratio::new(126_i8, 127_i8), + Ratio::new(127_i8, 126_i8), + ]; + + fn check_cmp(a: Ratio, b: 
Ratio, ord: Ordering) { + #[cfg(feature = "std")] + println!("comparing {} and {}", a, b); + assert_eq!(a.cmp(&b), ord); + assert_eq!(b.cmp(&a), ord.reverse()); + } + + for (i, &a) in ratios.iter().enumerate() { + check_cmp(a, a, Ordering::Equal); + check_cmp(-a, a, Ordering::Less); + for &b in &ratios[i + 1..] { + check_cmp(a, b, Ordering::Less); + check_cmp(-a, -b, Ordering::Greater); + check_cmp(a.recip(), b.recip(), Ordering::Greater); + check_cmp(-a.recip(), -b.recip(), Ordering::Less); + } + } + } + + #[test] + fn test_to_integer() { + assert_eq!(_0.to_integer(), 0); + assert_eq!(_1.to_integer(), 1); + assert_eq!(_2.to_integer(), 2); + assert_eq!(_1_2.to_integer(), 0); + assert_eq!(_3_2.to_integer(), 1); + assert_eq!(_NEG1_2.to_integer(), 0); + } + + #[test] + fn test_numer() { + assert_eq!(_0.numer(), &0); + assert_eq!(_1.numer(), &1); + assert_eq!(_2.numer(), &2); + assert_eq!(_1_2.numer(), &1); + assert_eq!(_3_2.numer(), &3); + assert_eq!(_NEG1_2.numer(), &(-1)); + } + #[test] + fn test_denom() { + assert_eq!(_0.denom(), &1); + assert_eq!(_1.denom(), &1); + assert_eq!(_2.denom(), &1); + assert_eq!(_1_2.denom(), &2); + assert_eq!(_3_2.denom(), &2); + assert_eq!(_NEG1_2.denom(), &2); + } + + #[test] + fn test_is_integer() { + assert!(_0.is_integer()); + assert!(_1.is_integer()); + assert!(_2.is_integer()); + assert!(!_1_2.is_integer()); + assert!(!_3_2.is_integer()); + assert!(!_NEG1_2.is_integer()); + } + + #[cfg(not(feature = "std"))] + use core::fmt::{self, Write}; + #[cfg(not(feature = "std"))] + #[derive(Debug)] + struct NoStdTester { + cursor: usize, + buf: [u8; NoStdTester::BUF_SIZE], + } + + #[cfg(not(feature = "std"))] + impl NoStdTester { + fn new() -> NoStdTester { + NoStdTester { + buf: [0; Self::BUF_SIZE], + cursor: 0, + } + } + + fn clear(&mut self) { + self.buf = [0; Self::BUF_SIZE]; + self.cursor = 0; + } + + const WRITE_ERR: &'static str = "Formatted output too long"; + const BUF_SIZE: usize = 32; + } + + #[cfg(not(feature = "std"))] + impl 
Write for NoStdTester { + fn write_str(&mut self, s: &str) -> fmt::Result { + for byte in s.bytes() { + self.buf[self.cursor] = byte; + self.cursor += 1; + if self.cursor >= self.buf.len() { + return Err(fmt::Error {}); + } + } + Ok(()) + } + } + + #[cfg(not(feature = "std"))] + impl PartialEq for NoStdTester { + fn eq(&self, other: &str) -> bool { + let other = other.as_bytes(); + for index in 0..self.cursor { + if self.buf.get(index) != other.get(index) { + return false; + } + } + true + } + } + + macro_rules! assert_fmt_eq { + ($fmt_args:expr, $string:expr) => { + #[cfg(not(feature = "std"))] + { + let mut tester = NoStdTester::new(); + write!(tester, "{}", $fmt_args).expect(NoStdTester::WRITE_ERR); + assert_eq!(tester, *$string); + tester.clear(); + } + #[cfg(feature = "std")] + { + assert_eq!(std::fmt::format($fmt_args), $string); + } + }; + } + + #[test] + fn test_show() { + // Test: + // :b :o :x, :X, :? + // alternate or not (#) + // positive and negative + // padding + // does not test precision (i.e. 
truncation) + assert_fmt_eq!(format_args!("{}", _2), "2"); + assert_fmt_eq!(format_args!("{:+}", _2), "+2"); + assert_fmt_eq!(format_args!("{:-}", _2), "2"); + assert_fmt_eq!(format_args!("{}", _1_2), "1/2"); + assert_fmt_eq!(format_args!("{}", -_1_2), "-1/2"); // test negatives + assert_fmt_eq!(format_args!("{}", _0), "0"); + assert_fmt_eq!(format_args!("{}", -_2), "-2"); + assert_fmt_eq!(format_args!("{:+}", -_2), "-2"); + assert_fmt_eq!(format_args!("{:b}", _2), "10"); + assert_fmt_eq!(format_args!("{:#b}", _2), "0b10"); + assert_fmt_eq!(format_args!("{:b}", _1_2), "1/10"); + assert_fmt_eq!(format_args!("{:+b}", _1_2), "+1/10"); + assert_fmt_eq!(format_args!("{:-b}", _1_2), "1/10"); + assert_fmt_eq!(format_args!("{:b}", _0), "0"); + assert_fmt_eq!(format_args!("{:#b}", _1_2), "0b1/0b10"); + // no std does not support padding + #[cfg(feature = "std")] + assert_eq!(&format!("{:010b}", _1_2), "0000001/10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:#010b}", _1_2), "0b001/0b10"); + let half_i8: Ratio = Ratio::new(1_i8, 2_i8); + assert_fmt_eq!(format_args!("{:b}", -half_i8), "11111111/10"); + assert_fmt_eq!(format_args!("{:#b}", -half_i8), "0b11111111/0b10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:05}", Ratio::new(-1_i8, 1_i8)), "-0001"); + + assert_fmt_eq!(format_args!("{:o}", _8), "10"); + assert_fmt_eq!(format_args!("{:o}", _1_8), "1/10"); + assert_fmt_eq!(format_args!("{:o}", _0), "0"); + assert_fmt_eq!(format_args!("{:#o}", _1_8), "0o1/0o10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:010o}", _1_8), "0000001/10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:#010o}", _1_8), "0o001/0o10"); + assert_fmt_eq!(format_args!("{:o}", -half_i8), "377/2"); + assert_fmt_eq!(format_args!("{:#o}", -half_i8), "0o377/0o2"); + + assert_fmt_eq!(format_args!("{:x}", _16), "10"); + assert_fmt_eq!(format_args!("{:x}", _15), "f"); + assert_fmt_eq!(format_args!("{:x}", _1_16), "1/10"); + assert_fmt_eq!(format_args!("{:x}", _1_15), "1/f"); + 
assert_fmt_eq!(format_args!("{:x}", _0), "0"); + assert_fmt_eq!(format_args!("{:#x}", _1_16), "0x1/0x10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:010x}", _1_16), "0000001/10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:#010x}", _1_16), "0x001/0x10"); + assert_fmt_eq!(format_args!("{:x}", -half_i8), "ff/2"); + assert_fmt_eq!(format_args!("{:#x}", -half_i8), "0xff/0x2"); + + assert_fmt_eq!(format_args!("{:X}", _16), "10"); + assert_fmt_eq!(format_args!("{:X}", _15), "F"); + assert_fmt_eq!(format_args!("{:X}", _1_16), "1/10"); + assert_fmt_eq!(format_args!("{:X}", _1_15), "1/F"); + assert_fmt_eq!(format_args!("{:X}", _0), "0"); + assert_fmt_eq!(format_args!("{:#X}", _1_16), "0x1/0x10"); + #[cfg(feature = "std")] + assert_eq!(format!("{:010X}", _1_16), "0000001/10"); + #[cfg(feature = "std")] + assert_eq!(format!("{:#010X}", _1_16), "0x001/0x10"); + assert_fmt_eq!(format_args!("{:X}", -half_i8), "FF/2"); + assert_fmt_eq!(format_args!("{:#X}", -half_i8), "0xFF/0x2"); + + assert_fmt_eq!(format_args!("{:e}", -_2), "-2e0"); + assert_fmt_eq!(format_args!("{:#e}", -_2), "-2e0"); + assert_fmt_eq!(format_args!("{:+e}", -_2), "-2e0"); + assert_fmt_eq!(format_args!("{:e}", _BILLION), "1e9"); + assert_fmt_eq!(format_args!("{:+e}", _BILLION), "+1e9"); + assert_fmt_eq!(format_args!("{:e}", _BILLION.recip()), "1e0/1e9"); + assert_fmt_eq!(format_args!("{:+e}", _BILLION.recip()), "+1e0/1e9"); + + assert_fmt_eq!(format_args!("{:E}", -_2), "-2E0"); + assert_fmt_eq!(format_args!("{:#E}", -_2), "-2E0"); + assert_fmt_eq!(format_args!("{:+E}", -_2), "-2E0"); + assert_fmt_eq!(format_args!("{:E}", _BILLION), "1E9"); + assert_fmt_eq!(format_args!("{:+E}", _BILLION), "+1E9"); + assert_fmt_eq!(format_args!("{:E}", _BILLION.recip()), "1E0/1E9"); + assert_fmt_eq!(format_args!("{:+E}", _BILLION.recip()), "+1E0/1E9"); + } + + mod arith { + use super::super::{Ratio, Rational64}; + use super::{to_big, _0, _1, _1_2, _2, _3_2, _5_2, _MAX, _MAX_M1, _MIN, _MIN_P1, _NEG1_2}; + use 
core::fmt::Debug; + use num_integer::Integer; + use num_traits::{Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, NumAssign}; + + #[test] + fn test_add() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a + b, c); + assert_eq!( + { + let mut x = a; + x += b; + x + }, + c + ); + assert_eq!(to_big(a) + to_big(b), to_big(c)); + assert_eq!(a.checked_add(&b), Some(c)); + assert_eq!(to_big(a).checked_add(&to_big(b)), Some(to_big(c))); + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a + b, c); + assert_eq!( + { + let mut x = a; + x += b; + x + }, + c + ); + } + + test(_1, _1_2, _3_2); + test(_1, _1, _2); + test(_1_2, _3_2, _2); + test(_1_2, _NEG1_2, _0); + test_assign(_1_2, 1, _3_2); + } + + #[test] + fn test_add_overflow() { + // compares Ratio(1, T::max_value()) + Ratio(1, T::max_value()) + // to Ratio(1+1, T::max_value()) for each integer type. + // Previously, this calculation would overflow. + fn test_add_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign, + { + let _1_max = Ratio::new(T::one(), T::max_value()); + let _2_max = Ratio::new(T::one() + T::one(), T::max_value()); + assert_eq!(_1_max.clone() + _1_max.clone(), _2_max); + assert_eq!( + { + let mut tmp = _1_max.clone(); + tmp += _1_max; + tmp + }, + _2_max + ); + } + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + } + + #[test] + fn test_sub() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a - b, c); + assert_eq!( + { + let mut x = a; + x -= b; + x + }, + c + ); + assert_eq!(to_big(a) - to_big(b), to_big(c)); + assert_eq!(a.checked_sub(&b), Some(c)); + 
assert_eq!(to_big(a).checked_sub(&to_big(b)), Some(to_big(c))); + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a - b, c); + assert_eq!( + { + let mut x = a; + x -= b; + x + }, + c + ); + } + + test(_1, _1_2, _1_2); + test(_3_2, _1_2, _1); + test(_1, _NEG1_2, _3_2); + test_assign(_1_2, 1, _NEG1_2); + } + + #[test] + fn test_sub_overflow() { + // compares Ratio(1, T::max_value()) - Ratio(1, T::max_value()) to T::zero() + // for each integer type. Previously, this calculation would overflow. + fn test_sub_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign, + { + let _1_max: Ratio = Ratio::new(T::one(), T::max_value()); + assert!(T::is_zero(&(_1_max.clone() - _1_max.clone()).numer)); + { + let mut tmp: Ratio = _1_max.clone(); + tmp -= _1_max; + assert!(T::is_zero(&tmp.numer)); + } + } + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + } + + #[test] + fn test_mul() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a * b, c); + assert_eq!( + { + let mut x = a; + x *= b; + x + }, + c + ); + assert_eq!(to_big(a) * to_big(b), to_big(c)); + assert_eq!(a.checked_mul(&b), Some(c)); + assert_eq!(to_big(a).checked_mul(&to_big(b)), Some(to_big(c))); + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a * b, c); + assert_eq!( + { + let mut x = a; + x *= b; + x + }, + c + ); + } + + test(_1, _1_2, _1_2); + test(_1_2, _3_2, Ratio::new(3, 4)); + test(_1_2, _NEG1_2, Ratio::new(-1, 4)); + test_assign(_1_2, 2, _1); + } + + #[test] + fn test_mul_overflow() { + fn test_mul_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign + CheckedMul, + { + let two 
= T::one() + T::one(); + let _3 = T::one() + T::one() + T::one(); + + // 1/big * 2/3 = 1/(max/4*3), where big is max/2 + // make big = max/2, but also divisible by 2 + let big = T::max_value() / two.clone() / two.clone() * two.clone(); + let _1_big: Ratio = Ratio::new(T::one(), big.clone()); + let _2_3: Ratio = Ratio::new(two.clone(), _3.clone()); + assert_eq!(None, big.clone().checked_mul(&_3.clone())); + let expected = Ratio::new(T::one(), big / two.clone() * _3.clone()); + assert_eq!(expected.clone(), _1_big.clone() * _2_3.clone()); + assert_eq!( + Some(expected.clone()), + _1_big.clone().checked_mul(&_2_3.clone()) + ); + assert_eq!(expected, { + let mut tmp = _1_big; + tmp *= _2_3; + tmp + }); + + // big/3 * 3 = big/1 + // make big = max/2, but make it indivisible by 3 + let big = T::max_value() / two / _3.clone() * _3.clone() + T::one(); + assert_eq!(None, big.clone().checked_mul(&_3.clone())); + let big_3 = Ratio::new(big.clone(), _3.clone()); + let expected = Ratio::new(big, T::one()); + assert_eq!(expected, big_3.clone() * _3.clone()); + assert_eq!(expected, { + let mut tmp = big_3; + tmp *= _3; + tmp + }); + } + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + } + + #[test] + fn test_div() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a / b, c); + assert_eq!( + { + let mut x = a; + x /= b; + x + }, + c + ); + assert_eq!(to_big(a) / to_big(b), to_big(c)); + assert_eq!(a.checked_div(&b), Some(c)); + assert_eq!(to_big(a).checked_div(&to_big(b)), Some(to_big(c))); + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a / b, c); + assert_eq!( + { + let mut x = a; + x /= b; + x + }, + c + ); + } + 
+ test(_1, _1_2, _2); + test(_3_2, _1_2, _1 + _2); + test(_1, _NEG1_2, _NEG1_2 + _NEG1_2 + _NEG1_2 + _NEG1_2); + test_assign(_1, 2, _1_2); + } + + #[test] + fn test_div_overflow() { + fn test_div_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign + CheckedMul, + { + let two = T::one() + T::one(); + let _3 = T::one() + T::one() + T::one(); + + // 1/big / 3/2 = 1/(max/4*3), where big is max/2 + // big ~ max/2, and big is divisible by 2 + let big = T::max_value() / two.clone() / two.clone() * two.clone(); + assert_eq!(None, big.clone().checked_mul(&_3.clone())); + let _1_big: Ratio = Ratio::new(T::one(), big.clone()); + let _3_two: Ratio = Ratio::new(_3.clone(), two.clone()); + let expected = Ratio::new(T::one(), big / two.clone() * _3.clone()); + assert_eq!(expected.clone(), _1_big.clone() / _3_two.clone()); + assert_eq!( + Some(expected.clone()), + _1_big.clone().checked_div(&_3_two.clone()) + ); + assert_eq!(expected, { + let mut tmp = _1_big; + tmp /= _3_two; + tmp + }); + + // 3/big / 3 = 1/big where big is max/2 + // big ~ max/2, and big is not divisible by 3 + let big = T::max_value() / two / _3.clone() * _3.clone() + T::one(); + assert_eq!(None, big.clone().checked_mul(&_3.clone())); + let _3_big = Ratio::new(_3.clone(), big.clone()); + let expected = Ratio::new(T::one(), big); + assert_eq!(expected, _3_big.clone() / _3.clone()); + assert_eq!(expected, { + let mut tmp = _3_big; + tmp /= _3; + tmp + }); + } + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + } + + #[test] + fn test_rem() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a % b, c); + assert_eq!( + { + let mut x = a; + x %= b; + x + 
}, + c + ); + assert_eq!(to_big(a) % to_big(b), to_big(c)) + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a % b, c); + assert_eq!( + { + let mut x = a; + x %= b; + x + }, + c + ); + } + + test(_3_2, _1, _1_2); + test(_3_2, _1_2, _0); + test(_5_2, _3_2, _1); + test(_2, _NEG1_2, _0); + test(_1_2, _2, _1_2); + test_assign(_3_2, 1, _1_2); + } + + #[test] + fn test_rem_overflow() { + // tests that Ratio(1,2) % Ratio(1, T::max_value()) equals 0 + // for each integer type. Previously, this calculation would overflow. + fn test_rem_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign, + { + let two = T::one() + T::one(); + // value near to maximum, but divisible by two + let max_div2 = T::max_value() / two.clone() * two.clone(); + let _1_max: Ratio = Ratio::new(T::one(), max_div2); + let _1_two: Ratio = Ratio::new(T::one(), two); + assert!(T::is_zero(&(_1_two.clone() % _1_max.clone()).numer)); + { + let mut tmp: Ratio = _1_two; + tmp %= _1_max; + assert!(T::is_zero(&tmp.numer)); + } + } + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + } + + #[test] + fn test_neg() { + fn test(a: Rational64, b: Rational64) { + assert_eq!(-a, b); + assert_eq!(-to_big(a), to_big(b)) + } + + test(_0, _0); + test(_1_2, _NEG1_2); + test(-_1, _1); + } + #[test] + #[allow(clippy::eq_op)] + fn test_zero() { + assert_eq!(_0 + _0, _0); + assert_eq!(_0 * _0, _0); + assert_eq!(_0 * _1, _0); + assert_eq!(_0 / _NEG1_2, _0); + assert_eq!(_0 - _0, _0); + } + #[test] + #[should_panic] + fn test_div_0() { + let _a = _1 / _0; + } + + #[test] + fn test_checked_failures() { + let big = Ratio::new(128u8, 1); + let small = Ratio::new(1, 
128u8); + assert_eq!(big.checked_add(&big), None); + assert_eq!(small.checked_sub(&big), None); + assert_eq!(big.checked_mul(&big), None); + assert_eq!(small.checked_div(&big), None); + assert_eq!(_1.checked_div(&_0), None); + } + + #[test] + fn test_checked_zeros() { + assert_eq!(_0.checked_add(&_0), Some(_0)); + assert_eq!(_0.checked_sub(&_0), Some(_0)); + assert_eq!(_0.checked_mul(&_0), Some(_0)); + assert_eq!(_0.checked_div(&_0), None); + } + + #[test] + fn test_checked_min() { + assert_eq!(_MIN.checked_add(&_MIN), None); + assert_eq!(_MIN.checked_sub(&_MIN), Some(_0)); + assert_eq!(_MIN.checked_mul(&_MIN), None); + assert_eq!(_MIN.checked_div(&_MIN), Some(_1)); + assert_eq!(_0.checked_add(&_MIN), Some(_MIN)); + assert_eq!(_0.checked_sub(&_MIN), None); + assert_eq!(_0.checked_mul(&_MIN), Some(_0)); + assert_eq!(_0.checked_div(&_MIN), Some(_0)); + assert_eq!(_1.checked_add(&_MIN), Some(_MIN_P1)); + assert_eq!(_1.checked_sub(&_MIN), None); + assert_eq!(_1.checked_mul(&_MIN), Some(_MIN)); + assert_eq!(_1.checked_div(&_MIN), None); + assert_eq!(_MIN.checked_add(&_0), Some(_MIN)); + assert_eq!(_MIN.checked_sub(&_0), Some(_MIN)); + assert_eq!(_MIN.checked_mul(&_0), Some(_0)); + assert_eq!(_MIN.checked_div(&_0), None); + assert_eq!(_MIN.checked_add(&_1), Some(_MIN_P1)); + assert_eq!(_MIN.checked_sub(&_1), None); + assert_eq!(_MIN.checked_mul(&_1), Some(_MIN)); + assert_eq!(_MIN.checked_div(&_1), Some(_MIN)); + } + + #[test] + fn test_checked_max() { + assert_eq!(_MAX.checked_add(&_MAX), None); + assert_eq!(_MAX.checked_sub(&_MAX), Some(_0)); + assert_eq!(_MAX.checked_mul(&_MAX), None); + assert_eq!(_MAX.checked_div(&_MAX), Some(_1)); + assert_eq!(_0.checked_add(&_MAX), Some(_MAX)); + assert_eq!(_0.checked_sub(&_MAX), Some(_MIN_P1)); + assert_eq!(_0.checked_mul(&_MAX), Some(_0)); + assert_eq!(_0.checked_div(&_MAX), Some(_0)); + assert_eq!(_1.checked_add(&_MAX), None); + assert_eq!(_1.checked_sub(&_MAX), Some(-_MAX_M1)); + assert_eq!(_1.checked_mul(&_MAX), Some(_MAX)); 
+ assert_eq!(_1.checked_div(&_MAX), Some(_MAX.recip())); + assert_eq!(_MAX.checked_add(&_0), Some(_MAX)); + assert_eq!(_MAX.checked_sub(&_0), Some(_MAX)); + assert_eq!(_MAX.checked_mul(&_0), Some(_0)); + assert_eq!(_MAX.checked_div(&_0), None); + assert_eq!(_MAX.checked_add(&_1), None); + assert_eq!(_MAX.checked_sub(&_1), Some(_MAX_M1)); + assert_eq!(_MAX.checked_mul(&_1), Some(_MAX)); + assert_eq!(_MAX.checked_div(&_1), Some(_MAX)); + } + + #[test] + fn test_checked_min_max() { + assert_eq!(_MIN.checked_add(&_MAX), Some(-_1)); + assert_eq!(_MIN.checked_sub(&_MAX), None); + assert_eq!(_MIN.checked_mul(&_MAX), None); + assert_eq!( + _MIN.checked_div(&_MAX), + Some(Ratio::new(_MIN.numer, _MAX.numer)) + ); + assert_eq!(_MAX.checked_add(&_MIN), Some(-_1)); + assert_eq!(_MAX.checked_sub(&_MIN), None); + assert_eq!(_MAX.checked_mul(&_MIN), None); + assert_eq!(_MAX.checked_div(&_MIN), None); + } + } + + #[test] + fn test_round() { + assert_eq!(_1_3.ceil(), _1); + assert_eq!(_1_3.floor(), _0); + assert_eq!(_1_3.round(), _0); + assert_eq!(_1_3.trunc(), _0); + + assert_eq!(_NEG1_3.ceil(), _0); + assert_eq!(_NEG1_3.floor(), -_1); + assert_eq!(_NEG1_3.round(), _0); + assert_eq!(_NEG1_3.trunc(), _0); + + assert_eq!(_2_3.ceil(), _1); + assert_eq!(_2_3.floor(), _0); + assert_eq!(_2_3.round(), _1); + assert_eq!(_2_3.trunc(), _0); + + assert_eq!(_NEG2_3.ceil(), _0); + assert_eq!(_NEG2_3.floor(), -_1); + assert_eq!(_NEG2_3.round(), -_1); + assert_eq!(_NEG2_3.trunc(), _0); + + assert_eq!(_1_2.ceil(), _1); + assert_eq!(_1_2.floor(), _0); + assert_eq!(_1_2.round(), _1); + assert_eq!(_1_2.trunc(), _0); + + assert_eq!(_NEG1_2.ceil(), _0); + assert_eq!(_NEG1_2.floor(), -_1); + assert_eq!(_NEG1_2.round(), -_1); + assert_eq!(_NEG1_2.trunc(), _0); + + assert_eq!(_1.ceil(), _1); + assert_eq!(_1.floor(), _1); + assert_eq!(_1.round(), _1); + assert_eq!(_1.trunc(), _1); + + // Overflow checks + + let _neg1 = Ratio::from_integer(-1); + let _large_rat1 = Ratio::new(i32::MAX, i32::MAX - 1); + let 
_large_rat2 = Ratio::new(i32::MAX - 1, i32::MAX); + let _large_rat3 = Ratio::new(i32::MIN + 2, i32::MIN + 1); + let _large_rat4 = Ratio::new(i32::MIN + 1, i32::MIN + 2); + let _large_rat5 = Ratio::new(i32::MIN + 2, i32::MAX); + let _large_rat6 = Ratio::new(i32::MAX, i32::MIN + 2); + let _large_rat7 = Ratio::new(1, i32::MIN + 1); + let _large_rat8 = Ratio::new(1, i32::MAX); + + assert_eq!(_large_rat1.round(), One::one()); + assert_eq!(_large_rat2.round(), One::one()); + assert_eq!(_large_rat3.round(), One::one()); + assert_eq!(_large_rat4.round(), One::one()); + assert_eq!(_large_rat5.round(), _neg1); + assert_eq!(_large_rat6.round(), _neg1); + assert_eq!(_large_rat7.round(), Zero::zero()); + assert_eq!(_large_rat8.round(), Zero::zero()); + } + + #[test] + fn test_fract() { + assert_eq!(_1.fract(), _0); + assert_eq!(_NEG1_2.fract(), _NEG1_2); + assert_eq!(_1_2.fract(), _1_2); + assert_eq!(_3_2.fract(), _1_2); + } + + #[test] + fn test_recip() { + assert_eq!(_1 * _1.recip(), _1); + assert_eq!(_2 * _2.recip(), _1); + assert_eq!(_1_2 * _1_2.recip(), _1); + assert_eq!(_3_2 * _3_2.recip(), _1); + assert_eq!(_NEG1_2 * _NEG1_2.recip(), _1); + + assert_eq!(_3_2.recip(), _2_3); + assert_eq!(_NEG1_2.recip(), _NEG2); + assert_eq!(_NEG1_2.recip().denom(), &1); + } + + #[test] + #[should_panic(expected = "division by zero")] + fn test_recip_fail() { + let _a = Ratio::new(0, 1).recip(); + } + + #[test] + fn test_pow() { + fn test(r: Rational64, e: i32, expected: Rational64) { + assert_eq!(r.pow(e), expected); + assert_eq!(Pow::pow(r, e), expected); + assert_eq!(Pow::pow(r, &e), expected); + assert_eq!(Pow::pow(&r, e), expected); + assert_eq!(Pow::pow(&r, &e), expected); + #[cfg(feature = "num-bigint")] + test_big(r, e, expected); + } + + #[cfg(feature = "num-bigint")] + fn test_big(r: Rational64, e: i32, expected: Rational64) { + let r = BigRational::<4>::new_raw(r.numer.into(), r.denom.into()); + let expected = BigRational::new_raw(expected.numer.into(), expected.denom.into()); 
+ assert_eq!((&r).pow(e), expected); + assert_eq!(Pow::pow(r.clone(), e), expected); + assert_eq!(Pow::pow(r.clone(), &e), expected); + assert_eq!(Pow::pow(&r, e), expected); + assert_eq!(Pow::pow(&r, &e), expected); + } + + test(_1_2, 2, Ratio::new(1, 4)); + test(_1_2, -2, Ratio::new(4, 1)); + test(_1, 1, _1); + test(_1, i32::MAX, _1); + test(_1, i32::MIN, _1); + test(_NEG1_2, 2, _1_2.pow(2i32)); + test(_NEG1_2, 3, -_1_2.pow(3i32)); + test(_3_2, 0, _1); + test(_3_2, -1, _3_2.recip()); + test(_3_2, 3, Ratio::new(27, 8)); + } + + #[test] + #[cfg(feature = "std")] + fn test_to_from_str() { + use std::string::{String, ToString}; + fn test(r: Rational64, s: String) { + assert_eq!(FromStr::from_str(&s), Ok(r)); + assert_eq!(r.to_string(), s); + } + test(_1, "1".to_string()); + test(_0, "0".to_string()); + test(_1_2, "1/2".to_string()); + test(_3_2, "3/2".to_string()); + test(_2, "2".to_string()); + test(_NEG1_2, "-1/2".to_string()); + } + #[test] + fn test_from_str_fail() { + fn test(s: &str) { + let rational: Result = FromStr::from_str(s); + assert!(rational.is_err()); + } + + let xs = ["0 /1", "abc", "", "1/", "--1/2", "3/2/1", "1/0"]; + for &s in xs.iter() { + test(s); + } + } + + #[cfg(feature = "num-bigint")] + #[test] + fn test_from_float() { + use num_traits::float::FloatCore; + fn test(given: T, (numer, denom): (&str, &str)) { + let ratio: BigRational = Ratio::from_float(given).unwrap(); + assert_eq!( + ratio, + Ratio::new( + FromStr::from_str(numer).unwrap(), + FromStr::from_str(denom).unwrap() + ) + ); + } + + // f32 + test(core::f32::consts::PI, ("13176795", "4194304")); + test(2f32.powf(100.), ("1267650600228229401496703205376", "1")); + test( + -(2f32.powf(100.)), + ("-1267650600228229401496703205376", "1"), + ); + test( + 1.0 / 2f32.powf(100.), + ("1", "1267650600228229401496703205376"), + ); + test(684729.48391f32, ("1369459", "2")); + test(-8573.5918555f32, ("-4389679", "512")); + + // f64 + test( + core::f64::consts::PI, + ("884279719003555", 
"281474976710656"), + ); + test(2f64.powf(100.), ("1267650600228229401496703205376", "1")); + test( + -(2f64.powf(100.)), + ("-1267650600228229401496703205376", "1"), + ); + test(684729.48391f64, ("367611342500051", "536870912")); + test(-8573.5918555f64, ("-4713381968463931", "549755813888")); + test( + 1.0 / 2f64.powf(100.), + ("1", "1267650600228229401496703205376"), + ); + } + + #[cfg(feature = "num-bigint")] + #[test] + fn test_from_float_fail() { + use core::{f32, f64}; + + assert_eq!(Ratio::from_float(f32::NAN), None); + assert_eq!(Ratio::from_float(f32::INFINITY), None); + assert_eq!(Ratio::from_float(f32::NEG_INFINITY), None); + assert_eq!(Ratio::from_float(f64::NAN), None); + assert_eq!(Ratio::from_float(f64::INFINITY), None); + assert_eq!(Ratio::from_float(f64::NEG_INFINITY), None); + } + + #[test] + fn test_signed() { + assert_eq!(_NEG1_2.abs(), _1_2); + assert_eq!(_3_2.abs_sub(&_1_2), _1); + assert_eq!(_1_2.abs_sub(&_3_2), Zero::zero()); + assert_eq!(_1_2.signum(), One::one()); + assert_eq!(_NEG1_2.signum(), ->::one()); + assert_eq!(_0.signum(), Zero::zero()); + assert!(_NEG1_2.is_negative()); + assert!(_1_NEG2.is_negative()); + assert!(!_NEG1_2.is_positive()); + assert!(!_1_NEG2.is_positive()); + assert!(_1_2.is_positive()); + assert!(_NEG1_NEG2.is_positive()); + assert!(!_1_2.is_negative()); + assert!(!_NEG1_NEG2.is_negative()); + assert!(!_0.is_positive()); + assert!(!_0.is_negative()); + } + + #[test] + #[cfg(feature = "std")] + fn test_hash() { + assert!(crate::hash(&_0) != crate::hash(&_1)); + assert!(crate::hash(&_0) != crate::hash(&_3_2)); + + // a == b -> hash(a) == hash(b) + let a = Rational64::new_raw(4, 2); + let b = Rational64::new_raw(6, 3); + assert_eq!(a, b); + assert_eq!(crate::hash(&a), crate::hash(&b)); + + let a = Rational64::new_raw(123456789, 1000); + let b = Rational64::new_raw(123456789 * 5, 5000); + assert_eq!(a, b); + assert_eq!(crate::hash(&a), crate::hash(&b)); + } + + #[test] + fn test_into_pair() { + assert_eq!((0, 1), 
_0.into()); + assert_eq!((-2, 1), _NEG2.into()); + assert_eq!((1, -2), _1_NEG2.into()); + } + + #[test] + fn test_from_pair() { + assert_eq!(_0, Ratio::from((0, 1))); + assert_eq!(_1, Ratio::from((1, 1))); + assert_eq!(_NEG2, Ratio::from((-2, 1))); + assert_eq!(_1_NEG2, Ratio::from((1, -2))); + } + + #[test] + fn ratio_iter_sum() { + // generic function to assure the iter method can be called + // for any Iterator with Item = Ratio or Ratio<&impl Integer> + fn iter_sums(slice: &[Ratio]) -> [Ratio; 3] { + let mut manual_sum = Ratio::new(T::zero(), T::one()); + for ratio in slice { + manual_sum = manual_sum + ratio; + } + [manual_sum, slice.iter().sum(), slice.iter().cloned().sum()] + } + // collect into array so test works on no_std + let mut nums = [Ratio::new(0, 1); 1000]; + for (i, r) in (0..1000).map(|n| Ratio::new(n, 500)).enumerate() { + nums[i] = r; + } + let sums = iter_sums(&nums[..]); + assert_eq!(sums[0], sums[1]); + assert_eq!(sums[0], sums[2]); + } + + #[test] + fn ratio_iter_product() { + // generic function to assure the iter method can be called + // for any Iterator with Item = Ratio or Ratio<&impl Integer> + fn iter_products(slice: &[Ratio]) -> [Ratio; 3] { + let mut manual_prod = Ratio::new(T::one(), T::one()); + for ratio in slice { + manual_prod = manual_prod * ratio; + } + [ + manual_prod, + slice.iter().product(), + slice.iter().cloned().product(), + ] + } + + // collect into array so test works on no_std + let mut nums = [Ratio::new(0, 1); 1000]; + for (i, r) in (0..1000).map(|n| Ratio::new(n, 500)).enumerate() { + nums[i] = r; + } + let products = iter_products(&nums[..]); + assert_eq!(products[0], products[1]); + assert_eq!(products[0], products[2]); + } + + #[test] + fn test_num_zero() { + let zero = Rational64::zero(); + assert!(zero.is_zero()); + + let mut r = Rational64::new(123, 456); + assert!(!r.is_zero()); + assert_eq!(r + zero, r); + + r.set_zero(); + assert!(r.is_zero()); + } + + #[test] + fn test_num_one() { + let one = 
Rational64::one(); + assert!(one.is_one()); + + let mut r = Rational64::new(123, 456); + assert!(!r.is_one()); + assert_eq!(r * one, r); + + r.set_one(); + assert!(r.is_one()); + } + + #[test] + fn test_const() { + const N: Ratio = Ratio::new_raw(123, 456); + const N_NUMER: &i32 = N.numer(); + const N_DENOM: &i32 = N.denom(); + + assert_eq!(N_NUMER, &123); + assert_eq!(N_DENOM, &456); + + let r = N.reduced(); + assert_eq!(r.numer(), &(123 / 3)); + assert_eq!(r.denom(), &(456 / 3)); + } + + #[test] + fn test_ratio_to_i64() { + assert_eq!(5, Rational64::new(70, 14).to_u64().unwrap()); + assert_eq!(-3, Rational64::new(-31, 8).to_i64().unwrap()); + assert_eq!(None, Rational64::new(-31, 8).to_u64()); + } + + #[test] + #[cfg(feature = "num-bigint")] + fn test_ratio_to_i128() { + assert_eq!( + 1i128 << 70, + Ratio::::new(1i128 << 77, 1i128 << 7) + .to_i128() + .unwrap() + ); + } + + #[test] + #[cfg(feature = "num-bigint")] + fn test_big_ratio_to_f64() { + assert_eq!( + BigRational::<4>::new( + "1234567890987654321234567890987654321234567890" + .parse() + .unwrap(), + "3".parse().unwrap() + ) + .to_nlimbs::<32>() + .to_f64(), + Some(411522630329218100000000000000000000000000000f64) + ); + assert_eq!(Ratio::from_float(5e-324).unwrap().to_nlimbs::<32>().to_f64(), Some(5e-324)); + assert_eq!( + // subnormal + BigRational::new(BigInt::one(), BigInt::one() << 1050).to_f64(), + Some(2.0f64.powi(-50).powi(21)) + ); + assert_eq!( + // definite underflow + BigRational::new(BigInt::one(), BigInt::one() << 1100).to_f64(), + Some(0.0) + ); + assert_eq!( + BigRational::from(BigInt::one() << 1050).to_f64(), + Some(core::f64::INFINITY) + ); + assert_eq!( + BigRational::from((-BigInt::one()) << 1050).to_f64(), + Some(core::f64::NEG_INFINITY) + ); + assert_eq!( + BigRational::<4>::new( + "1234567890987654321234567890".parse().unwrap(), + "987654321234567890987654321".parse().unwrap() + ) + .to_nlimbs::<32>() + .to_f64(), + Some(1.2499999893125f64) + ); + assert_eq!( + 
BigRational::new_raw(BigInt::one(), BigInt::zero()).to_f64(), + Some(core::f64::INFINITY) + ); + assert_eq!( + BigRational::new_raw(-BigInt::one(), BigInt::zero()).to_f64(), + Some(core::f64::NEG_INFINITY) + ); + assert_eq!( + BigRational::new_raw(BigInt::zero(), BigInt::zero()).to_f64(), + None + ); + } + + #[test] + fn test_ratio_to_f64() { + assert_eq!(Ratio::::new(1, 2).to_f64(), Some(0.5f64)); + assert_eq!(Rational64::new(1, 2).to_f64(), Some(0.5f64)); + assert_eq!(Rational64::new(1, -2).to_f64(), Some(-0.5f64)); + assert_eq!(Rational64::new(0, 2).to_f64(), Some(0.0f64)); + assert_eq!(Rational64::new(0, -2).to_f64(), Some(-0.0f64)); + assert_eq!(Rational64::new((1 << 57) + 1, 1 << 54).to_f64(), Some(8f64)); + assert_eq!( + Rational64::new((1 << 52) + 1, 1 << 52).to_f64(), + Some(1.0000000000000002f64), + ); + assert_eq!( + Rational64::new((1 << 60) + (1 << 8), 1 << 60).to_f64(), + Some(1.0000000000000002f64), + ); + assert_eq!( + Ratio::::new_raw(1, 0).to_f64(), + Some(core::f64::INFINITY) + ); + assert_eq!( + Ratio::::new_raw(-1, 0).to_f64(), + Some(core::f64::NEG_INFINITY) + ); + assert_eq!(Ratio::::new_raw(0, 0).to_f64(), None); + } + + #[test] + fn test_ldexp() { + use core::f64::{INFINITY, MAX_EXP, MIN_EXP, NAN, NEG_INFINITY}; + assert_eq!(ldexp(1.0, 0), 1.0); + assert_eq!(ldexp(1.0, 1), 2.0); + assert_eq!(ldexp(0.0, 1), 0.0); + assert_eq!(ldexp(-0.0, 1), -0.0); + + // Cases where ldexp is equivalent to multiplying by 2^exp because there's no over- or + // underflow. 
+ assert_eq!(ldexp(3.5, 5), 3.5 * 2f64.powi(5)); + assert_eq!(ldexp(1.0, MAX_EXP - 1), 2f64.powi(MAX_EXP - 1)); + assert_eq!(ldexp(2.77, MIN_EXP + 3), 2.77 * 2f64.powi(MIN_EXP + 3)); + + // Case where initial value is subnormal + assert_eq!(ldexp(5e-324, 4), 5e-324 * 2f64.powi(4)); + assert_eq!(ldexp(5e-324, 200), 5e-324 * 2f64.powi(200)); + + // Near underflow (2^exp is too small to represent, but not x*2^exp) + assert_eq!(ldexp(4.0, MIN_EXP - 3), 2f64.powi(MIN_EXP - 1)); + + // Near overflow + assert_eq!(ldexp(0.125, MAX_EXP + 3), 2f64.powi(MAX_EXP)); + + // Overflow and underflow cases + assert_eq!(ldexp(1.0, MIN_EXP - 54), 0.0); + assert_eq!(ldexp(-1.0, MIN_EXP - 54), -0.0); + assert_eq!(ldexp(1.0, MAX_EXP), INFINITY); + assert_eq!(ldexp(-1.0, MAX_EXP), NEG_INFINITY); + + // Special values + assert_eq!(ldexp(INFINITY, 1), INFINITY); + assert_eq!(ldexp(NEG_INFINITY, 1), NEG_INFINITY); + assert!(ldexp(NAN, 1).is_nan()); + } +} diff --git a/vendor/num-rational-generic/main.rs b/vendor/num-rational-generic/main.rs new file mode 100644 index 000000000..37e7a42d6 --- /dev/null +++ b/vendor/num-rational-generic/main.rs @@ -0,0 +1,32 @@ +#![feature(test)] + +extern crate test; + +use num_bigint::BigInt; +use num_rational::{BigRational, Ratio}; +use test::Bencher; + +mod rng; +use rng::get_rng; + +#[bench] +fn alloc_ratio_bigint_bench(b: &mut Bencher) { + use rand::RngCore; + let mut rng = get_rng(); + b.iter(|| { + let a = BigInt::from(rng.next_u64()); + let b = BigInt::from(rng.next_u64()); + BigRational::new(a, b) + }); +} + +#[bench] +fn alloc_ratio_u64_bench(b: &mut Bencher) { + use rand::RngCore; + let mut rng = get_rng(); + b.iter(|| { + let a = rng.next_u64(); + let b = rng.next_u64(); + Ratio::new(a, b) + }); +} diff --git a/vendor/num-rational-generic/pow.rs b/vendor/num-rational-generic/pow.rs new file mode 100644 index 000000000..33253320e --- /dev/null +++ b/vendor/num-rational-generic/pow.rs @@ -0,0 +1,173 @@ +use crate::Ratio; + +use core::cmp; +use 
num_integer::Integer; +use num_traits::{One, Pow}; + +macro_rules! pow_unsigned_impl { + (@ $exp:ty) => { + type Output = Ratio; + #[inline] + fn pow(self, expon: $exp) -> Ratio { + Ratio::new_raw(self.numer.pow(expon), self.denom.pow(expon)) + } + }; + ($exp:ty) => { + impl> Pow<$exp> for Ratio { + pow_unsigned_impl!(@ $exp); + } + impl<'a, T: Clone + Integer> Pow<$exp> for &'a Ratio + where + &'a T: Pow<$exp, Output = T>, + { + pow_unsigned_impl!(@ $exp); + } + impl<'b, T: Clone + Integer + Pow<$exp, Output = T>> Pow<&'b $exp> for Ratio { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b $exp) -> Ratio { + Pow::pow(self, *expon) + } + } + impl<'a, 'b, T: Clone + Integer> Pow<&'b $exp> for &'a Ratio + where + &'a T: Pow<$exp, Output = T>, + { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b $exp) -> Ratio { + Pow::pow(self, *expon) + } + } + }; +} +pow_unsigned_impl!(u8); +pow_unsigned_impl!(u16); +pow_unsigned_impl!(u32); +pow_unsigned_impl!(u64); +pow_unsigned_impl!(u128); +pow_unsigned_impl!(usize); + +macro_rules! 
pow_signed_impl { + (@ &'b BigInt, BigUint) => { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b BigInt) -> Ratio { + match expon.sign() { + Sign::NoSign => One::one(), + Sign::Minus => { + Pow::pow(self, expon.magnitude()).into_recip() + } + Sign::Plus => Pow::pow(self, expon.magnitude()), + } + } + }; + (@ $exp:ty, $unsigned:ty) => { + type Output = Ratio; + #[inline] + fn pow(self, expon: $exp) -> Ratio { + match expon.cmp(&0) { + cmp::Ordering::Equal => One::one(), + cmp::Ordering::Less => { + let expon = expon.wrapping_abs() as $unsigned; + Pow::pow(self, expon).into_recip() + } + cmp::Ordering::Greater => Pow::pow(self, expon as $unsigned), + } + } + }; + ($exp:ty, $unsigned:ty) => { + impl> Pow<$exp> for Ratio { + pow_signed_impl!(@ $exp, $unsigned); + } + impl<'a, T: Clone + Integer> Pow<$exp> for &'a Ratio + where + &'a T: Pow<$unsigned, Output = T>, + { + pow_signed_impl!(@ $exp, $unsigned); + } + impl<'b, T: Clone + Integer + Pow<$unsigned, Output = T>> Pow<&'b $exp> for Ratio { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b $exp) -> Ratio { + Pow::pow(self, *expon) + } + } + impl<'a, 'b, T: Clone + Integer> Pow<&'b $exp> for &'a Ratio + where + &'a T: Pow<$unsigned, Output = T>, + { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b $exp) -> Ratio { + Pow::pow(self, *expon) + } + } + }; +} +pow_signed_impl!(i8, u8); +pow_signed_impl!(i16, u16); +pow_signed_impl!(i32, u32); +pow_signed_impl!(i64, u64); +pow_signed_impl!(i128, u128); +pow_signed_impl!(isize, usize); + +#[cfg(feature = "num-bigint")] +mod bigint { + use super::*; + use num_bigint::{BigInt, BigUint, Sign}; + + impl Pow<&'b BigUint, Output = T>> Pow for Ratio { + type Output = Ratio; + #[inline] + fn pow(self, expon: BigUint) -> Ratio { + Pow::pow(self, &expon) + } + } + impl<'a, T: Clone + Integer> Pow for &'a Ratio + where + &'a T: for<'b> Pow<&'b BigUint, Output = T>, + { + type Output = Ratio; + #[inline] + fn pow(self, expon: BigUint) -> Ratio { + 
Pow::pow(self, &expon) + } + } + impl<'b, T: Clone + Integer + Pow<&'b BigUint, Output = T>> Pow<&'b BigUint> for Ratio { + pow_unsigned_impl!(@ &'b BigUint); + } + impl<'a, 'b, T: Clone + Integer> Pow<&'b BigUint> for &'a Ratio + where + &'a T: Pow<&'b BigUint, Output = T>, + { + pow_unsigned_impl!(@ &'b BigUint); + } + + impl Pow<&'b BigUint, Output = T>> Pow for Ratio { + type Output = Ratio; + #[inline] + fn pow(self, expon: BigInt) -> Ratio { + Pow::pow(self, &expon) + } + } + impl<'a, T: Clone + Integer> Pow for &'a Ratio + where + &'a T: for<'b> Pow<&'b BigUint, Output = T>, + { + type Output = Ratio; + #[inline] + fn pow(self, expon: BigInt) -> Ratio { + Pow::pow(self, &expon) + } + } + impl<'b, T: Clone + Integer + Pow<&'b BigUint, Output = T>> Pow<&'b BigInt> for Ratio { + pow_signed_impl!(@ &'b BigInt, BigUint); + } + impl<'a, 'b, T: Clone + Integer> Pow<&'b BigInt> for &'a Ratio + where + &'a T: Pow<&'b BigUint, Output = T>, + { + pow_signed_impl!(@ &'b BigInt, BigUint); + } +} diff --git a/vendor/num-rational-generic/rng.rs b/vendor/num-rational-generic/rng.rs new file mode 100644 index 000000000..33e4f0fad --- /dev/null +++ b/vendor/num-rational-generic/rng.rs @@ -0,0 +1,38 @@ +use rand::RngCore; + +pub(crate) fn get_rng() -> impl RngCore { + XorShiftStar { + a: 0x0123_4567_89AB_CDEF, + } +} + +/// Simple `Rng` for benchmarking without additional dependencies +struct XorShiftStar { + a: u64, +} + +impl RngCore for XorShiftStar { + fn next_u32(&mut self) -> u32 { + self.next_u64() as u32 + } + + fn next_u64(&mut self) -> u64 { + // https://en.wikipedia.org/wiki/Xorshift#xorshift* + self.a ^= self.a >> 12; + self.a ^= self.a << 25; + self.a ^= self.a >> 27; + self.a.wrapping_mul(0x2545_F491_4F6C_DD1D) + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + for chunk in dest.chunks_mut(8) { + let bytes = self.next_u64().to_le_bytes(); + let slice = &bytes[..chunk.len()]; + chunk.copy_from_slice(slice) + } + } + + fn try_fill_bytes(&mut self, dest: &mut 
[u8]) -> Result<(), rand::Error> { + Ok(self.fill_bytes(dest)) + } +} diff --git a/vendor/num-rational-generic/rustup.sh b/vendor/num-rational-generic/rustup.sh new file mode 100755 index 000000000..144042bc0 --- /dev/null +++ b/vendor/num-rational-generic/rustup.sh @@ -0,0 +1,10 @@ +#!/bin/sh +# Use rustup to locally run the same suite of tests as .github/workflows/ +# (You should first install/update all of the versions below.) + +set -ex + +ci=$(dirname "$0") +for version in 1.60.0 stable beta nightly; do + rustup run "$version" "$ci/test_full.sh" +done diff --git a/vendor/num-rational-generic/src/lib.rs b/vendor/num-rational-generic/src/lib.rs new file mode 100644 index 000000000..5a9ffbb53 --- /dev/null +++ b/vendor/num-rational-generic/src/lib.rs @@ -0,0 +1,3134 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Rational numbers +//! +//! ## Compatibility +//! +//! The `num-rational` crate is tested for rustc 1.60 and greater. 
+ +#![doc(html_root_url = "https://docs.rs/num-rational/0.4")] +#![no_std] +// Ratio ops often use other "suspicious" ops +#![allow(clippy::suspicious_arithmetic_impl)] +#![allow(clippy::suspicious_op_assign_impl)] + +#[cfg(feature = "std")] +#[macro_use] +extern crate std; + +use core::{ + cmp, fmt, + fmt::{Binary, Display, Formatter, LowerExp, LowerHex, Octal, UpperExp, UpperHex}, + hash::{Hash, Hasher}, + ops::{Add, Div, Mul, Neg, Rem, ShlAssign, Sub}, + str::FromStr, +}; +#[cfg(feature = "std")] +use std::error::Error; + +#[cfg(feature = "num-bigint-generic")] +use num_bigint_generic::{BigInt, BigUint, Sign, ToBigInt}; + +use num_integer::Integer; +use num_traits::{ + float::FloatCore, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, ConstOne, ConstZero, + FromPrimitive, Inv, Num, NumCast, One, Pow, Signed, ToPrimitive, Unsigned, Zero, +}; + +mod pow; + +/// Represents the ratio between two numbers. +#[derive(Copy, Clone, Debug)] +#[allow(missing_docs)] +pub struct Ratio { + /// Numerator. + numer: T, + /// Denominator. + denom: T, +} + +#[cfg(feature = "num-bigint-generic")] +impl Ratio> { + pub fn to_nlimbs(&self) -> Ratio> { + let Self { numer, denom } = self; + Ratio::new(numer.to_digits(), denom.to_digits()) + } +} + +/// Alias for a `Ratio` of machine-sized integers. +#[deprecated( + since = "0.4.0", + note = "it's better to use a specific size, like `Rational32` or `Rational64`" +)] +pub type Rational = Ratio; +/// Alias for a `Ratio` of 32-bit-sized integers. +pub type Rational32 = Ratio; +/// Alias for a `Ratio` of 64-bit-sized integers. +pub type Rational64 = Ratio; + +#[cfg(feature = "num-bigint-generic")] +/// Alias for arbitrary precision rationals. +pub type BigRational = Ratio>; + +/// These method are `const`. +impl Ratio { + /// Creates a `Ratio` without checking for `denom == 0` or reducing. 
+ /// + /// **There are several methods that will panic if used on a `Ratio` with + /// `denom == 0`.** + #[inline] + pub const fn new_raw(numer: T, denom: T) -> Ratio { + Ratio { numer, denom } + } + + /// Deconstructs a `Ratio` into its numerator and denominator. + #[inline] + pub fn into_raw(self) -> (T, T) { + (self.numer, self.denom) + } + + /// Gets an immutable reference to the numerator. + #[inline] + pub const fn numer(&self) -> &T { + &self.numer + } + + /// Gets an immutable reference to the denominator. + #[inline] + pub const fn denom(&self) -> &T { + &self.denom + } +} + +impl Ratio { + /// Creates a new `Ratio`. + /// + /// **Panics if `denom` is zero.** + #[inline] + pub fn new(numer: T, denom: T) -> Ratio { + let mut ret = Ratio::new_raw(numer, denom); + ret.reduce(); + ret + } + + /// Creates a `Ratio` representing the integer `t`. + #[inline] + pub fn from_integer(t: T) -> Ratio { + Ratio::new_raw(t, One::one()) + } + + /// Converts to an integer, rounding towards zero. + #[inline] + pub fn to_integer(&self) -> T { + self.trunc().numer + } + + /// Returns true if the rational number is an integer (denominator is 1). + #[inline] + pub fn is_integer(&self) -> bool { + self.denom.is_one() + } + + /// Puts self into lowest terms, with `denom` > 0. + /// + /// **Panics if `denom` is zero.** + fn reduce(&mut self) { + if self.denom.is_zero() { + panic!("denominator == 0"); + } + if self.numer.is_zero() { + self.denom.set_one(); + return; + } + if self.numer == self.denom { + self.set_one(); + return; + } + let g: T = self.numer.gcd(&self.denom); + + // FIXME(#5992): assignment operator overloads + // T: Clone + Integer != T: Clone + NumAssign + + #[inline] + fn replace_with(x: &mut T, f: impl FnOnce(T) -> T) { + let y = core::mem::replace(x, T::zero()); + *x = f(y); + } + + // self.numer /= g; + replace_with(&mut self.numer, |x| x / g.clone()); + + // self.denom /= g; + replace_with(&mut self.denom, |x| x / g); + + // keep denom positive! 
+ if self.denom < T::zero() { + replace_with(&mut self.numer, |x| T::zero() - x); + replace_with(&mut self.denom, |x| T::zero() - x); + } + } + + /// Returns a reduced copy of self. + /// + /// In general, it is not necessary to use this method, as the only + /// method of procuring a non-reduced fraction is through `new_raw`. + /// + /// **Panics if `denom` is zero.** + pub fn reduced(&self) -> Ratio { + let mut ret = self.clone(); + ret.reduce(); + ret + } + + /// Returns the reciprocal. + /// + /// **Panics if the `Ratio` is zero.** + #[inline] + pub fn recip(&self) -> Ratio { + self.clone().into_recip() + } + + #[inline] + fn into_recip(self) -> Ratio { + match self.numer.cmp(&T::zero()) { + cmp::Ordering::Equal => panic!("division by zero"), + cmp::Ordering::Greater => Ratio::new_raw(self.denom, self.numer), + cmp::Ordering::Less => Ratio::new_raw(T::zero() - self.denom, T::zero() - self.numer), + } + } + + /// Rounds towards minus infinity. + #[inline] + pub fn floor(&self) -> Ratio { + if *self < Zero::zero() { + let one: T = One::one(); + Ratio::from_integer( + (self.numer.clone() - self.denom.clone() + one) / self.denom.clone(), + ) + } else { + Ratio::from_integer(self.numer.clone() / self.denom.clone()) + } + } + + /// Rounds towards plus infinity. + #[inline] + pub fn ceil(&self) -> Ratio { + if *self < Zero::zero() { + Ratio::from_integer(self.numer.clone() / self.denom.clone()) + } else { + let one: T = One::one(); + Ratio::from_integer( + (self.numer.clone() + self.denom.clone() - one) / self.denom.clone(), + ) + } + } + + /// Rounds to the nearest integer. Rounds half-way cases away from zero. 
+ #[inline] + pub fn round(&self) -> Ratio { + let zero: Ratio = Zero::zero(); + let one: T = One::one(); + let two: T = one.clone() + one.clone(); + + // Find unsigned fractional part of rational number + let mut fractional = self.fract(); + if fractional < zero { + fractional = zero - fractional + }; + + // The algorithm compares the unsigned fractional part with 1/2, that + // is, a/b >= 1/2, or a >= b/2. For odd denominators, we use + // a >= (b/2)+1. This avoids overflow issues. + let half_or_larger = if fractional.denom.is_even() { + fractional.numer >= fractional.denom / two + } else { + fractional.numer >= (fractional.denom / two) + one + }; + + if half_or_larger { + let one: Ratio = One::one(); + if *self >= Zero::zero() { + self.trunc() + one + } else { + self.trunc() - one + } + } else { + self.trunc() + } + } + + /// Rounds towards zero. + #[inline] + pub fn trunc(&self) -> Ratio { + Ratio::from_integer(self.numer.clone() / self.denom.clone()) + } + + /// Returns the fractional part of a number, with division rounded towards zero. + /// + /// Satisfies `self == self.trunc() + self.fract()`. + #[inline] + pub fn fract(&self) -> Ratio { + Ratio::new_raw(self.numer.clone() % self.denom.clone(), self.denom.clone()) + } + + /// Raises the `Ratio` to the power of an exponent. + #[inline] + pub fn pow(&self, expon: i32) -> Ratio + where + for<'a> &'a T: Pow, + { + Pow::pow(self, expon) + } +} + +#[cfg(feature = "num-bigint-generic")] +impl Ratio { + /// Converts a float into a rational number. 
+ pub fn from_float(f: T) -> Option { + if !f.is_finite() { + return None; + } + let (mantissa, exponent, sign) = f.integer_decode(); + let bigint_sign = if sign == 1 { Sign::Plus } else { Sign::Minus }; + if exponent < 0 { + let one: BigInt = One::one(); + let denom: BigInt = one << ((-exponent) as usize); + let numer: BigUint = FromPrimitive::from_u64(mantissa).unwrap(); + Some(Ratio::new(BigInt::from_biguint(bigint_sign, numer), denom)) + } else { + let mut numer: BigUint = FromPrimitive::from_u64(mantissa).unwrap(); + numer <<= exponent as usize; + Some(Ratio::from_integer(BigInt::from_biguint( + bigint_sign, + numer, + ))) + } + } +} + +impl Default for Ratio { + /// Returns zero + fn default() -> Self { + Ratio::zero() + } +} + +// From integer +impl From for Ratio +where + T: Clone + Integer, +{ + fn from(x: T) -> Ratio { + Ratio::from_integer(x) + } +} + +// From pair (through the `new` constructor) +impl From<(T, T)> for Ratio +where + T: Clone + Integer, +{ + fn from(pair: (T, T)) -> Ratio { + Ratio::new(pair.0, pair.1) + } +} + +// Comparisons + +// Mathematically, comparing a/b and c/d is the same as comparing a*d and b*c, but it's very easy +// for those multiplications to overflow fixed-size integers, so we need to take care. + +impl Ord for Ratio { + #[inline] + fn cmp(&self, other: &Self) -> cmp::Ordering { + // With equal denominators, the numerators can be directly compared + if self.denom == other.denom { + let ord = self.numer.cmp(&other.numer); + return if self.denom < T::zero() { + ord.reverse() + } else { + ord + }; + } + + // With equal numerators, the denominators can be inversely compared + if self.numer == other.numer { + if self.numer.is_zero() { + return cmp::Ordering::Equal; + } + let ord = self.denom.cmp(&other.denom); + return if self.numer < T::zero() { + ord + } else { + ord.reverse() + }; + } + + // Unfortunately, we don't have CheckedMul to try. 
That could sometimes avoid all the + // division below, or even always avoid it for BigInt and BigUint. + // FIXME- future breaking change to add Checked* to Integer? + + // Compare as floored integers and remainders + let (self_int, self_rem) = self.numer.div_mod_floor(&self.denom); + let (other_int, other_rem) = other.numer.div_mod_floor(&other.denom); + match self_int.cmp(&other_int) { + cmp::Ordering::Greater => cmp::Ordering::Greater, + cmp::Ordering::Less => cmp::Ordering::Less, + cmp::Ordering::Equal => { + match (self_rem.is_zero(), other_rem.is_zero()) { + (true, true) => cmp::Ordering::Equal, + (true, false) => cmp::Ordering::Less, + (false, true) => cmp::Ordering::Greater, + (false, false) => { + // Compare the reciprocals of the remaining fractions in reverse + let self_recip = Ratio::new_raw(self.denom.clone(), self_rem); + let other_recip = Ratio::new_raw(other.denom.clone(), other_rem); + self_recip.cmp(&other_recip).reverse() + } + } + } + } + } +} + +impl PartialOrd for Ratio { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for Ratio { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == cmp::Ordering::Equal + } +} + +impl Eq for Ratio {} + +// NB: We can't just `#[derive(Hash)]`, because it needs to agree +// with `Eq` even for non-reduced ratios. 
+impl Hash for Ratio { + fn hash(&self, state: &mut H) { + recurse(&self.numer, &self.denom, state); + + fn recurse(numer: &T, denom: &T, state: &mut H) { + if !denom.is_zero() { + let (int, rem) = numer.div_mod_floor(denom); + int.hash(state); + recurse(denom, &rem, state); + } else { + denom.hash(state); + } + } + } +} + +mod iter_sum_product { + use crate::Ratio; + use core::iter::{Product, Sum}; + use num_integer::Integer; + use num_traits::{One, Zero}; + + impl Sum for Ratio { + fn sum(iter: I) -> Self + where + I: Iterator>, + { + iter.fold(Self::zero(), |sum, num| sum + num) + } + } + + impl<'a, T: Integer + Clone> Sum<&'a Ratio> for Ratio { + fn sum(iter: I) -> Self + where + I: Iterator>, + { + iter.fold(Self::zero(), |sum, num| sum + num) + } + } + + impl Product for Ratio { + fn product(iter: I) -> Self + where + I: Iterator>, + { + iter.fold(Self::one(), |prod, num| prod * num) + } + } + + impl<'a, T: Integer + Clone> Product<&'a Ratio> for Ratio { + fn product(iter: I) -> Self + where + I: Iterator>, + { + iter.fold(Self::one(), |prod, num| prod * num) + } + } +} + +mod opassign { + use core::ops::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign}; + + use crate::Ratio; + use num_integer::Integer; + use num_traits::NumAssign; + + impl AddAssign for Ratio { + fn add_assign(&mut self, other: Ratio) { + if self.denom == other.denom { + self.numer += other.numer + } else { + let lcm = self.denom.lcm(&other.denom); + let lhs_numer = self.numer.clone() * (lcm.clone() / self.denom.clone()); + let rhs_numer = other.numer * (lcm.clone() / other.denom); + self.numer = lhs_numer + rhs_numer; + self.denom = lcm; + } + self.reduce(); + } + } + + // (a/b) / (c/d) = (a/gcd_ac)*(d/gcd_bd) / ((c/gcd_ac)*(b/gcd_bd)) + impl DivAssign for Ratio { + fn div_assign(&mut self, other: Ratio) { + let gcd_ac = self.numer.gcd(&other.numer); + let gcd_bd = self.denom.gcd(&other.denom); + self.numer /= gcd_ac.clone(); + self.numer *= other.denom / gcd_bd.clone(); + self.denom 
/= gcd_bd; + self.denom *= other.numer / gcd_ac; + self.reduce(); // TODO: remove this line. see #8. + } + } + + // a/b * c/d = (a/gcd_ad)*(c/gcd_bc) / ((d/gcd_ad)*(b/gcd_bc)) + impl MulAssign for Ratio { + fn mul_assign(&mut self, other: Ratio) { + let gcd_ad = self.numer.gcd(&other.denom); + let gcd_bc = self.denom.gcd(&other.numer); + self.numer /= gcd_ad.clone(); + self.numer *= other.numer / gcd_bc.clone(); + self.denom /= gcd_bc; + self.denom *= other.denom / gcd_ad; + self.reduce(); // TODO: remove this line. see #8. + } + } + + impl RemAssign for Ratio { + fn rem_assign(&mut self, other: Ratio) { + if self.denom == other.denom { + self.numer %= other.numer + } else { + let lcm = self.denom.lcm(&other.denom); + let lhs_numer = self.numer.clone() * (lcm.clone() / self.denom.clone()); + let rhs_numer = other.numer * (lcm.clone() / other.denom); + self.numer = lhs_numer % rhs_numer; + self.denom = lcm; + } + self.reduce(); + } + } + + impl SubAssign for Ratio { + fn sub_assign(&mut self, other: Ratio) { + if self.denom == other.denom { + self.numer -= other.numer + } else { + let lcm = self.denom.lcm(&other.denom); + let lhs_numer = self.numer.clone() * (lcm.clone() / self.denom.clone()); + let rhs_numer = other.numer * (lcm.clone() / other.denom); + self.numer = lhs_numer - rhs_numer; + self.denom = lcm; + } + self.reduce(); + } + } + + // a/b + c/1 = (a*1 + b*c) / (b*1) = (a + b*c) / b + impl AddAssign for Ratio { + fn add_assign(&mut self, other: T) { + self.numer += self.denom.clone() * other; + self.reduce(); + } + } + + impl DivAssign for Ratio { + fn div_assign(&mut self, other: T) { + let gcd = self.numer.gcd(&other); + self.numer /= gcd.clone(); + self.denom *= other / gcd; + self.reduce(); // TODO: remove this line. see #8. + } + } + + impl MulAssign for Ratio { + fn mul_assign(&mut self, other: T) { + let gcd = self.denom.gcd(&other); + self.denom /= gcd.clone(); + self.numer *= other / gcd; + self.reduce(); // TODO: remove this line. see #8. 
+ } + } + + // a/b % c/1 = (a*1 % b*c) / (b*1) = (a % b*c) / b + impl RemAssign for Ratio { + fn rem_assign(&mut self, other: T) { + self.numer %= self.denom.clone() * other; + self.reduce(); + } + } + + // a/b - c/1 = (a*1 - b*c) / (b*1) = (a - b*c) / b + impl SubAssign for Ratio { + fn sub_assign(&mut self, other: T) { + self.numer -= self.denom.clone() * other; + self.reduce(); + } + } + + macro_rules! forward_op_assign { + (impl $imp:ident, $method:ident) => { + impl<'a, T: Clone + Integer + NumAssign> $imp<&'a Ratio> for Ratio { + #[inline] + fn $method(&mut self, other: &Ratio) { + self.$method(other.clone()) + } + } + impl<'a, T: Clone + Integer + NumAssign> $imp<&'a T> for Ratio { + #[inline] + fn $method(&mut self, other: &T) { + self.$method(other.clone()) + } + } + }; + } + + forward_op_assign!(impl AddAssign, add_assign); + forward_op_assign!(impl DivAssign, div_assign); + forward_op_assign!(impl MulAssign, mul_assign); + forward_op_assign!(impl RemAssign, rem_assign); + forward_op_assign!(impl SubAssign, sub_assign); +} + +macro_rules! forward_ref_ref_binop { + (impl $imp:ident, $method:ident) => { + impl<'a, 'b, T: Clone + Integer> $imp<&'b Ratio> for &'a Ratio { + type Output = Ratio; + + #[inline] + fn $method(self, other: &'b Ratio) -> Ratio { + self.clone().$method(other.clone()) + } + } + impl<'a, 'b, T: Clone + Integer> $imp<&'b T> for &'a Ratio { + type Output = Ratio; + + #[inline] + fn $method(self, other: &'b T) -> Ratio { + self.clone().$method(other.clone()) + } + } + }; +} + +macro_rules! forward_ref_val_binop { + (impl $imp:ident, $method:ident) => { + impl<'a, T> $imp> for &'a Ratio + where + T: Clone + Integer, + { + type Output = Ratio; + + #[inline] + fn $method(self, other: Ratio) -> Ratio { + self.clone().$method(other) + } + } + impl<'a, T> $imp for &'a Ratio + where + T: Clone + Integer, + { + type Output = Ratio; + + #[inline] + fn $method(self, other: T) -> Ratio { + self.clone().$method(other) + } + } + }; +} + +macro_rules! 
forward_val_ref_binop { + (impl $imp:ident, $method:ident) => { + impl<'a, T> $imp<&'a Ratio> for Ratio + where + T: Clone + Integer, + { + type Output = Ratio; + + #[inline] + fn $method(self, other: &Ratio) -> Ratio { + self.$method(other.clone()) + } + } + impl<'a, T> $imp<&'a T> for Ratio + where + T: Clone + Integer, + { + type Output = Ratio; + + #[inline] + fn $method(self, other: &T) -> Ratio { + self.$method(other.clone()) + } + } + }; +} + +macro_rules! forward_all_binop { + (impl $imp:ident, $method:ident) => { + forward_ref_ref_binop!(impl $imp, $method); + forward_ref_val_binop!(impl $imp, $method); + forward_val_ref_binop!(impl $imp, $method); + }; +} + +// Arithmetic +forward_all_binop!(impl Mul, mul); +// a/b * c/d = (a/gcd_ad)*(c/gcd_bc) / ((d/gcd_ad)*(b/gcd_bc)) +impl Mul> for Ratio +where + T: Clone + Integer, +{ + type Output = Ratio; + #[inline] + fn mul(self, rhs: Ratio) -> Ratio { + let gcd_ad = self.numer.gcd(&rhs.denom); + let gcd_bc = self.denom.gcd(&rhs.numer); + Ratio::new( + self.numer / gcd_ad.clone() * (rhs.numer / gcd_bc.clone()), + self.denom / gcd_bc * (rhs.denom / gcd_ad), + ) + } +} +// a/b * c/1 = (a*c) / (b*1) = (a*c) / b +impl Mul for Ratio +where + T: Clone + Integer, +{ + type Output = Ratio; + #[inline] + fn mul(self, rhs: T) -> Ratio { + let gcd = self.denom.gcd(&rhs); + Ratio::new(self.numer * (rhs / gcd.clone()), self.denom / gcd) + } +} + +forward_all_binop!(impl Div, div); +// (a/b) / (c/d) = (a/gcd_ac)*(d/gcd_bd) / ((c/gcd_ac)*(b/gcd_bd)) +impl Div> for Ratio +where + T: Clone + Integer, +{ + type Output = Ratio; + + #[inline] + fn div(self, rhs: Ratio) -> Ratio { + let gcd_ac = self.numer.gcd(&rhs.numer); + let gcd_bd = self.denom.gcd(&rhs.denom); + Ratio::new( + self.numer / gcd_ac.clone() * (rhs.denom / gcd_bd.clone()), + self.denom / gcd_bd * (rhs.numer / gcd_ac), + ) + } +} +// (a/b) / (c/1) = (a*1) / (b*c) = a / (b*c) +impl Div for Ratio +where + T: Clone + Integer, +{ + type Output = Ratio; + + #[inline] + fn 
div(self, rhs: T) -> Ratio { + let gcd = self.numer.gcd(&rhs); + Ratio::new(self.numer / gcd.clone(), self.denom * (rhs / gcd)) + } +} + +macro_rules! arith_impl { + (impl $imp:ident, $method:ident) => { + forward_all_binop!(impl $imp, $method); + // Abstracts a/b `op` c/d = (a*lcm/b `op` c*lcm/d)/lcm where lcm = lcm(b,d) + impl $imp> for Ratio { + type Output = Ratio; + #[inline] + fn $method(self, rhs: Ratio) -> Ratio { + if self.denom == rhs.denom { + return Ratio::new(self.numer.$method(rhs.numer), rhs.denom); + } + let lcm = self.denom.lcm(&rhs.denom); + let lhs_numer = self.numer * (lcm.clone() / self.denom); + let rhs_numer = rhs.numer * (lcm.clone() / rhs.denom); + Ratio::new(lhs_numer.$method(rhs_numer), lcm) + } + } + // Abstracts the a/b `op` c/1 = (a*1 `op` b*c) / (b*1) = (a `op` b*c) / b pattern + impl $imp for Ratio { + type Output = Ratio; + #[inline] + fn $method(self, rhs: T) -> Ratio { + Ratio::new(self.numer.$method(self.denom.clone() * rhs), self.denom) + } + } + }; +} + +arith_impl!(impl Add, add); +arith_impl!(impl Sub, sub); +arith_impl!(impl Rem, rem); + +// a/b * c/d = (a*c)/(b*d) +impl CheckedMul for Ratio +where + T: Clone + Integer + CheckedMul, +{ + #[inline] + fn checked_mul(&self, rhs: &Ratio) -> Option> { + let gcd_ad = self.numer.gcd(&rhs.denom); + let gcd_bc = self.denom.gcd(&rhs.numer); + Some(Ratio::new( + (self.numer.clone() / gcd_ad.clone()) + .checked_mul(&(rhs.numer.clone() / gcd_bc.clone()))?, + (self.denom.clone() / gcd_bc).checked_mul(&(rhs.denom.clone() / gcd_ad))?, + )) + } +} + +// (a/b) / (c/d) = (a*d)/(b*c) +impl CheckedDiv for Ratio +where + T: Clone + Integer + CheckedMul, +{ + #[inline] + fn checked_div(&self, rhs: &Ratio) -> Option> { + if rhs.is_zero() { + return None; + } + let (numer, denom) = if self.denom == rhs.denom { + (self.numer.clone(), rhs.numer.clone()) + } else if self.numer == rhs.numer { + (rhs.denom.clone(), self.denom.clone()) + } else { + let gcd_ac = self.numer.gcd(&rhs.numer); + let gcd_bd = 
self.denom.gcd(&rhs.denom); + ( + (self.numer.clone() / gcd_ac.clone()) + .checked_mul(&(rhs.denom.clone() / gcd_bd.clone()))?, + (self.denom.clone() / gcd_bd).checked_mul(&(rhs.numer.clone() / gcd_ac))?, + ) + }; + // Manual `reduce()`, avoiding sharp edges + if denom.is_zero() { + None + } else if numer.is_zero() { + Some(Self::zero()) + } else if numer == denom { + Some(Self::one()) + } else { + let g = numer.gcd(&denom); + let numer = numer / g.clone(); + let denom = denom / g; + let raw = if denom < T::zero() { + // We need to keep denom positive, but 2's-complement MIN may + // overflow negation -- instead we can check multiplying -1. + let n1 = T::zero() - T::one(); + Ratio::new_raw(numer.checked_mul(&n1)?, denom.checked_mul(&n1)?) + } else { + Ratio::new_raw(numer, denom) + }; + Some(raw) + } + } +} + +// As arith_impl! but for Checked{Add,Sub} traits +macro_rules! checked_arith_impl { + (impl $imp:ident, $method:ident) => { + impl $imp for Ratio { + #[inline] + fn $method(&self, rhs: &Ratio) -> Option> { + let gcd = self.denom.clone().gcd(&rhs.denom); + let lcm = (self.denom.clone() / gcd.clone()).checked_mul(&rhs.denom)?; + let lhs_numer = (lcm.clone() / self.denom.clone()).checked_mul(&self.numer)?; + let rhs_numer = (lcm.clone() / rhs.denom.clone()).checked_mul(&rhs.numer)?; + Some(Ratio::new(lhs_numer.$method(&rhs_numer)?, lcm)) + } + } + }; +} + +// a/b + c/d = (lcm/b*a + lcm/d*c)/lcm, where lcm = lcm(b,d) +checked_arith_impl!(impl CheckedAdd, checked_add); + +// a/b - c/d = (lcm/b*a - lcm/d*c)/lcm, where lcm = lcm(b,d) +checked_arith_impl!(impl CheckedSub, checked_sub); + +impl Neg for Ratio +where + T: Clone + Integer + Neg, +{ + type Output = Ratio; + + #[inline] + fn neg(self) -> Ratio { + Ratio::new_raw(-self.numer, self.denom) + } +} + +impl Neg for &Ratio +where + T: Clone + Integer + Neg, +{ + type Output = Ratio; + + #[inline] + fn neg(self) -> Ratio { + -self.clone() + } +} + +impl Inv for Ratio +where + T: Clone + Integer, +{ + type Output 
= Ratio<T>;

    #[inline]
    fn inv(self) -> Ratio<T> {
        self.recip()
    }
}

impl<T> Inv for &Ratio<T>
where
    T: Clone + Integer,
{
    type Output = Ratio<T>;

    #[inline]
    fn inv(self) -> Ratio<T> {
        self.recip()
    }
}

// Constants
impl<T: ConstZero + ConstOne> Ratio<T> {
    /// A constant `Ratio` 0/1.
    pub const ZERO: Self = Self::new_raw(T::ZERO, T::ONE);
}

impl<T: Clone + Integer + ConstZero + ConstOne> ConstZero for Ratio<T> {
    const ZERO: Self = Self::ZERO;
}

impl<T: Clone + Integer> Zero for Ratio<T> {
    #[inline]
    fn zero() -> Ratio<T> {
        // 0/1 is already in lowest terms, so new_raw is safe here.
        Ratio::new_raw(Zero::zero(), One::one())
    }

    #[inline]
    fn is_zero(&self) -> bool {
        // A ratio is zero iff its numerator is zero, regardless of denominator.
        self.numer.is_zero()
    }

    #[inline]
    fn set_zero(&mut self) {
        self.numer.set_zero();
        self.denom.set_one();
    }
}

impl<T: ConstOne> Ratio<T> {
    /// A constant `Ratio` 1/1.
    pub const ONE: Self = Self::new_raw(T::ONE, T::ONE);
}

impl<T: Clone + Integer + ConstOne> ConstOne for Ratio<T> {
    const ONE: Self = Self::ONE;
}

impl<T: Clone + Integer> One for Ratio<T> {
    #[inline]
    fn one() -> Ratio<T> {
        Ratio::new_raw(One::one(), One::one())
    }

    #[inline]
    fn is_one(&self) -> bool {
        // Works even for unreduced ratios like 2/2.
        self.numer == self.denom
    }

    #[inline]
    fn set_one(&mut self) {
        self.numer.set_one();
        self.denom.set_one();
    }
}

impl<T: Clone + Integer> Num for Ratio<T> {
    type FromStrRadixErr = ParseRatioError;

    /// Parses `numer/denom` where the numbers are in base `radix`.
+ fn from_str_radix(s: &str, radix: u32) -> Result, ParseRatioError> { + if s.splitn(2, '/').count() == 2 { + let mut parts = s.splitn(2, '/').map(|ss| { + T::from_str_radix(ss, radix).map_err(|_| ParseRatioError { + kind: RatioErrorKind::ParseError, + }) + }); + let numer: T = parts.next().unwrap()?; + let denom: T = parts.next().unwrap()?; + if denom.is_zero() { + Err(ParseRatioError { + kind: RatioErrorKind::ZeroDenominator, + }) + } else { + Ok(Ratio::new(numer, denom)) + } + } else { + Err(ParseRatioError { + kind: RatioErrorKind::ParseError, + }) + } + } +} + +impl Signed for Ratio { + #[inline] + fn abs(&self) -> Ratio { + if self.is_negative() { + -self.clone() + } else { + self.clone() + } + } + + #[inline] + fn abs_sub(&self, other: &Ratio) -> Ratio { + if *self <= *other { + Zero::zero() + } else { + self - other + } + } + + #[inline] + fn signum(&self) -> Ratio { + if self.is_positive() { + Self::one() + } else if self.is_zero() { + Self::zero() + } else { + -Self::one() + } + } + + #[inline] + fn is_positive(&self) -> bool { + (self.numer.is_positive() && self.denom.is_positive()) + || (self.numer.is_negative() && self.denom.is_negative()) + } + + #[inline] + fn is_negative(&self) -> bool { + (self.numer.is_negative() && self.denom.is_positive()) + || (self.numer.is_positive() && self.denom.is_negative()) + } +} + +// String conversions +macro_rules! 
impl_formatting { + ($fmt_trait:ident, $prefix:expr, $fmt_str:expr, $fmt_alt:expr) => { + impl $fmt_trait for Ratio { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let pre_pad = if self.denom.is_one() { + format!($fmt_str, self.numer) + } else { + if f.alternate() { + format!(concat!($fmt_str, "/", $fmt_alt), self.numer, self.denom) + } else { + format!(concat!($fmt_str, "/", $fmt_str), self.numer, self.denom) + } + }; + if let Some(pre_pad) = pre_pad.strip_prefix("-") { + f.pad_integral(false, $prefix, pre_pad) + } else { + f.pad_integral(true, $prefix, &pre_pad) + } + } + #[cfg(not(feature = "std"))] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let plus = if f.sign_plus() && self.numer >= T::zero() { + "+" + } else { + "" + }; + if self.denom.is_one() { + if f.alternate() { + write!(f, concat!("{}", $fmt_alt), plus, self.numer) + } else { + write!(f, concat!("{}", $fmt_str), plus, self.numer) + } + } else { + if f.alternate() { + write!( + f, + concat!("{}", $fmt_alt, "/", $fmt_alt), + plus, self.numer, self.denom + ) + } else { + write!( + f, + concat!("{}", $fmt_str, "/", $fmt_str), + plus, self.numer, self.denom + ) + } + } + } + } + }; +} + +impl_formatting!(Display, "", "{}", "{:#}"); +impl_formatting!(Octal, "0o", "{:o}", "{:#o}"); +impl_formatting!(Binary, "0b", "{:b}", "{:#b}"); +impl_formatting!(LowerHex, "0x", "{:x}", "{:#x}"); +impl_formatting!(UpperHex, "0x", "{:X}", "{:#X}"); +impl_formatting!(LowerExp, "", "{:e}", "{:#e}"); +impl_formatting!(UpperExp, "", "{:E}", "{:#E}"); + +impl FromStr for Ratio { + type Err = ParseRatioError; + + /// Parses `numer/denom` or just `numer`. 
fn from_str(s: &str) -> Result<Ratio<T>, ParseRatioError> {
        let mut split = s.splitn(2, '/');

        // The first splitn item always exists (even for ""), but keep the
        // explicit error mapping so the failure mode is uniform.
        let n = split.next().ok_or(ParseRatioError {
            kind: RatioErrorKind::ParseError,
        })?;
        let num = FromStr::from_str(n).map_err(|_| ParseRatioError {
            kind: RatioErrorKind::ParseError,
        })?;

        // A missing "/denom" part means an integer: denominator defaults to 1.
        let d = split.next().unwrap_or("1");
        let den = FromStr::from_str(d).map_err(|_| ParseRatioError {
            kind: RatioErrorKind::ParseError,
        })?;

        if Zero::is_zero(&den) {
            Err(ParseRatioError {
                kind: RatioErrorKind::ZeroDenominator,
            })
        } else {
            Ok(Ratio::new(num, den))
        }
    }
}

impl<T> From<Ratio<T>> for (T, T) {
    fn from(val: Ratio<T>) -> Self {
        (val.numer, val.denom)
    }
}

#[cfg(feature = "serde")]
impl<T> serde::Serialize for Ratio<T>
where
    T: serde::Serialize + Clone + Integer + PartialOrd,
{
    /// Serializes as a `(numer, denom)` tuple.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        (self.numer(), self.denom()).serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de, T> serde::Deserialize<'de> for Ratio<T>
where
    T: serde::Deserialize<'de> + Clone + Integer + PartialOrd,
{
    /// Deserializes from a `(numer, denom)` tuple, rejecting a zero denominator.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        use serde::de::{Error, Unexpected};
        let (numer, denom): (T, T) = serde::Deserialize::deserialize(deserializer)?;
        if denom.is_zero() {
            Err(Error::invalid_value(
                Unexpected::Signed(0),
                &"a ratio with non-zero denominator",
            ))
        } else {
            // new_raw preserves the serialized form without re-reducing.
            Ok(Ratio::new_raw(numer, denom))
        }
    }
}

// FIXME: Bubble up specific errors
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct ParseRatioError {
    kind: RatioErrorKind,
}

#[derive(Copy, Clone, Debug, PartialEq)]
enum RatioErrorKind {
    ParseError,
    ZeroDenominator,
}

impl fmt::Display for ParseRatioError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.kind.description().fmt(f)
    }
}

#[cfg(feature = "std")]
impl Error for ParseRatioError {
    #[allow(deprecated)]
    fn description(&self) -> &str {
self.kind.description() + } +} + +impl RatioErrorKind { + fn description(&self) -> &'static str { + match *self { + RatioErrorKind::ParseError => "failed to parse integer", + RatioErrorKind::ZeroDenominator => "zero value denominator", + } + } +} + +#[cfg(feature = "num-bigint-generic")] +impl FromPrimitive for Ratio { + fn from_i64(n: i64) -> Option { + Some(Ratio::from_integer(n.into())) + } + + fn from_i128(n: i128) -> Option { + Some(Ratio::from_integer(n.into())) + } + + fn from_u64(n: u64) -> Option { + Some(Ratio::from_integer(n.into())) + } + + fn from_u128(n: u128) -> Option { + Some(Ratio::from_integer(n.into())) + } + + fn from_f32(n: f32) -> Option { + Ratio::from_float(n) + } + + fn from_f64(n: f64) -> Option { + Ratio::from_float(n) + } +} + +macro_rules! from_primitive_integer { + ($typ:ty, $approx:ident) => { + impl FromPrimitive for Ratio<$typ> { + fn from_i64(n: i64) -> Option { + <$typ as FromPrimitive>::from_i64(n).map(Ratio::from_integer) + } + + fn from_i128(n: i128) -> Option { + <$typ as FromPrimitive>::from_i128(n).map(Ratio::from_integer) + } + + fn from_u64(n: u64) -> Option { + <$typ as FromPrimitive>::from_u64(n).map(Ratio::from_integer) + } + + fn from_u128(n: u128) -> Option { + <$typ as FromPrimitive>::from_u128(n).map(Ratio::from_integer) + } + + fn from_f32(n: f32) -> Option { + $approx(n, 10e-20, 30) + } + + fn from_f64(n: f64) -> Option { + $approx(n, 10e-20, 30) + } + } + }; +} + +from_primitive_integer!(i8, approximate_float); +from_primitive_integer!(i16, approximate_float); +from_primitive_integer!(i32, approximate_float); +from_primitive_integer!(i64, approximate_float); +from_primitive_integer!(i128, approximate_float); +from_primitive_integer!(isize, approximate_float); + +from_primitive_integer!(u8, approximate_float_unsigned); +from_primitive_integer!(u16, approximate_float_unsigned); +from_primitive_integer!(u32, approximate_float_unsigned); +from_primitive_integer!(u64, approximate_float_unsigned); 
+from_primitive_integer!(u128, approximate_float_unsigned); +from_primitive_integer!(usize, approximate_float_unsigned); + +impl Ratio { + pub fn approximate_float(f: F) -> Option> { + // 1/10e-20 < 1/2**32 which seems like a good default, and 30 seems + // to work well. Might want to choose something based on the types in the future, e.g. + // T::max().recip() and T::bits() or something similar. + let epsilon = ::from(10e-20).expect("Can't convert 10e-20"); + approximate_float(f, epsilon, 30) + } +} + +impl Ratio { + pub fn approximate_float_unsigned(f: F) -> Option> { + // 1/10e-20 < 1/2**32 which seems like a good default, and 30 seems + // to work well. Might want to choose something based on the types in the future, e.g. + // T::max().recip() and T::bits() or something similar. + let epsilon = ::from(10e-20).expect("Can't convert 10e-20"); + approximate_float_unsigned(f, epsilon, 30) + } +} + +fn approximate_float(val: F, max_error: F, max_iterations: usize) -> Option> +where + T: Integer + Signed + Bounded + NumCast + Clone, + F: FloatCore + NumCast, +{ + let negative = val.is_sign_negative(); + let abs_val = val.abs(); + + let r = approximate_float_unsigned(abs_val, max_error, max_iterations)?; + + // Make negative again if needed + Some(if negative { r.neg() } else { r }) +} + +// No Unsigned constraint because this also works on positive integers and is called +// like that, see above +fn approximate_float_unsigned(val: F, max_error: F, max_iterations: usize) -> Option> +where + T: Integer + Bounded + NumCast + Clone, + F: FloatCore + NumCast, +{ + // Continued fractions algorithm + // https://web.archive.org/web/20200629111319/http://mathforum.org:80/dr.math/faq/faq.fractions.html#decfrac + + if val < F::zero() || val.is_nan() { + return None; + } + + let mut q = val; + let mut n0 = T::zero(); + let mut d0 = T::one(); + let mut n1 = T::one(); + let mut d1 = T::zero(); + + let t_max = T::max_value(); + let t_max_f = ::from(t_max.clone())?; + + // 1/epsilon 
> T::MAX + let epsilon = t_max_f.recip(); + + // Overflow + if q > t_max_f { + return None; + } + + for _ in 0..max_iterations { + let a = match ::from(q) { + None => break, + Some(a) => a, + }; + + let a_f = match ::from(a.clone()) { + None => break, + Some(a_f) => a_f, + }; + let f = q - a_f; + + // Prevent overflow + if !a.is_zero() + && (n1 > t_max.clone() / a.clone() + || d1 > t_max.clone() / a.clone() + || a.clone() * n1.clone() > t_max.clone() - n0.clone() + || a.clone() * d1.clone() > t_max.clone() - d0.clone()) + { + break; + } + + let n = a.clone() * n1.clone() + n0.clone(); + let d = a.clone() * d1.clone() + d0.clone(); + + n0 = n1; + d0 = d1; + n1 = n.clone(); + d1 = d.clone(); + + // Simplify fraction. Doing so here instead of at the end + // allows us to get closer to the target value without overflows + let g = Integer::gcd(&n1, &d1); + if !g.is_zero() { + n1 = n1 / g.clone(); + d1 = d1 / g.clone(); + } + + // Close enough? + let (n_f, d_f) = match (::from(n), ::from(d)) { + (Some(n_f), Some(d_f)) => (n_f, d_f), + _ => break, + }; + if (n_f / d_f - val).abs() < max_error { + break; + } + + // Prevent division by ~0 + if f < epsilon { + break; + } + q = f.recip(); + } + + // Overflow + if d1.is_zero() { + return None; + } + + Some(Ratio::new(n1, d1)) +} + +#[cfg(not(feature = "num-bigint-generic"))] +macro_rules! 
to_primitive_small { + ($($type_name:ty)*) => ($( + impl ToPrimitive for Ratio<$type_name> { + fn to_i64(&self) -> Option { + self.to_integer().to_i64() + } + + fn to_i128(&self) -> Option { + self.to_integer().to_i128() + } + + fn to_u64(&self) -> Option { + self.to_integer().to_u64() + } + + fn to_u128(&self) -> Option { + self.to_integer().to_u128() + } + + fn to_f64(&self) -> Option { + let float = self.numer.to_f64().unwrap() / self.denom.to_f64().unwrap(); + if float.is_nan() { + None + } else { + Some(float) + } + } + } + )*) +} + +#[cfg(not(feature = "num-bigint-generic"))] +to_primitive_small!(u8 i8 u16 i16 u32 i32); + +#[cfg(all(target_pointer_width = "32", not(feature = "num-bigint-generic")))] +to_primitive_small!(usize isize); + +#[cfg(not(feature = "num-bigint-generic"))] +macro_rules! to_primitive_64 { + ($($type_name:ty)*) => ($( + impl ToPrimitive for Ratio<$type_name> { + fn to_i64(&self) -> Option { + self.to_integer().to_i64() + } + + fn to_i128(&self) -> Option { + self.to_integer().to_i128() + } + + fn to_u64(&self) -> Option { + self.to_integer().to_u64() + } + + fn to_u128(&self) -> Option { + self.to_integer().to_u128() + } + + fn to_f64(&self) -> Option { + let float = ratio_to_f64( + self.numer as i128, + self.denom as i128 + ); + if float.is_nan() { + None + } else { + Some(float) + } + } + } + )*) +} + +#[cfg(not(feature = "num-bigint-generic"))] +to_primitive_64!(u64 i64); + +#[cfg(all(target_pointer_width = "64", not(feature = "num-bigint-generic")))] +to_primitive_64!(usize isize); + +#[cfg(feature = "num-bigint-generic")] +impl ToPrimitive for Ratio { + fn to_i64(&self) -> Option { + self.to_integer().to_i64() + } + + fn to_i128(&self) -> Option { + self.to_integer().to_i128() + } + + fn to_u64(&self) -> Option { + self.to_integer().to_u64() + } + + fn to_u128(&self) -> Option { + self.to_integer().to_u128() + } + + fn to_f64(&self) -> Option { + let float = match (self.numer.to_i64(), self.denom.to_i64()) { + (Some(numer), 
Some(denom)) => ratio_to_f64( + >::from(numer), + >::from(denom), + ), + _ => { + let numer: BigInt<32> = self.numer.to_bigint()?; + let denom: BigInt<32> = self.denom.to_bigint()?; + ratio_to_f64(numer, denom) + } + }; + if float.is_nan() { + None + } else { + Some(float) + } + } +} + +trait Bits { + fn bits(&self) -> u64; +} + +#[cfg(feature = "num-bigint-generic")] +impl Bits for BigInt { + fn bits(&self) -> u64 { + self.bits() + } +} + +impl Bits for i128 { + fn bits(&self) -> u64 { + (128 - self.wrapping_abs().leading_zeros()).into() + } +} + +/// Converts a ratio of `T` to an f64. +/// +/// In addition to stated trait bounds, `T` must be able to hold numbers 56 bits larger than +/// the largest of `numer` and `denom`. This is automatically true if `T` is `BigInt`. +fn ratio_to_f64 + ToPrimitive>( + numer: T, + denom: T, +) -> f64 { + assert_eq!( + f64::RADIX, + 2, + "only floating point implementations with radix 2 are supported" + ); + + // Inclusive upper and lower bounds to the range of exactly-representable ints in an f64. + const MAX_EXACT_INT: i64 = 1i64 << f64::MANTISSA_DIGITS; + const MIN_EXACT_INT: i64 = -MAX_EXACT_INT; + + let flo_sign = numer.signum().to_f64().unwrap() / denom.signum().to_f64().unwrap(); + if !flo_sign.is_normal() { + return flo_sign; + } + + // Fast track: both sides can losslessly be converted to f64s. In this case, letting the + // FPU do the job is faster and easier. In any other case, converting to f64s may lead + // to an inexact result: https://stackoverflow.com/questions/56641441/. + if let (Some(n), Some(d)) = (numer.to_i64(), denom.to_i64()) { + let exact = MIN_EXACT_INT..=MAX_EXACT_INT; + if exact.contains(&n) && exact.contains(&d) { + return n.to_f64().unwrap() / d.to_f64().unwrap(); + } + } + + // Otherwise, the goal is to obtain a quotient with at least 55 bits. 53 of these bits will + // be used as the mantissa of the resulting float, and the remaining two are for rounding. 
+ // There's an error of up to 1 on the number of resulting bits, so we may get either 55 or + // 56 bits. + let mut numer = numer.abs(); + let mut denom = denom.abs(); + let (is_diff_positive, absolute_diff) = match numer.bits().checked_sub(denom.bits()) { + Some(diff) => (true, diff), + None => (false, denom.bits() - numer.bits()), + }; + + // Filter out overflows and underflows. After this step, the signed difference fits in an + // isize. + if is_diff_positive && absolute_diff > f64::MAX_EXP as u64 { + return f64::INFINITY * flo_sign; + } + if !is_diff_positive && absolute_diff > -f64::MIN_EXP as u64 + f64::MANTISSA_DIGITS as u64 + 1 { + return 0.0 * flo_sign; + } + let diff = if is_diff_positive { + absolute_diff.to_isize().unwrap() + } else { + -absolute_diff.to_isize().unwrap() + }; + + // Shift is chosen so that the quotient will have 55 or 56 bits. The exception is if the + // quotient is going to be subnormal, in which case it may have fewer bits. + let shift: isize = diff.max(f64::MIN_EXP as isize) - f64::MANTISSA_DIGITS as isize - 2; + if shift >= 0 { + denom <<= shift as usize + } else { + numer <<= -shift as usize + }; + + let (quotient, remainder) = numer.div_rem(&denom); + + // This is guaranteed to fit since we've set up quotient to be at most 56 bits. + let mut quotient = quotient.to_u64().unwrap(); + let n_rounding_bits = { + let quotient_bits = 64 - quotient.leading_zeros() as isize; + let subnormal_bits = f64::MIN_EXP as isize - shift; + quotient_bits.max(subnormal_bits) - f64::MANTISSA_DIGITS as isize + } as usize; + debug_assert!(n_rounding_bits == 2 || n_rounding_bits == 3); + let rounding_bit_mask = (1u64 << n_rounding_bits) - 1; + + // Round to 53 bits with round-to-even. For rounding, we need to take into account both + // our rounding bits and the division's remainder. 
let ls_bit = quotient & (1u64 << n_rounding_bits) != 0;
    let ms_rounding_bit = quotient & (1u64 << (n_rounding_bits - 1)) != 0;
    let ls_rounding_bits = quotient & (rounding_bit_mask >> 1) != 0;
    // Round-half-to-even: round up when the most-significant rounding bit is set
    // and either the result is odd or any lower bit (incl. the remainder) is set.
    if ms_rounding_bit && (ls_bit || ls_rounding_bits || !remainder.is_zero()) {
        quotient += 1u64 << n_rounding_bits;
    }
    quotient &= !rounding_bit_mask;

    // The quotient is guaranteed to be exactly representable as it's now 53 bits + 2 or 3
    // trailing zeros, so there is no risk of a rounding error here.
    let q_float = quotient as f64 * flo_sign;
    ldexp(q_float, shift as i32)
}

/// Multiply `x` by 2 to the power of `exp`. Returns an accurate result even if `2^exp` is not
/// representable.
fn ldexp(x: f64, exp: i32) -> f64 {
    assert_eq!(
        f64::RADIX,
        2,
        "only floating point implementations with radix 2 are supported"
    );

    const EXPONENT_MASK: u64 = 0x7ff << 52;
    const MAX_UNSIGNED_EXPONENT: i32 = 0x7fe;
    const MIN_SUBNORMAL_POWER: i32 = f64::MANTISSA_DIGITS as i32;

    if x.is_zero() || x.is_infinite() || x.is_nan() {
        return x;
    }

    // Filter out obvious over / underflows to make sure the resulting exponent fits in an isize.
    if exp > 3 * f64::MAX_EXP {
        return f64::INFINITY * x.signum();
    } else if exp < -3 * f64::MAX_EXP {
        return 0.0 * x.signum();
    }

    // curr_exp is the x's *biased* exponent, and is in the [-54, MAX_UNSIGNED_EXPONENT] range.
    let (bits, curr_exp) = if !x.is_normal() {
        // If x is subnormal, we make it normal by multiplying by 2^53. This causes no loss of
        // precision or rounding.
        let normal_x = x * 2f64.powi(MIN_SUBNORMAL_POWER);
        let bits = normal_x.to_bits();
        // This cast is safe because the exponent is at most 0x7fe, which fits in an i32.
        (
            bits,
            ((bits & EXPONENT_MASK) >> 52) as i32 - MIN_SUBNORMAL_POWER,
        )
    } else {
        let bits = x.to_bits();
        let curr_exp = (bits & EXPONENT_MASK) >> 52;
        // This cast is safe because the exponent is at most 0x7fe, which fits in an i32.
        (bits, curr_exp as i32)
    };

    // The addition can't overflow because exponent is between 0 and 0x7fe, and exp is between
    // -2*f64::MAX_EXP and 2*f64::MAX_EXP.
    let new_exp = curr_exp + exp;

    if new_exp > MAX_UNSIGNED_EXPONENT {
        f64::INFINITY * x.signum()
    } else if new_exp > 0 {
        // Normal case: exponent is not too large nor subnormal.
        let new_bits = (bits & !EXPONENT_MASK) | ((new_exp as u64) << 52);
        f64::from_bits(new_bits)
    } else if new_exp >= -(f64::MANTISSA_DIGITS as i32) {
        // Result is subnormal but may not be zero.
        // In this case, we increase the exponent by 54 to make it normal, then multiply the end
        // result by 2^-53. This results in a single multiplication with no prior rounding error,
        // so there is no risk of double rounding.
        let new_exp = new_exp + MIN_SUBNORMAL_POWER;
        debug_assert!(new_exp >= 0);
        let new_bits = (bits & !EXPONENT_MASK) | ((new_exp as u64) << 52);
        f64::from_bits(new_bits) * 2f64.powi(-MIN_SUBNORMAL_POWER)
    } else {
        // Result is zero.
+ return 0.0 * x.signum(); + } +} + +#[cfg(test)] +#[cfg(feature = "std")] +fn hash(x: &T) -> u64 { + use std::{collections::hash_map::RandomState, hash::BuildHasher}; + let mut hasher = ::Hasher::new(); + x.hash(&mut hasher); + hasher.finish() +} + +#[cfg(test)] +mod test { + use super::{ldexp, Ratio, Rational64}; + #[cfg(feature = "num-bigint-generic")] + use super::{BigInt, BigRational}; + + use core::str::FromStr; + use num_integer::Integer; + use num_traits::{FromPrimitive, One, Pow, Signed, ToPrimitive, Zero}; + + pub const _0: Rational64 = Ratio { numer: 0, denom: 1 }; + pub const _1: Rational64 = Ratio { numer: 1, denom: 1 }; + pub const _2: Rational64 = Ratio { numer: 2, denom: 1 }; + pub const _NEG2: Rational64 = Ratio { + numer: -2, + denom: 1, + }; + pub const _8: Rational64 = Ratio { numer: 8, denom: 1 }; + pub const _15: Rational64 = Ratio { + numer: 15, + denom: 1, + }; + pub const _16: Rational64 = Ratio { + numer: 16, + denom: 1, + }; + + pub const _1_2: Rational64 = Ratio { numer: 1, denom: 2 }; + pub const _1_8: Rational64 = Ratio { numer: 1, denom: 8 }; + pub const _1_15: Rational64 = Ratio { + numer: 1, + denom: 15, + }; + pub const _1_16: Rational64 = Ratio { + numer: 1, + denom: 16, + }; + pub const THREE_2: Rational64 = Ratio { numer: 3, denom: 2 }; + pub const _5_2: Rational64 = Ratio { numer: 5, denom: 2 }; + pub const _NEG1_2: Rational64 = Ratio { + numer: -1, + denom: 2, + }; + pub const _1_NEG2: Rational64 = Ratio { + numer: 1, + denom: -2, + }; + pub const _NEG1_NEG2: Rational64 = Ratio { + numer: -1, + denom: -2, + }; + pub const _1THREE: Rational64 = Ratio { numer: 1, denom: 3 }; + pub const _NEG1THREE: Rational64 = Ratio { + numer: -1, + denom: 3, + }; + pub const TWO_THIRDS: Rational64 = Ratio { numer: 2, denom: 3 }; + pub const _NEG2THREE: Rational64 = Ratio { + numer: -2, + denom: 3, + }; + pub const _MIN: Rational64 = Ratio { + numer: i64::MIN, + denom: 1, + }; + pub const _MIN_P1: Rational64 = Ratio { + numer: i64::MIN + 1, + 
denom: 1, + }; + pub const _MAX: Rational64 = Ratio { + numer: i64::MAX, + denom: 1, + }; + pub const _MAX_M1: Rational64 = Ratio { + numer: i64::MAX - 1, + denom: 1, + }; + pub const _BILLION: Rational64 = Ratio { + numer: 1_000_000_000, + denom: 1, + }; + + #[cfg(feature = "num-bigint-generic")] + pub fn to_big(n: Rational64) -> BigRational { + Ratio::new( + FromPrimitive::from_i64(n.numer).unwrap(), + FromPrimitive::from_i64(n.denom).unwrap(), + ) + } + #[cfg(not(feature = "num-bigint-generic"))] + pub fn to_big(n: Rational64) -> Rational64 { + Ratio::new( + FromPrimitive::from_i64(n.numer).unwrap(), + FromPrimitive::from_i64(n.denom).unwrap(), + ) + } + + #[test] + fn test_test_constants() { + // check our constants are what Ratio::new etc. would make. + assert_eq!(_0, Zero::zero()); + assert_eq!(_1, One::one()); + assert_eq!(_2, Ratio::from_integer(2)); + assert_eq!(_1_2, Ratio::new(1, 2)); + assert_eq!(THREE_2, Ratio::new(3, 2)); + assert_eq!(_NEG1_2, Ratio::new(-1, 2)); + assert_eq!(_2, From::from(2)); + } + + #[test] + fn test_new_reduce() { + assert_eq!(Ratio::new(2, 2), One::one()); + assert_eq!(Ratio::new(0, i32::MIN), Zero::zero()); + assert_eq!(Ratio::new(i32::MIN, i32::MIN), One::one()); + } + #[test] + #[should_panic] + fn test_new_zero() { + let _a = Ratio::new(1, 0); + } + + #[test] + fn test_approximate_float() { + assert_eq!(Ratio::from_f32(0.5f32), Some(Ratio::new(1i64, 2))); + assert_eq!(Ratio::from_f64(0.5f64), Some(Ratio::new(1i32, 2))); + assert_eq!(Ratio::from_f32(5f32), Some(Ratio::new(5i64, 1))); + assert_eq!(Ratio::from_f64(5f64), Some(Ratio::new(5i32, 1))); + assert_eq!(Ratio::from_f32(29.97f32), Some(Ratio::new(2997i64, 100))); + assert_eq!(Ratio::from_f32(-29.97f32), Some(Ratio::new(-2997i64, 100))); + + assert_eq!(Ratio::::from_f32(63.5f32), Some(Ratio::new(127i8, 2))); + assert_eq!(Ratio::::from_f32(126.5f32), Some(Ratio::new(126i8, 1))); + assert_eq!(Ratio::::from_f32(127.0f32), Some(Ratio::new(127i8, 1))); + 
assert_eq!(Ratio::::from_f32(127.5f32), None); + assert_eq!(Ratio::::from_f32(-63.5f32), Some(Ratio::new(-127i8, 2))); + assert_eq!( + Ratio::::from_f32(-126.5f32), + Some(Ratio::new(-126i8, 1)) + ); + assert_eq!( + Ratio::::from_f32(-127.0f32), + Some(Ratio::new(-127i8, 1)) + ); + assert_eq!(Ratio::::from_f32(-127.5f32), None); + + assert_eq!(Ratio::::from_f32(-127f32), None); + assert_eq!(Ratio::::from_f32(127f32), Some(Ratio::new(127u8, 1))); + assert_eq!(Ratio::::from_f32(127.5f32), Some(Ratio::new(255u8, 2))); + assert_eq!(Ratio::::from_f32(256f32), None); + + assert_eq!(Ratio::::from_f64(-10e200), None); + assert_eq!(Ratio::::from_f64(10e200), None); + assert_eq!(Ratio::::from_f64(f64::INFINITY), None); + assert_eq!(Ratio::::from_f64(f64::NEG_INFINITY), None); + assert_eq!(Ratio::::from_f64(f64::NAN), None); + assert_eq!( + Ratio::::from_f64(f64::EPSILON), + Some(Ratio::new(1, 4503599627370496)) + ); + assert_eq!(Ratio::::from_f64(0.0), Some(Ratio::new(0, 1))); + assert_eq!(Ratio::::from_f64(-0.0), Some(Ratio::new(0, 1))); + } + + #[test] + #[allow(clippy::eq_op)] + fn test_cmp() { + assert!(_0 == _0 && _1 == _1); + assert!(_0 != _1); + assert!(_0 < _1); + assert!(_1 > _0); + + assert!(_0 <= _0 && _1 <= _1); + assert!(_0 <= _1 && (_1 > _0)); + + assert!(_0 >= _0 && _1 >= _1); + assert!(_1 >= _0 && (_0 < _1)); + + let zero_half: Rational64 = Ratio::new_raw(0, 2); + assert_eq!(_0, zero_half); + } + + #[test] + fn test_cmp_overflow() { + use core::cmp::Ordering; + + // issue #7 example: + let big = Ratio::new(128u8, 1); + let small = big.recip(); + assert!(big > small); + + // try a few that are closer together + // (some matching numer, some matching denom, some neither) + let ratios = [ + Ratio::new(125_i8, 127_i8), + Ratio::new(63_i8, 64_i8), + Ratio::new(124_i8, 125_i8), + Ratio::new(125_i8, 126_i8), + Ratio::new(126_i8, 127_i8), + Ratio::new(127_i8, 126_i8), + ]; + + fn check_cmp(a: Ratio, b: Ratio, ord: Ordering) { + #[cfg(feature = "std")] + 
println!("comparing {} and {}", a, b); + assert_eq!(a.cmp(&b), ord); + assert_eq!(b.cmp(&a), ord.reverse()); + } + + for (i, &a) in ratios.iter().enumerate() { + check_cmp(a, a, Ordering::Equal); + check_cmp(-a, a, Ordering::Less); + for &b in &ratios[i + 1..] { + check_cmp(a, b, Ordering::Less); + check_cmp(-a, -b, Ordering::Greater); + check_cmp(a.recip(), b.recip(), Ordering::Greater); + check_cmp(-a.recip(), -b.recip(), Ordering::Less); + } + } + } + + #[test] + fn test_to_integer() { + assert_eq!(_0.to_integer(), 0); + assert_eq!(_1.to_integer(), 1); + assert_eq!(_2.to_integer(), 2); + assert_eq!(_1_2.to_integer(), 0); + assert_eq!(THREE_2.to_integer(), 1); + assert_eq!(_NEG1_2.to_integer(), 0); + } + + #[test] + fn test_numer() { + assert_eq!(_0.numer(), &0); + assert_eq!(_1.numer(), &1); + assert_eq!(_2.numer(), &2); + assert_eq!(_1_2.numer(), &1); + assert_eq!(THREE_2.numer(), &3); + assert_eq!(_NEG1_2.numer(), &(-1)); + } + #[test] + fn test_denom() { + assert_eq!(_0.denom(), &1); + assert_eq!(_1.denom(), &1); + assert_eq!(_2.denom(), &1); + assert_eq!(_1_2.denom(), &2); + assert_eq!(THREE_2.denom(), &2); + assert_eq!(_NEG1_2.denom(), &2); + } + + #[test] + fn test_is_integer() { + assert!(_0.is_integer()); + assert!(_1.is_integer()); + assert!(_2.is_integer()); + assert!(!_1_2.is_integer()); + assert!(!THREE_2.is_integer()); + assert!(!_NEG1_2.is_integer()); + } + + #[cfg(not(feature = "std"))] + use core::fmt::{self, Write}; + #[cfg(not(feature = "std"))] + #[derive(Debug)] + struct NoStdTester { + cursor: usize, + buf: [u8; NoStdTester::BUF_SIZE], + } + + #[cfg(not(feature = "std"))] + impl NoStdTester { + fn new() -> NoStdTester { + NoStdTester { + buf: [0; Self::BUF_SIZE], + cursor: 0, + } + } + + fn clear(&mut self) { + self.buf = [0; Self::BUF_SIZE]; + self.cursor = 0; + } + + const WRITE_ERR: &'static str = "Formatted output too long"; + const BUF_SIZE: usize = 32; + } + + #[cfg(not(feature = "std"))] + impl Write for NoStdTester { + fn 
write_str(&mut self, s: &str) -> fmt::Result { + for byte in s.bytes() { + self.buf[self.cursor] = byte; + self.cursor += 1; + if self.cursor >= self.buf.len() { + return Err(fmt::Error {}); + } + } + Ok(()) + } + } + + #[cfg(not(feature = "std"))] + impl PartialEq for NoStdTester { + fn eq(&self, other: &str) -> bool { + let other = other.as_bytes(); + for index in 0..self.cursor { + if self.buf.get(index) != other.get(index) { + return false; + } + } + true + } + } + + macro_rules! assert_fmt_eq { + ($fmt_args:expr, $string:expr) => { + #[cfg(not(feature = "std"))] + { + let mut tester = NoStdTester::new(); + write!(tester, "{}", $fmt_args).expect(NoStdTester::WRITE_ERR); + assert_eq!(tester, *$string); + tester.clear(); + } + #[cfg(feature = "std")] + { + assert_eq!(std::fmt::format($fmt_args), $string); + } + }; + } + + #[test] + fn test_show() { + // Test: + // :b :o :x, :X, :? + // alternate or not (#) + // positive and negative + // padding + // does not test precision (i.e. truncation) + assert_fmt_eq!(format_args!("{}", _2), "2"); + assert_fmt_eq!(format_args!("{:+}", _2), "+2"); + assert_fmt_eq!(format_args!("{:-}", _2), "2"); + assert_fmt_eq!(format_args!("{}", _1_2), "1/2"); + assert_fmt_eq!(format_args!("{}", -_1_2), "-1/2"); // test negatives + assert_fmt_eq!(format_args!("{}", _0), "0"); + assert_fmt_eq!(format_args!("{}", -_2), "-2"); + assert_fmt_eq!(format_args!("{:+}", -_2), "-2"); + assert_fmt_eq!(format_args!("{:b}", _2), "10"); + assert_fmt_eq!(format_args!("{:#b}", _2), "0b10"); + assert_fmt_eq!(format_args!("{:b}", _1_2), "1/10"); + assert_fmt_eq!(format_args!("{:+b}", _1_2), "+1/10"); + assert_fmt_eq!(format_args!("{:-b}", _1_2), "1/10"); + assert_fmt_eq!(format_args!("{:b}", _0), "0"); + assert_fmt_eq!(format_args!("{:#b}", _1_2), "0b1/0b10"); + // no std does not support padding + #[cfg(feature = "std")] + assert_eq!(&format!("{:010b}", _1_2), "0000001/10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:#010b}", _1_2), "0b001/0b10"); 
+ let half_i8: Ratio = Ratio::new(1_i8, 2_i8); + assert_fmt_eq!(format_args!("{:b}", -half_i8), "11111111/10"); + assert_fmt_eq!(format_args!("{:#b}", -half_i8), "0b11111111/0b10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:05}", Ratio::new(-1_i8, 1_i8)), "-0001"); + + assert_fmt_eq!(format_args!("{:o}", _8), "10"); + assert_fmt_eq!(format_args!("{:o}", _1_8), "1/10"); + assert_fmt_eq!(format_args!("{:o}", _0), "0"); + assert_fmt_eq!(format_args!("{:#o}", _1_8), "0o1/0o10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:010o}", _1_8), "0000001/10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:#010o}", _1_8), "0o001/0o10"); + assert_fmt_eq!(format_args!("{:o}", -half_i8), "377/2"); + assert_fmt_eq!(format_args!("{:#o}", -half_i8), "0o377/0o2"); + + assert_fmt_eq!(format_args!("{:x}", _16), "10"); + assert_fmt_eq!(format_args!("{:x}", _15), "f"); + assert_fmt_eq!(format_args!("{:x}", _1_16), "1/10"); + assert_fmt_eq!(format_args!("{:x}", _1_15), "1/f"); + assert_fmt_eq!(format_args!("{:x}", _0), "0"); + assert_fmt_eq!(format_args!("{:#x}", _1_16), "0x1/0x10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:010x}", _1_16), "0000001/10"); + #[cfg(feature = "std")] + assert_eq!(&format!("{:#010x}", _1_16), "0x001/0x10"); + assert_fmt_eq!(format_args!("{:x}", -half_i8), "ff/2"); + assert_fmt_eq!(format_args!("{:#x}", -half_i8), "0xff/0x2"); + + assert_fmt_eq!(format_args!("{:X}", _16), "10"); + assert_fmt_eq!(format_args!("{:X}", _15), "F"); + assert_fmt_eq!(format_args!("{:X}", _1_16), "1/10"); + assert_fmt_eq!(format_args!("{:X}", _1_15), "1/F"); + assert_fmt_eq!(format_args!("{:X}", _0), "0"); + assert_fmt_eq!(format_args!("{:#X}", _1_16), "0x1/0x10"); + #[cfg(feature = "std")] + assert_eq!(format!("{:010X}", _1_16), "0000001/10"); + #[cfg(feature = "std")] + assert_eq!(format!("{:#010X}", _1_16), "0x001/0x10"); + assert_fmt_eq!(format_args!("{:X}", -half_i8), "FF/2"); + assert_fmt_eq!(format_args!("{:#X}", -half_i8), "0xFF/0x2"); + + 
assert_fmt_eq!(format_args!("{:e}", -_2), "-2e0"); + assert_fmt_eq!(format_args!("{:#e}", -_2), "-2e0"); + assert_fmt_eq!(format_args!("{:+e}", -_2), "-2e0"); + assert_fmt_eq!(format_args!("{:e}", _BILLION), "1e9"); + assert_fmt_eq!(format_args!("{:+e}", _BILLION), "+1e9"); + assert_fmt_eq!(format_args!("{:e}", _BILLION.recip()), "1e0/1e9"); + assert_fmt_eq!(format_args!("{:+e}", _BILLION.recip()), "+1e0/1e9"); + + assert_fmt_eq!(format_args!("{:E}", -_2), "-2E0"); + assert_fmt_eq!(format_args!("{:#E}", -_2), "-2E0"); + assert_fmt_eq!(format_args!("{:+E}", -_2), "-2E0"); + assert_fmt_eq!(format_args!("{:E}", _BILLION), "1E9"); + assert_fmt_eq!(format_args!("{:+E}", _BILLION), "+1E9"); + assert_fmt_eq!(format_args!("{:E}", _BILLION.recip()), "1E0/1E9"); + assert_fmt_eq!(format_args!("{:+E}", _BILLION.recip()), "+1E0/1E9"); + } + + mod arith { + use super::{ + super::{Ratio, Rational64}, + to_big, THREE_2, _0, _1, _1_2, _2, _5_2, _MAX, _MAX_M1, _MIN, _MIN_P1, _NEG1_2, + }; + use core::fmt::Debug; + use num_integer::Integer; + use num_traits::{Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, NumAssign}; + + #[test] + fn test_add() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a + b, c); + assert_eq!( + { + let mut x = a; + x += b; + x + }, + c + ); + assert_eq!(to_big(a) + to_big(b), to_big(c)); + assert_eq!(a.checked_add(&b), Some(c)); + assert_eq!(to_big(a).checked_add(&to_big(b)), Some(to_big(c))); + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a + b, c); + assert_eq!( + { + let mut x = a; + x += b; + x + }, + c + ); + } + + test(_1, _1_2, THREE_2); + test(_1, _1, _2); + test(_1_2, THREE_2, _2); + test(_1_2, _NEG1_2, _0); + test_assign(_1_2, 1, THREE_2); + } + + #[test] + fn test_add_overflow() { + // compares Ratio(1, T::max_value()) + Ratio(1, T::max_value()) + // to Ratio(1+1, T::max_value()) for each integer type. + // Previously, this calculation would overflow. 
+ fn test_add_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign, + { + let _1_max = Ratio::new(T::one(), T::max_value()); + let _2_max = Ratio::new(T::one() + T::one(), T::max_value()); + assert_eq!(_1_max.clone() + _1_max.clone(), _2_max); + assert_eq!( + { + let mut tmp = _1_max.clone(); + tmp += _1_max; + tmp + }, + _2_max + ); + } + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + test_add_typed_overflow::(); + } + + #[test] + fn test_sub() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a - b, c); + assert_eq!( + { + let mut x = a; + x -= b; + x + }, + c + ); + assert_eq!(to_big(a) - to_big(b), to_big(c)); + assert_eq!(a.checked_sub(&b), Some(c)); + assert_eq!(to_big(a).checked_sub(&to_big(b)), Some(to_big(c))); + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a - b, c); + assert_eq!( + { + let mut x = a; + x -= b; + x + }, + c + ); + } + + test(_1, _1_2, _1_2); + test(THREE_2, _1_2, _1); + test(_1, _NEG1_2, THREE_2); + test_assign(_1_2, 1, _NEG1_2); + } + + #[test] + fn test_sub_overflow() { + // compares Ratio(1, T::max_value()) - Ratio(1, T::max_value()) to T::zero() + // for each integer type. Previously, this calculation would overflow. 
+ fn test_sub_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign, + { + let _1_max: Ratio = Ratio::new(T::one(), T::max_value()); + assert!(T::is_zero(&(_1_max.clone() - _1_max.clone()).numer)); + { + let mut tmp: Ratio = _1_max.clone(); + tmp -= _1_max; + assert!(T::is_zero(&tmp.numer)); + } + } + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + test_sub_typed_overflow::(); + } + + #[test] + fn test_mul() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a * b, c); + assert_eq!( + { + let mut x = a; + x *= b; + x + }, + c + ); + assert_eq!(to_big(a) * to_big(b), to_big(c)); + assert_eq!(a.checked_mul(&b), Some(c)); + assert_eq!(to_big(a).checked_mul(&to_big(b)), Some(to_big(c))); + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a * b, c); + assert_eq!( + { + let mut x = a; + x *= b; + x + }, + c + ); + } + + test(_1, _1_2, _1_2); + test(_1_2, THREE_2, Ratio::new(3, 4)); + test(_1_2, _NEG1_2, Ratio::new(-1, 4)); + test_assign(_1_2, 2, _1); + } + + #[test] + fn test_mul_overflow() { + fn test_mul_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign + CheckedMul, + { + let two = T::one() + T::one(); + let three = T::one() + T::one() + T::one(); + + // 1/big * 2/3 = 1/(max/4*3), where big is max/2 + // make big = max/2, but also divisible by 2 + let big = T::max_value() / two.clone() / two.clone() * two.clone(); + let _1_big: Ratio = Ratio::new(T::one(), big.clone()); + let two_thirds: Ratio = Ratio::new(two.clone(), three.clone()); + assert_eq!(None, big.clone().checked_mul(&three.clone())); + let expected = Ratio::new(T::one(), big / two.clone() * three.clone()); + 
assert_eq!(expected.clone(), _1_big.clone() * two_thirds.clone()); + assert_eq!( + Some(expected.clone()), + _1_big.clone().checked_mul(&two_thirds.clone()) + ); + assert_eq!(expected, { + let mut tmp = _1_big; + tmp *= two_thirds; + tmp + }); + + // big/3 * 3 = big/1 + // make big = max/2, but make it indivisible by 3 + let big = T::max_value() / two / three.clone() * three.clone() + T::one(); + assert_eq!(None, big.clone().checked_mul(&three.clone())); + let bigthree = Ratio::new(big.clone(), three.clone()); + let expected = Ratio::new(big, T::one()); + assert_eq!(expected, bigthree.clone() * three.clone()); + assert_eq!(expected, { + let mut tmp = bigthree; + tmp *= three; + tmp + }); + } + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + test_mul_typed_overflow::(); + } + + #[test] + fn test_div() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a / b, c); + assert_eq!( + { + let mut x = a; + x /= b; + x + }, + c + ); + assert_eq!(to_big(a) / to_big(b), to_big(c)); + assert_eq!(a.checked_div(&b), Some(c)); + assert_eq!(to_big(a).checked_div(&to_big(b)), Some(to_big(c))); + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a / b, c); + assert_eq!( + { + let mut x = a; + x /= b; + x + }, + c + ); + } + + test(_1, _1_2, _2); + test(THREE_2, _1_2, _1 + _2); + test(_1, _NEG1_2, _NEG1_2 + _NEG1_2 + _NEG1_2 + _NEG1_2); + test_assign(_1, 2, _1_2); + } + + #[test] + fn test_div_overflow() { + fn test_div_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign + CheckedMul, + { + let two = T::one() + T::one(); + let three = T::one() + T::one() + T::one(); + + // 1/big / 3/2 = 1/(max/4*3), where big is max/2 + // big ~ 
max/2, and big is divisible by 2 + let big = T::max_value() / two.clone() / two.clone() * two.clone(); + assert_eq!(None, big.clone().checked_mul(&three.clone())); + let _1_big: Ratio = Ratio::new(T::one(), big.clone()); + let three_two: Ratio = Ratio::new(three.clone(), two.clone()); + let expected = Ratio::new(T::one(), big / two.clone() * three.clone()); + assert_eq!(expected.clone(), _1_big.clone() / three_two.clone()); + assert_eq!( + Some(expected.clone()), + _1_big.clone().checked_div(&three_two.clone()) + ); + assert_eq!(expected, { + let mut tmp = _1_big; + tmp /= three_two; + tmp + }); + + // 3/big / 3 = 1/big where big is max/2 + // big ~ max/2, and big is not divisible by 3 + let big = T::max_value() / two / three.clone() * three.clone() + T::one(); + assert_eq!(None, big.clone().checked_mul(&three.clone())); + let three_big = Ratio::new(three.clone(), big.clone()); + let expected = Ratio::new(T::one(), big); + assert_eq!(expected, three_big.clone() / three.clone()); + assert_eq!(expected, { + let mut tmp = three_big; + tmp /= three; + tmp + }); + } + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + test_div_typed_overflow::(); + } + + #[test] + fn test_rem() { + fn test(a: Rational64, b: Rational64, c: Rational64) { + assert_eq!(a % b, c); + assert_eq!( + { + let mut x = a; + x %= b; + x + }, + c + ); + assert_eq!(to_big(a) % to_big(b), to_big(c)) + } + fn test_assign(a: Rational64, b: i64, c: Rational64) { + assert_eq!(a % b, c); + assert_eq!( + { + let mut x = a; + x %= b; + x + }, + c + ); + } + + test(THREE_2, _1, _1_2); + test(THREE_2, _1_2, _0); + test(_5_2, THREE_2, _1); + test(_2, _NEG1_2, _0); + test(_1_2, _2, _1_2); + test_assign(THREE_2, 1, _1_2); + } + 
+ #[test] + fn test_rem_overflow() { + // tests that Ratio(1,2) % Ratio(1, T::max_value()) equals 0 + // for each integer type. Previously, this calculation would overflow. + fn test_rem_typed_overflow() + where + T: Integer + Bounded + Clone + Debug + NumAssign, + { + let two = T::one() + T::one(); + // value near to maximum, but divisible by two + let max_div2 = T::max_value() / two.clone() * two.clone(); + let _1_max: Ratio = Ratio::new(T::one(), max_div2); + let _1_two: Ratio = Ratio::new(T::one(), two); + assert!(T::is_zero(&(_1_two.clone() % _1_max.clone()).numer)); + { + let mut tmp: Ratio = _1_two; + tmp %= _1_max; + assert!(T::is_zero(&tmp.numer)); + } + } + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + test_rem_typed_overflow::(); + } + + #[test] + fn test_neg() { + fn test(a: Rational64, b: Rational64) { + assert_eq!(-a, b); + assert_eq!(-to_big(a), to_big(b)) + } + + test(_0, _0); + test(_1_2, _NEG1_2); + test(-_1, _1); + } + #[test] + #[allow(clippy::eq_op)] + fn test_zero() { + assert_eq!(_0 + _0, _0); + assert_eq!(_0 * _0, _0); + assert_eq!(_0 * _1, _0); + assert_eq!(_0 / _NEG1_2, _0); + assert_eq!(_0 - _0, _0); + } + #[test] + #[should_panic] + fn test_div_0() { + let _a = _1 / _0; + } + + #[test] + fn test_checked_failures() { + let big = Ratio::new(128u8, 1); + let small = Ratio::new(1, 128u8); + assert_eq!(big.checked_add(&big), None); + assert_eq!(small.checked_sub(&big), None); + assert_eq!(big.checked_mul(&big), None); + assert_eq!(small.checked_div(&big), None); + assert_eq!(_1.checked_div(&_0), None); + } + + #[test] + fn test_checked_zeros() { + assert_eq!(_0.checked_add(&_0), Some(_0)); + assert_eq!(_0.checked_sub(&_0), Some(_0)); + 
assert_eq!(_0.checked_mul(&_0), Some(_0)); + assert_eq!(_0.checked_div(&_0), None); + } + + #[test] + fn test_checked_min() { + assert_eq!(_MIN.checked_add(&_MIN), None); + assert_eq!(_MIN.checked_sub(&_MIN), Some(_0)); + assert_eq!(_MIN.checked_mul(&_MIN), None); + assert_eq!(_MIN.checked_div(&_MIN), Some(_1)); + assert_eq!(_0.checked_add(&_MIN), Some(_MIN)); + assert_eq!(_0.checked_sub(&_MIN), None); + assert_eq!(_0.checked_mul(&_MIN), Some(_0)); + assert_eq!(_0.checked_div(&_MIN), Some(_0)); + assert_eq!(_1.checked_add(&_MIN), Some(_MIN_P1)); + assert_eq!(_1.checked_sub(&_MIN), None); + assert_eq!(_1.checked_mul(&_MIN), Some(_MIN)); + assert_eq!(_1.checked_div(&_MIN), None); + assert_eq!(_MIN.checked_add(&_0), Some(_MIN)); + assert_eq!(_MIN.checked_sub(&_0), Some(_MIN)); + assert_eq!(_MIN.checked_mul(&_0), Some(_0)); + assert_eq!(_MIN.checked_div(&_0), None); + assert_eq!(_MIN.checked_add(&_1), Some(_MIN_P1)); + assert_eq!(_MIN.checked_sub(&_1), None); + assert_eq!(_MIN.checked_mul(&_1), Some(_MIN)); + assert_eq!(_MIN.checked_div(&_1), Some(_MIN)); + } + + #[test] + fn test_checked_max() { + assert_eq!(_MAX.checked_add(&_MAX), None); + assert_eq!(_MAX.checked_sub(&_MAX), Some(_0)); + assert_eq!(_MAX.checked_mul(&_MAX), None); + assert_eq!(_MAX.checked_div(&_MAX), Some(_1)); + assert_eq!(_0.checked_add(&_MAX), Some(_MAX)); + assert_eq!(_0.checked_sub(&_MAX), Some(_MIN_P1)); + assert_eq!(_0.checked_mul(&_MAX), Some(_0)); + assert_eq!(_0.checked_div(&_MAX), Some(_0)); + assert_eq!(_1.checked_add(&_MAX), None); + assert_eq!(_1.checked_sub(&_MAX), Some(-_MAX_M1)); + assert_eq!(_1.checked_mul(&_MAX), Some(_MAX)); + assert_eq!(_1.checked_div(&_MAX), Some(_MAX.recip())); + assert_eq!(_MAX.checked_add(&_0), Some(_MAX)); + assert_eq!(_MAX.checked_sub(&_0), Some(_MAX)); + assert_eq!(_MAX.checked_mul(&_0), Some(_0)); + assert_eq!(_MAX.checked_div(&_0), None); + assert_eq!(_MAX.checked_add(&_1), None); + assert_eq!(_MAX.checked_sub(&_1), Some(_MAX_M1)); + 
assert_eq!(_MAX.checked_mul(&_1), Some(_MAX)); + assert_eq!(_MAX.checked_div(&_1), Some(_MAX)); + } + + #[test] + fn test_checked_min_max() { + assert_eq!(_MIN.checked_add(&_MAX), Some(-_1)); + assert_eq!(_MIN.checked_sub(&_MAX), None); + assert_eq!(_MIN.checked_mul(&_MAX), None); + assert_eq!( + _MIN.checked_div(&_MAX), + Some(Ratio::new(_MIN.numer, _MAX.numer)) + ); + assert_eq!(_MAX.checked_add(&_MIN), Some(-_1)); + assert_eq!(_MAX.checked_sub(&_MIN), None); + assert_eq!(_MAX.checked_mul(&_MIN), None); + assert_eq!(_MAX.checked_div(&_MIN), None); + } + } + + #[test] + fn test_round() { + assert_eq!(_1THREE.ceil(), _1); + assert_eq!(_1THREE.floor(), _0); + assert_eq!(_1THREE.round(), _0); + assert_eq!(_1THREE.trunc(), _0); + + assert_eq!(_NEG1THREE.ceil(), _0); + assert_eq!(_NEG1THREE.floor(), -_1); + assert_eq!(_NEG1THREE.round(), _0); + assert_eq!(_NEG1THREE.trunc(), _0); + + assert_eq!(TWO_THIRDS.ceil(), _1); + assert_eq!(TWO_THIRDS.floor(), _0); + assert_eq!(TWO_THIRDS.round(), _1); + assert_eq!(TWO_THIRDS.trunc(), _0); + + assert_eq!(_NEG2THREE.ceil(), _0); + assert_eq!(_NEG2THREE.floor(), -_1); + assert_eq!(_NEG2THREE.round(), -_1); + assert_eq!(_NEG2THREE.trunc(), _0); + + assert_eq!(_1_2.ceil(), _1); + assert_eq!(_1_2.floor(), _0); + assert_eq!(_1_2.round(), _1); + assert_eq!(_1_2.trunc(), _0); + + assert_eq!(_NEG1_2.ceil(), _0); + assert_eq!(_NEG1_2.floor(), -_1); + assert_eq!(_NEG1_2.round(), -_1); + assert_eq!(_NEG1_2.trunc(), _0); + + assert_eq!(_1.ceil(), _1); + assert_eq!(_1.floor(), _1); + assert_eq!(_1.round(), _1); + assert_eq!(_1.trunc(), _1); + + // Overflow checks + + let _neg1 = Ratio::from_integer(-1); + let _large_rat1 = Ratio::new(i32::MAX, i32::MAX - 1); + let _large_rat2 = Ratio::new(i32::MAX - 1, i32::MAX); + let _large_rat3 = Ratio::new(i32::MIN + 2, i32::MIN + 1); + let _large_rat4 = Ratio::new(i32::MIN + 1, i32::MIN + 2); + let _large_rat5 = Ratio::new(i32::MIN + 2, i32::MAX); + let _large_rat6 = Ratio::new(i32::MAX, i32::MIN + 2); + 
let _large_rat7 = Ratio::new(1, i32::MIN + 1); + let _large_rat8 = Ratio::new(1, i32::MAX); + + assert_eq!(_large_rat1.round(), One::one()); + assert_eq!(_large_rat2.round(), One::one()); + assert_eq!(_large_rat3.round(), One::one()); + assert_eq!(_large_rat4.round(), One::one()); + assert_eq!(_large_rat5.round(), _neg1); + assert_eq!(_large_rat6.round(), _neg1); + assert_eq!(_large_rat7.round(), Zero::zero()); + assert_eq!(_large_rat8.round(), Zero::zero()); + } + + #[test] + fn test_fract() { + assert_eq!(_1.fract(), _0); + assert_eq!(_NEG1_2.fract(), _NEG1_2); + assert_eq!(_1_2.fract(), _1_2); + assert_eq!(THREE_2.fract(), _1_2); + } + + #[test] + fn test_recip() { + assert_eq!(_1 * _1.recip(), _1); + assert_eq!(_2 * _2.recip(), _1); + assert_eq!(_1_2 * _1_2.recip(), _1); + assert_eq!(THREE_2 * THREE_2.recip(), _1); + assert_eq!(_NEG1_2 * _NEG1_2.recip(), _1); + + assert_eq!(THREE_2.recip(), TWO_THIRDS); + assert_eq!(_NEG1_2.recip(), _NEG2); + assert_eq!(_NEG1_2.recip().denom(), &1); + } + + #[test] + #[should_panic(expected = "division by zero")] + fn test_recip_fail() { + let _a = Ratio::new(0, 1).recip(); + } + + #[test] + fn test_pow() { + fn test(r: Rational64, e: i32, expected: Rational64) { + assert_eq!(r.pow(e), expected); + assert_eq!(Pow::pow(r, e), expected); + assert_eq!(Pow::pow(r, &e), expected); + assert_eq!(Pow::pow(&r, e), expected); + assert_eq!(Pow::pow(&r, &e), expected); + #[cfg(feature = "num-bigint-generic")] + test_big(r, e, expected); + } + + #[cfg(feature = "num-bigint-generic")] + fn test_big(r: Rational64, e: i32, expected: Rational64) { + let r = BigRational::<4>::new_raw(r.numer.into(), r.denom.into()); + let expected = BigRational::new_raw(expected.numer.into(), expected.denom.into()); + assert_eq!(r.clone().pow(e), expected.clone()); + assert_eq!(Pow::pow(r.clone(), e), expected); + assert_eq!(Pow::pow(r.clone(), &e), expected); + assert_eq!(Pow::pow(&r, e), expected); + assert_eq!(Pow::pow(&r, &e), expected); + } + + test(_1_2, 
2, Ratio::new(1, 4)); + test(_1_2, -2, Ratio::new(4, 1)); + test(_1, 1, _1); + test(_1, i32::MAX, _1); + test(_1, i32::MIN, _1); + test(_NEG1_2, 2, _1_2.pow(2i32)); + test(_NEG1_2, 3, -_1_2.pow(3i32)); + test(THREE_2, 0, _1); + test(THREE_2, -1, THREE_2.recip()); + test(THREE_2, 3, Ratio::new(27, 8)); + } + + #[test] + #[cfg(feature = "std")] + fn test_to_from_str() { + use std::string::{String, ToString}; + fn test(r: Rational64, s: String) { + assert_eq!(FromStr::from_str(&s), Ok(r)); + assert_eq!(r.to_string(), s); + } + test(_1, "1".to_string()); + test(_0, "0".to_string()); + test(_1_2, "1/2".to_string()); + test(THREE_2, "3/2".to_string()); + test(_2, "2".to_string()); + test(_NEG1_2, "-1/2".to_string()); + } + #[test] + fn test_from_str_fail() { + fn test(s: &str) { + let rational: Result = FromStr::from_str(s); + assert!(rational.is_err()); + } + + let xs = ["0 /1", "abc", "", "1/", "--1/2", "3/2/1", "1/0"]; + for &s in xs.iter() { + test(s); + } + } + + #[cfg(feature = "num-bigint-generic")] + #[test] + fn test_from_float() { + use num_traits::float::FloatCore; + fn test(given: T, (numer, denom): (&str, &str)) { + let ratio: BigRational = Ratio::from_float(given).unwrap(); + assert_eq!( + ratio, + Ratio::new( + FromStr::from_str(numer).unwrap(), + FromStr::from_str(denom).unwrap() + ) + ); + } + + // f32 + test(core::f32::consts::PI, ("13176795", "4194304")); + test(2f32.powf(100.), ("1267650600228229401496703205376", "1")); + test( + -(2f32.powf(100.)), + ("-1267650600228229401496703205376", "1"), + ); + test( + 1.0 / 2f32.powf(100.), + ("1", "1267650600228229401496703205376"), + ); + test(684_729.5_f32, ("1369459", "2")); + test(-8_573.592_f32, ("-4389679", "512")); + + // f64 + test( + core::f64::consts::PI, + ("884279719003555", "281474976710656"), + ); + test(2f64.powf(100.), ("1267650600228229401496703205376", "1")); + test( + -(2f64.powf(100.)), + ("-1267650600228229401496703205376", "1"), + ); + test(684729.48391f64, ("367611342500051", 
"536870912")); + test(-8573.5918555f64, ("-4713381968463931", "549755813888")); + test( + 1.0 / 2f64.powf(100.), + ("1", "1267650600228229401496703205376"), + ); + } + + #[cfg(feature = "num-bigint-generic")] + #[test] + fn test_from_float_fail() { + use core::{f32, f64}; + + assert_eq!(Ratio::from_float(f32::NAN), None); + assert_eq!(Ratio::from_float(f32::INFINITY), None); + assert_eq!(Ratio::from_float(f32::NEG_INFINITY), None); + assert_eq!(Ratio::from_float(f64::NAN), None); + assert_eq!(Ratio::from_float(f64::INFINITY), None); + assert_eq!(Ratio::from_float(f64::NEG_INFINITY), None); + } + + #[test] + fn test_signed() { + assert_eq!(_NEG1_2.abs(), _1_2); + assert_eq!(THREE_2.abs_sub(&_1_2), _1); + assert_eq!(_1_2.abs_sub(&THREE_2), Zero::zero()); + assert_eq!(_1_2.signum(), One::one()); + assert_eq!(_NEG1_2.signum(), ->::one()); + assert_eq!(_0.signum(), Zero::zero()); + assert!(_NEG1_2.is_negative()); + assert!(_1_NEG2.is_negative()); + assert!(!_NEG1_2.is_positive()); + assert!(!_1_NEG2.is_positive()); + assert!(_1_2.is_positive()); + assert!(_NEG1_NEG2.is_positive()); + assert!(!_1_2.is_negative()); + assert!(!_NEG1_NEG2.is_negative()); + assert!(!_0.is_positive()); + assert!(!_0.is_negative()); + } + + #[test] + #[cfg(feature = "std")] + fn test_hash() { + assert!(crate::hash(&_0) != crate::hash(&_1)); + assert!(crate::hash(&_0) != crate::hash(&THREE_2)); + + // a == b -> hash(a) == hash(b) + let a = Rational64::new_raw(4, 2); + let b = Rational64::new_raw(6, 3); + assert_eq!(a, b); + assert_eq!(crate::hash(&a), crate::hash(&b)); + + let a = Rational64::new_raw(123456789, 1000); + let b = Rational64::new_raw(123456789 * 5, 5000); + assert_eq!(a, b); + assert_eq!(crate::hash(&a), crate::hash(&b)); + } + + #[test] + fn test_into_pair() { + assert_eq!((0, 1), _0.into()); + assert_eq!((-2, 1), _NEG2.into()); + assert_eq!((1, -2), _1_NEG2.into()); + } + + #[test] + fn test_from_pair() { + assert_eq!(_0, Ratio::from((0, 1))); + assert_eq!(_1, Ratio::from((1, 
1))); + assert_eq!(_NEG2, Ratio::from((-2, 1))); + assert_eq!(_1_NEG2, Ratio::from((1, -2))); + } + + #[test] + fn ratio_iter_sum() { + // generic function to assure the iter method can be called + // for any Iterator with Item = Ratio or Ratio<&impl Integer> + fn iter_sums(slice: &[Ratio]) -> [Ratio; 3] { + let mut manual_sum = Ratio::new(T::zero(), T::one()); + for ratio in slice { + manual_sum = manual_sum + ratio; + } + [manual_sum, slice.iter().sum(), slice.iter().cloned().sum()] + } + // collect into array so test works on no_std + let mut nums = [Ratio::new(0, 1); 1000]; + for (i, r) in (0..1000).map(|n| Ratio::new(n, 500)).enumerate() { + nums[i] = r; + } + let sums = iter_sums(&nums[..]); + assert_eq!(sums[0], sums[1]); + assert_eq!(sums[0], sums[2]); + } + + #[test] + fn ratio_iter_product() { + // generic function to assure the iter method can be called + // for any Iterator with Item = Ratio or Ratio<&impl Integer> + fn iter_products(slice: &[Ratio]) -> [Ratio; 3] { + let mut manual_prod = Ratio::new(T::one(), T::one()); + for ratio in slice { + manual_prod = manual_prod * ratio; + } + [ + manual_prod, + slice.iter().product(), + slice.iter().cloned().product(), + ] + } + + // collect into array so test works on no_std + let mut nums = [Ratio::new(0, 1); 1000]; + for (i, r) in (0..1000).map(|n| Ratio::new(n, 500)).enumerate() { + nums[i] = r; + } + let products = iter_products(&nums[..]); + assert_eq!(products[0], products[1]); + assert_eq!(products[0], products[2]); + } + + #[test] + fn test_num_zero() { + let zero = Rational64::zero(); + assert!(zero.is_zero()); + + let mut r = Rational64::new(123, 456); + assert!(!r.is_zero()); + assert_eq!(r + zero, r); + + r.set_zero(); + assert!(r.is_zero()); + } + + #[test] + fn test_num_one() { + let one = Rational64::one(); + assert!(one.is_one()); + + let mut r = Rational64::new(123, 456); + assert!(!r.is_one()); + assert_eq!(r * one, r); + + r.set_one(); + assert!(r.is_one()); + } + + #[test] + fn 
test_const() { + const N: Ratio = Ratio::new_raw(123, 456); + const N_NUMER: &i32 = N.numer(); + const N_DENOM: &i32 = N.denom(); + + assert_eq!(N_NUMER, &123); + assert_eq!(N_DENOM, &456); + + let r = N.reduced(); + assert_eq!(r.numer(), &(123 / 3)); + assert_eq!(r.denom(), &(456 / 3)); + } + + #[test] + fn test_ratio_to_i64() { + assert_eq!(5, Rational64::new(70, 14).to_u64().unwrap()); + assert_eq!(-3, Rational64::new(-31, 8).to_i64().unwrap()); + assert_eq!(None, Rational64::new(-31, 8).to_u64()); + } + + #[test] + #[cfg(feature = "num-bigint-generic")] + fn test_ratio_to_i128() { + assert_eq!( + 1i128 << 70, + Ratio::::new(1i128 << 77, 1i128 << 7) + .to_i128() + .unwrap() + ); + } + + #[test] + #[cfg(feature = "num-bigint-generic")] + fn test_big_ratio_to_f64() { + assert_eq!( + BigRational::new( + "1234567890987654321234567890987654321234567890" + .parse() + .unwrap(), + "3".parse().unwrap() + ) + .to_f64(), + Some(411522630329218100000000000000000000000000000f64) + ); + assert_eq!( + BigRational::from_float(5e-324).unwrap().to_f64(), + Some(5e-324) + ); + assert_eq!( + // subnormal + BigRational::new(BigInt::one(), BigInt::one() << 1050).to_f64(), + Some(2.0f64.powi(-50).powi(21)) + ); + assert_eq!( + // definite underflow + BigRational::new(BigInt::one(), BigInt::one() << 1100).to_f64(), + Some(0.0) + ); + assert_eq!( + BigRational::from(BigInt::one() << 1050).to_f64(), + Some(f64::INFINITY) + ); + assert_eq!( + BigRational::from((-BigInt::one()) << 1050).to_f64(), + Some(f64::NEG_INFINITY) + ); + assert_eq!( + BigRational::new( + "1234567890987654321234567890".parse().unwrap(), + "987654321234567890987654321".parse().unwrap() + ) + .to_f64(), + Some(1.2499999893125f64) + ); + assert_eq!( + BigRational::new_raw(BigInt::one(), BigInt::zero()).to_f64(), + Some(f64::INFINITY) + ); + assert_eq!( + BigRational::new_raw(-BigInt::one(), BigInt::zero()).to_f64(), + Some(f64::NEG_INFINITY) + ); + assert_eq!( + BigRational::new_raw(BigInt::zero(), 
BigInt::zero()).to_f64(), + None + ); + } + + #[test] + fn test_ratio_to_f64() { + assert_eq!(Ratio::::new(1, 2).to_f64(), Some(0.5f64)); + assert_eq!(Rational64::new(1, 2).to_f64(), Some(0.5f64)); + assert_eq!(Rational64::new(1, -2).to_f64(), Some(-0.5f64)); + assert_eq!(Rational64::new(0, 2).to_f64(), Some(0.0f64)); + assert_eq!(Rational64::new(0, -2).to_f64(), Some(-0.0f64)); + assert_eq!(Rational64::new((1 << 57) + 1, 1 << 54).to_f64(), Some(8f64)); + assert_eq!( + Rational64::new((1 << 52) + 1, 1 << 52).to_f64(), + Some(1.0000000000000002f64), + ); + assert_eq!( + Rational64::new((1 << 60) + (1 << 8), 1 << 60).to_f64(), + Some(1.0000000000000002f64), + ); + assert_eq!(Ratio::::new_raw(1, 0).to_f64(), Some(f64::INFINITY)); + assert_eq!( + Ratio::::new_raw(-1, 0).to_f64(), + Some(f64::NEG_INFINITY) + ); + assert_eq!(Ratio::::new_raw(0, 0).to_f64(), None); + } + + #[test] + fn test_ldexp() { + assert_eq!(ldexp(1.0, 0), 1.0); + assert_eq!(ldexp(1.0, 1), 2.0); + assert_eq!(ldexp(0.0, 1), 0.0); + assert_eq!(ldexp(-0.0, 1), -0.0); + + // Cases where ldexp is equivalent to multiplying by 2^exp because there's no over- or + // underflow. 
+ assert_eq!(ldexp(3.5, 5), 3.5 * 2f64.powi(5)); + assert_eq!(ldexp(1.0, f64::MAX_EXP - 1), 2f64.powi(f64::MAX_EXP - 1)); + assert_eq!( + ldexp(2.77, f64::MIN_EXP + 3), + 2.77 * 2f64.powi(f64::MIN_EXP + 3) + ); + + // Case where initial value is subnormal + assert_eq!(ldexp(5e-324, 4), 5e-324 * 2f64.powi(4)); + assert_eq!(ldexp(5e-324, 200), 5e-324 * 2f64.powi(200)); + + // Near underflow (2^exp is too small to represent, but not x*2^exp) + assert_eq!(ldexp(4.0, f64::MIN_EXP - 3), 2f64.powi(f64::MIN_EXP - 1)); + + // Near overflow + assert_eq!(ldexp(0.125, f64::MAX_EXP + 3), 2f64.powi(f64::MAX_EXP)); + + // Overflow and underflow cases + assert_eq!(ldexp(1.0, f64::MIN_EXP - 54), 0.0); + assert_eq!(ldexp(-1.0, f64::MIN_EXP - 54), -0.0); + assert_eq!(ldexp(1.0, f64::MAX_EXP), f64::INFINITY); + assert_eq!(ldexp(-1.0, f64::MAX_EXP), f64::NEG_INFINITY); + + // Special values + assert_eq!(ldexp(f64::INFINITY, 1), f64::INFINITY); + assert_eq!(ldexp(f64::NEG_INFINITY, 1), f64::NEG_INFINITY); + assert!(ldexp(f64::NAN, 1).is_nan()); + } +} diff --git a/vendor/num-rational-generic/src/pow.rs b/vendor/num-rational-generic/src/pow.rs new file mode 100644 index 000000000..7df37a04c --- /dev/null +++ b/vendor/num-rational-generic/src/pow.rs @@ -0,0 +1,173 @@ +use crate::Ratio; + +use core::cmp; +use num_integer::Integer; +use num_traits::{One, Pow}; + +macro_rules! 
pow_unsigned_impl { + (@ $exp:ty) => { + type Output = Ratio; + #[inline] + fn pow(self, expon: $exp) -> Ratio { + Ratio::new_raw(self.numer.pow(expon), self.denom.pow(expon)) + } + }; + ($exp:ty) => { + impl> Pow<$exp> for Ratio { + pow_unsigned_impl!(@ $exp); + } + impl<'a, T: Clone + Integer> Pow<$exp> for &'a Ratio + where + &'a T: Pow<$exp, Output = T>, + { + pow_unsigned_impl!(@ $exp); + } + impl<'b, T: Clone + Integer + Pow<$exp, Output = T>> Pow<&'b $exp> for Ratio { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b $exp) -> Ratio { + Pow::pow(self, *expon) + } + } + impl<'a, 'b, T: Clone + Integer> Pow<&'b $exp> for &'a Ratio + where + &'a T: Pow<$exp, Output = T>, + { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b $exp) -> Ratio { + Pow::pow(self, *expon) + } + } + }; +} +pow_unsigned_impl!(u8); +pow_unsigned_impl!(u16); +pow_unsigned_impl!(u32); +pow_unsigned_impl!(u64); +pow_unsigned_impl!(u128); +pow_unsigned_impl!(usize); + +macro_rules! pow_signed_impl { + (@ &'b BigInt, BigUint) => { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b BigInt) -> Ratio { + match expon.sign() { + Sign::NoSign => One::one(), + Sign::Minus => { + Pow::pow(self, expon.magnitude()).into_recip() + } + Sign::Plus => Pow::pow(self, expon.magnitude()), + } + } + }; + (@ $exp:ty, $unsigned:ty) => { + type Output = Ratio; + #[inline] + fn pow(self, expon: $exp) -> Ratio { + match expon.cmp(&0) { + cmp::Ordering::Equal => One::one(), + cmp::Ordering::Less => { + let expon = expon.wrapping_abs() as $unsigned; + Pow::pow(self, expon).into_recip() + } + cmp::Ordering::Greater => Pow::pow(self, expon as $unsigned), + } + } + }; + ($exp:ty, $unsigned:ty) => { + impl> Pow<$exp> for Ratio { + pow_signed_impl!(@ $exp, $unsigned); + } + impl<'a, T: Clone + Integer> Pow<$exp> for &'a Ratio + where + &'a T: Pow<$unsigned, Output = T>, + { + pow_signed_impl!(@ $exp, $unsigned); + } + impl<'b, T: Clone + Integer + Pow<$unsigned, Output = T>> Pow<&'b $exp> 
for Ratio { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b $exp) -> Ratio { + Pow::pow(self, *expon) + } + } + impl<'a, 'b, T: Clone + Integer> Pow<&'b $exp> for &'a Ratio + where + &'a T: Pow<$unsigned, Output = T>, + { + type Output = Ratio; + #[inline] + fn pow(self, expon: &'b $exp) -> Ratio { + Pow::pow(self, *expon) + } + } + }; +} +pow_signed_impl!(i8, u8); +pow_signed_impl!(i16, u16); +pow_signed_impl!(i32, u32); +pow_signed_impl!(i64, u64); +pow_signed_impl!(i128, u128); +pow_signed_impl!(isize, usize); + +#[cfg(feature = "num-bigint-generic")] +mod bigint { + use super::*; + use num_bigint_generic::{BigInt, BigUint, Sign}; + + impl Pow<&'b BigUint, Output = T>> Pow for Ratio { + type Output = Ratio; + #[inline] + fn pow(self, expon: BigUint) -> Ratio { + Pow::pow(self, &expon) + } + } + impl<'a, T: Clone + Integer> Pow for &'a Ratio + where + &'a T: for<'b> Pow<&'b BigUint, Output = T>, + { + type Output = Ratio; + #[inline] + fn pow(self, expon: BigUint) -> Ratio { + Pow::pow(self, &expon) + } + } + impl<'b, T: Clone + Integer + Pow<&'b BigUint, Output = T>> Pow<&'b BigUint> for Ratio { + pow_unsigned_impl!(@ &'b BigUint); + } + impl<'a, 'b, T: Clone + Integer> Pow<&'b BigUint> for &'a Ratio + where + &'a T: Pow<&'b BigUint, Output = T>, + { + pow_unsigned_impl!(@ &'b BigUint); + } + + impl Pow<&'b BigUint, Output = T>> Pow for Ratio { + type Output = Ratio; + #[inline] + fn pow(self, expon: BigInt) -> Ratio { + Pow::pow(self, &expon) + } + } + impl<'a, T: Clone + Integer> Pow for &'a Ratio + where + &'a T: for<'b> Pow<&'b BigUint, Output = T>, + { + type Output = Ratio; + #[inline] + fn pow(self, expon: BigInt) -> Ratio { + Pow::pow(self, &expon) + } + } + impl<'b, T: Clone + Integer + Pow<&'b BigUint, Output = T>> Pow<&'b BigInt> for Ratio { + pow_signed_impl!(@ &'b BigInt, BigUint); + } + impl<'a, 'b, T: Clone + Integer> Pow<&'b BigInt> for &'a Ratio + where + &'a T: Pow<&'b BigUint, Output = T>, + { + pow_signed_impl!(@ &'b BigInt, 
BigUint); + } +} diff --git a/vendor/num-rational-generic/test_full.sh b/vendor/num-rational-generic/test_full.sh new file mode 100755 index 000000000..dbb963a83 --- /dev/null +++ b/vendor/num-rational-generic/test_full.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +set -e + +CRATE=num-rational +MSRV=1.60 + +get_rust_version() { + local array=("$(rustc --version)"); + echo "${array[1]}"; + return 0; +} +RUST_VERSION=$(get_rust_version) + +check_version() { + IFS=. read -ra rust <<< "$RUST_VERSION" + IFS=. read -ra want <<< "$1" + [[ "${rust[0]}" -gt "${want[0]}" || + ( "${rust[0]}" -eq "${want[0]}" && + "${rust[1]}" -ge "${want[1]}" ) + ]] +} + +echo "Testing $CRATE on rustc $RUST_VERSION" +if ! check_version $MSRV ; then + echo "The minimum for $CRATE is rustc $MSRV" + exit 1 +fi + +STD_FEATURES=(num-bigint-std serde) +NO_STD_FEATURES=(num-bigint serde) +echo "Testing supported features: ${STD_FEATURES[*]}" +echo " no_std supported features: ${NO_STD_FEATURES[*]}" + +set -x + +# test the default with std +cargo build +cargo test + +# test each isolated feature with std +for feature in "${STD_FEATURES[@]}"; do + cargo build --no-default-features --features="std $feature" + cargo test --no-default-features --features="std $feature" +done + +# test all supported features with std +cargo build --no-default-features --features="std ${STD_FEATURES[*]}" +cargo test --no-default-features --features="std ${STD_FEATURES[*]}" + + +# test minimal `no_std` +cargo build --no-default-features +cargo test --no-default-features + +# test each isolated feature without std +for feature in "${NO_STD_FEATURES[@]}"; do + cargo build --no-default-features --features="$feature" + cargo test --no-default-features --features="$feature" +done + +# test all supported features without std +cargo build --no-default-features --features="${NO_STD_FEATURES[*]}" +cargo test --no-default-features --features="${NO_STD_FEATURES[*]}" + +# make sure benchmarks can be built and sanity-tested +if rustc --version | 
grep -q nightly; then + cargo test --manifest-path ci/benchmarks/Cargo.toml +fi