From 5314217e02378fa17d57cd0c832ff90b6bcbf431 Mon Sep 17 00:00:00 2001 From: Trevor McKay Date: Mon, 2 Jun 2025 13:25:49 -0400 Subject: [PATCH 1/6] allow long client version in service version check (#61) Only consider the first two fields in the client version. Without this change, the service reports that the version has a bad format, which is unnecessary in this case. --- python/cuopt_server/cuopt_server/utils/job_queue.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/cuopt_server/cuopt_server/utils/job_queue.py b/python/cuopt_server/cuopt_server/utils/job_queue.py index dfeb98ae4..1c4996742 100755 --- a/python/cuopt_server/cuopt_server/utils/job_queue.py +++ b/python/cuopt_server/cuopt_server/utils/job_queue.py @@ -109,7 +109,7 @@ def check_client_version(client_vers): if client_vers == "custom": return [] cv = client_vers.split(".") - if len(cv) != 3: + if len(cv) < 2: logging.warn("Client version missing or bad format") return [ f"Client version missing or not the current format. " @@ -118,7 +118,7 @@ def check_client_version(client_vers): "if this is a custom client." ] else: - cmajor, cminor, _ = cv + cmajor, cminor = cv[:2] matches = (cmajor, cminor) == (major, minor) if not matches: logging.warn(f"Client version {cmajor}.{cminor} does not match") From 09aa8c955afd072cf0b683488b426df0a39e67ad Mon Sep 17 00:00:00 2001 From: Chris Maes Date: Mon, 2 Jun 2025 18:17:36 -0700 Subject: [PATCH 2/6] Fix obj constant on max. Fix undefined memory access at root (#52) This PR fixes two bugs discovered while testing the AMPL interface: 1) We weren't correctly calculating the objective constant when presolve eliminated variables on a maximization problem. Thanks to Alice for the quick fix! 2) We had an undefined memory access on the number of nodes and simplex iterations when solving a MIP at the root node. --------- Co-authored-by: Ramakrishnap <42624703+rgsl888prabhu@users.noreply.github.com> --- cpp/src/dual_simplex/branch_and_bound.cpp | 6 ++++-- cpp/src/linear_programming/utilities/logger_init.hpp | 2 ++ cpp/src/mip/presolve/trivial_presolve.cuh | 1 + 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/cpp/src/dual_simplex/branch_and_bound.cpp b/cpp/src/dual_simplex/branch_and_bound.cpp index 1b8a307d2..f1075ed55 100644 --- a/cpp/src/dual_simplex/branch_and_bound.cpp +++ b/cpp/src/dual_simplex/branch_and_bound.cpp @@ -463,8 +463,10 @@ mip_status_t branch_and_bound_t::solve(mip_solution_t& solut global_variables::mutex_upper.unlock(); // We should be done here uncrush_primal_solution(original_problem, original_lp, incumbent.x, solution.x); - solution.objective = incumbent.objective; - solution.lower_bound = lower_bound; + solution.objective = incumbent.objective; + solution.lower_bound = lower_bound; + solution.nodes_explored = 0; + solution.simplex_iterations = root_relax_soln.iterations; settings.log.printf("Optimal solution found at root node. Objective %.16e.
Time %.2f.\n", compute_user_objective(original_lp, root_objective), toc(start_time)); diff --git a/cpp/src/linear_programming/utilities/logger_init.hpp b/cpp/src/linear_programming/utilities/logger_init.hpp index 0e847065d..4d93f8cb6 100644 --- a/cpp/src/linear_programming/utilities/logger_init.hpp +++ b/cpp/src/linear_programming/utilities/logger_init.hpp @@ -37,6 +37,8 @@ class init_logger_t { // TODO save the defaul sink and restore it cuopt::default_logger().sinks().push_back( std::make_shared(log_file, true)); + cuopt::default_logger().set_pattern("%v"); + cuopt::default_logger().flush_on(rapids_logger::level_enum::info); } } ~init_logger_t() { cuopt::reset_default_logger(); } diff --git a/cpp/src/mip/presolve/trivial_presolve.cuh b/cpp/src/mip/presolve/trivial_presolve.cuh index d7cfa6dc1..e201f401e 100644 --- a/cpp/src/mip/presolve/trivial_presolve.cuh +++ b/cpp/src/mip/presolve/trivial_presolve.cuh @@ -234,6 +234,7 @@ void update_from_csr(problem_t& pb) // update objective_offset pb.presolve_data.objective_offset += + pb.presolve_data.objective_scaling_factor * thrust::transform_reduce(handle_ptr->get_thrust_policy(), thrust::counting_iterator(0), thrust::counting_iterator(pb.n_variables), From 6853c5e9c6db080bd3864fd1fbfbe95a3b7a5ae9 Mon Sep 17 00:00:00 2001 From: Nicolas Blin <31096601+Kh4ster@users.noreply.github.com> Date: Tue, 3 Jun 2025 18:10:10 +0200 Subject: [PATCH 3/6] Pdlp fix batch cuda graph (#68) This PR aims at fixing the invalid operation while there is a graph capture we sometimes see when using batch solve. Solution is to use a regular instead of a non-blocking stream to make sure that if any operation (like a cudaFree from Thrust) is being launched on the default stream, it will wait for all other operations on other stream to finish first, preventing any cudaMalloc/Free while another stream might be doing a CUDA Graph capture. --- .../cuopt/linear_programming/utilities/cython_solve.hpp | 3 ++- cpp/src/linear_programming/utilities/cython_solve.cu | 9 ++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/cpp/include/cuopt/linear_programming/utilities/cython_solve.hpp b/cpp/include/cuopt/linear_programming/utilities/cython_solve.hpp index de4d9cf66..cc30ff7a0 100644 --- a/cpp/include/cuopt/linear_programming/utilities/cython_solve.hpp +++ b/cpp/include/cuopt/linear_programming/utilities/cython_solve.hpp @@ -103,7 +103,8 @@ struct solver_ret_t { // Wrapper for solve to expose the API to cython. 
std::unique_ptr call_solve(cuopt::mps_parser::data_model_view_t*, - linear_programming::solver_settings_t*); + linear_programming::solver_settings_t*, + unsigned int flags = cudaStreamNonBlocking); std::pair>, double> call_batch_solve( std::vector*>, diff --git a/cpp/src/linear_programming/utilities/cython_solve.cu b/cpp/src/linear_programming/utilities/cython_solve.cu index b333e99a4..2b784beeb 100644 --- a/cpp/src/linear_programming/utilities/cython_solve.cu +++ b/cpp/src/linear_programming/utilities/cython_solve.cu @@ -208,12 +208,13 @@ mip_ret_t call_solve_mip( std::unique_ptr call_solve( cuopt::mps_parser::data_model_view_t* data_model, - cuopt::linear_programming::solver_settings_t* solver_settings) + cuopt::linear_programming::solver_settings_t* solver_settings, + unsigned int flags) { raft::common::nvtx::range fun_scope("Call Solve"); cudaStream_t stream; - RAFT_CUDA_TRY(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); + RAFT_CUDA_TRY(cudaStreamCreateWithFlags(&stream, flags)); const raft::handle_t handle_{stream}; auto op_problem = data_model_to_optimization_problem(data_model, solver_settings, &handle_); @@ -283,9 +284,11 @@ std::pair>, double> call_batch_solve( solver_settings->set_parameter(CUOPT_METHOD, CUOPT_METHOD_PDLP); } + // Use a default stream instead of a non-blocking to avoid invalid operations while some CUDA + // Graph might be capturing in another stream #pragma omp parallel for num_threads(max_thread) for (std::size_t i = 0; i < size; ++i) - list[i] = std::move(call_solve(data_models[i], solver_settings)); + list[i] = std::move(call_solve(data_models[i], solver_settings, cudaStreamDefault)); auto end = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast(end - start_solver); From 8939e34a3000896e23c8ec3da7994cbb4a635639 Mon Sep 17 00:00:00 2001 From: Chris Maes Date: Wed, 4 Jun 2025 12:04:20 -0700 Subject: [PATCH 4/6] Write out mps file containing user problem. Useful for debugging (#66) This PR adds the string parameter CUOPT_USER_PROBLEM_FILE = "user_problem_file". By default the parameter is the empty string "". If the parameter is set by the user to something other than the empty string, we will write out an MPS file containing the LP or MIP. This is very useful when trying to debug interfaces like CVXPY, where the layer between CVXPY and cuOpt may have performed transformations on the user problem. This allows us to reproduce failures on the engine side directly from the MPS file without needing Python scripts.
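For illustration, a minimal sketch of how the new parameter might be set from the Python layer (hedged: the module paths and the `SolverSettings.set_parameter` call are assumptions suggested by the solver_parameters.pyx change in this PR, not taken verbatim from it):

```python
# Hedged sketch, not from this diff: assumes solver_parameters re-exports the
# new CUOPT_USER_PROBLEM_FILE constant ("user_problem_file") and that
# SolverSettings.set_parameter mirrors cuOptSetParameter from the C API.
from cuopt.linear_programming.solver_settings import SolverSettings
from cuopt.linear_programming.solver.solver_parameters import (
    CUOPT_USER_PROBLEM_FILE,
)

settings = SolverSettings()
# Dump the problem exactly as cuOpt received it, so an engine-side failure
# can be reproduced from the MPS file without the CVXPY layer in between.
settings.set_parameter(CUOPT_USER_PROBLEM_FILE, "user_problem.mps")
```

Solving with these settings should then leave user_problem.mps in the working directory, ready to feed back into the solver or the MPS parser.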
--- .../cuopt/linear_programming/constants.h | 3 +- .../mip/solver_settings.hpp | 1 + .../pdlp/solver_settings.hpp | 1 + cpp/src/linear_programming/solve.cu | 5 +++ cpp/src/math_optimization/solver_settings.cu | 6 ++- cpp/src/mip/problem/write_mps.cu | 6 ++- cpp/src/mip/solve.cu | 4 ++ cpp/tests/mip/doc_example_test.cu | 45 +++++++++++++++++++ .../source/cuopt-c/lp-milp/lp-milp-c-api.rst | 6 ++- docs/cuopt/source/lp-milp-settings.rst | 11 +++++ .../solver/solver_parameters.pyx | 4 ++ .../linear_programming/test_lp_solver.py | 38 ++++++++++++++++ 12 files changed, 123 insertions(+), 7 deletions(-) diff --git a/cpp/include/cuopt/linear_programming/constants.h b/cpp/include/cuopt/linear_programming/constants.h index 4cf856bb8..ca4377de9 100644 --- a/cpp/include/cuopt/linear_programming/constants.h +++ b/cpp/include/cuopt/linear_programming/constants.h @@ -57,8 +57,9 @@ #define CUOPT_MIP_RELATIVE_GAP "mip_relative_gap" #define CUOPT_MIP_HEURISTICS_ONLY "mip_heuristics_only" #define CUOPT_MIP_SCALING "mip_scaling" -#define CUOPT_SOL_FILE "solution_file" +#define CUOPT_SOLUTION_FILE "solution_file" #define CUOPT_NUM_CPU_THREADS "num_cpu_threads" +#define CUOPT_USER_PROBLEM_FILE "user_problem_file" /* @brief LP/MIP termination status constants */ #define CUOPT_TERIMINATION_STATUS_NO_TERMINATION 0 diff --git a/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp b/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp index 850248b5f..71afb366d 100644 --- a/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp +++ b/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp @@ -92,6 +92,7 @@ class mip_solver_settings_t { bool log_to_console = true; std::string log_file; std::string sol_file; + std::string user_problem_file; /** Initial primal solution */ std::shared_ptr> initial_solution_; diff --git a/cpp/include/cuopt/linear_programming/pdlp/solver_settings.hpp b/cpp/include/cuopt/linear_programming/pdlp/solver_settings.hpp index 8631c857e..9dcccf7a7 100644 --- a/cpp/include/cuopt/linear_programming/pdlp/solver_settings.hpp +++ b/cpp/include/cuopt/linear_programming/pdlp/solver_settings.hpp @@ -200,6 +200,7 @@ class pdlp_solver_settings_t { bool log_to_console{true}; std::string log_file{""}; std::string sol_file{""}; + std::string user_problem_file{""}; bool per_constraint_residual{false}; bool crossover{false}; bool save_best_primal_so_far{false}; diff --git a/cpp/src/linear_programming/solve.cu b/cpp/src/linear_programming/solve.cu index 12859a118..ed2a30bcf 100644 --- a/cpp/src/linear_programming/solve.cu +++ b/cpp/src/linear_programming/solve.cu @@ -569,6 +569,11 @@ optimization_problem_solution_t solve_lp(optimization_problem_t::solver_settings_t() : pdlp_settings(), mip_settings string_parameters = { {CUOPT_LOG_FILE, &mip_settings.log_file, ""}, {CUOPT_LOG_FILE, &pdlp_settings.log_file, ""}, - {CUOPT_SOL_FILE, &mip_settings.sol_file, ""}, - {CUOPT_SOL_FILE, &pdlp_settings.sol_file, ""} + {CUOPT_SOLUTION_FILE, &mip_settings.sol_file, ""}, + {CUOPT_SOLUTION_FILE, &pdlp_settings.sol_file, ""}, + {CUOPT_USER_PROBLEM_FILE, &mip_settings.user_problem_file, ""}, + {CUOPT_USER_PROBLEM_FILE, &pdlp_settings.user_problem_file, ""} }; // clang-format on } diff --git a/cpp/src/mip/problem/write_mps.cu b/cpp/src/mip/problem/write_mps.cu index 31315fc9c..cca3cd5b1 100644 --- a/cpp/src/mip/problem/write_mps.cu +++ b/cpp/src/mip/problem/write_mps.cu @@ -54,6 +54,8 @@ void problem_t::write_as_mps(const std::string& path) // NAME section mps_file << "NAME " << 
original_problem_ptr->get_problem_name() << "\n"; + if (maximize) { mps_file << "OBJSENSE\n MAXIMIZE\n"; } + // ROWS section mps_file << "ROWS\n"; mps_file << " N " << (objective_name.empty() ? "OBJ" : objective_name) << "\n"; @@ -86,7 +88,7 @@ void problem_t::write_as_mps(const std::string& path) // Write objective coefficient if non-zero if (h_obj_coeffs[j] != 0.0) { mps_file << " " << col_name << " " << (objective_name.empty() ? "OBJ" : objective_name) - << " " << h_obj_coeffs[j] << "\n"; + << " " << (maximize ? -h_obj_coeffs[j] : h_obj_coeffs[j]) << "\n"; } // Write constraint coefficients @@ -146,7 +148,7 @@ void problem_t::write_as_mps(const std::string& path) h_var_ub[j] == std::numeric_limits::infinity()) { mps_file << " FR BOUND1 " << col_name << "\n"; } else { - if (h_var_lb[j] != 0.0) { + if (h_var_lb[j] != 0.0 || h_obj_coeffs[j] == 0.0 || h_var_types[j] != var_t::CONTINUOUS) { if (h_var_lb[j] == -std::numeric_limits::infinity()) { mps_file << " MI BOUND1 " << col_name << "\n"; } else { diff --git a/cpp/src/mip/solve.cu b/cpp/src/mip/solve.cu index 6ca891a3b..e96456801 100644 --- a/cpp/src/mip/solve.cu +++ b/cpp/src/mip/solve.cu @@ -161,6 +161,10 @@ mip_solution_t solve_mip(optimization_problem_t& op_problem, // have solve, problem, solution, utils etc. in common dir detail::problem_t problem(op_problem); + if (settings.user_problem_file != "") { + CUOPT_LOG_INFO("Writing user problem to file: %s", settings.user_problem_file.c_str()); + problem.write_as_mps(settings.user_problem_file); + } // this is for PDLP, i think this should be part of pdlp solver setup_device_symbols(op_problem.get_handle_ptr()->get_stream()); diff --git a/cpp/tests/mip/doc_example_test.cu b/cpp/tests/mip/doc_example_test.cu index e945a7ef5..7b32cf068 100644 --- a/cpp/tests/mip/doc_example_test.cu +++ b/cpp/tests/mip/doc_example_test.cu @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -120,4 +121,48 @@ void test_mps_file() TEST(docs, mixed_integer_linear_programming) { test_mps_file(); } +TEST(docs, user_problem_file) +{ + const raft::handle_t handle_{}; + mip_solver_settings_t settings; + constexpr double test_time_limit = 1.; + + // Create the problem from documentation example + auto problem = create_doc_example_problem(); + + EXPECT_FALSE(std::filesystem::exists("user_problem.mps")); + + settings.time_limit = test_time_limit; + settings.user_problem_file = "user_problem.mps"; + EXPECT_EQ(solve_mip(&handle_, problem, settings).get_termination_status(), + mip_termination_status_t::Optimal); + + EXPECT_TRUE(std::filesystem::exists("user_problem.mps")); + + cuopt::mps_parser::mps_data_model_t problem2 = + cuopt::mps_parser::parse_mps("user_problem.mps", false); + + EXPECT_EQ(problem2.get_n_variables(), problem.get_n_variables()); + EXPECT_EQ(problem2.get_n_constraints(), problem.get_n_constraints()); + EXPECT_EQ(problem2.get_nnz(), problem.get_nnz()); + + settings.user_problem_file = "user_problem2.mps"; + mip_solution_t solution = solve_mip(&handle_, problem2, settings); + EXPECT_EQ(solution.get_termination_status(), mip_termination_status_t::Optimal); + + double obj_val = solution.get_objective_value(); + // Expected objective value from documentation example is approximately 303.5 + EXPECT_NEAR(303.5, obj_val, 1.0); + + // Get solution values + const auto& sol_values = solution.get_solution(); + // x should be approximately 37 and integer + EXPECT_NEAR(37.0, sol_values.element(0, handle_.get_stream()), 0.1); + EXPECT_NEAR(std::round(sol_values.element(0, 
handle_.get_stream())), + sol_values.element(0, handle_.get_stream()), + settings.tolerances.integrality_tolerance); // Check x is integer + // y should be approximately 39.5 + EXPECT_NEAR(39.5, sol_values.element(1, handle_.get_stream()), 0.1); +} + } // namespace cuopt::linear_programming::test diff --git a/docs/cuopt/source/cuopt-c/lp-milp/lp-milp-c-api.rst b/docs/cuopt/source/cuopt-c/lp-milp/lp-milp-c-api.rst index fbd312430..4e10c8c43 100644 --- a/docs/cuopt/source/cuopt-c/lp-milp/lp-milp-c-api.rst +++ b/docs/cuopt/source/cuopt-c/lp-milp/lp-milp-c-api.rst @@ -129,7 +129,7 @@ The following functions are used to set and get parameters. You can find more de Parameter Constants -------------------- +------------------- These constants are used as the parameter name in the `cuOptSetParameter `_ , `cuOptGetParameter `_ and similar functions. More details on the parameters can be found in the `LP/MILP settings <../../lp-milp-settings.html>`_ section. @@ -157,7 +157,9 @@ These constants are used as the parameter name in the `cuOptSetParameter Date: Thu, 5 Jun 2025 13:17:13 -0500 Subject: [PATCH 5/6] Enable nightly package publishing and fix nightly testing workflow (#72) This adds jobs to push/publish nightly packages and also fixes nightly testing to wait for the build workflow to complete first. --- .github/workflows/build.yaml | 64 ++++++++++++++++++++++++++++++++++ .github/workflows/nightly.yaml | 8 +++-- 2 files changed, 70 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 7281b81db..24db1550f 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -47,6 +47,15 @@ jobs: date: ${{ inputs.date }} sha: ${{ inputs.sha }} script: ci/build_python.sh + upload-conda: + needs: [cpp-build, python-build] + secrets: inherit + uses: rapidsai/shared-workflows/.github/workflows/conda-upload-packages.yaml@branch-25.06 + with: + build_type: ${{ inputs.build_type || 'branch' }} + branch: ${{ inputs.branch }} + date: ${{ inputs.date }} + sha: ${{ inputs.sha }} wheel-build-cuopt-mps-parser: secrets: inherit uses: rapidsai/shared-workflows/.github/workflows/wheels-build.yaml@branch-25.06 @@ -60,6 +69,17 @@ jobs: package-name: cuopt_mps_parser package-type: python append-cuda-suffix: false + wheel-publish-cuopt-mps-parser: + needs: wheel-build-cuopt-mps-parser + secrets: inherit + uses: rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06 + with: + build_type: ${{ inputs.build_type || 'branch' }} + branch: ${{ inputs.branch }} + sha: ${{ inputs.sha }} + date: ${{ inputs.date }} + package-name: cuopt_mps_parser + package-type: python wheel-build-libcuopt: needs: wheel-build-cuopt-mps-parser secrets: inherit @@ -73,6 +93,17 @@ jobs: package-name: libcuopt package-type: cpp matrix_filter: map(select((.CUDA_VER | startswith("12")) and .PY_VER == "3.12")) + wheel-publish-libcuopt: + needs: wheel-build-libcuopt + secrets: inherit + uses: rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06 + with: + build_type: ${{ inputs.build_type || 'branch' }} + branch: ${{ inputs.branch }} + sha: ${{ inputs.sha }} + date: ${{ inputs.date }} + package-name: libcuopt + package-type: cpp wheel-build-cuopt: needs: [wheel-build-cuopt-mps-parser, wheel-build-libcuopt] secrets: inherit @@ -86,6 +117,17 @@ jobs: script: ci/build_wheel_cuopt.sh package-name: cuopt package-type: python + wheel-publish-cuopt: + needs: wheel-build-cuopt + secrets: inherit + uses: 
rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06 + with: + build_type: ${{ inputs.build_type || 'branch' }} + branch: ${{ inputs.branch }} + sha: ${{ inputs.sha }} + date: ${{ inputs.date }} + package-name: cuopt + package-type: python wheel-build-cuopt-server: needs: wheel-build-cuopt secrets: inherit uses: rapidsai/shared-workflows/.github/workflows/wheels-build.yaml@branch-25.06 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} sha: ${{ inputs.sha }} date: ${{ inputs.date }} script: ci/build_wheel_cuopt_server.sh package-name: cuopt_server package-type: python + wheel-publish-cuopt-server: + needs: wheel-build-cuopt-server + secrets: inherit + uses: rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06 + with: + build_type: ${{ inputs.build_type || 'branch' }} + branch: ${{ inputs.branch }} + sha: ${{ inputs.sha }} + date: ${{ inputs.date }} + package-name: cuopt_server + package-type: python #docs-build: # if: inputs.build_type == 'nightly' || github.ref_type == 'branch' # needs: [python-build] @@ -127,6 +180,17 @@ jobs: package-name: cuopt_sh_client package-type: python append-cuda-suffix: false + wheel-publish-cuopt-sh-client: + needs: wheel-build-cuopt-sh-client + secrets: inherit + uses: rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06 + with: + build_type: ${{ inputs.build_type || 'branch' }} + branch: ${{ inputs.branch }} + sha: ${{ inputs.sha }} + date: ${{ inputs.date }} + package-name: cuopt_sh_client + package-type: python service-container: if: inputs.build_type == 'nightly' needs: [wheel-build-cuopt, wheel-build-cuopt-server] diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml index 8d4760f50..dfd8d6826 100644 --- a/.github/workflows/nightly.yaml +++ b/.github/workflows/nightly.yaml @@ -23,11 +23,15 @@ jobs: export DATE=$(date +%F) export SHA=$(gh api -q '.commit.sha' "repos/nvidia/cuopt/branches/${CUOPT_BRANCH}") - gh workflow run build.yaml \ + RUN_ID=$(gh workflow run build.yaml \ -f branch=${CUOPT_BRANCH} \ -f sha=${SHA} \ -f date=${DATE} \ - -f build_type=nightly + -f build_type=nightly \ + --json databaseId --jq '.databaseId') + + # Wait for workflow to complete + gh run watch $RUN_ID trigger-test: runs-on: ubuntu-latest From 13ca7f6d9f977dcb721387cf03aab4492f45b703 Mon Sep 17 00:00:00 2001 From: Ramakrishnap <42624703+rgsl888prabhu@users.noreply.github.com> Date: Fri, 6 Jun 2025 14:33:58 -0500 Subject: [PATCH 6/6] Documentation update to add third party modeling languages and nightly documentation (#77) Adds details on third-party modeling languages and explains how to install nightly packages for the latest development build.
--------- Co-authored-by: Chris Maes --- README.md | 11 ++++++++--- docs/cuopt/source/cuopt-python/quick-start.rst | 14 ++++++++++++-- docs/cuopt/source/cuopt-server/quick-start.rst | 12 ++++++++++-- docs/cuopt/source/index.rst | 10 ++++++++++ docs/cuopt/source/introduction.rst | 4 ++++ docs/cuopt/source/lp-features.rst | 6 ++++++ docs/cuopt/source/lp-milp-settings.rst | 2 +- docs/cuopt/source/milp-features.rst | 6 ++++++ .../thirdparty_modeling_languages/index.rst | 17 +++++++++++++++++ 9 files changed, 74 insertions(+), 8 deletions(-) create mode 100644 docs/cuopt/source/thirdparty_modeling_languages/index.rst diff --git a/README.md b/README.md index 301296c39..4ca1e3aef 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,8 @@ For CUDA 12.x: pip install --extra-index-url=https://pypi.nvidia.com cuopt-server-cu12==25.5.* cuopt-sh-client==25.5.* nvidia-cuda-runtime-cu12==12.8.* ``` +Development wheels are available as nightlies; update `--extra-index-url` to `https://pypi.anaconda.org/rapidsai-wheels-nightly/simple/` to install the latest nightly packages. + ### Conda cuOpt can be installed with conda (via [miniforge](https://github.com/conda-forge/miniforge)) from the `nvidia` channel: @@ -74,19 +76,22 @@ Users who are used to conda env based workflows would benefit with conda package For CUDA 12.x: ```bash conda install -c rapidsai -c conda-forge -c nvidia \ - cuopt-server=25.05 cuopt-sh-client=25.05 python=3.12 cuda-version=12.8 + cuopt-server=25.05.* cuopt-sh-client=25.05.* python=3.12 cuda-version=12.8 ``` We also provide [nightly Conda packages](https://anaconda.org/rapidsai-nightly) built from the HEAD -of our latest development branch. +of our latest development branch. Just replace `-c rapidsai` with `-c rapidsai-nightly`. ### Container Users can pull the cuOpt container from the NVIDIA container registry. ```bash -docker pull nvidia/cuopt:25.5.0-cuda12.8-py312 +docker pull nvidia/cuopt:latest-cuda12.8-py312 ``` + +Note: The ``latest`` tag is the latest stable release of cuOpt. If you want to use a specific version, you can use the ``<version>-cuda12.8-py312`` tag. For example, to use cuOpt 25.5.0, you can use the ``25.5.0-cuda12.8-py312`` tag. Please refer to `cuOpt dockerhub page `_ for the list of available tags. + More information about the cuOpt container can be found [here](https://docs.nvidia.com/cuopt/user-guide/latest/cuopt-server/quick-start.html#container-from-docker-hub). Users who are using cuOpt for quick testing or research can use the cuOpt container. Alternatively, users who are planning to plug cuOpt as a service in their workflow can quickly start with the cuOpt container. But users are required to build security layers around the service to safeguard the service from untrusted users. diff --git a/docs/cuopt/source/cuopt-python/quick-start.rst b/docs/cuopt/source/cuopt-python/quick-start.rst index 3fd72d58c..50d494318 100644 --- a/docs/cuopt/source/cuopt-python/quick-start.rst +++ b/docs/cuopt/source/cuopt-python/quick-start.rst @@ -17,6 +17,10 @@ For CUDA 12.x: pip install --extra-index-url=https://pypi.nvidia.com cuopt-cu12==25.5.* nvidia-cuda-runtime-cu12==12.8.* +.. note:: + For development wheels which are available as nightlies, please update `--extra-index-url` to `https://pypi.anaconda.org/rapidsai-wheels-nightly/simple/`. + + Conda ----- @@ -29,6 +33,9 @@ For CUDA 12.x: conda install -c rapidsai -c conda-forge -c nvidia \ cuopt=25.05.* python=3.12 cuda-version=12.8 +..
note:: + For development conda packages which are available as nightlies, please update `-c rapidsai` to `-c rapidsai-nightly`. + Container --------- @@ -37,13 +44,16 @@ NVIDIA cuOpt is also available as a container from Docker Hub: .. code-block:: bash - docker pull nvidia/cuopt:25.5.0-cuda12.8-py312 + docker pull nvidia/cuopt:latest-cuda12.8-py312 + +.. note:: + The ``latest`` tag is the latest stable release of cuOpt. If you want to use a specific version, you can use the ``<version>-cuda12.8-py312`` tag. For example, to use cuOpt 25.5.0, you can use the ``25.5.0-cuda12.8-py312`` tag. Please refer to `cuOpt dockerhub page `_ for the list of available tags. The container includes both the Python API and self-hosted server components. To run the container: .. code-block:: bash - docker run --gpus all -it --rm nvidia/cuopt:25.5.0-cuda12.8-py312 + docker run --gpus all -it --rm nvidia/cuopt:latest-cuda12.8-py312 This will start an interactive session with cuOpt pre-installed and ready to use. diff --git a/docs/cuopt/source/cuopt-server/quick-start.rst b/docs/cuopt/source/cuopt-server/quick-start.rst index 770d36559..5fad1d522 100644 --- a/docs/cuopt/source/cuopt-server/quick-start.rst +++ b/docs/cuopt/source/cuopt-server/quick-start.rst @@ -14,6 +14,8 @@ For CUDA 12.x: pip install --extra-index-url=https://pypi.nvidia.com cuopt-server-cu12==25.5.* cuopt-sh-client==25.5.* nvidia-cuda-runtime-cu12==12.8.* +.. note:: + For development wheels which are available as nightlies, please update `--extra-index-url` to `https://pypi.anaconda.org/rapidsai-wheels-nightly/simple/`. Conda ----- @@ -27,6 +29,9 @@ For CUDA 12.x: conda install -c rapidsai -c conda-forge -c nvidia \ cuopt-server=25.05.* cuopt-sh-client=25.05.* python=3.12 cuda-version=12.8 +.. note:: + For development conda packages which are available as nightlies, please update `-c rapidsai` to `-c rapidsai-nightly`. + Container from Docker Hub ------------------------- @@ -35,13 +40,16 @@ NVIDIA cuOpt is also available as a container from Docker Hub: .. code-block:: bash - docker pull nvidia/cuopt:25.5.0-cuda12.8-py312 + docker pull nvidia/cuopt:latest-cuda12.8-py312 + +.. note:: + The ``latest`` tag is the latest stable release of cuOpt. If you want to use a specific version, you can use the ``<version>-cuda12.8-py312`` tag. For example, to use cuOpt 25.5.0, you can use the ``25.5.0-cuda12.8-py312`` tag. Please refer to `cuOpt dockerhub page `_ for the list of available tags. The container includes both the Python API and self-hosted server components. To run the container: .. code-block:: bash - docker run --gpus all -it --rm -p 8000:8000 -e CUOPT_SERVER_PORT=8000 nvidia/cuopt:25.5.0-cuda12.8-py312 /bin/bash -c "python3 -m cuopt_server.cuopt_service" + docker run --gpus all -it --rm -p 8000:8000 -e CUOPT_SERVER_PORT=8000 nvidia/cuopt:latest-cuda12.8-py312 /bin/bash -c "python3 -m cuopt_server.cuopt_service" .. note:: Make sure you have the NVIDIA Container Toolkit installed on your system to enable GPU support in containers. See the `installation guide `_ for details. diff --git a/docs/cuopt/source/index.rst b/docs/cuopt/source/index.rst index 44f50db16..38b585f3a 100644 --- a/docs/cuopt/source/index.rst +++ b/docs/cuopt/source/index.rst @@ -60,6 +60,16 @@ Command Line Interface (cuopt-cli) Command Line Interface Overview +======================================== +Third-Party Modeling Languages +======================================== +..
toctree:: + :maxdepth: 4 + :caption: Third-Party Modeling Languages + :name: Third-Party Modeling Languages + + thirdparty_modeling_languages/index.rst + ============= Resources ============= diff --git a/docs/cuopt/source/introduction.rst b/docs/cuopt/source/introduction.rst index d3878d3ea..5eb537ce2 100644 --- a/docs/cuopt/source/introduction.rst +++ b/docs/cuopt/source/introduction.rst @@ -118,6 +118,10 @@ cuOpt supports the following APIs: - `Linear Programming (LP) - Server `_ - `Mixed Integer Linear Programming (MILP) - Server `_ - `Routing (TSP, VRP, and PDP) - Server `_ +- Third-party modeling languages + - `AMPL `_ + - `PuLP `_ + ================================== Installation Options diff --git a/docs/cuopt/source/lp-features.rst b/docs/cuopt/source/lp-features.rst index 7d0adab21..9fa3b3fd9 100644 --- a/docs/cuopt/source/lp-features.rst +++ b/docs/cuopt/source/lp-features.rst @@ -7,6 +7,12 @@ Availability The LP solver can be accessed in the following ways: +- **Third-Party Modeling Languages**: cuOpt's LP and MILP solver can be called directly from the following third-party modeling languages. This allows you to leverage GPU acceleration while maintaining your existing optimization workflow in these modeling languages. + + Supported modeling languages: + - AMPL + - PuLP + - **C API**: A native C API that provides direct low-level access to cuOpt's LP capabilities, enabling integration into any application or system that can interface with C. - **As a Self-Hosted Service**: cuOpt's LP solver can be deployed as a in your own infrastructure, enabling you to maintain full control while integrating it into your existing systems. diff --git a/docs/cuopt/source/lp-milp-settings.rst b/docs/cuopt/source/lp-milp-settings.rst index d7800e14c..6a5309a57 100644 --- a/docs/cuopt/source/lp-milp-settings.rst +++ b/docs/cuopt/source/lp-milp-settings.rst @@ -48,7 +48,7 @@ Solution File Note: the default value is ``""`` and no solution file is written. User Problem File -^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^ ``CUOPT_USER_PROBLEM_FILE`` controls the name of a file where cuOpt should write the user problem. Note: the default value is ``""`` and no user problem file is written. diff --git a/docs/cuopt/source/milp-features.rst b/docs/cuopt/source/milp-features.rst index 97a5729ac..9168047f2 100644 --- a/docs/cuopt/source/milp-features.rst +++ b/docs/cuopt/source/milp-features.rst @@ -7,6 +7,12 @@ Availability The MILP solver can be accessed in the following ways: +- **Third-Party Modeling Languages**: cuOpt's LP and MILP solver can be called directly from the following third-party modeling languages. This allows you to leverage GPU acceleration while maintaining your existing optimization workflow in these modeling languages. + + Currently supported solvers: + - AMPL + - PuLP + - **C API**: A native C API that provides direct low-level access to cuOpt's MILP solver, enabling integration into any application or system that can interface with C. - **As a Self-Hosted Service**: cuOpt's MILP solver can be deployed in your own infrastructure, enabling you to maintain full control while integrating it into your existing systems. 
diff --git a/docs/cuopt/source/thirdparty_modeling_languages/index.rst b/docs/cuopt/source/thirdparty_modeling_languages/index.rst new file mode 100644 index 000000000..8a5024e9e --- /dev/null +++ b/docs/cuopt/source/thirdparty_modeling_languages/index.rst @@ -0,0 +1,17 @@ +=============================== +Third-Party Modeling Languages +=============================== + + +-------------------------- +AMPL Support +-------------------------- + +AMPL can be used with near-zero code changes: simply switch to cuOpt as a solver to solve linear and mixed-integer programming problems. Please refer to the `AMPL documentation `_ for more information. + +-------------------------- +PuLP Support +-------------------------- + +PuLP can be used with near-zero code changes: simply switch to cuOpt as a solver to solve linear and mixed-integer programming problems. +Please refer to the `PuLP documentation `_ for more information. Also, see the example notebook in the `cuopt-examples `_ repository.
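To make the "near-zero code changes" claim concrete, here is a minimal PuLP sketch (assumption: PuLP exposes its cuOpt connector as a `CUOPT` solver class, per the PuLP documentation referenced above; the model itself is illustrative):

```python
# Hedged sketch: assumes pulp.CUOPT exists as described in the PuLP docs.
import pulp

prob = pulp.LpProblem("demo", pulp.LpMaximize)
x = pulp.LpVariable("x", lowBound=0)
y = pulp.LpVariable("y", lowBound=0)
prob += 5 * x + 4 * y        # objective
prob += 2 * x + 3 * y <= 12  # resource constraint
prob += x + y <= 5           # capacity constraint
# The only cuOpt-specific line: select cuOpt instead of the default CBC solver.
prob.solve(pulp.CUOPT())
print(pulp.LpStatus[prob.status], pulp.value(prob.objective))
```

Everything except the `prob.solve(pulp.CUOPT())` line is standard PuLP; swapping the solver argument back to `pulp.PULP_CBC_CMD()` restores the CPU path.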