diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 7281b81db..24db1550f 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -47,6 +47,15 @@ jobs:
       date: ${{ inputs.date }}
       sha: ${{ inputs.sha }}
       script: ci/build_python.sh
+  upload-conda:
+    needs: [cpp-build, python-build]
+    secrets: inherit
+    uses: rapidsai/shared-workflows/.github/workflows/conda-upload-packages.yaml@branch-25.06
+    with:
+      build_type: ${{ inputs.build_type || 'branch' }}
+      branch: ${{ inputs.branch }}
+      date: ${{ inputs.date }}
+      sha: ${{ inputs.sha }}
   wheel-build-cuopt-mps-parser:
     secrets: inherit
     uses: rapidsai/shared-workflows/.github/workflows/wheels-build.yaml@branch-25.06
@@ -60,6 +69,17 @@
       package-name: cuopt_mps_parser
       package-type: python
       append-cuda-suffix: false
+  wheel-publish-cuopt-mps-parser:
+    needs: wheel-build-cuopt-mps-parser
+    secrets: inherit
+    uses: rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06
+    with:
+      build_type: ${{ inputs.build_type || 'branch' }}
+      branch: ${{ inputs.branch }}
+      sha: ${{ inputs.sha }}
+      date: ${{ inputs.date }}
+      package-name: cuopt_mps_parser
+      package-type: python
   wheel-build-libcuopt:
     needs: wheel-build-cuopt-mps-parser
     secrets: inherit
@@ -73,6 +93,17 @@
       package-name: libcuopt
       package-type: cpp
       matrix_filter: map(select((.CUDA_VER | startswith("12")) and .PY_VER == "3.12"))
+  wheel-publish-libcuopt:
+    needs: wheel-build-libcuopt
+    secrets: inherit
+    uses: rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06
+    with:
+      build_type: ${{ inputs.build_type || 'branch' }}
+      branch: ${{ inputs.branch }}
+      sha: ${{ inputs.sha }}
+      date: ${{ inputs.date }}
+      package-name: libcuopt
+      package-type: cpp
   wheel-build-cuopt:
     needs: [wheel-build-cuopt-mps-parser, wheel-build-libcuopt]
     secrets: inherit
@@ -86,6 +117,17 @@
       script: ci/build_wheel_cuopt.sh
       package-name: cuopt
       package-type: python
+  wheel-publish-cuopt:
+    needs: wheel-build-cuopt
+    secrets: inherit
+    uses: rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06
+    with:
+      build_type: ${{ inputs.build_type || 'branch' }}
+      branch: ${{ inputs.branch }}
+      sha: ${{ inputs.sha }}
+      date: ${{ inputs.date }}
+      package-name: cuopt
+      package-type: python
   wheel-build-cuopt-server:
     needs: wheel-build-cuopt
     secrets: inherit
@@ -99,6 +141,17 @@
       script: ci/build_wheel_cuopt_server.sh
       package-name: cuopt_server
       package-type: python
+  wheel-publish-cuopt-server:
+    needs: wheel-build-cuopt-server
+    secrets: inherit
+    uses: rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06
+    with:
+      build_type: ${{ inputs.build_type || 'branch' }}
+      branch: ${{ inputs.branch }}
+      sha: ${{ inputs.sha }}
+      date: ${{ inputs.date }}
+      package-name: cuopt_server
+      package-type: python
   #docs-build:
   #  if: inputs.build_type == 'nightly' || github.ref_type == 'branch'
   #  needs: [python-build]
@@ -127,6 +180,17 @@
       package-name: cuopt_sh_client
       package-type: python
       append-cuda-suffix: false
+  wheel-publish-cuopt-sh-client:
+    needs: wheel-build-cuopt-sh-client
+    secrets: inherit
+    uses: rapidsai/shared-workflows/.github/workflows/wheels-publish.yaml@branch-25.06
+    with:
+      build_type: ${{ inputs.build_type || 'branch' }}
+      branch: ${{ inputs.branch }}
+      sha: ${{ inputs.sha }}
+      date: ${{ inputs.date }}
+      package-name: cuopt_sh_client
+      package-type: python
   service-container:
     if: inputs.build_type == 'nightly'
    needs: [wheel-build-cuopt, wheel-build-cuopt-server]
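Every publish job added above passes `build_type: ${{ inputs.build_type || 'branch' }}`, so a run whose `build_type` input is empty falls back to a branch build. A minimal, self-contained sketch of that fallback pattern (the workflow and job names here are hypothetical, not part of this PR):

```yaml
# demo.yaml -- hypothetical workflow illustrating the `|| 'branch'` fallback
name: fallback-demo
on:
  workflow_dispatch:
    inputs:
      build_type:
        required: false
        type: string
jobs:
  show-build-type:
    runs-on: ubuntu-latest
    steps:
      # Prints "build_type=branch" when the input is omitted or empty.
      - run: echo "build_type=${{ inputs.build_type || 'branch' }}"
```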
diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml
index 8d4760f50..dfd8d6826 100644
--- a/.github/workflows/nightly.yaml
+++ b/.github/workflows/nightly.yaml
@@ -23,11 +23,19 @@ jobs:
           export DATE=$(date +%F)
           export SHA=$(gh api -q '.commit.sha' "repos/nvidia/cuopt/branches/${CUOPT_BRANCH}")
           gh workflow run build.yaml \
             -f branch=${CUOPT_BRANCH} \
             -f sha=${SHA} \
             -f date=${DATE} \
             -f build_type=nightly
+
+          # `gh workflow run` does not print the id of the run it dispatches,
+          # so wait for the run to register and then look it up
+          sleep 10
+          RUN_ID=$(gh run list --workflow=build.yaml --limit 1 --json databaseId --jq '.[0].databaseId')
+
+          # Wait for workflow to complete
+          gh run watch $RUN_ID
 
   trigger-test:
     runs-on: ubuntu-latest
diff --git a/README.md b/README.md
index 301296c39..4ca1e3aef 100644
--- a/README.md
+++ b/README.md
@@ -63,6 +63,8 @@ For CUDA 12.x:
 pip install --extra-index-url=https://pypi.nvidia.com cuopt-server-cu12==25.5.* cuopt-sh-client==25.5.* nvidia-cuda-runtime-cu12==12.8.*
 ```
 
+Development wheels are published as nightlies; to install the latest nightly packages, change `--extra-index-url` to `https://pypi.anaconda.org/rapidsai-wheels-nightly/simple/`.
+
 ### Conda
 
 cuOpt can be installed with conda (via [miniforge](https://github.com/conda-forge/miniforge)) from the `nvidia` channel:
@@ -74,19 +76,22 @@ Users who are used to conda env based workflows would benefit with conda package
 For CUDA 12.x:
 ```bash
 conda install -c rapidsai -c conda-forge -c nvidia \
-    cuopt-server=25.05 cuopt-sh-client=25.05 python=3.12 cuda-version=12.8
+    cuopt-server=25.05.* cuopt-sh-client=25.05.* python=3.12 cuda-version=12.8
 ```
 
 We also provide [nightly Conda packages](https://anaconda.org/rapidsai-nightly) built from the HEAD
-of our latest development branch.
+of our latest development branch. Just replace `-c rapidsai` with `-c rapidsai-nightly`.
 
 ### Container
 
 Users can pull the cuOpt container from the NVIDIA container registry.
 
 ```bash
-docker pull nvidia/cuopt:25.5.0-cuda12.8-py312
+docker pull nvidia/cuopt:latest-cuda12.8-py312
 ```
+
+Note: The `latest` tag is the latest stable release of cuOpt. If you want a specific version, use the `<version>-cuda12.8-py312` tag; for example, to use cuOpt 25.5.0, use the `25.5.0-cuda12.8-py312` tag. See the cuOpt Docker Hub page for the list of available tags.
+
 More information about the cuOpt container can be found [here](https://docs.nvidia.com/cuopt/user-guide/latest/cuopt-server/quick-start.html#container-from-docker-hub).
 
 Users who are using cuOpt for quick testing or research can use the cuOpt container. Alternatively, users who are planning to plug cuOpt as a service in their workflow can quickly start with the cuOpt container. But users are required to build security layers around the service to safeguard the service from untrusted users.
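For concreteness, the nightly wheel install described in the README change above would look roughly like this; the package list mirrors the stable command, and `--pre` is an assumption needed so pip accepts the prerelease version numbers nightlies typically carry:

```bash
pip install --pre \
    --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple/ \
    cuopt-server-cu12 cuopt-sh-client nvidia-cuda-runtime-cu12==12.8.*
```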
diff --git a/cpp/include/cuopt/linear_programming/constants.h b/cpp/include/cuopt/linear_programming/constants.h
index 4cf856bb8..ca4377de9 100644
--- a/cpp/include/cuopt/linear_programming/constants.h
+++ b/cpp/include/cuopt/linear_programming/constants.h
@@ -57,8 +57,9 @@
 #define CUOPT_MIP_RELATIVE_GAP "mip_relative_gap"
 #define CUOPT_MIP_HEURISTICS_ONLY "mip_heuristics_only"
 #define CUOPT_MIP_SCALING "mip_scaling"
-#define CUOPT_SOL_FILE "solution_file"
+#define CUOPT_SOLUTION_FILE "solution_file"
 #define CUOPT_NUM_CPU_THREADS "num_cpu_threads"
+#define CUOPT_USER_PROBLEM_FILE "user_problem_file"
 
 /* @brief LP/MIP termination status constants */
 #define CUOPT_TERIMINATION_STATUS_NO_TERMINATION 0
diff --git a/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp b/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp
index 850248b5f..71afb366d 100644
--- a/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp
+++ b/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp
@@ -92,6 +92,7 @@ class mip_solver_settings_t {
   bool log_to_console = true;
   std::string log_file;
   std::string sol_file;
+  std::string user_problem_file;
 
   /** Initial primal solution */
   std::shared_ptr> initial_solution_;
diff --git a/cpp/include/cuopt/linear_programming/pdlp/solver_settings.hpp b/cpp/include/cuopt/linear_programming/pdlp/solver_settings.hpp
index 8631c857e..9dcccf7a7 100644
--- a/cpp/include/cuopt/linear_programming/pdlp/solver_settings.hpp
+++ b/cpp/include/cuopt/linear_programming/pdlp/solver_settings.hpp
@@ -200,6 +200,7 @@ class pdlp_solver_settings_t {
   bool log_to_console{true};
   std::string log_file{""};
   std::string sol_file{""};
+  std::string user_problem_file{""};
   bool per_constraint_residual{false};
   bool crossover{false};
   bool save_best_primal_so_far{false};
diff --git a/cpp/include/cuopt/linear_programming/utilities/cython_solve.hpp b/cpp/include/cuopt/linear_programming/utilities/cython_solve.hpp
index de4d9cf66..cc30ff7a0 100644
--- a/cpp/include/cuopt/linear_programming/utilities/cython_solve.hpp
+++ b/cpp/include/cuopt/linear_programming/utilities/cython_solve.hpp
@@ -103,7 +103,8 @@ struct solver_ret_t {
 
 // Wrapper for solve to expose the API to cython.
 std::unique_ptr call_solve(cuopt::mps_parser::data_model_view_t*,
-                           linear_programming::solver_settings_t*);
+                           linear_programming::solver_settings_t*,
+                           unsigned int flags = cudaStreamNonBlocking);
 
 std::pair>, double> call_batch_solve(
   std::vector*>,
diff --git a/cpp/src/dual_simplex/branch_and_bound.cpp b/cpp/src/dual_simplex/branch_and_bound.cpp
index 1b8a307d2..f1075ed55 100644
--- a/cpp/src/dual_simplex/branch_and_bound.cpp
+++ b/cpp/src/dual_simplex/branch_and_bound.cpp
@@ -463,8 +463,10 @@ mip_status_t branch_and_bound_t::solve(mip_solution_t& solution)
     global_variables::mutex_upper.unlock();
     // We should be done here
     uncrush_primal_solution(original_problem, original_lp, incumbent.x, solution.x);
-    solution.objective   = incumbent.objective;
-    solution.lower_bound = lower_bound;
+    solution.objective          = incumbent.objective;
+    solution.lower_bound        = lower_bound;
+    solution.nodes_explored     = 0;
+    solution.simplex_iterations = root_relax_soln.iterations;
     settings.log.printf("Optimal solution found at root node. Objective %.16e. Time %.2f.\n",
                         compute_user_objective(original_lp, root_objective),
                         toc(start_time));
Time %.2f.\n", compute_user_objective(original_lp, root_objective), toc(start_time)); diff --git a/cpp/src/linear_programming/solve.cu b/cpp/src/linear_programming/solve.cu index 12859a118..ed2a30bcf 100644 --- a/cpp/src/linear_programming/solve.cu +++ b/cpp/src/linear_programming/solve.cu @@ -569,6 +569,11 @@ optimization_problem_solution_t solve_lp(optimization_problem_t call_solve( cuopt::mps_parser::data_model_view_t* data_model, - cuopt::linear_programming::solver_settings_t* solver_settings) + cuopt::linear_programming::solver_settings_t* solver_settings, + unsigned int flags) { raft::common::nvtx::range fun_scope("Call Solve"); cudaStream_t stream; - RAFT_CUDA_TRY(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); + RAFT_CUDA_TRY(cudaStreamCreateWithFlags(&stream, flags)); const raft::handle_t handle_{stream}; auto op_problem = data_model_to_optimization_problem(data_model, solver_settings, &handle_); @@ -283,9 +284,11 @@ std::pair>, double> call_batch_solve( solver_settings->set_parameter(CUOPT_METHOD, CUOPT_METHOD_PDLP); } + // Use a default stream instead of a non-blocking to avoid invalid operations while some CUDA + // Graph might be capturing in another stream #pragma omp parallel for num_threads(max_thread) for (std::size_t i = 0; i < size; ++i) - list[i] = std::move(call_solve(data_models[i], solver_settings)); + list[i] = std::move(call_solve(data_models[i], solver_settings, cudaStreamDefault)); auto end = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast(end - start_solver); diff --git a/cpp/src/linear_programming/utilities/logger_init.hpp b/cpp/src/linear_programming/utilities/logger_init.hpp index 0e847065d..4d93f8cb6 100644 --- a/cpp/src/linear_programming/utilities/logger_init.hpp +++ b/cpp/src/linear_programming/utilities/logger_init.hpp @@ -37,6 +37,8 @@ class init_logger_t { // TODO save the defaul sink and restore it cuopt::default_logger().sinks().push_back( std::make_shared(log_file, true)); + cuopt::default_logger().set_pattern("%v"); + cuopt::default_logger().flush_on(rapids_logger::level_enum::info); } } ~init_logger_t() { cuopt::reset_default_logger(); } diff --git a/cpp/src/math_optimization/solver_settings.cu b/cpp/src/math_optimization/solver_settings.cu index 2666e7bd5..142ef55c5 100644 --- a/cpp/src/math_optimization/solver_settings.cu +++ b/cpp/src/math_optimization/solver_settings.cu @@ -110,8 +110,10 @@ solver_settings_t::solver_settings_t() : pdlp_settings(), mip_settings string_parameters = { {CUOPT_LOG_FILE, &mip_settings.log_file, ""}, {CUOPT_LOG_FILE, &pdlp_settings.log_file, ""}, - {CUOPT_SOL_FILE, &mip_settings.sol_file, ""}, - {CUOPT_SOL_FILE, &pdlp_settings.sol_file, ""} + {CUOPT_SOLUTION_FILE, &mip_settings.sol_file, ""}, + {CUOPT_SOLUTION_FILE, &pdlp_settings.sol_file, ""}, + {CUOPT_USER_PROBLEM_FILE, &mip_settings.user_problem_file, ""}, + {CUOPT_USER_PROBLEM_FILE, &pdlp_settings.user_problem_file, ""} }; // clang-format on } diff --git a/cpp/src/mip/presolve/trivial_presolve.cuh b/cpp/src/mip/presolve/trivial_presolve.cuh index d7cfa6dc1..e201f401e 100644 --- a/cpp/src/mip/presolve/trivial_presolve.cuh +++ b/cpp/src/mip/presolve/trivial_presolve.cuh @@ -234,6 +234,7 @@ void update_from_csr(problem_t& pb) // update objective_offset pb.presolve_data.objective_offset += + pb.presolve_data.objective_scaling_factor * thrust::transform_reduce(handle_ptr->get_thrust_policy(), thrust::counting_iterator(0), thrust::counting_iterator(pb.n_variables), diff --git a/cpp/src/mip/problem/write_mps.cu 
diff --git a/cpp/src/mip/problem/write_mps.cu b/cpp/src/mip/problem/write_mps.cu
index 31315fc9c..cca3cd5b1 100644
--- a/cpp/src/mip/problem/write_mps.cu
+++ b/cpp/src/mip/problem/write_mps.cu
@@ -54,6 +54,8 @@ void problem_t::write_as_mps(const std::string& path)
   // NAME section
   mps_file << "NAME " << original_problem_ptr->get_problem_name() << "\n";
 
+  if (maximize) { mps_file << "OBJSENSE\n MAXIMIZE\n"; }
+
   // ROWS section
   mps_file << "ROWS\n";
   mps_file << " N " << (objective_name.empty() ? "OBJ" : objective_name) << "\n";
@@ -86,7 +88,7 @@ void problem_t::write_as_mps(const std::string& path)
     // Write objective coefficient if non-zero
     if (h_obj_coeffs[j] != 0.0) {
       mps_file << "    " << col_name << " " << (objective_name.empty() ? "OBJ" : objective_name)
-               << " " << h_obj_coeffs[j] << "\n";
+               << " " << (maximize ? -h_obj_coeffs[j] : h_obj_coeffs[j]) << "\n";
     }
 
     // Write constraint coefficients
@@ -146,7 +148,7 @@ void problem_t::write_as_mps(const std::string& path)
         h_var_ub[j] == std::numeric_limits::infinity()) {
       mps_file << " FR BOUND1 " << col_name << "\n";
     } else {
-      if (h_var_lb[j] != 0.0) {
+      if (h_var_lb[j] != 0.0 || h_obj_coeffs[j] == 0.0 || h_var_types[j] != var_t::CONTINUOUS) {
         if (h_var_lb[j] == -std::numeric_limits::infinity()) {
           mps_file << " MI BOUND1 " << col_name << "\n";
         } else {
diff --git a/cpp/src/mip/solve.cu b/cpp/src/mip/solve.cu
index 6ca891a3b..e96456801 100644
--- a/cpp/src/mip/solve.cu
+++ b/cpp/src/mip/solve.cu
@@ -161,6 +161,10 @@ mip_solution_t solve_mip(optimization_problem_t& op_problem,
   // have solve, problem, solution, utils etc. in common dir
   detail::problem_t problem(op_problem);
 
+  if (settings.user_problem_file != "") {
+    CUOPT_LOG_INFO("Writing user problem to file: %s", settings.user_problem_file.c_str());
+    problem.write_as_mps(settings.user_problem_file);
+  }
   // this is for PDLP, i think this should be part of pdlp solver
   setup_device_symbols(op_problem.get_handle_ptr()->get_stream());
diff --git a/cpp/tests/mip/doc_example_test.cu b/cpp/tests/mip/doc_example_test.cu
index e945a7ef5..7b32cf068 100644
--- a/cpp/tests/mip/doc_example_test.cu
+++ b/cpp/tests/mip/doc_example_test.cu
@@ -29,6 +29,7 @@
 #include
 #include
+#include <filesystem>
 #include
 #include
 #include
@@ -120,4 +121,48 @@
 
 TEST(docs, mixed_integer_linear_programming) { test_mps_file(); }
 
+TEST(docs, user_problem_file)
+{
+  const raft::handle_t handle_{};
+  mip_solver_settings_t settings;
+  constexpr double test_time_limit = 1.;
+
+  // Create the problem from documentation example
+  auto problem = create_doc_example_problem();
+
+  EXPECT_FALSE(std::filesystem::exists("user_problem.mps"));
+
+  settings.time_limit        = test_time_limit;
+  settings.user_problem_file = "user_problem.mps";
+  EXPECT_EQ(solve_mip(&handle_, problem, settings).get_termination_status(),
+            mip_termination_status_t::Optimal);
+
+  EXPECT_TRUE(std::filesystem::exists("user_problem.mps"));
+
+  cuopt::mps_parser::mps_data_model_t problem2 =
+    cuopt::mps_parser::parse_mps("user_problem.mps", false);
+
+  EXPECT_EQ(problem2.get_n_variables(), problem.get_n_variables());
+  EXPECT_EQ(problem2.get_n_constraints(), problem.get_n_constraints());
+  EXPECT_EQ(problem2.get_nnz(), problem.get_nnz());
+
+  settings.user_problem_file = "user_problem2.mps";
+  mip_solution_t solution = solve_mip(&handle_, problem2, settings);
+  EXPECT_EQ(solution.get_termination_status(), mip_termination_status_t::Optimal);
+
+  double obj_val = solution.get_objective_value();
+  // Expected objective value from documentation example is approximately 303.5
+  EXPECT_NEAR(303.5, obj_val, 1.0);
+
+  // Get solution values
+  const auto& sol_values = solution.get_solution();
+  // x should be approximately 37 and integer
+  EXPECT_NEAR(37.0, sol_values.element(0, handle_.get_stream()), 0.1);
+  EXPECT_NEAR(std::round(sol_values.element(0, handle_.get_stream())),
+              sol_values.element(0, handle_.get_stream()),
+              settings.tolerances.integrality_tolerance);  // Check x is integer
+  // y should be approximately 39.5
+  EXPECT_NEAR(39.5, sol_values.element(1, handle_.get_stream()), 0.1);
+}
+
 }  // namespace cuopt::linear_programming::test
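For readers unfamiliar with the `OBJSENSE` section that `write_as_mps` now emits, a small hand-written MPS file for "maximize 3x + 2y subject to 2x + y <= 10, x, y >= 0" would look like this (illustrative only, not cuOpt output; note the sense keyword on its own indented line, matching the `"OBJSENSE\n MAXIMIZE\n"` string above):

```text
NAME          DEMO
OBJSENSE
 MAXIMIZE
ROWS
 N  OBJ
 L  C1
COLUMNS
    X  OBJ  3.0
    X  C1   2.0
    Y  OBJ  2.0
    Y  C1   1.0
RHS
    RHS1  C1  10.0
ENDATA
```

Since cuOpt appears to store maximization problems internally in negated, minimize form, the `(maximize ? -h_obj_coeffs[j] : h_obj_coeffs[j])` change writes the coefficients back in the user's sign convention to match the declared `OBJSENSE`.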
diff --git a/docs/cuopt/source/cuopt-c/lp-milp/lp-milp-c-api.rst b/docs/cuopt/source/cuopt-c/lp-milp/lp-milp-c-api.rst
index fbd312430..4e10c8c43 100644
--- a/docs/cuopt/source/cuopt-c/lp-milp/lp-milp-c-api.rst
+++ b/docs/cuopt/source/cuopt-c/lp-milp/lp-milp-c-api.rst
@@ -129,7 +129,7 @@ The following functions are used to set and get parameters. You can find more de
 
 Parameter Constants
--------------------- 
+-------------------
 
 These constants are used as the parameter name in the `cuOptSetParameter `_ , `cuOptGetParameter `_ and similar functions. More details on the parameters can be found in the `LP/MILP settings <../../lp-milp-settings.html>`_ section.
 
@@ -157,7 +157,9 @@ These constants are used as the parameter name in the `cuOptSetParameter
+Note: The ``latest`` tag is the latest stable release of cuOpt. If you want a specific version, use the ``<version>-cuda12.8-py312`` tag. For example, to use cuOpt 25.5.0, use the ``25.5.0-cuda12.8-py312`` tag. Please refer to the `cuOpt dockerhub page `_ for the list of available tags.
 
 The container includes both the Python API and self-hosted server components. To run the container:
 
 .. code-block:: bash
 
-   docker run --gpus all -it --rm nvidia/cuopt:25.5.0-cuda12.8-py312
+   docker run --gpus all -it --rm nvidia/cuopt:latest-cuda12.8-py312
 
 This will start an interactive session with cuOpt pre-installed and ready to use.
diff --git a/docs/cuopt/source/cuopt-server/quick-start.rst b/docs/cuopt/source/cuopt-server/quick-start.rst
index 770d36559..5fad1d522 100644
--- a/docs/cuopt/source/cuopt-server/quick-start.rst
+++ b/docs/cuopt/source/cuopt-server/quick-start.rst
@@ -14,6 +14,8 @@ For CUDA 12.x:
 
    pip install --extra-index-url=https://pypi.nvidia.com cuopt-server-cu12==25.5.* cuopt-sh-client==25.5.* nvidia-cuda-runtime-cu12==12.8.*
 
+.. note::
+   Development wheels are published as nightlies. To install the latest nightly packages, change ``--extra-index-url`` to ``https://pypi.anaconda.org/rapidsai-wheels-nightly/simple/``.
 
 Conda
 -----
@@ -27,6 +29,9 @@ For CUDA 12.x:
    conda install -c rapidsai -c conda-forge -c nvidia \
      cuopt-server=25.05.* cuopt-sh-client=25.05.* python=3.12 cuda-version=12.8
 
+.. note::
+   Development conda packages are published as nightlies. To install the latest nightly packages, replace ``-c rapidsai`` with ``-c rapidsai-nightly``.
+
 Container from Docker Hub
 -------------------------
 
@@ -35,13 +40,16 @@ NVIDIA cuOpt is also available as a container from Docker Hub:
 
 .. code-block:: bash
 
-   docker pull nvidia/cuopt:25.5.0-cuda12.8-py312
+   docker pull nvidia/cuopt:latest-cuda12.8-py312
+
+.. note::
+   The ``latest`` tag is the latest stable release of cuOpt. If you want a specific version, use the ``<version>-cuda12.8-py312`` tag. For example, to use cuOpt 25.5.0, use the ``25.5.0-cuda12.8-py312`` tag. Please refer to the `cuOpt dockerhub page `_ for the list of available tags.
 
 The container includes both the Python API and self-hosted server components. To run the container:
 
 .. code-block:: bash
 
-   docker run --gpus all -it --rm -p 8000:8000 -e CUOPT_SERVER_PORT=8000 nvidia/cuopt:25.5.0-cuda12.8-py312 /bin/bash -c "python3 -m cuopt_server.cuopt_service"
+   docker run --gpus all -it --rm -p 8000:8000 -e CUOPT_SERVER_PORT=8000 nvidia/cuopt:latest-cuda12.8-py312 /bin/bash -c "python3 -m cuopt_server.cuopt_service"
 
 .. note::
    Make sure you have the NVIDIA Container Toolkit installed on your system to enable GPU support in containers. See the `installation guide `_ for details.
diff --git a/docs/cuopt/source/index.rst b/docs/cuopt/source/index.rst
index 44f50db16..38b585f3a 100644
--- a/docs/cuopt/source/index.rst
+++ b/docs/cuopt/source/index.rst
@@ -60,6 +60,16 @@ Command Line Interface (cuopt-cli)
 
    Command Line Interface Overview
 
+========================================
+Third-Party Modeling Languages
+========================================
+
+.. toctree::
+   :maxdepth: 4
+   :caption: Third-Party Modeling Languages
+   :name: Third-Party Modeling Languages
+
+   thirdparty_modeling_languages/index.rst
 
 =============
 Resources
 =============
diff --git a/docs/cuopt/source/introduction.rst b/docs/cuopt/source/introduction.rst
index d3878d3ea..5eb537ce2 100644
--- a/docs/cuopt/source/introduction.rst
+++ b/docs/cuopt/source/introduction.rst
@@ -118,6 +118,10 @@ cuOpt supports the following APIs:
 - `Linear Programming (LP) - Server `_
 - `Mixed Integer Linear Programming (MILP) - Server `_
 - `Routing (TSP, VRP, and PDP) - Server `_
+- Third-party modeling languages
+
+  - `AMPL `_
+  - `PuLP `_
 
 ==================================
 Installation Options
diff --git a/docs/cuopt/source/lp-features.rst b/docs/cuopt/source/lp-features.rst
index 7d0adab21..9fa3b3fd9 100644
--- a/docs/cuopt/source/lp-features.rst
+++ b/docs/cuopt/source/lp-features.rst
@@ -7,6 +7,12 @@ Availability
 
 The LP solver can be accessed in the following ways:
 
+- **Third-Party Modeling Languages**: cuOpt's LP and MILP solvers can be called directly from the following third-party modeling languages. This allows you to leverage GPU acceleration while maintaining your existing optimization workflow in these modeling languages.
+
+  Supported modeling languages:
+
+  - AMPL
+  - PuLP
 
 - **C API**: A native C API that provides direct low-level access to cuOpt's LP capabilities, enabling integration into any application or system that can interface with C.
 
 - **As a Self-Hosted Service**: cuOpt's LP solver can be deployed in your own infrastructure, enabling you to maintain full control while integrating it into your existing systems.
diff --git a/docs/cuopt/source/lp-milp-settings.rst b/docs/cuopt/source/lp-milp-settings.rst
index 361f4865b..6a5309a57 100644
--- a/docs/cuopt/source/lp-milp-settings.rst
+++ b/docs/cuopt/source/lp-milp-settings.rst
@@ -41,6 +41,17 @@ Log File
 
 Note: the default value is ``""`` and no log file is written.
 
+Solution File
+^^^^^^^^^^^^^
+``CUOPT_SOLUTION_FILE`` controls the name of a file where cuOpt writes the solution.
+
+Note: the default value is ``""`` and no solution file is written.
+
+User Problem File
+^^^^^^^^^^^^^^^^^
+``CUOPT_USER_PROBLEM_FILE`` controls the name of a file where cuOpt writes the problem as given by the user.
+
+Note: the default value is ``""`` and no user problem file is written.
 
 Num CPU Threads
 ^^^^^^^^^^^^^^^
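A short usage sketch for the two parameters documented above, mirroring the Python test added later in this diff (import paths are taken from that test; `model.mps` and the output file names are illustrative):

```python
import cuopt_mps_parser
from cuopt.linear_programming import solver, solver_settings
from cuopt.linear_programming.solver.solver_parameters import (
    CUOPT_SOLUTION_FILE,
    CUOPT_USER_PROBLEM_FILE,
)

# Parse any MPS model (path is illustrative).
data_model = cuopt_mps_parser.ParseMps("model.mps")

settings = solver_settings.SolverSettings()
# Echo the problem cuOpt received as MPS, and write the solution to a file.
settings.set_parameter(CUOPT_USER_PROBLEM_FILE, "model_echo.mps")
settings.set_parameter(CUOPT_SOLUTION_FILE, "model.sol")

solution = solver.Solve(data_model, settings)
print(solution.get_termination_status())
```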
diff --git a/docs/cuopt/source/milp-features.rst b/docs/cuopt/source/milp-features.rst
index 97a5729ac..9168047f2 100644
--- a/docs/cuopt/source/milp-features.rst
+++ b/docs/cuopt/source/milp-features.rst
@@ -7,6 +7,12 @@ Availability
 
 The MILP solver can be accessed in the following ways:
 
+- **Third-Party Modeling Languages**: cuOpt's LP and MILP solvers can be called directly from the following third-party modeling languages. This allows you to leverage GPU acceleration while maintaining your existing optimization workflow in these modeling languages.
+
+  Supported modeling languages:
+
+  - AMPL
+  - PuLP
 
 - **C API**: A native C API that provides direct low-level access to cuOpt's MILP solver, enabling integration into any application or system that can interface with C.
 
 - **As a Self-Hosted Service**: cuOpt's MILP solver can be deployed in your own infrastructure, enabling you to maintain full control while integrating it into your existing systems.
diff --git a/docs/cuopt/source/thirdparty_modeling_languages/index.rst b/docs/cuopt/source/thirdparty_modeling_languages/index.rst
new file mode 100644
index 000000000..8a5024e9e
--- /dev/null
+++ b/docs/cuopt/source/thirdparty_modeling_languages/index.rst
@@ -0,0 +1,17 @@
+===============================
+Third-Party Modeling Languages
+===============================
+
+
+--------------------------
+AMPL Support
+--------------------------
+
+AMPL can be used with near-zero code changes: simply switch to cuOpt as the solver to solve linear and mixed-integer programming problems. Please refer to the `AMPL documentation `_ for more information.
+
+--------------------------
+PuLP Support
+--------------------------
+
+PuLP can be used with near-zero code changes: simply switch to cuOpt as the solver to solve linear and mixed-integer programming problems.
+Please refer to the `PuLP documentation `_ for more information. Also, see the example notebook in the `cuopt-examples `_ repository.
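To make "switch to cuOpt as the solver" concrete, a minimal PuLP sketch follows. It assumes a PuLP release that registers cuOpt under the solver name `CUOPT`; check the PuLP solver-configuration guide referenced above for the exact name in your version:

```python
import pulp

# Tiny LP: maximize 3x + 2y subject to 2x + y <= 10, x + 3y <= 15.
prob = pulp.LpProblem("demo", pulp.LpMaximize)
x = pulp.LpVariable("x", lowBound=0)
y = pulp.LpVariable("y", lowBound=0)
prob += 3 * x + 2 * y
prob += 2 * x + y <= 10
prob += x + 3 * y <= 15

# The only cuOpt-specific line: select cuOpt instead of PuLP's default solver.
prob.solve(pulp.getSolver("CUOPT"))

print(pulp.LpStatus[prob.status], pulp.value(x), pulp.value(y))
```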
diff --git a/python/cuopt/cuopt/linear_programming/solver/solver_parameters.pyx b/python/cuopt/cuopt/linear_programming/solver/solver_parameters.pyx
index 009f49365..5783d8f4e 100644
--- a/python/cuopt/cuopt/linear_programming/solver/solver_parameters.pyx
+++ b/python/cuopt/cuopt/linear_programming/solver/solver_parameters.pyx
@@ -66,7 +66,9 @@ cdef extern from "cuopt/linear_programming/constants.h":  # noqa
     cdef const char* c_CUOPT_MIP_RELATIVE_GAP "CUOPT_MIP_RELATIVE_GAP"  # noqa
     cdef const char* c_CUOPT_MIP_HEURISTICS_ONLY "CUOPT_MIP_HEURISTICS_ONLY"  # noqa
     cdef const char* c_CUOPT_MIP_SCALING "CUOPT_MIP_SCALING"  # noqa
+    cdef const char* c_CUOPT_SOLUTION_FILE "CUOPT_SOLUTION_FILE"  # noqa
     cdef const char* c_CUOPT_NUM_CPU_THREADS "CUOPT_NUM_CPU_THREADS"  # noqa
+    cdef const char* c_CUOPT_USER_PROBLEM_FILE "CUOPT_USER_PROBLEM_FILE"  # noqa
 
 
 # Create Python string constants from C string literals
@@ -97,4 +99,6 @@ CUOPT_MIP_ABSOLUTE_GAP = c_CUOPT_MIP_ABSOLUTE_GAP.decode('utf-8')  # noqa
 CUOPT_MIP_RELATIVE_GAP = c_CUOPT_MIP_RELATIVE_GAP.decode('utf-8')  # noqa
 CUOPT_MIP_HEURISTICS_ONLY = c_CUOPT_MIP_HEURISTICS_ONLY.decode('utf-8')  # noqa
 CUOPT_MIP_SCALING = c_CUOPT_MIP_SCALING.decode('utf-8')  # noqa
+CUOPT_SOLUTION_FILE = c_CUOPT_SOLUTION_FILE.decode('utf-8')  # noqa
 CUOPT_NUM_CPU_THREADS = c_CUOPT_NUM_CPU_THREADS.decode('utf-8')  # noqa
+CUOPT_USER_PROBLEM_FILE = c_CUOPT_USER_PROBLEM_FILE.decode('utf-8')  # noqa
diff --git a/python/cuopt/cuopt/tests/linear_programming/test_lp_solver.py b/python/cuopt/cuopt/tests/linear_programming/test_lp_solver.py
index 81bae6e03..eeb5400ad 100644
--- a/python/cuopt/cuopt/tests/linear_programming/test_lp_solver.py
+++ b/python/cuopt/cuopt/tests/linear_programming/test_lp_solver.py
@@ -34,7 +34,9 @@
     CUOPT_RELATIVE_DUAL_TOLERANCE,
     CUOPT_RELATIVE_GAP_TOLERANCE,
     CUOPT_RELATIVE_PRIMAL_TOLERANCE,
+    CUOPT_SOLUTION_FILE,
     CUOPT_TIME_LIMIT,
+    CUOPT_USER_PROBLEM_FILE,
 )
 from cuopt.linear_programming.solver.solver_wrapper import (
     ErrorStatus,
@@ -668,3 +670,39 @@ def test_bound_in_maximization():
     upper_bound = solution.get_milp_stats()["solution_bound"]
     assert upper_bound == pytest.approx(280, 1e-6)
     assert solution.get_primal_objective() == pytest.approx(280, 1e-6)
+
+
+def test_write_files():
+
+    file_path = (
+        RAPIDS_DATASET_ROOT_DIR + "/linear_programming/afiro_original.mps"
+    )
+    data_model_obj = cuopt_mps_parser.ParseMps(file_path)
+
+    settings = solver_settings.SolverSettings()
+    settings.set_parameter(CUOPT_METHOD, SolverMethod.DualSimplex)
+    settings.set_parameter(CUOPT_USER_PROBLEM_FILE, "afiro_out.mps")
+
+    solver.Solve(data_model_obj, settings)
+
+    assert os.path.isfile("afiro_out.mps")
+
+    afiro = cuopt_mps_parser.ParseMps("afiro_out.mps")
+    os.remove("afiro_out.mps")
+
+    settings.set_parameter(CUOPT_USER_PROBLEM_FILE, "")
+    settings.set_parameter(CUOPT_SOLUTION_FILE, "afiro.sol")
+
+    solution = solver.Solve(afiro, settings)
+
+    assert solution.get_termination_status() == LPTerminationStatus.Optimal
+    assert solution.get_primal_objective() == pytest.approx(-464.7531)
+
+    assert os.path.isfile("afiro.sol")
+
+    with open("afiro.sol") as f:
+        for line in f:
+            if "X01" in line:
+                assert float(line.split()[-1]) == pytest.approx(80)
+
+    os.remove("afiro.sol")
diff --git a/python/cuopt_server/cuopt_server/utils/job_queue.py b/python/cuopt_server/cuopt_server/utils/job_queue.py
index dfeb98ae4..1c4996742 100755
--- a/python/cuopt_server/cuopt_server/utils/job_queue.py
+++ b/python/cuopt_server/cuopt_server/utils/job_queue.py
@@ -109,7 +109,7 @@ def check_client_version(client_vers):
     if client_vers == "custom":
         return []
     cv = client_vers.split(".")
-    if len(cv) != 3:
+    if len(cv) < 2:
         logging.warn("Client version missing or bad format")
         return [
             f"Client version missing or not the current format. "
@@ -118,7 +118,7 @@ def check_client_version(client_vers):
             "if this is a custom client."
         ]
     else:
-        cmajor, cminor, _ = cv
+        cmajor, cminor = cv[:2]
         matches = (cmajor, cminor) == (major, minor)
         if not matches:
             logging.warn(f"Client version {cmajor}.{cminor} does not match")
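The relaxed check above compares only the major and minor components, so clients reporting two-component versions like `25.5`, or four-component versions like `25.5.0.post1`, now pass where previously exactly three components were required. A toy re-implementation of just the parsing logic (not the server code) shows the behavior:

```python
def parse_major_minor(client_vers: str):
    cv = client_vers.split(".")
    if len(cv) < 2:
        return None  # missing or bad format
    return tuple(cv[:2])

assert parse_major_minor("25.5.0") == ("25", "5")
assert parse_major_minor("25.5") == ("25", "5")          # accepted after this change
assert parse_major_minor("25.5.0.post1") == ("25", "5")  # accepted after this change
assert parse_major_minor("25") is None                   # still rejected
```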