From 6f8de7a60eb43dd328bce719398db968e15a7ae8 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Mon, 9 Mar 2026 13:09:03 -0400 Subject: [PATCH 01/17] Avoid using internal bool in sparsity computation. --- include/pyoptinterface/knitro_model.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index d3413bd..18d82df 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -168,9 +168,9 @@ struct CallbackEvaluator { jac_pattern_in.set(i, i, i); } - fun.for_jac_sparsity(jac_pattern_in, false, false, true, jac_pattern_); + fun.for_jac_sparsity(jac_pattern_in, false, false, false, jac_pattern_); std::vector select_rows(ny, true); - fun.rev_hes_sparsity(select_rows, false, true, hess_pattern_); + fun.rev_hes_sparsity(select_rows, false, false, hess_pattern_); auto &hess_rows = hess_pattern_.row(); auto &hess_cols = hess_pattern_.col(); for (size_t k = 0; k < hess_pattern_.nnz(); k++) From a0bfa94417f31177734a3ca70e16576d461dbacd Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Mon, 9 Mar 2026 13:54:46 -0400 Subject: [PATCH 02/17] Refactor CallbackEvaluator to improve variable naming and consistency --- include/pyoptinterface/knitro_model.hpp | 86 ++++++++++++------------- 1 file changed, 42 insertions(+), 44 deletions(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 18d82df..58b821f 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -141,19 +141,18 @@ struct CallbackPattern template struct CallbackEvaluator { - static inline const std::string jac_coloring_ = "cppad"; - static inline const std::string hess_coloring_ = "cppad.symmetric"; + static inline constexpr const char *JAC_CLRNG = "cppad"; + static inline constexpr const char *HES_CLRNG = "cppad.symmetric"; std::vector indexVars; std::vector indexCons; CppAD::ADFun fun; 
- CppAD::sparse_rc> jac_pattern_; - CppAD::sparse_rcv, std::vector> jac_; - CppAD::sparse_jac_work jac_work_; - CppAD::sparse_rc> hess_pattern_; - CppAD::sparse_rc> hess_pattern_symm_; - CppAD::sparse_rcv, std::vector> hess_; - CppAD::sparse_hes_work hess_work_; + CppAD::sparse_rc> jp; + CppAD::sparse_rcv, std::vector> jac; + CppAD::sparse_jac_work jw; + CppAD::sparse_rc> hp; + CppAD::sparse_rcv, std::vector> hes; + CppAD::sparse_hes_work hw; std::vector x; std::vector w; @@ -161,31 +160,32 @@ struct CallbackEvaluator void setup() { fun.optimize(); - auto nx = fun.Domain(); - auto ny = fun.Range(); - CppAD::sparse_rc> jac_pattern_in(nx, nx, nx); + size_t nx = fun.Domain(); + size_t ny = fun.Range(); + CppAD::sparse_rc> jp_in(nx, nx, nx); for (size_t i = 0; i < nx; i++) { - jac_pattern_in.set(i, i, i); + jp_in.set(i, i, i); } - fun.for_jac_sparsity(jac_pattern_in, false, false, false, jac_pattern_); + fun.for_jac_sparsity(jp_in, false, false, false, jp); std::vector select_rows(ny, true); - fun.rev_hes_sparsity(select_rows, false, false, hess_pattern_); - auto &hess_rows = hess_pattern_.row(); - auto &hess_cols = hess_pattern_.col(); - for (size_t k = 0; k < hess_pattern_.nnz(); k++) + CppAD::sparse_rc> hp_out; + fun.rev_hes_sparsity(select_rows, false, false, hp_out); + auto &hrow = hp_out.row(); + auto &hcol = hp_out.col(); + for (size_t k = 0; k < hp_out.nnz(); k++) { - size_t row = hess_rows[k]; - size_t col = hess_cols[k]; + size_t row = hrow[k]; + size_t col = hcol[k]; if (row <= col) { - hess_pattern_symm_.push_back(row, col); + hp.push_back(row, col); } } x.resize(nx, 0.0); w.resize(ny, 0.0); - jac_ = CppAD::sparse_rcv, std::vector>(jac_pattern_); - hess_ = CppAD::sparse_rcv, std::vector>(hess_pattern_symm_); + jac = CppAD::sparse_rcv, std::vector>(jp); + hes = CppAD::sparse_rcv, std::vector>(hp); } bool is_objective() const @@ -203,52 +203,50 @@ struct CallbackEvaluator void eval_jac(const V *req_x, V *res_jac) { copy_ptr(req_x, indexVars.data(), x); - 
fun.sparse_jac_rev(x, jac_, jac_pattern_, jac_coloring_, jac_work_); - auto &jac = jac_.val(); - copy_vec(jac, res_jac); + fun.sparse_jac_rev(x, jac, jp, JAC_CLRNG, jw); + copy_vec(jac.val(), res_jac); } void eval_hess(const V *req_x, const V *req_w, V *res_hess) { copy_ptr(req_x, indexVars.data(), x); copy_ptr(req_w, indexCons.data(), w, is_objective()); - fun.sparse_hes(x, w, hess_, hess_pattern_, hess_coloring_, hess_work_); - auto &hess = hess_.val(); - copy_vec(hess, res_hess); + fun.sparse_hes(x, w, hes, hp, HES_CLRNG, hw); + copy_vec(hes.val(), res_hess); } CallbackPattern get_callback_pattern() const { - CallbackPattern pattern; - pattern.indexCons = indexCons; + CallbackPattern p; + p.indexCons = indexCons; - auto &jac_rows = jac_pattern_.row(); - auto &jac_cols = jac_pattern_.col(); + auto &jrow = jp.row(); + auto &jcol = jp.col(); if (indexCons.empty()) { - for (size_t k = 0; k < jac_pattern_.nnz(); k++) + for (size_t k = 0; k < jp.nnz(); k++) { - pattern.objGradIndexVars.push_back(indexVars[jac_cols[k]]); + p.objGradIndexVars.push_back(indexVars[jcol[k]]); } } else { - for (size_t k = 0; k < jac_pattern_.nnz(); k++) + for (size_t k = 0; k < jp.nnz(); k++) { - pattern.jacIndexCons.push_back(indexCons[jac_rows[k]]); - pattern.jacIndexVars.push_back(indexVars[jac_cols[k]]); + p.jacIndexCons.push_back(indexCons[jrow[k]]); + p.jacIndexVars.push_back(indexVars[jcol[k]]); } } - auto &hess_rows = hess_pattern_symm_.row(); - auto &hess_cols = hess_pattern_symm_.col(); - for (size_t k = 0; k < hess_pattern_symm_.nnz(); k++) + auto &hrow = hp.row(); + auto &hcol = hp.col(); + for (size_t k = 0; k < hp.nnz(); k++) { - pattern.hessIndexVars1.push_back(indexVars[hess_rows[k]]); - pattern.hessIndexVars2.push_back(indexVars[hess_cols[k]]); + p.hessIndexVars1.push_back(indexVars[hrow[k]]); + p.hessIndexVars2.push_back(indexVars[hcol[k]]); } - return pattern; + return p; } private: From 310311c2ae065725992466ecc0a8b73628765862 Mon Sep 17 00:00:00 2001 From: eminyouskn 
Date: Mon, 9 Mar 2026 14:01:26 -0400 Subject: [PATCH 03/17] Enhance documentation in CallbackEvaluator by adding comments for clarity on CppAD tape, sparsity patterns, and workspaces --- include/pyoptinterface/knitro_model.hpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 58b821f..7adcf6e 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -146,16 +146,21 @@ struct CallbackEvaluator std::vector indexVars; std::vector indexCons; - CppAD::ADFun fun; + CppAD::ADFun fun; /// < CppAD tape. + + /// Sparsity patterns CppAD::sparse_rc> jp; - CppAD::sparse_rcv, std::vector> jac; - CppAD::sparse_jac_work jw; CppAD::sparse_rc> hp; - CppAD::sparse_rcv, std::vector> hes; + + /// Workspaces for sparse Jacobian and Hessian calculations + CppAD::sparse_jac_work jw; CppAD::sparse_hes_work hw; + /// Temporary vectors for evaluations std::vector x; std::vector w; + CppAD::sparse_rcv, std::vector> jac; + CppAD::sparse_rcv, std::vector> hes; void setup() { From 8ada79c7beff9ae922c5670dccd260ab1bfc2155 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Mon, 9 Mar 2026 15:07:18 -0400 Subject: [PATCH 04/17] Fix formatting in CallbackEvaluator by adding missing newlines for improved readability --- include/pyoptinterface/knitro_model.hpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 7adcf6e..2a34d67 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -167,12 +167,14 @@ struct CallbackEvaluator fun.optimize(); size_t nx = fun.Domain(); size_t ny = fun.Range(); + CppAD::sparse_rc> jp_in(nx, nx, nx); for (size_t i = 0; i < nx; i++) { jp_in.set(i, i, i); } fun.for_jac_sparsity(jp_in, false, false, false, jp); + std::vector select_rows(ny, true); CppAD::sparse_rc> hp_out; 
fun.rev_hes_sparsity(select_rows, false, false, hp_out); From 4b61def578e93082cf1b5ae583348b4054e6db94 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 16:26:07 -0400 Subject: [PATCH 05/17] Add Jacobian support in CallbackEvaluator and refactor evaluation methods --- include/pyoptinterface/knitro_model.hpp | 108 +++++++++++++++--------- 1 file changed, 68 insertions(+), 40 deletions(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 2a34d67..3de69f6 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -146,7 +146,8 @@ struct CallbackEvaluator std::vector indexVars; std::vector indexCons; - CppAD::ADFun fun; /// < CppAD tape. + CppAD::ADFun fun; /// < CppAD tape. + CppAD::ADFun jfun; /// < CppAD tape for Jacobian /// Sparsity patterns CppAD::sparse_rc> jp; @@ -154,11 +155,12 @@ struct CallbackEvaluator /// Workspaces for sparse Jacobian and Hessian calculations CppAD::sparse_jac_work jw; - CppAD::sparse_hes_work hw; + CppAD::sparse_jac_work hw; /// Temporary vectors for evaluations std::vector x; std::vector w; + std::vector xw; CppAD::sparse_rcv, std::vector> jac; CppAD::sparse_rcv, std::vector> hes; @@ -168,19 +170,40 @@ struct CallbackEvaluator size_t nx = fun.Domain(); size_t ny = fun.Range(); - CppAD::sparse_rc> jp_in(nx, nx, nx); + std::vector dom(nx, true); + std::vector rng(ny, true); + fun.subgraph_sparsity(dom, rng, false, jp); + + auto &af = fun.base2ad(); + std::vector> jaxw(nx + ny); + CppAD::Independent(jaxw); + std::vector> jax(nx); + std::vector> jaw(ny); + std::vector> jaz(nx); + for (size_t i = 0; i < nx; i++) + { + jax[i] = jaxw[i]; + } + for (size_t i = 0; i < ny; i++) + { + jaw[i] = jaxw[nx + i]; + } + af.Forward(0, jax); + jaz = af.Reverse(1, jaw); + jfun.Dependent(jaxw, jaz); + jfun.optimize(); + std::vector jdom(nx + ny, false); for (size_t i = 0; i < nx; i++) { - jp_in.set(i, i, i); + jdom[i] = true; } - 
fun.for_jac_sparsity(jp_in, false, false, false, jp); - - std::vector select_rows(ny, true); - CppAD::sparse_rc> hp_out; - fun.rev_hes_sparsity(select_rows, false, false, hp_out); - auto &hrow = hp_out.row(); - auto &hcol = hp_out.col(); - for (size_t k = 0; k < hp_out.nnz(); k++) + std::vector jrng(nx, true); + CppAD::sparse_rc> hsp; + jfun.subgraph_sparsity(jdom, jrng, false, hsp); + + auto &hrow = hsp.row(); + auto &hcol = hsp.col(); + for (size_t k = 0; k < hsp.nnz(); k++) { size_t row = hrow[k]; size_t col = hcol[k]; @@ -189,8 +212,9 @@ struct CallbackEvaluator hp.push_back(row, col); } } - x.resize(nx, 0.0); - w.resize(ny, 0.0); + x.resize(nx); + w.resize(ny); + xw.resize(nx + ny); jac = CppAD::sparse_rcv, std::vector>(jp); hes = CppAD::sparse_rcv, std::vector>(hp); } @@ -202,24 +226,29 @@ struct CallbackEvaluator void eval_fun(const V *req_x, V *res_y) { - copy_ptr(req_x, indexVars.data(), x); + size_t nx = fun.Domain(); + size_t ny = fun.Range(); + copy(nx, req_x, indexVars.data(), x.data()); auto y = fun.Forward(0, x); - copy_vec(y, res_y, is_objective()); + copy(ny, y.data(), nullptr, res_y, is_objective()); } void eval_jac(const V *req_x, V *res_jac) { - copy_ptr(req_x, indexVars.data(), x); + size_t nx = fun.Domain(); + copy(nx, req_x, indexVars.data(), x.data()); fun.sparse_jac_rev(x, jac, jp, JAC_CLRNG, jw); - copy_vec(jac.val(), res_jac); + copy_vec(jac.nnz(), jac.val().data(), nullptr, res_jac); } void eval_hess(const V *req_x, const V *req_w, V *res_hess) { - copy_ptr(req_x, indexVars.data(), x); - copy_ptr(req_w, indexCons.data(), w, is_objective()); - fun.sparse_hes(x, w, hes, hp, HES_CLRNG, hw); - copy_vec(hes.val(), res_hess); + size_t nx = fun.Domain(); + size_t ny = fun.Range(); + copy(nx, req_x, indexVars.data(), xw.data()); + copy(ny, req_w, indexCons.data(), xw.data() + nx, false, is_objective()); + jfun.sparse_jac_rev(xw, hes, hp, JAC_CLRNG, hw); + copy_vec(hes.nnz(), hes.val().data(), nullptr, res_hess); } CallbackPattern 
get_callback_pattern() const @@ -258,39 +287,38 @@ struct CallbackEvaluator private: template - static void copy_ptr(const T *src, const I *idx, std::vector &dst, bool duplicate = false) + static void copy(const size_t n, const T *src, const I *idx, V *dst, bool agg = false, + bool dpl = false) { - for (size_t i = 0; i < dst.size(); i++) + if (dpl) { - if (duplicate) + for (size_t i = 0; i < n; i++) { dst[i] = src[0]; } - else - { - dst[i] = src[idx[i]]; - } } - } - - template - static void copy_vec(const std::vector &src, T *dst, bool aggregate = false) - { - if (aggregate) + else if (agg) { dst[0] = 0.0; - } - for (size_t i = 0; i < src.size(); i++) - { - if (aggregate) + for (size_t i = 0; i < n; i++) { dst[0] += src[i]; } - else + } + else if (idx == nullptr) + { + for (size_t i = 0; i < n; i++) { dst[i] = src[i]; } } + else + { + for (size_t i = 0; i < n; i++) + { + dst[i] = src[idx[i]]; + } + } } }; From 90ae7fb79fda7e74cd807f902c02ae7ecd454063 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 16:26:52 -0400 Subject: [PATCH 06/17] Rename parameters in CallbackEvaluator::copy for clarity --- include/pyoptinterface/knitro_model.hpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 3de69f6..3395e34 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -287,17 +287,17 @@ struct CallbackEvaluator private: template - static void copy(const size_t n, const T *src, const I *idx, V *dst, bool agg = false, - bool dpl = false) + static void copy(const size_t n, const T *src, const I *idx, V *dst, bool aggregate = false, + bool duplicate = false) { - if (dpl) + if (duplicate) { for (size_t i = 0; i < n; i++) { dst[i] = src[0]; } } - else if (agg) + else if (aggregate { dst[0] = 0.0; for (size_t i = 0; i < n; i++) From 2aaf071f39ec7ce16f87af8f08d79762ce43f456 Mon Sep 17 00:00:00 2001 From: eminyouskn 
Date: Tue, 10 Mar 2026 16:27:03 -0400 Subject: [PATCH 07/17] Fix conditional formatting in CallbackEvaluator for clarity --- include/pyoptinterface/knitro_model.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 3395e34..31af5af 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -297,7 +297,7 @@ struct CallbackEvaluator dst[i] = src[0]; } } - else if (aggregate + else if (aggregate) { dst[0] = 0.0; for (size_t i = 0; i < n; i++) From 209b7226c0cfc43f8ac09663f905dd2143e6fab8 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 16:27:27 -0400 Subject: [PATCH 08/17] Refactor CallbackEvaluator to use auto for base2ad return type --- include/pyoptinterface/knitro_model.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 31af5af..6fcd13a 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -174,7 +174,7 @@ struct CallbackEvaluator std::vector rng(ny, true); fun.subgraph_sparsity(dom, rng, false, jp); - auto &af = fun.base2ad(); + auto af = fun.base2ad(); std::vector> jaxw(nx + ny); CppAD::Independent(jaxw); std::vector> jax(nx); From 300cfedf4fac656ae252a1a3535602b6c179cff3 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 17:03:15 -0400 Subject: [PATCH 09/17] Refactor CallbackEvaluator to use template parameters for improved flexibility --- include/pyoptinterface/knitro_model.hpp | 198 +++++++++++++----------- lib/knitro_model.cpp | 10 +- 2 files changed, 110 insertions(+), 98 deletions(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 6fcd13a..f0e5de7 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -128,41 +128,90 @@ enum 
ConstraintSenseFlags CON_UPBND = 1 << 1, // 0x02 }; +template struct CallbackPattern { - std::vector indexCons; - std::vector objGradIndexVars; - std::vector jacIndexCons; - std::vector jacIndexVars; - std::vector hessIndexVars1; - std::vector hessIndexVars2; + std::vector indexCons; + std::vector objGradIndexVars; + std::vector jacIndexCons; + std::vector jacIndexVars; + std::vector hessIndexVars1; + std::vector hessIndexVars2; }; -template +enum class CopyMode +{ + Normal, + Aggregate, + Duplicate +}; + +template +static void copy(const size_t n, const T *src, const I *idx, T *dst, + CopyMode mode = CopyMode::Normal) +{ + if (mode == CopyMode::Duplicate) + { + for (size_t i = 0; i < n; i++) + { + dst[i] = src[0]; + } + } + else if (mode == CopyMode::Aggregate) + { + dst[0] = T(0.0); + for (size_t i = 0; i < n; i++) + { + dst[0] += src[i]; + } + } + else + { + if (idx == nullptr) + { + for (size_t i = 0; i < n; i++) + { + dst[i] = src[i]; + } + } + else + { + for (size_t i = 0; i < n; i++) + { + dst[i] = src[idx[i]]; + } + } + } +} + +using namespace CppAD; + +template struct CallbackEvaluator { + static inline constexpr const char *JAC_CLRNG = "cppad"; static inline constexpr const char *HES_CLRNG = "cppad.symmetric"; - std::vector indexVars; - std::vector indexCons; + std::vector indexVars; + std::vector indexCons; - CppAD::ADFun fun; /// < CppAD tape. - CppAD::ADFun jfun; /// < CppAD tape for Jacobian + ADFun fun; /// < CppAD tape. 
+ ADFun jfun; /// < CppAD tape for Jacobian /// Sparsity patterns - CppAD::sparse_rc> jp; - CppAD::sparse_rc> hp; + sparse_rc> jp; + sparse_rc> hp; /// Workspaces for sparse Jacobian and Hessian calculations - CppAD::sparse_jac_work jw; - CppAD::sparse_jac_work hw; + sparse_jac_work jw; + sparse_jac_work hw; /// Temporary vectors for evaluations - std::vector x; - std::vector w; - std::vector xw; - CppAD::sparse_rcv, std::vector> jac; - CppAD::sparse_rcv, std::vector> hes; + vector x; + vector w; + vector xw; + sparse_rcv, vector> jac; + sparse_rcv, vector> hes; void setup() { @@ -170,16 +219,16 @@ struct CallbackEvaluator size_t nx = fun.Domain(); size_t ny = fun.Range(); - std::vector dom(nx, true); - std::vector rng(ny, true); + vector dom(nx, true); + vector rng(ny, true); fun.subgraph_sparsity(dom, rng, false, jp); auto af = fun.base2ad(); - std::vector> jaxw(nx + ny); - CppAD::Independent(jaxw); - std::vector> jax(nx); - std::vector> jaw(ny); - std::vector> jaz(nx); + vector> jaxw(nx + ny); + Independent(jaxw); + vector> jax(nx); + vector> jaw(ny); + vector> jaz(nx); for (size_t i = 0; i < nx; i++) { jax[i] = jaxw[i]; @@ -192,21 +241,21 @@ struct CallbackEvaluator jaz = af.Reverse(1, jaw); jfun.Dependent(jaxw, jaz); jfun.optimize(); - std::vector jdom(nx + ny, false); + vector jdom(nx + ny, false); for (size_t i = 0; i < nx; i++) { jdom[i] = true; } - std::vector jrng(nx, true); - CppAD::sparse_rc> hsp; + vector jrng(nx, true); + sparse_rc> hsp; jfun.subgraph_sparsity(jdom, jrng, false, hsp); auto &hrow = hsp.row(); auto &hcol = hsp.col(); for (size_t k = 0; k < hsp.nnz(); k++) { - size_t row = hrow[k]; - size_t col = hcol[k]; + S row = hrow[k]; + S col = hcol[k]; if (row <= col) { hp.push_back(row, col); @@ -215,8 +264,8 @@ struct CallbackEvaluator x.resize(nx); w.resize(ny); xw.resize(nx + ny); - jac = CppAD::sparse_rcv, std::vector>(jp); - hes = CppAD::sparse_rcv, std::vector>(hp); + jac = sparse_rcv, vector>(jp); + hes = sparse_rcv, vector>(hp); } bool 
is_objective() const @@ -226,34 +275,31 @@ struct CallbackEvaluator void eval_fun(const V *req_x, V *res_y) { - size_t nx = fun.Domain(); - size_t ny = fun.Range(); - copy(nx, req_x, indexVars.data(), x.data()); + copy(fun.Domain(), req_x, indexVars.data(), x.data()); auto y = fun.Forward(0, x); - copy(ny, y.data(), nullptr, res_y, is_objective()); + CopyMode mode = is_objective() ? CopyMode::Aggregate : CopyMode::Normal; + copy(fun.Range(), y.data(), (const KNINT *)nullptr, res_y, mode); } void eval_jac(const V *req_x, V *res_jac) { - size_t nx = fun.Domain(); - copy(nx, req_x, indexVars.data(), x.data()); + copy(fun.Domain(), req_x, indexVars.data(), x.data()); fun.sparse_jac_rev(x, jac, jp, JAC_CLRNG, jw); - copy_vec(jac.nnz(), jac.val().data(), nullptr, res_jac); + copy(jac.nnz(), jac.val().data(), (const I *)nullptr, res_jac); } void eval_hess(const V *req_x, const V *req_w, V *res_hess) { - size_t nx = fun.Domain(); - size_t ny = fun.Range(); - copy(nx, req_x, indexVars.data(), xw.data()); - copy(ny, req_w, indexCons.data(), xw.data() + nx, false, is_objective()); + copy(fun.Domain(), req_x, indexVars.data(), xw.data()); + CopyMode mode = is_objective() ? 
CopyMode::Duplicate : CopyMode::Normal; + copy(fun.Range(), req_w, indexCons.data(), xw.data() + fun.Domain(), mode); jfun.sparse_jac_rev(xw, hes, hp, JAC_CLRNG, hw); - copy_vec(hes.nnz(), hes.val().data(), nullptr, res_hess); + copy(hes.nnz(), hes.val().data(), (const I *)nullptr, res_hess); } - CallbackPattern get_callback_pattern() const + CallbackPattern get_callback_pattern() const { - CallbackPattern p; + CallbackPattern p; p.indexCons = indexCons; auto &jrow = jp.row(); @@ -284,49 +330,6 @@ struct CallbackEvaluator return p; } - - private: - template - static void copy(const size_t n, const T *src, const I *idx, V *dst, bool aggregate = false, - bool duplicate = false) - { - if (duplicate) - { - for (size_t i = 0; i < n; i++) - { - dst[i] = src[0]; - } - } - else if (aggregate) - { - dst[0] = 0.0; - for (size_t i = 0; i < n; i++) - { - dst[0] += src[i]; - } - } - else if (idx == nullptr) - { - for (size_t i = 0; i < n; i++) - { - dst[i] = src[i]; - } - } - else - { - for (size_t i = 0; i < n; i++) - { - dst[i] = src[idx[i]]; - } - } - } -}; - -struct Outputs -{ - std::vector objective_outputs; - std::vector constraint_outputs; - std::vector constraints; }; inline bool is_name_empty(const char *name) @@ -610,8 +613,17 @@ class KNITROModel : public OnesideLinearConstraintMixin, std::unordered_map m_con_sense_flags; uint8_t m_obj_flag = 0; + struct Outputs + { + std::vector objective_outputs; + std::vector constraint_outputs; + std::vector constraints; + }; + + using Evaluator = CallbackEvaluator; + std::unordered_map m_pending_outputs; - std::vector>> m_evaluators; + std::vector> m_evaluators; bool m_has_pending_callbacks = false; int m_solve_status = 0; bool m_is_dirty = true; @@ -637,7 +649,7 @@ class KNITROModel : public OnesideLinearConstraintMixin, void _add_callbacks(const ExpressionGraph &graph, const Outputs &outputs); void _add_callback(const ExpressionGraph &graph, const std::vector &outputs, const std::vector &constraints); - void 
_register_callback(CallbackEvaluator *evaluator); + void _register_callback(Evaluator *evaluator); void _update(); void _pre_solve(); void _solve(); diff --git a/lib/knitro_model.cpp b/lib/knitro_model.cpp index 1ffc098..cde9672 100644 --- a/lib/knitro_model.cpp +++ b/lib/knitro_model.cpp @@ -836,11 +836,11 @@ double KNITROModel::get_obj_value() const return _get_value(knitro::KN_get_obj_value); } -void KNITROModel::_register_callback(CallbackEvaluator *evaluator) +void KNITROModel::_register_callback(Evaluator *evaluator) { auto f = [](KN_context *, CB_context *cb, KN_eval_request *req, KN_eval_result *res, void *data) -> int { - auto evaluator = static_cast *>(data); + auto evaluator = static_cast(data); if (evaluator->is_objective()) { evaluator->eval_fun(req->x, res->obj); @@ -854,7 +854,7 @@ void KNITROModel::_register_callback(CallbackEvaluator *evaluator) auto g = [](KN_context *, CB_context *cb, KN_eval_request *req, KN_eval_result *res, void *data) -> int { - auto evaluator = static_cast *>(data); + auto evaluator = static_cast(data); if (evaluator->is_objective()) { evaluator->eval_jac(req->x, res->objGrad); @@ -868,7 +868,7 @@ void KNITROModel::_register_callback(CallbackEvaluator *evaluator) auto h = [](KN_context *, CB_context *cb, KN_eval_request *req, KN_eval_result *res, void *data) -> int { - auto evaluator = static_cast *>(data); + auto evaluator = static_cast(data); if (evaluator->is_objective()) { evaluator->eval_hess(req->x, req->sigma, res->hess); @@ -900,7 +900,7 @@ void KNITROModel::_register_callback(CallbackEvaluator *evaluator) void KNITROModel::_add_callback(const ExpressionGraph &graph, const std::vector &outputs, const std::vector &constraints) { - auto evaluator_ptr = std::make_unique>(); + auto evaluator_ptr = std::make_unique(); auto *evaluator = evaluator_ptr.get(); evaluator->indexVars.resize(graph.n_variables()); for (size_t i = 0; i < graph.n_variables(); i++) From ac6ebadba031449227137d6e1d090b4496dee1d7 Mon Sep 17 00:00:00 
2001 From: eminyouskn Date: Tue, 10 Mar 2026 17:11:44 -0400 Subject: [PATCH 10/17] Move CopyMode enum and copy function into CallbackEvaluator for better encapsulation --- include/pyoptinterface/knitro_model.hpp | 96 +++++++++++++------------ 1 file changed, 50 insertions(+), 46 deletions(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index f0e5de7..1d9549e 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -139,51 +139,6 @@ struct CallbackPattern std::vector hessIndexVars2; }; -enum class CopyMode -{ - Normal, - Aggregate, - Duplicate -}; - -template -static void copy(const size_t n, const T *src, const I *idx, T *dst, - CopyMode mode = CopyMode::Normal) -{ - if (mode == CopyMode::Duplicate) - { - for (size_t i = 0; i < n; i++) - { - dst[i] = src[0]; - } - } - else if (mode == CopyMode::Aggregate) - { - dst[0] = T(0.0); - for (size_t i = 0; i < n; i++) - { - dst[0] += src[i]; - } - } - else - { - if (idx == nullptr) - { - for (size_t i = 0; i < n; i++) - { - dst[i] = src[i]; - } - } - else - { - for (size_t i = 0; i < n; i++) - { - dst[i] = src[idx[i]]; - } - } - } -} - using namespace CppAD; template @@ -278,7 +233,7 @@ struct CallbackEvaluator copy(fun.Domain(), req_x, indexVars.data(), x.data()); auto y = fun.Forward(0, x); CopyMode mode = is_objective() ? 
CopyMode::Aggregate : CopyMode::Normal; - copy(fun.Range(), y.data(), (const KNINT *)nullptr, res_y, mode); + copy(fun.Range(), y.data(), (const I *)nullptr, res_y, mode); } void eval_jac(const V *req_x, V *res_jac) @@ -330,6 +285,55 @@ struct CallbackEvaluator return p; } + + private: + enum class CopyMode + { + Normal, + Aggregate, + Duplicate + }; + + static void copy(const size_t n, const V *src, const I *idx, V *dst, + CopyMode mode = CopyMode::Normal) + { + if (mode == CopyMode::Duplicate) + { + for (size_t i = 0; i < n; i++) + { + dst[i] = src[0]; + } + } + else if (mode == CopyMode::Aggregate) + { + if (n == 0) + { + return; + } + dst[0] = src[0]; + for (size_t i = 1; i < n; i++) + { + dst[0] += src[i]; + } + } + else + { + if (idx == nullptr) + { + for (size_t i = 0; i < n; i++) + { + dst[i] = src[i]; + } + } + else + { + for (size_t i = 0; i < n; i++) + { + dst[i] = src[idx[i]]; + } + } + } + } }; inline bool is_name_empty(const char *name) From f22659db52f16ba78310d0d39734a9abfb41c2af Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 17:21:58 -0400 Subject: [PATCH 11/17] Add test for Luksan-Vlcek Problem 10 in test_lukvle10.py --- tests/test_lukvle10.py | 49 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 tests/test_lukvle10.py diff --git a/tests/test_lukvle10.py b/tests/test_lukvle10.py new file mode 100644 index 0000000..66e843f --- /dev/null +++ b/tests/test_lukvle10.py @@ -0,0 +1,49 @@ +import pytest + +import pyoptinterface as poi +from pyoptinterface import ipopt, nl + + +def test_nlp_lukvle10(nlp_model_ctor): + # LUKSAN-VLCEK Problem 10 + # + # min sum[i=1..n] (x[2i]^2)^(x[2i+1]^2 + 1) + (x[2i+1]^2)^(x[2i]^2 + 1) + # s.t. 
(3 - 2*x[i+1])*x[i+1] + 1 - x[i] - 2*x[i+2] = 0, i = 1,...,2n-2 + # + # Starting point: x[2i] = 1, x[2i+1] = -1 + model = nlp_model_ctor() + if isinstance(model, ipopt.Model): + pytest.skip("lukvle10 is too large to be supported with IPOPT") + + n = 100000 + x = model.add_m_variables(2 * n, name="x") + + for i in range(2 * n - 2): + model.add_quadratic_constraint( + (3 - 2 * x[i + 1]) * x[i + 1] + 1 - x[i] - 2 * x[i + 2], + poi.ConstraintSense.Equal, + 0.0, + name=f"c{i}", + ) + + with nl.graph(): + for i in range(n): + x_sq_2i = x[2 * i] * x[2 * i] + x_sq_2ip1 = x[2 * i + 1] * x[2 * i + 1] + model.add_nl_objective( + nl.pow(x_sq_2i, x_sq_2ip1 + 1) + nl.pow(x_sq_2ip1, x_sq_2i + 1) + ) + + for i in range(n): + model.set_variable_attribute(x[2 * i], poi.VariableAttribute.PrimalStart, 1.0) + model.set_variable_attribute( + x[2 * i + 1], poi.VariableAttribute.PrimalStart, -1.0 + ) + + model.optimize() + + termination_status = model.get_model_attribute(poi.ModelAttribute.TerminationStatus) + assert ( + termination_status == poi.TerminationStatusCode.LOCALLY_SOLVED + or termination_status == poi.TerminationStatusCode.OPTIMAL + ) From d510a54ecbd37cd07293415f1d3f395013bcbe8d Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 17:26:43 -0400 Subject: [PATCH 12/17] Refactor CallbackEvaluator to replace CopyMode enum with integer constants for copy modes --- include/pyoptinterface/knitro_model.hpp | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 1d9549e..13e408f 100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -163,7 +163,6 @@ struct CallbackEvaluator /// Temporary vectors for evaluations vector x; - vector w; vector xw; sparse_rcv, vector> jac; sparse_rcv, vector> hes; @@ -217,7 +216,6 @@ struct CallbackEvaluator } } x.resize(nx); - w.resize(ny); xw.resize(nx + ny); jac = 
sparse_rcv, vector>(jp); hes = sparse_rcv, vector>(hp); @@ -232,7 +230,7 @@ struct CallbackEvaluator { copy(fun.Domain(), req_x, indexVars.data(), x.data()); auto y = fun.Forward(0, x); - CopyMode mode = is_objective() ? CopyMode::Aggregate : CopyMode::Normal; + int mode = is_objective() ? 2 : 0; copy(fun.Range(), y.data(), (const I *)nullptr, res_y, mode); } @@ -246,7 +244,7 @@ struct CallbackEvaluator void eval_hess(const V *req_x, const V *req_w, V *res_hess) { copy(fun.Domain(), req_x, indexVars.data(), xw.data()); - CopyMode mode = is_objective() ? CopyMode::Duplicate : CopyMode::Normal; + int mode = is_objective() ? 1 : 0; copy(fun.Range(), req_w, indexCons.data(), xw.data() + fun.Domain(), mode); jfun.sparse_jac_rev(xw, hes, hp, JAC_CLRNG, hw); copy(hes.nnz(), hes.val().data(), (const I *)nullptr, res_hess); @@ -287,24 +285,20 @@ struct CallbackEvaluator } private: - enum class CopyMode + // Copy mode: + // - 0: normal copy + // - 1: duplicate (copy first element of src to all elements of dst) + // - 2: aggregate (sum all elements of src and copy to all elements of dst) + static void copy(const size_t n, const V *src, const I *idx, V *dst, int mode = 0) { - Normal, - Aggregate, - Duplicate - }; - - static void copy(const size_t n, const V *src, const I *idx, V *dst, - CopyMode mode = CopyMode::Normal) - { - if (mode == CopyMode::Duplicate) + if (mode == 1) { for (size_t i = 0; i < n; i++) { dst[i] = src[0]; } } - else if (mode == CopyMode::Aggregate) + else if (mode == 2) { if (n == 0) { From 9444ad7f4b2e8497094f7d3222c946bfb02e13e6 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 17:31:40 -0400 Subject: [PATCH 13/17] Refactor CallbackEvaluator to unify constant names for clarity and improve code readability --- include/pyoptinterface/knitro_model.hpp | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/include/pyoptinterface/knitro_model.hpp b/include/pyoptinterface/knitro_model.hpp index 13e408f..46b81ae 
100644 --- a/include/pyoptinterface/knitro_model.hpp +++ b/include/pyoptinterface/knitro_model.hpp @@ -145,19 +145,19 @@ template struct CallbackEvaluator { - static inline constexpr const char *JAC_CLRNG = "cppad"; - static inline constexpr const char *HES_CLRNG = "cppad.symmetric"; + static inline constexpr const char *CLRNG = "cppad"; + std::vector indexVars; std::vector indexCons; ADFun fun; /// < CppAD tape. - ADFun jfun; /// < CppAD tape for Jacobian + ADFun jfun; /// < CppAD tape for Aggregated Jacobian /// Sparsity patterns sparse_rc> jp; sparse_rc> hp; - /// Workspaces for sparse Jacobian and Hessian calculations + /// Workspaces for Jacobian and Hessian calculations sparse_jac_work jw; sparse_jac_work hw; @@ -177,7 +177,7 @@ struct CallbackEvaluator vector rng(ny, true); fun.subgraph_sparsity(dom, rng, false, jp); - auto af = fun.base2ad(); + ADFun, V> af = fun.base2ad(); vector> jaxw(nx + ny); Independent(jaxw); vector> jax(nx); @@ -208,11 +208,9 @@ struct CallbackEvaluator auto &hcol = hsp.col(); for (size_t k = 0; k < hsp.nnz(); k++) { - S row = hrow[k]; - S col = hcol[k]; - if (row <= col) + if (hrow[k] <= hcol[k]) { - hp.push_back(row, col); + hp.push_back(hrow[k], hcol[k]); } } x.resize(nx); @@ -237,7 +235,7 @@ struct CallbackEvaluator void eval_jac(const V *req_x, V *res_jac) { copy(fun.Domain(), req_x, indexVars.data(), x.data()); - fun.sparse_jac_rev(x, jac, jp, JAC_CLRNG, jw); + fun.sparse_jac_rev(x, jac, jp, CLRNG, jw); copy(jac.nnz(), jac.val().data(), (const I *)nullptr, res_jac); } @@ -246,7 +244,7 @@ struct CallbackEvaluator copy(fun.Domain(), req_x, indexVars.data(), xw.data()); int mode = is_objective() ? 
1 : 0; copy(fun.Range(), req_w, indexCons.data(), xw.data() + fun.Domain(), mode); - jfun.sparse_jac_rev(xw, hes, hp, JAC_CLRNG, hw); + jfun.sparse_jac_rev(xw, hes, hp, CLRNG, hw); copy(hes.nnz(), hes.val().data(), (const I *)nullptr, res_hess); } From f8222682a1916ecdfb6362d0e8eeb4de018dcae2 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 17:32:05 -0400 Subject: [PATCH 14/17] Refactor test_nlp_lukvle10 to improve variable naming for clarity --- tests/test_lukvle10.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_lukvle10.py b/tests/test_lukvle10.py index 66e843f..57d02d3 100644 --- a/tests/test_lukvle10.py +++ b/tests/test_lukvle10.py @@ -28,10 +28,10 @@ def test_nlp_lukvle10(nlp_model_ctor): with nl.graph(): for i in range(n): - x_sq_2i = x[2 * i] * x[2 * i] - x_sq_2ip1 = x[2 * i + 1] * x[2 * i + 1] + x2i_sq = x[2 * i] * x[2 * i] + x2ip1_sq = x[2 * i + 1] * x[2 * i + 1] model.add_nl_objective( - nl.pow(x_sq_2i, x_sq_2ip1 + 1) + nl.pow(x_sq_2ip1, x_sq_2i + 1) + nl.pow(x2i_sq, x2ip1_sq + 1) + nl.pow(x2ip1_sq, x2i_sq + 1) ) for i in range(n): From 11b0461b0d243051f41c6463047f405aa350dc14 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 17:33:29 -0400 Subject: [PATCH 15/17] Add comment to clarify IPOPT limitation for LUKVLE10 test --- tests/test_lukvle10.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_lukvle10.py b/tests/test_lukvle10.py index 57d02d3..d2b7842 100644 --- a/tests/test_lukvle10.py +++ b/tests/test_lukvle10.py @@ -13,6 +13,7 @@ def test_nlp_lukvle10(nlp_model_ctor): # Starting point: x[2i] = 1, x[2i+1] = -1 model = nlp_model_ctor() if isinstance(model, ipopt.Model): + # LUKVLE10 is too large and IPOPT raises a bad_alloc error. 
pytest.skip("lukvle10 is too large to be supported with IPOPT") n = 100000 From 17be832b5e04ca1529c714bea000d32ff26205bf Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Tue, 10 Mar 2026 17:36:29 -0400 Subject: [PATCH 16/17] Add missing import for COpt in LUKVLE10 test and clarify model limitations --- tests/test_lukvle10.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/test_lukvle10.py b/tests/test_lukvle10.py index d2b7842..3c128dd 100644 --- a/tests/test_lukvle10.py +++ b/tests/test_lukvle10.py @@ -1,7 +1,7 @@ import pytest import pyoptinterface as poi -from pyoptinterface import ipopt, nl +from pyoptinterface import ipopt, nl, copt def test_nlp_lukvle10(nlp_model_ctor): @@ -15,6 +15,10 @@ def test_nlp_lukvle10(nlp_model_ctor): if isinstance(model, ipopt.Model): # LUKVLE10 is too large and IPOPT raises a bad_alloc error. pytest.skip("lukvle10 is too large to be supported with IPOPT") + if isinstance(model, copt.Model): + # LUKVLE10 is too large; the current license of COpt supports up + # to 2000 variables. + pytest.skip("lukvle10 is too large to be supported with COpt") n = 100000 x = model.add_m_variables(2 * n, name="x") From ab8b6528cb19610963f1d0e9be6e0ef69a1020b6 Mon Sep 17 00:00:00 2001 From: eminyouskn Date: Wed, 11 Mar 2026 15:52:54 -0400 Subject: [PATCH 17/17] Update LUKVLE10 test: correct starting points, increase variable count, and refine objective function --- tests/test_lukvle10.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/tests/test_lukvle10.py b/tests/test_lukvle10.py index 3c128dd..70b540b 100644 --- a/tests/test_lukvle10.py +++ b/tests/test_lukvle10.py @@ -10,7 +10,7 @@ def test_nlp_lukvle10(nlp_model_ctor): # min sum[i=1..n] (x[2i]^2)^(x[2i+1]^2 + 1) + (x[2i+1]^2)^(x[2i]^2 + 1) # s.t.
(3 - 2*x[i+1])*x[i+1] + 1 - x[i] - 2*x[i+2] = 0, i = 1,...,2n-2 # - # Starting point: x[2i] = 1, x[2i+1] = -1 + # Starting point: x[2i] = -1, x[2i+1] = 1 model = nlp_model_ctor() if isinstance(model, ipopt.Model): # LUKVLE10 is too large and IPOPT raises a bad_alloc error. @@ -20,7 +20,7 @@ def test_nlp_lukvle10(nlp_model_ctor): # to 2000 variables. pytest.skip("lukvle10 is too large to be supported with COpt") - n = 100000 + n = 125000 x = model.add_m_variables(2 * n, name="x") for i in range(2 * n - 2): @@ -33,22 +33,19 @@ def test_nlp_lukvle10(nlp_model_ctor): with nl.graph(): for i in range(n): - x2i_sq = x[2 * i] * x[2 * i] - x2ip1_sq = x[2 * i + 1] * x[2 * i + 1] model.add_nl_objective( - nl.pow(x2i_sq, x2ip1_sq + 1) + nl.pow(x2ip1_sq, x2i_sq + 1) + nl.pow(x[2 * i] ** 2, (x[2 * i + 1] ** 2) + 1) + + nl.pow(x[2 * i + 1] ** 2, (x[2 * i] ** 2) + 1) ) for i in range(n): - model.set_variable_attribute(x[2 * i], poi.VariableAttribute.PrimalStart, 1.0) + model.set_variable_attribute(x[2 * i], poi.VariableAttribute.PrimalStart, -1.0) model.set_variable_attribute( - x[2 * i + 1], poi.VariableAttribute.PrimalStart, -1.0 + x[2 * i + 1], poi.VariableAttribute.PrimalStart, 1.0 ) model.optimize() + model.set_model_attribute(poi.ModelAttribute.Silent, False) termination_status = model.get_model_attribute(poi.ModelAttribute.TerminationStatus) - assert ( - termination_status == poi.TerminationStatusCode.LOCALLY_SOLVED - or termination_status == poi.TerminationStatusCode.OPTIMAL - ) + assert termination_status == poi.TerminationStatusCode.OPTIMAL