Source Code for Module pypower.ipopt_options

# Copyright (C) 2010-2011 Power System Engineering Research Center (PSERC)
# Copyright (C) 2011 Richard Lincoln
#
# PYPOWER is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# PYPOWER is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PYPOWER. If not, see <http://www.gnu.org/licenses/>.

"""Sets options for IPOPT.
"""

from pypower.util import feval


def ipopt_options(overrides=None, ppopt=None):
    """Sets options for IPOPT.

    Sets the values for the options.ipopt dict normally passed to
    IPOPT.

    Inputs are all optional, second argument must be either a string
    (C{fname}) or a dict (C{ppopt}):

        - C{overrides}
            - dict containing values to override the defaults
            - C{fname} name of user-supplied function called after default
              options are set to modify them. Calling syntax is::
                  modified_opt = fname(default_opt)
        - C{ppopt} PYPOWER options vector, uses the following entries:
            - C{OPF_VIOLATION} used to set opt['constr_viol_tol']
            - C{VERBOSE} used to set opt['print_level']
            - C{IPOPT_OPT} user option file, if ppopt['IPOPT_OPT'] is
              non-zero it is appended to 'ipopt_user_options_' to form
              the name of a user-supplied function used as C{fname}
              described above, except with calling syntax::
                  modified_opt = fname(default_opt, ppopt)

    Output is an options.ipopt dict to pass to IPOPT.

    Example: If ppopt['IPOPT_OPT'] = 3, then after setting the default IPOPT
    options, L{ipopt_options} will execute the following user-defined function
    to allow option overrides::

        opt = ipopt_user_options_3(opt, ppopt)

    The contents of ipopt_user_options_3.py could be something like::

        def ipopt_user_options_3(opt, ppopt):
            opt = {}
            opt['nlp_scaling_method'] = 'none'
            opt['max_iter'] = 500
            opt['derivative_test'] = 'first-order'
            return opt

    See the options reference section in the IPOPT documentation for
    details on the available options.

    U{http://www.coin-or.org/Ipopt/documentation/}

    @see: C{pyipopt}, L{ppoption}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Richard Lincoln
    """
    ##----- initialization and arg handling -----
    ## defaults
    verbose = 2
    fname = ''

    ## second argument
    if ppopt is not None:
        if isinstance(ppopt, basestring):   ## 2nd arg is FNAME (string)
            fname = ppopt
            have_ppopt = False
        else:                               ## 2nd arg is ppopt (MATPOWER options vector)
            have_ppopt = True
            verbose = ppopt['VERBOSE']
            if ppopt['IPOPT_OPT']:
                fname = 'ipopt_user_options_%d' % ppopt['IPOPT_OPT']
    else:
        have_ppopt = False

    opt = {}
    ##----- set default options for IPOPT -----
    ## printing
    if verbose:
        opt['print_level'] = min([12, verbose * 2 + 1])
    else:
        opt['print_level'] = 0

    ## convergence
    opt['tol'] = 1e-12                          ## default 1e-8
    opt['max_iter'] = 250                       ## default 3000
    opt['dual_inf_tol'] = 0.1                   ## default 1
    if have_ppopt:
        opt['constr_viol_tol'] = ppopt['OPF_VIOLATION']   ## default 1e-4
    opt['compl_inf_tol'] = 1e-5                 ## default 1e-4
    opt['acceptable_tol'] = 1e-8                ## default 1e-6
#    opt['acceptable_iter'] = 15                ## default 15
#    opt['acceptable_dual_inf_tol'] = 1e+10     ## default 1e+10
    opt['acceptable_constr_viol_tol'] = 1e-4    ## default 1e-2
    opt['acceptable_compl_inf_tol'] = 1e-3      ## default 1e-2
#    opt['acceptable_obj_change_tol'] = 1e+20   ## default 1e+20
#    opt['diverging_iterates_tol'] = 1e+20      ## default 1e+20

    ## NLP scaling
#    opt['nlp_scaling_method'] = 'none'         ## default 'gradient-based'

    ## NLP
#    opt['fixed_variable_treatment'] = 'make_constraint'   ## default 'make_parameter'
#    opt['honor_original_bounds'] = 'no'                    ## default 'yes'
#    opt['check_derivatives_for_naninf'] = 'yes'            ## default 'no'

    ## initialization
#    opt['least_square_init_primal'] = 'yes'    ## default 'no'
#    opt['least_square_init_duals'] = 'yes'     ## default 'no'

    ## barrier parameter update
    opt['mu_strategy'] = 'adaptive'             ## default 'monotone'

    ## linear solver
#    opt['linear_solver'] = 'ma27'
#    opt['linear_solver'] = 'ma57'
#    opt['linear_solver'] = 'pardiso'
#    opt['linear_solver'] = 'wsmp'
#    opt['linear_solver'] = 'mumps'             ## default 'mumps'
#    opt['linear_solver'] = 'custom'
#    opt['linear_scaling_on_demand'] = 'no'     ## default 'yes'

    ## step calculation
#    opt['mehrotra_algorithm'] = 'yes'          ## default 'no'
#    opt['fast_step_computation'] = 'yes'       ## default 'no'

    ## restoration phase
#    opt['expect_infeasible_problem'] = 'yes'   ## default 'no'

    ## derivative checker
#    opt['derivative_test'] = 'second-order'    ## default 'none'

    ## hessian approximation
#    opt['hessian_approximation'] = 'limited-memory'   ## default 'exact'

    ##----- call user function to modify defaults -----
    if len(fname) > 0:
        if have_ppopt:
            opt = feval(fname, opt, ppopt)
        else:
            opt = feval(fname, opt)

    ##----- apply overrides -----
    if overrides is not None:
        names = overrides.keys()
        for k in range(len(names)):
            opt[names[k]] = overrides[names[k]]

    return opt
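
# ---------------------------------------------------------------------------
# Illustrative usage sketch: a minimal example of the three ways the options
# dict returned above can be shaped -- defaults only, explicit overrides, or
# a PYPOWER options dict.  The ppopt dict below is a hypothetical stand-in
# holding only the entries read by ipopt_options(); a real options dict from
# ppoption() contains many more keys.
if __name__ == '__main__':
    ## defaults only: verbose = 2 gives print_level = min(12, 2 * 2 + 1) = 5
    opt = ipopt_options()
    assert opt['tol'] == 1e-12 and opt['max_iter'] == 250
    assert opt['print_level'] == 5

    ## explicit overrides are applied last, so they win over the defaults
    opt = ipopt_options({'max_iter': 500, 'linear_solver': 'ma57'})
    assert opt['max_iter'] == 500 and opt['linear_solver'] == 'ma57'

    ## a PYPOWER options dict maps VERBOSE -> print_level and
    ## OPF_VIOLATION -> constr_viol_tol; IPOPT_OPT = 0 means no user function
    ppopt = {'VERBOSE': 1, 'OPF_VIOLATION': 5e-6, 'IPOPT_OPT': 0}
    opt = ipopt_options(None, ppopt)
    assert opt['print_level'] == 3 and opt['constr_viol_tol'] == 5e-6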
166 167 168 #-------------------------- Options Documentation -------------------------- 169 # (as printed by IPOPT 3.8) 170 # ### Output ### 171 # 172 # print_level 0 <= ( 5) <= 12 173 # Output verbosity level. 174 # Sets the default verbosity level for console output. The larger this 175 # value the more detailed is the output. 176 # 177 # output_file ("") 178 # File name of desired output file (leave unset for no file output). 179 # NOTE: This option only works when read from the ipopt.opt options file! 180 # An output file with this name will be written (leave unset for no file 181 # output). The verbosity level is by default set to "print_level", but can 182 # be overridden with "file_print_level". The file name is changed to use 183 # only small letters. 184 # Possible values: 185 # - * [Any acceptable standard file name] 186 # 187 # file_print_level 0 <= ( 5) <= 12 188 # Verbosity level for output file. 189 # NOTE: This option only works when read from the ipopt.opt options file! 190 # Determines the verbosity level for the file specified by "output_file". 191 # By default it is the same as "print_level". 192 # 193 # print_user_options ("no") 194 # Print all options set by the user. 195 # If selected, the algorithm will print the list of all options set by the 196 # user including their values and whether they have been used. In some 197 # cases this information might be incorrect, due to the internal program 198 # flow. 199 # Possible values: 200 # - no [don't print options] 201 # - yes [print options] 202 # 203 # print_options_documentation ("no") 204 # Switch to print all algorithmic options. 205 # If selected, the algorithm will print the list of all available 206 # algorithmic options with some documentation before solving the 207 # optimization problem. 208 # Possible values: 209 # - no [don't print list] 210 # - yes [print list] 211 # 212 # print_timing_statistics ("no") 213 # Switch to print timing statistics. 214 # If selected, the program will print the CPU usage (user time) for 215 # selected tasks. 216 # Possible values: 217 # - no [don't print statistics] 218 # - yes [print all timing statistics] 219 # 220 # option_file_name ("") 221 # File name of options file (to overwrite default). 222 # By default, the name of the Ipopt options file is "ipopt.opt" - or 223 # something else if specified in the IpoptApplication::Initialize call. If 224 # this option is set by SetStringValue BEFORE the options file is read, it 225 # specifies the name of the options file. It does not make any sense to 226 # specify this option within the options file. 227 # Possible values: 228 # - * [Any acceptable standard file name] 229 # 230 # replace_bounds ("no") 231 # Indicates if all variable bounds should be replaced by inequality 232 # constraints 233 # This option must be set for the inexact algorithm 234 # Possible values: 235 # - no [leave bounds on variables] 236 # - yes [replace variable bounds by inequality 237 # constraints] 238 # 239 # skip_finalize_solution_call ("no") 240 # Indicates if call to NLP::FinalizeSolution after optimization should be 241 # suppressed 242 # In some Ipopt applications, the user might want to call the 243 # FinalizeSolution method separately. Setting this option to "yes" will 244 # cause the IpoptApplication object to suppress the default call to that 245 # method. 
246 # Possible values: 247 # - no [call FinalizeSolution] 248 # - yes [do not call FinalizeSolution] 249 # 250 # print_info_string ("no") 251 # Enables printing of additional info string at end of iteration output. 252 # This string contains some insider information about the current iteration. 253 # Possible values: 254 # - no [don't print string] 255 # - yes [print string at end of each iteration output] 256 # 257 # 258 # 259 # ### Convergence ### 260 # 261 # tol 0 < ( 1e-08) < +inf 262 # Desired convergence tolerance (relative). 263 # Determines the convergence tolerance for the algorithm. The algorithm 264 # terminates successfully, if the (scaled) NLP error becomes smaller than 265 # this value, and if the (absolute) criteria according to "dual_inf_tol", 266 # "primal_inf_tol", and "cmpl_inf_tol" are met. (This is epsilon_tol in 267 # Eqn. (6) in implementation paper). See also "acceptable_tol" as a second 268 # termination criterion. Note, some other algorithmic features also use 269 # this quantity to determine thresholds etc. 270 # 271 # s_max 0 < ( 100) < +inf 272 # Scaling threshold for the NLP error. 273 # (See paragraph after Eqn. (6) in the implementation paper.) 274 # 275 # max_iter 0 <= ( 3000) < +inf 276 # Maximum number of iterations. 277 # The algorithm terminates with an error message if the number of 278 # iterations exceeded this number. 279 # 280 # max_cpu_time 0 < ( 1e+06) < +inf 281 # Maximum number of CPU seconds. 282 # A limit on CPU seconds that Ipopt can use to solve one problem. If 283 # during the convergence check this limit is exceeded, Ipopt will terminate 284 # with a corresponding error message. 285 # 286 # dual_inf_tol 0 < ( 1) < +inf 287 # Desired threshold for the dual infeasibility. 288 # Absolute tolerance on the dual infeasibility. Successful termination 289 # requires that the max-norm of the (unscaled) dual infeasibility is less 290 # than this threshold. 291 # 292 # constr_viol_tol 0 < ( 0.0001) < +inf 293 # Desired threshold for the constraint violation. 294 # Absolute tolerance on the constraint violation. Successful termination 295 # requires that the max-norm of the (unscaled) constraint violation is less 296 # than this threshold. 297 # 298 # compl_inf_tol 0 < ( 0.0001) < +inf 299 # Desired threshold for the complementarity conditions. 300 # Absolute tolerance on the complementarity. Successful termination 301 # requires that the max-norm of the (unscaled) complementarity is less than 302 # this threshold. 303 # 304 # acceptable_tol 0 < ( 1e-06) < +inf 305 # "Acceptable" convergence tolerance (relative). 306 # Determines which (scaled) overall optimality error is considered to be 307 # "acceptable." There are two levels of termination criteria. If the usual 308 # "desired" tolerances (see tol, dual_inf_tol etc) are satisfied at an 309 # iteration, the algorithm immediately terminates with a success message. 310 # On the other hand, if the algorithm encounters "acceptable_iter" many 311 # iterations in a row that are considered "acceptable", it will terminate 312 # before the desired convergence tolerance is met. This is useful in cases 313 # where the algorithm might not be able to achieve the "desired" level of 314 # accuracy. 315 # 316 # acceptable_iter 0 <= ( 15) < +inf 317 # Number of "acceptable" iterates before triggering termination. 
318 # If the algorithm encounters this many successive "acceptable" iterates 319 # (see "acceptable_tol"), it terminates, assuming that the problem has been 320 # solved to best possible accuracy given round-off. If it is set to zero, 321 # this heuristic is disabled. 322 # 323 # acceptable_dual_inf_tol 0 < ( 1e+10) < +inf 324 # "Acceptance" threshold for the dual infeasibility. 325 # Absolute tolerance on the dual infeasibility. "Acceptable" termination 326 # requires that the (max-norm of the unscaled) dual infeasibility is less 327 # than this threshold; see also acceptable_tol. 328 # 329 # acceptable_constr_viol_tol 0 < ( 0.01) < +inf 330 # "Acceptance" threshold for the constraint violation. 331 # Absolute tolerance on the constraint violation. "Acceptable" termination 332 # requires that the max-norm of the (unscaled) constraint violation is less 333 # than this threshold; see also acceptable_tol. 334 # 335 # acceptable_compl_inf_tol 0 < ( 0.01) < +inf 336 # "Acceptance" threshold for the complementarity conditions. 337 # Absolute tolerance on the complementarity. "Acceptable" termination 338 # requires that the max-norm of the (unscaled) complementarity is less than 339 # this threshold; see also acceptable_tol. 340 # 341 # acceptable_obj_change_tol 0 <= ( 1e+20) < +inf 342 # "Acceptance" stopping criterion based on objective function change. 343 # If the relative change of the objective function (scaled by 344 # Max(1,|f(x)|)) is less than this value, this part of the acceptable 345 # tolerance termination is satisfied; see also acceptable_tol. This is 346 # useful for the quasi-Newton option, which has trouble to bring down the 347 # dual infeasibility. 348 # 349 # diverging_iterates_tol 0 < ( 1e+20) < +inf 350 # Threshold for maximal value of primal iterates. 351 # If any component of the primal iterates exceeded this value (in absolute 352 # terms), the optimization is aborted with the exit message that the 353 # iterates seem to be diverging. 354 # 355 # 356 # 357 # ### NLP Scaling ### 358 # 359 # nlp_scaling_method ("gradient-based") 360 # Select the technique used for scaling the NLP. 361 # Selects the technique used for scaling the problem internally before it 362 # is solved. For user-scaling, the parameters come from the NLP. If you are 363 # using AMPL, they can be specified through suffixes ("scaling_factor") 364 # Possible values: 365 # - none [no problem scaling will be performed] 366 # - user-scaling [scaling parameters will come from the user] 367 # - gradient-based [scale the problem so the maximum gradient at 368 # the starting point is scaling_max_gradient] 369 # - equilibration-based [scale the problem so that first derivatives are 370 # of order 1 at random points (only available 371 # with MC19)] 372 # 373 # obj_scaling_factor -inf < ( 1) < +inf 374 # Scaling factor for the objective function. 375 # This option sets a scaling factor for the objective function. The scaling 376 # is seen internally by Ipopt but the unscaled objective is reported in the 377 # console output. If additional scaling parameters are computed (e.g. 378 # user-scaling or gradient-based), both factors are multiplied. If this 379 # value is chosen to be negative, Ipopt will maximize the objective 380 # function instead of minimizing it. 381 # 382 # nlp_scaling_max_gradient 0 < ( 100) < +inf 383 # Maximum gradient after NLP scaling. 384 # This is the gradient scaling cut-off. If the maximum gradient is above 385 # this value, then gradient based scaling will be performed. 
Scaling 386 # parameters are calculated to scale the maximum gradient back to this 387 # value. (This is g_max in Section 3.8 of the implementation paper.) Note: 388 # This option is only used if "nlp_scaling_method" is chosen as 389 # "gradient-based". 390 # 391 # nlp_scaling_obj_target_gradient 0 <= ( 0) < +inf 392 # Target value for objective function gradient size. 393 # If a positive number is chosen, the scaling factor the objective function 394 # is computed so that the gradient has the max norm of the given size at 395 # the starting point. This overrides nlp_scaling_max_gradient for the 396 # objective function. 397 # 398 # nlp_scaling_constr_target_gradient 0 <= ( 0) < +inf 399 # Target value for constraint function gradient size. 400 # If a positive number is chosen, the scaling factor the constraint 401 # functions is computed so that the gradient has the max norm of the given 402 # size at the starting point. This overrides nlp_scaling_max_gradient for 403 # the constraint functions. 404 # 405 # 406 # 407 # ### NLP ### 408 # 409 # nlp_lower_bound_inf -inf < ( -1e+19) < +inf 410 # any bound less or equal this value will be considered -inf (i.e. not lower 411 # bounded). 412 # 413 # nlp_upper_bound_inf -inf < ( 1e+19) < +inf 414 # any bound greater or this value will be considered +inf (i.e. not upper 415 # bounded). 416 # 417 # fixed_variable_treatment ("make_parameter") 418 # Determines how fixed variables should be handled. 419 # The main difference between those options is that the starting point in 420 # the "make_constraint" case still has the fixed variables at their given 421 # values, whereas in the case "make_parameter" the functions are always 422 # evaluated with the fixed values for those variables. Also, for 423 # "relax_bounds", the fixing bound constraints are relaxed (according to" 424 # bound_relax_factor"). For both "make_constraints" and "relax_bounds", 425 # bound multipliers are computed for the fixed variables. 426 # Possible values: 427 # - make_parameter [Remove fixed variable from optimization 428 # variables] 429 # - make_constraint [Add equality constraints fixing variables] 430 # - relax_bounds [Relax fixing bound constraints] 431 # 432 # dependency_detector ("none") 433 # Indicates which linear solver should be used to detect linearly dependent 434 # equality constraints. 435 # The default and available choices depend on how Ipopt has been compiled. 436 # This is experimental and does not work well. 437 # Possible values: 438 # - none [don't check; no extra work at beginning] 439 # - mumps [use MUMPS] 440 # - wsmp [use WSMP] 441 # - ma28 [use MA28] 442 # 443 # dependency_detection_with_rhs ("no") 444 # Indicates if the right hand sides of the constraints should be considered 445 # during dependency detection 446 # Possible values: 447 # - no [only look at gradients] 448 # - yes [also consider right hand side] 449 # 450 # num_linear_variables 0 <= ( 0) < +inf 451 # Number of linear variables 452 # When the Hessian is approximated, it is assumed that the first 453 # num_linear_variables variables are linear. The Hessian is then not 454 # approximated in this space. If the get_number_of_nonlinear_variables 455 # method in the TNLP is implemented, this option is ignored. 456 # 457 # kappa_d 0 <= ( 1e-05) < +inf 458 # Weight for linear damping term (to handle one-sided bounds). 459 # (see Section 3.7 in implementation paper.) 460 # 461 # bound_relax_factor 0 <= ( 1e-08) < +inf 462 # Factor for initial relaxation of the bounds. 
463 # Before start of the optimization, the bounds given by the user are 464 # relaxed. This option sets the factor for this relaxation. If it is set 465 # to zero, then then bounds relaxation is disabled. (See Eqn.(35) in 466 # implementation paper.) 467 # 468 # honor_original_bounds ("yes") 469 # Indicates whether final points should be projected into original bounds. 470 # Ipopt might relax the bounds during the optimization (see, e.g., option 471 # "bound_relax_factor"). This option determines whether the final point 472 # should be projected back into the user-provide original bounds after the 473 # optimization. 474 # Possible values: 475 # - no [Leave final point unchanged] 476 # - yes [Project final point back into original bounds] 477 # 478 # check_derivatives_for_naninf ("no") 479 # Indicates whether it is desired to check for Nan/Inf in derivative matrices 480 # Activating this option will cause an error if an invalid number is 481 # detected in the constraint Jacobians or the Lagrangian Hessian. If this 482 # is not activated, the test is skipped, and the algorithm might proceed 483 # with invalid numbers and fail. 484 # Possible values: 485 # - no [Don't check (faster).] 486 # - yes [Check Jacobians and Hessian for Nan and Inf.] 487 # 488 # jac_c_constant ("no") 489 # Indicates whether all equality constraints are linear 490 # Activating this option will cause Ipopt to ask for the Jacobian of the 491 # equality constraints only once from the NLP and reuse this information 492 # later. 493 # Possible values: 494 # - no [Don't assume that all equality constraints are 495 # linear] 496 # - yes [Assume that equality constraints Jacobian are 497 # constant] 498 # 499 # jac_d_constant ("no") 500 # Indicates whether all inequality constraints are linear 501 # Activating this option will cause Ipopt to ask for the Jacobian of the 502 # inequality constraints only once from the NLP and reuse this information 503 # later. 504 # Possible values: 505 # - no [Don't assume that all inequality constraints 506 # are linear] 507 # - yes [Assume that equality constraints Jacobian are 508 # constant] 509 # 510 # hessian_constant ("no") 511 # Indicates whether the problem is a quadratic problem 512 # Activating this option will cause Ipopt to ask for the Hessian of the 513 # Lagrangian function only once from the NLP and reuse this information 514 # later. 515 # Possible values: 516 # - no [Assume that Hessian changes] 517 # - yes [Assume that Hessian is constant] 518 # 519 # 520 # 521 # ### Initialization ### 522 # 523 # bound_push 0 < ( 0.01) < +inf 524 # Desired minimum absolute distance from the initial point to bound. 525 # Determines how much the initial point might have to be modified in order 526 # to be sufficiently inside the bounds (together with "bound_frac"). (This 527 # is kappa_1 in Section 3.6 of implementation paper.) 528 # 529 # bound_frac 0 < ( 0.01) <= 0.5 530 # Desired minimum relative distance from the initial point to bound. 531 # Determines how much the initial point might have to be modified in order 532 # to be sufficiently inside the bounds (together with "bound_push"). (This 533 # is kappa_2 in Section 3.6 of implementation paper.) 534 # 535 # slack_bound_push 0 < ( 0.01) < +inf 536 # Desired minimum absolute distance from the initial slack to bound. 537 # Determines how much the initial slack variables might have to be modified 538 # in order to be sufficiently inside the inequality bounds (together with 539 # "slack_bound_frac"). 
(This is kappa_1 in Section 3.6 of implementation 540 # paper.) 541 # 542 # slack_bound_frac 0 < ( 0.01) <= 0.5 543 # Desired minimum relative distance from the initial slack to bound. 544 # Determines how much the initial slack variables might have to be modified 545 # in order to be sufficiently inside the inequality bounds (together with 546 # "slack_bound_push"). (This is kappa_2 in Section 3.6 of implementation 547 # paper.) 548 # 549 # constr_mult_init_max 0 <= ( 1000) < +inf 550 # Maximum allowed least-square guess of constraint multipliers. 551 # Determines how large the initial least-square guesses of the constraint 552 # multipliers are allowed to be (in max-norm). If the guess is larger than 553 # this value, it is discarded and all constraint multipliers are set to 554 # zero. This options is also used when initializing the restoration phase. 555 # By default, "resto.constr_mult_init_max" (the one used in 556 # RestoIterateInitializer) is set to zero. 557 # 558 # bound_mult_init_val 0 < ( 1) < +inf 559 # Initial value for the bound multipliers. 560 # All dual variables corresponding to bound constraints are initialized to 561 # this value. 562 # 563 # bound_mult_init_method ("constant") 564 # Initialization method for bound multipliers 565 # This option defines how the iterates for the bound multipliers are 566 # initialized. If "constant" is chosen, then all bound multipliers are 567 # initialized to the value of "bound_mult_init_val". If "mu-based" is 568 # chosen, the each value is initialized to the the value of "mu_init" 569 # divided by the corresponding slack variable. This latter option might be 570 # useful if the starting point is close to the optimal solution. 571 # Possible values: 572 # - constant [set all bound multipliers to the value of 573 # bound_mult_init_val] 574 # - mu-based [initialize to mu_init/x_slack] 575 # 576 # least_square_init_primal ("no") 577 # Least square initialization of the primal variables 578 # If set to yes, Ipopt ignores the user provided point and solves a least 579 # square problem for the primal variables (x and s), to fit the linearized 580 # equality and inequality constraints. This might be useful if the user 581 # doesn't know anything about the starting point, or for solving an LP or 582 # QP. 583 # Possible values: 584 # - no [take user-provided point] 585 # - yes [overwrite user-provided point with least-square 586 # estimates] 587 # 588 # least_square_init_duals ("no") 589 # Least square initialization of all dual variables 590 # If set to yes, Ipopt tries to compute least-square multipliers 591 # (considering ALL dual variables). If successful, the bound multipliers 592 # are possibly corrected to be at least bound_mult_init_val. This might be 593 # useful if the user doesn't know anything about the starting point, or for 594 # solving an LP or QP. This overwrites option "bound_mult_init_method". 595 # Possible values: 596 # - no [use bound_mult_init_val and least-square 597 # equality constraint multipliers] 598 # - yes [overwrite user-provided point with least-square 599 # estimates] 600 # 601 # 602 # 603 # ### Barrier Parameter Update ### 604 # 605 # mu_max_fact 0 < ( 1000) < +inf 606 # Factor for initialization of maximum value for barrier parameter. 607 # This option determines the upper bound on the barrier parameter. This 608 # upper bound is computed as the average complementarity at the initial 609 # point times the value of this option. (Only used if option "mu_strategy" 610 # is chosen as "adaptive".) 
611 # 612 # mu_max 0 < ( 100000) < +inf 613 # Maximum value for barrier parameter. 614 # This option specifies an upper bound on the barrier parameter in the 615 # adaptive mu selection mode. If this option is set, it overwrites the 616 # effect of mu_max_fact. (Only used if option "mu_strategy" is chosen as 617 # "adaptive".) 618 # 619 # mu_min 0 < ( 1e-11) < +inf 620 # Minimum value for barrier parameter. 621 # This option specifies the lower bound on the barrier parameter in the 622 # adaptive mu selection mode. By default, it is set to the minimum of 1e-11 623 # and min("tol","compl_inf_tol")/("barrier_tol_factor"+1), which should be 624 # a reasonable value. (Only used if option "mu_strategy" is chosen as 625 # "adaptive".) 626 # 627 # adaptive_mu_globalization ("obj-constr-filter") 628 # Globalization strategy for the adaptive mu selection mode. 629 # To achieve global convergence of the adaptive version, the algorithm has 630 # to switch to the monotone mode (Fiacco-McCormick approach) when 631 # convergence does not seem to appear. This option sets the criterion used 632 # to decide when to do this switch. (Only used if option "mu_strategy" is 633 # chosen as "adaptive".) 634 # Possible values: 635 # - kkt-error [nonmonotone decrease of kkt-error] 636 # - obj-constr-filter [2-dim filter for objective and constraint 637 # violation] 638 # - never-monotone-mode [disables globalization] 639 # 640 # adaptive_mu_kkterror_red_iters 0 <= ( 4) < +inf 641 # Maximum number of iterations requiring sufficient progress. 642 # For the "kkt-error" based globalization strategy, sufficient progress 643 # must be made for "adaptive_mu_kkterror_red_iters" iterations. If this 644 # number of iterations is exceeded, the globalization strategy switches to 645 # the monotone mode. 646 # 647 # adaptive_mu_kkterror_red_fact 0 < ( 0.9999) < 1 648 # Sufficient decrease factor for "kkt-error" globalization strategy. 649 # For the "kkt-error" based globalization strategy, the error must decrease 650 # by this factor to be deemed sufficient decrease. 651 # 652 # filter_margin_fact 0 < ( 1e-05) < 1 653 # Factor determining width of margin for obj-constr-filter adaptive 654 # globalization strategy. 655 # When using the adaptive globalization strategy, "obj-constr-filter", 656 # sufficient progress for a filter entry is defined as follows: (new obj) < 657 # (filter obj) - filter_margin_fact*(new constr-viol) OR (new constr-viol) 658 # < (filter constr-viol) - filter_margin_fact*(new constr-viol). For the 659 # description of the "kkt-error-filter" option see "filter_max_margin". 660 # 661 # filter_max_margin 0 < ( 1) < +inf 662 # Maximum width of margin in obj-constr-filter adaptive globalization 663 # strategy. 664 # 665 # adaptive_mu_restore_previous_iterate("no") 666 # Indicates if the previous iterate should be restored if the monotone mode 667 # is entered. 668 # When the globalization strategy for the adaptive barrier algorithm 669 # switches to the monotone mode, it can either start from the most recent 670 # iterate (no), or from the last iterate that was accepted (yes). 671 # Possible values: 672 # - no [don't restore accepted iterate] 673 # - yes [restore accepted iterate] 674 # 675 # adaptive_mu_monotone_init_factor 0 < ( 0.8) < +inf 676 # Determines the initial value of the barrier parameter when switching to the 677 # monotone mode. 
678 # When the globalization strategy for the adaptive barrier algorithm 679 # switches to the monotone mode and fixed_mu_oracle is chosen as 680 # "average_compl", the barrier parameter is set to the current average 681 # complementarity times the value of "adaptive_mu_monotone_init_factor". 682 # 683 # adaptive_mu_kkt_norm_type ("2-norm-squared") 684 # Norm used for the KKT error in the adaptive mu globalization strategies. 685 # When computing the KKT error for the globalization strategies, the norm 686 # to be used is specified with this option. Note, this options is also used 687 # in the QualityFunctionMuOracle. 688 # Possible values: 689 # - 1-norm [use the 1-norm (abs sum)] 690 # - 2-norm-squared [use the 2-norm squared (sum of squares)] 691 # - max-norm [use the infinity norm (max)] 692 # - 2-norm [use 2-norm] 693 # 694 # mu_strategy ("monotone") 695 # Update strategy for barrier parameter. 696 # Determines which barrier parameter update strategy is to be used. 697 # Possible values: 698 # - monotone [use the monotone (Fiacco-McCormick) strategy] 699 # - adaptive [use the adaptive update strategy] 700 # 701 # mu_oracle ("quality-function") 702 # Oracle for a new barrier parameter in the adaptive strategy. 703 # Determines how a new barrier parameter is computed in each "free-mode" 704 # iteration of the adaptive barrier parameter strategy. (Only considered if 705 # "adaptive" is selected for option "mu_strategy"). 706 # Possible values: 707 # - probing [Mehrotra's probing heuristic] 708 # - loqo [LOQO's centrality rule] 709 # - quality-function [minimize a quality function] 710 # 711 # fixed_mu_oracle ("average_compl") 712 # Oracle for the barrier parameter when switching to fixed mode. 713 # Determines how the first value of the barrier parameter should be 714 # computed when switching to the "monotone mode" in the adaptive strategy. 715 # (Only considered if "adaptive" is selected for option "mu_strategy".) 716 # Possible values: 717 # - probing [Mehrotra's probing heuristic] 718 # - loqo [LOQO's centrality rule] 719 # - quality-function [minimize a quality function] 720 # - average_compl [base on current average complementarity] 721 # 722 # mu_init 0 < ( 0.1) < +inf 723 # Initial value for the barrier parameter. 724 # This option determines the initial value for the barrier parameter (mu). 725 # It is only relevant in the monotone, Fiacco-McCormick version of the 726 # algorithm. (i.e., if "mu_strategy" is chosen as "monotone") 727 # 728 # barrier_tol_factor 0 < ( 10) < +inf 729 # Factor for mu in barrier stop test. 730 # The convergence tolerance for each barrier problem in the monotone mode 731 # is the value of the barrier parameter times "barrier_tol_factor". This 732 # option is also used in the adaptive mu strategy during the monotone mode. 733 # (This is kappa_epsilon in implementation paper). 734 # 735 # mu_linear_decrease_factor 0 < ( 0.2) < 1 736 # Determines linear decrease rate of barrier parameter. 737 # For the Fiacco-McCormick update procedure the new barrier parameter mu is 738 # obtained by taking the minimum of mu*"mu_linear_decrease_factor" and 739 # mu^"superlinear_decrease_power". (This is kappa_mu in implementation 740 # paper.) This option is also used in the adaptive mu strategy during the 741 # monotone mode. 742 # 743 # mu_superlinear_decrease_power 1 < ( 1.5) < 2 744 # Determines superlinear decrease rate of barrier parameter. 
745 # For the Fiacco-McCormick update procedure the new barrier parameter mu is 746 # obtained by taking the minimum of mu*"mu_linear_decrease_factor" and 747 # mu^"superlinear_decrease_power". (This is theta_mu in implementation 748 # paper.) This option is also used in the adaptive mu strategy during the 749 # monotone mode. 750 # 751 # mu_allow_fast_monotone_decrease("yes") 752 # Allow skipping of barrier problem if barrier test is already met. 753 # If set to "no", the algorithm enforces at least one iteration per barrier 754 # problem, even if the barrier test is already met for the updated barrier 755 # parameter. 756 # Possible values: 757 # - no [Take at least one iteration per barrier problem] 758 # - yes [Allow fast decrease of mu if barrier test it met] 759 # 760 # tau_min 0 < ( 0.99) < 1 761 # Lower bound on fraction-to-the-boundary parameter tau. 762 # (This is tau_min in the implementation paper.) This option is also used 763 # in the adaptive mu strategy during the monotone mode. 764 # 765 # sigma_max 0 < ( 100) < +inf 766 # Maximum value of the centering parameter. 767 # This is the upper bound for the centering parameter chosen by the quality 768 # function based barrier parameter update. (Only used if option "mu_oracle" 769 # is set to "quality-function".) 770 # 771 # sigma_min 0 <= ( 1e-06) < +inf 772 # Minimum value of the centering parameter. 773 # This is the lower bound for the centering parameter chosen by the quality 774 # function based barrier parameter update. (Only used if option "mu_oracle" 775 # is set to "quality-function".) 776 # 777 # quality_function_norm_type ("2-norm-squared") 778 # Norm used for components of the quality function. 779 # (Only used if option "mu_oracle" is set to "quality-function".) 780 # Possible values: 781 # - 1-norm [use the 1-norm (abs sum)] 782 # - 2-norm-squared [use the 2-norm squared (sum of squares)] 783 # - max-norm [use the infinity norm (max)] 784 # - 2-norm [use 2-norm] 785 # 786 # quality_function_centrality ("none") 787 # The penalty term for centrality that is included in quality function. 788 # This determines whether a term is added to the quality function to 789 # penalize deviation from centrality with respect to complementarity. The 790 # complementarity measure here is the xi in the Loqo update rule. (Only 791 # used if option "mu_oracle" is set to "quality-function".) 792 # Possible values: 793 # - none [no penalty term is added] 794 # - log [complementarity * the log of the centrality 795 # measure] 796 # - reciprocal [complementarity * the reciprocal of the 797 # centrality measure] 798 # - cubed-reciprocal [complementarity * the reciprocal of the 799 # centrality measure cubed] 800 # 801 # quality_function_balancing_term("none") 802 # The balancing term included in the quality function for centrality. 803 # This determines whether a term is added to the quality function that 804 # penalizes situations where the complementarity is much smaller than dual 805 # and primal infeasibilities. (Only used if option "mu_oracle" is set to 806 # "quality-function".) 807 # Possible values: 808 # - none [no balancing term is added] 809 # - cubic [Max(0,Max(dual_inf,primal_inf)-compl)^3] 810 # 811 # quality_function_max_section_steps 0 <= ( 8) < +inf 812 # Maximum number of search steps during direct search procedure determining 813 # the optimal centering parameter. 814 # The golden section search is performed for the quality function based mu 815 # oracle. (Only used if option "mu_oracle" is set to "quality-function".) 
816 # 817 # quality_function_section_sigma_tol 0 <= ( 0.01) < 1 818 # Tolerance for the section search procedure determining the optimal 819 # centering parameter (in sigma space). 820 # The golden section search is performed for the quality function based mu 821 # oracle. (Only used if option "mu_oracle" is set to "quality-function".) 822 # 823 # quality_function_section_qf_tol 0 <= ( 0) < 1 824 # Tolerance for the golden section search procedure determining the optimal 825 # centering parameter (in the function value space). 826 # The golden section search is performed for the quality function based mu 827 # oracle. (Only used if option "mu_oracle" is set to "quality-function".) 828 # 829 # 830 # 831 # ### Line Search ### 832 # 833 # alpha_red_factor 0 < ( 0.5) < 1 834 # Fractional reduction of the trial step size in the backtracking line search. 835 # At every step of the backtracking line search, the trial step size is 836 # reduced by this factor. 837 # 838 # accept_every_trial_step ("no") 839 # Always accept the first trial step. 840 # Setting this option to "yes" essentially disables the line search and 841 # makes the algorithm take aggressive steps, without global convergence 842 # guarantees. 843 # Possible values: 844 # - no [don't arbitrarily accept the full step] 845 # - yes [always accept the full step] 846 # 847 # accept_after_max_steps -1 <= ( -1) < +inf 848 # Accept a trial point after maximal this number of steps. 849 # Even if it does not satisfy line search conditions. 850 # 851 # alpha_for_y ("primal") 852 # Method to determine the step size for constraint multipliers. 853 # This option determines how the step size (alpha_y) will be calculated 854 # when updating the constraint multipliers. 855 # Possible values: 856 # - primal [use primal step size] 857 # - bound-mult [use step size for the bound multipliers (good 858 # for LPs)] 859 # - min [use the min of primal and bound multipliers] 860 # - max [use the max of primal and bound multipliers] 861 # - full [take a full step of size one] 862 # - min-dual-infeas [choose step size minimizing new dual 863 # infeasibility] 864 # - safer-min-dual-infeas [like "min_dual_infeas", but safeguarded by 865 # "min" and "max"] 866 # - primal-and-full [use the primal step size, and full step if 867 # delta_x <= alpha_for_y_tol] 868 # - dual-and-full [use the dual step size, and full step if 869 # delta_x <= alpha_for_y_tol] 870 # - acceptor [Call LSAcceptor to get step size for y] 871 # 872 # alpha_for_y_tol 0 <= ( 10) < +inf 873 # Tolerance for switching to full equality multiplier steps. 874 # This is only relevant if "alpha_for_y" is chosen "primal-and-full" or 875 # "dual-and-full". The step size for the equality constraint multipliers 876 # is taken to be one if the max-norm of the primal step is less than this 877 # tolerance. 878 # 879 # tiny_step_tol 0 <= (2.22045e-15) < +inf 880 # Tolerance for detecting numerically insignificant steps. 881 # If the search direction in the primal variables (x and s) is, in relative 882 # terms for each component, less than this value, the algorithm accepts the 883 # full step without line search. If this happens repeatedly, the algorithm 884 # will terminate with a corresponding exit message. The default value is 10 885 # times machine precision. 886 # 887 # tiny_step_y_tol 0 <= ( 0.01) < +inf 888 # Tolerance for quitting because of numerically insignificant steps. 
889 # If the search direction in the primal variables (x and s) is, in relative 890 # terms for each component, repeatedly less than tiny_step_tol, and the 891 # step in the y variables is smaller than this threshold, the algorithm 892 # will terminate. 893 # 894 # watchdog_shortened_iter_trigger 0 <= ( 10) < +inf 895 # Number of shortened iterations that trigger the watchdog. 896 # If the number of successive iterations in which the backtracking line 897 # search did not accept the first trial point exceeds this number, the 898 # watchdog procedure is activated. Choosing "0" here disables the watchdog 899 # procedure. 900 # 901 # watchdog_trial_iter_max 1 <= ( 3) < +inf 902 # Maximum number of watchdog iterations. 903 # This option determines the number of trial iterations allowed before the 904 # watchdog procedure is aborted and the algorithm returns to the stored 905 # point. 906 # 907 # theta_max_fact 0 < ( 10000) < +inf 908 # Determines upper bound for constraint violation in the filter. 909 # The algorithmic parameter theta_max is determined as theta_max_fact times 910 # the maximum of 1 and the constraint violation at initial point. Any 911 # point with a constraint violation larger than theta_max is unacceptable 912 # to the filter (see Eqn. (21) in the implementation paper). 913 # 914 # theta_min_fact 0 < ( 0.0001) < +inf 915 # Determines constraint violation threshold in the switching rule. 916 # The algorithmic parameter theta_min is determined as theta_min_fact times 917 # the maximum of 1 and the constraint violation at initial point. The 918 # switching rules treats an iteration as an h-type iteration whenever the 919 # current constraint violation is larger than theta_min (see paragraph 920 # before Eqn. (19) in the implementation paper). 921 # 922 # eta_phi 0 < ( 1e-08) < 0.5 923 # Relaxation factor in the Armijo condition. 924 # (See Eqn. (20) in the implementation paper) 925 # 926 # delta 0 < ( 1) < +inf 927 # Multiplier for constraint violation in the switching rule. 928 # (See Eqn. (19) in the implementation paper.) 929 # 930 # s_phi 1 < ( 2.3) < +inf 931 # Exponent for linear barrier function model in the switching rule. 932 # (See Eqn. (19) in the implementation paper.) 933 # 934 # s_theta 1 < ( 1.1) < +inf 935 # Exponent for current constraint violation in the switching rule. 936 # (See Eqn. (19) in the implementation paper.) 937 # 938 # gamma_phi 0 < ( 1e-08) < 1 939 # Relaxation factor in the filter margin for the barrier function. 940 # (See Eqn. (18a) in the implementation paper.) 941 # 942 # gamma_theta 0 < ( 1e-05) < 1 943 # Relaxation factor in the filter margin for the constraint violation. 944 # (See Eqn. (18b) in the implementation paper.) 945 # 946 # alpha_min_frac 0 < ( 0.05) < 1 947 # Safety factor for the minimal step size (before switching to restoration 948 # phase). 949 # (This is gamma_alpha in Eqn. (20) in the implementation paper.) 950 # 951 # max_soc 0 <= ( 4) < +inf 952 # Maximum number of second order correction trial steps at each iteration. 953 # Choosing 0 disables the second order corrections. (This is p^{max} of 954 # Step A-5.9 of Algorithm A in the implementation paper.) 955 # 956 # kappa_soc 0 < ( 0.99) < +inf 957 # Factor in the sufficient reduction rule for second order correction. 958 # This option determines how much a second order correction step must 959 # reduce the constraint violation so that further correction steps are 960 # attempted. (See Step A-5.9 of Algorithm A in the implementation paper.) 
961 # 962 # obj_max_inc 1 < ( 5) < +inf 963 # Determines the upper bound on the acceptable increase of barrier objective 964 # function. 965 # Trial points are rejected if they lead to an increase in the barrier 966 # objective function by more than obj_max_inc orders of magnitude. 967 # 968 # max_filter_resets 0 <= ( 5) < +inf 969 # Maximal allowed number of filter resets 970 # A positive number enables a heuristic that resets the filter, whenever in 971 # more than "filter_reset_trigger" successive iterations the last rejected 972 # trial steps size was rejected because of the filter. This option 973 # determine the maximal number of resets that are allowed to take place. 974 # 975 # filter_reset_trigger 1 <= ( 5) < +inf 976 # Number of iterations that trigger the filter reset. 977 # If the filter reset heuristic is active and the number of successive 978 # iterations in which the last rejected trial step size was rejected 979 # because of the filter, the filter is reset. 980 # 981 # corrector_type ("none") 982 # The type of corrector steps that should be taken (unsupported!). 983 # If "mu_strategy" is "adaptive", this option determines what kind of 984 # corrector steps should be tried. 985 # Possible values: 986 # - none [no corrector] 987 # - affine [corrector step towards mu=0] 988 # - primal-dual [corrector step towards current mu] 989 # 990 # skip_corr_if_neg_curv ("yes") 991 # Skip the corrector step in negative curvature iteration (unsupported!). 992 # The corrector step is not tried if negative curvature has been 993 # encountered during the computation of the search direction in the current 994 # iteration. This option is only used if "mu_strategy" is "adaptive". 995 # Possible values: 996 # - no [don't skip] 997 # - yes [skip] 998 # 999 # skip_corr_in_monotone_mode ("yes") 1000 # Skip the corrector step during monotone barrier parameter mode 1001 # (unsupported!). 1002 # The corrector step is not tried if the algorithm is currently in the 1003 # monotone mode (see also option "barrier_strategy").This option is only 1004 # used if "mu_strategy" is "adaptive". 1005 # Possible values: 1006 # - no [don't skip] 1007 # - yes [skip] 1008 # 1009 # corrector_compl_avrg_red_fact 0 < ( 1) < +inf 1010 # Complementarity tolerance factor for accepting corrector step 1011 # (unsupported!). 1012 # This option determines the factor by which complementarity is allowed to 1013 # increase for a corrector step to be accepted. 1014 # 1015 # nu_init 0 < ( 1e-06) < +inf 1016 # Initial value of the penalty parameter. 1017 # 1018 # nu_inc 0 < ( 0.0001) < +inf 1019 # Increment of the penalty parameter. 1020 # 1021 # rho 0 < ( 0.1) < 1 1022 # Value in penalty parameter update formula. 1023 # 1024 # kappa_sigma 0 < ( 1e+10) < +inf 1025 # Factor limiting the deviation of dual variables from primal estimates. 1026 # If the dual variables deviate from their primal estimates, a correction 1027 # is performed. (See Eqn. (16) in the implementation paper.) Setting the 1028 # value to less than 1 disables the correction. 1029 # 1030 # recalc_y ("no") 1031 # Tells the algorithm to recalculate the equality and inequality multipliers 1032 # as least square estimates. 1033 # This asks the algorithm to recompute the multipliers, whenever the 1034 # current infeasibility is less than recalc_y_feas_tol. Choosing yes might 1035 # be helpful in the quasi-Newton option. However, each recalculation 1036 # requires an extra factorization of the linear system. 
If a limited 1037 # memory quasi-Newton option is chosen, this is used by default. 1038 # Possible values: 1039 # - no [use the Newton step to update the multipliers] 1040 # - yes [use least-square multiplier estimates] 1041 # 1042 # recalc_y_feas_tol 0 < ( 1e-06) < +inf 1043 # Feasibility threshold for recomputation of multipliers. 1044 # If recalc_y is chosen and the current infeasibility is less than this 1045 # value, then the multipliers are recomputed. 1046 # 1047 # slack_move 0 <= (1.81899e-12) < +inf 1048 # Correction size for very small slacks. 1049 # Due to numerical issues or the lack of an interior, the slack variables 1050 # might become very small. If a slack becomes very small compared to 1051 # machine precision, the corresponding bound is moved slightly. This 1052 # parameter determines how large the move should be. Its default value is 1053 # mach_eps^{3/4}. (See also end of Section 3.5 in implementation paper - 1054 # but actual implementation might be somewhat different.) 1055 # 1056 # 1057 # 1058 # ### Warm Start ### 1059 # 1060 # warm_start_init_point ("no") 1061 # Warm-start for initial point 1062 # Indicates whether this optimization should use a warm start 1063 # initialization, where values of primal and dual variables are given 1064 # (e.g., from a previous optimization of a related problem.) 1065 # Possible values: 1066 # - no [do not use the warm start initialization] 1067 # - yes [use the warm start initialization] 1068 # 1069 # warm_start_same_structure ("no") 1070 # Indicates whether a problem with a structure identical to the previous one 1071 # is to be solved. 1072 # If "yes" is chosen, then the algorithm assumes that an NLP is now to be 1073 # solved, whose structure is identical to one that already was considered 1074 # (with the same NLP object). 1075 # Possible values: 1076 # - no [Assume this is a new problem.] 1077 # - yes [Assume this is problem has known structure] 1078 # 1079 # warm_start_bound_push 0 < ( 0.001) < +inf 1080 # same as bound_push for the regular initializer. 1081 # 1082 # warm_start_bound_frac 0 < ( 0.001) <= 0.5 1083 # same as bound_frac for the regular initializer. 1084 # 1085 # warm_start_slack_bound_push 0 < ( 0.001) < +inf 1086 # same as slack_bound_push for the regular initializer. 1087 # 1088 # warm_start_slack_bound_frac 0 < ( 0.001) <= 0.5 1089 # same as slack_bound_frac for the regular initializer. 1090 # 1091 # warm_start_mult_bound_push 0 < ( 0.001) < +inf 1092 # same as mult_bound_push for the regular initializer. 1093 # 1094 # warm_start_mult_init_max -inf < ( 1e+06) < +inf 1095 # Maximum initial value for the equality multipliers. 1096 # 1097 # warm_start_entire_iterate ("no") 1098 # Tells algorithm whether to use the GetWarmStartIterate method in the NLP. 1099 # Possible values: 1100 # - no [call GetStartingPoint in the NLP] 1101 # - yes [call GetWarmStartIterate in the NLP] 1102 # 1103 # 1104 # 1105 # ### Linear Solver ### 1106 # 1107 # linear_solver ("mumps") 1108 # Linear solver used for step computations. 1109 # Determines which linear algebra package is to be used for the solution of 1110 # the augmented linear system (for obtaining the search directions). Note, 1111 # the code must have been compiled with the linear solver you want to 1112 # choose. Depending on your Ipopt installation, not all options are 1113 # available. 
1114 # Possible values: 1115 # - ma27 [use the Harwell routine MA27] 1116 # - ma57 [use the Harwell routine MA57] 1117 # - pardiso [use the Pardiso package] 1118 # - wsmp [use WSMP package] 1119 # - mumps [use MUMPS package] 1120 # - custom [use custom linear solver] 1121 # 1122 # linear_system_scaling ("none") 1123 # Method for scaling the linear system. 1124 # Determines the method used to compute symmetric scaling factors for the 1125 # augmented system (see also the "linear_scaling_on_demand" option). This 1126 # scaling is independent of the NLP problem scaling. By default, MC19 is 1127 # only used if MA27 or MA57 are selected as linear solvers. This option is 1128 # only available if Ipopt has been compiled with MC19. 1129 # Possible values: 1130 # - none [no scaling will be performed] 1131 # - mc19 [use the Harwell routine MC19] 1132 # 1133 # linear_scaling_on_demand ("yes") 1134 # Flag indicating that linear scaling is only done if it seems required. 1135 # This option is only important if a linear scaling method (e.g., mc19) is 1136 # used. If you choose "no", then the scaling factors are computed for 1137 # every linear system from the start. This can be quite expensive. 1138 # Choosing "yes" means that the algorithm will start the scaling method 1139 # only when the solutions to the linear system seem not good, and then use 1140 # it until the end. 1141 # Possible values: 1142 # - no [Always scale the linear system.] 1143 # - yes [Start using linear system scaling if solutions 1144 # seem not good.] 1145 # 1146 # 1147 # 1148 # ### Step Calculation ### 1149 # 1150 # mehrotra_algorithm ("no") 1151 # Indicates if we want to do Mehrotra's algorithm. 1152 # If set to yes, Ipopt runs as Mehrotra's predictor-corrector algorithm. 1153 # This works usually very well for LPs and convex QPs. This automatically 1154 # disables the line search, and chooses the (unglobalized) adaptive mu 1155 # strategy with the "probing" oracle, and uses "corrector_type=affine" 1156 # without any safeguards; you should not set any of those options 1157 # explicitly in addition. Also, unless otherwise specified, the values of 1158 # "bound_push", "bound_frac", and "bound_mult_init_val" are set more 1159 # aggressive, and sets "alpha_for_y=bound_mult". 1160 # Possible values: 1161 # - no [Do the usual Ipopt algorithm.] 1162 # - yes [Do Mehrotra's predictor-corrector algorithm.] 1163 # 1164 # fast_step_computation ("no") 1165 # Indicates if the linear system should be solved quickly. 1166 # If set to yes, the algorithm assumes that the linear system that is 1167 # solved to obtain the search direction, is solved sufficiently well. In 1168 # that case, no residuals are computed, and the computation of the search 1169 # direction is a little faster. 1170 # Possible values: 1171 # - no [Verify solution of linear system by computing 1172 # residuals.] 1173 # - yes [Trust that linear systems are solved well.] 1174 # 1175 # min_refinement_steps 0 <= ( 1) < +inf 1176 # Minimum number of iterative refinement steps per linear system solve. 1177 # Iterative refinement (on the full unsymmetric system) is performed for 1178 # each right hand side. This option determines the minimum number of 1179 # iterative refinements (i.e. at least "min_refinement_steps" iterative 1180 # refinement steps are enforced per right hand side.) 1181 # 1182 # max_refinement_steps 0 <= ( 10) < +inf 1183 # Maximum number of iterative refinement steps per linear system solve. 
1184 # Iterative refinement (on the full unsymmetric system) is performed for 1185 # each right hand side. This option determines the maximum number of 1186 # iterative refinement steps. 1187 # 1188 # residual_ratio_max 0 < ( 1e-10) < +inf 1189 # Iterative refinement tolerance 1190 # Iterative refinement is performed until the residual test ratio is less 1191 # than this tolerance (or until "max_refinement_steps" refinement steps are 1192 # performed). 1193 # 1194 # residual_ratio_singular 0 < ( 1e-05) < +inf 1195 # Threshold for declaring linear system singular after failed iterative 1196 # refinement. 1197 # If the residual test ratio is larger than this value after failed 1198 # iterative refinement, the algorithm pretends that the linear system is 1199 # singular. 1200 # 1201 # residual_improvement_factor 0 < ( 1) < +inf 1202 # Minimal required reduction of residual test ratio in iterative refinement. 1203 # If the improvement of the residual test ratio made by one iterative 1204 # refinement step is not better than this factor, iterative refinement is 1205 # aborted. 1206 # 1207 # neg_curv_test_tol 0 < ( 0) < +inf 1208 # Tolerance for heuristic to ignore wrong inertia. 1209 # If positive, incorrect inertia in the augmented system is ignored, and we 1210 # test if the direction is a direction of positive curvature. This 1211 # tolerance determines when the direction is considered to be sufficiently 1212 # positive. 1213 # 1214 # max_hessian_perturbation 0 < ( 1e+20) < +inf 1215 # Maximum value of regularization parameter for handling negative curvature. 1216 # In order to guarantee that the search directions are indeed proper 1217 # descent directions, Ipopt requires that the inertia of the (augmented) 1218 # linear system for the step computation has the correct number of negative 1219 # and positive eigenvalues. The idea is that this guides the algorithm away 1220 # from maximizers and makes Ipopt more likely converge to first order 1221 # optimal points that are minimizers. If the inertia is not correct, a 1222 # multiple of the identity matrix is added to the Hessian of the Lagrangian 1223 # in the augmented system. This parameter gives the maximum value of the 1224 # regularization parameter. If a regularization of that size is not enough, 1225 # the algorithm skips this iteration and goes to the restoration phase. 1226 # (This is delta_w^max in the implementation paper.) 1227 # 1228 # min_hessian_perturbation 0 <= ( 1e-20) < +inf 1229 # Smallest perturbation of the Hessian block. 1230 # The size of the perturbation of the Hessian block is never selected 1231 # smaller than this value, unless no perturbation is necessary. (This is 1232 # delta_w^min in implementation paper.) 1233 # 1234 # perturb_inc_fact_first 1 < ( 100) < +inf 1235 # Increase factor for x-s perturbation for very first perturbation. 1236 # The factor by which the perturbation is increased when a trial value was 1237 # not sufficient - this value is used for the computation of the very first 1238 # perturbation and allows a different value for for the first perturbation 1239 # than that used for the remaining perturbations. (This is bar_kappa_w^+ in 1240 # the implementation paper.) 1241 # 1242 # perturb_inc_fact 1 < ( 8) < +inf 1243 # Increase factor for x-s perturbation. 1244 # The factor by which the perturbation is increased when a trial value was 1245 # not sufficient - this value is used for the computation of all 1246 # perturbations except for the first. 
# perturb_dec_fact 0 < (0.333333) < 1
#    Decrease factor for the x-s perturbation.
#    The factor by which the perturbation is decreased when a trial value is
#    deduced from the size of the most recent successful perturbation. (This
#    is kappa_w^- in the implementation paper.)
#
# first_hessian_perturbation 0 < (0.0001) < +inf
#    Size of the first x-s perturbation tried.
#    The first value tried for the x-s perturbation in the inertia
#    correction scheme. (This is delta_0 in the implementation paper.)
#
# jacobian_regularization_value 0 <= (1e-08) < +inf
#    Size of the regularization for rank-deficient constraint Jacobians.
#    (This is bar delta_c in the implementation paper.)
#
# jacobian_regularization_exponent 0 <= (0.25) < +inf
#    Exponent for mu in the regularization for rank-deficient constraint
#    Jacobians.
#    (This is kappa_c in the implementation paper.)
#
# perturb_always_cd ("no")
#    Activates permanent perturbation of the constraint linearization.
#    This option causes the delta_c and delta_d perturbations to be used for
#    the computation of every search direction. Usually, they are only used
#    when the iteration matrix is singular.
#    Possible values:
#     - no   [perturbation only used when required]
#     - yes  [always use perturbation]
#
#
# ### Restoration Phase ###
#
# expect_infeasible_problem ("no")
#    Enable heuristics to quickly detect an infeasible problem.
#    This option is meant to activate heuristics that may speed up the
#    infeasibility determination if you expect that there is a good chance
#    for the problem to be infeasible. In the filter line search procedure,
#    the restoration phase is called more quickly than usual, and more
#    reduction in the constraint violation is enforced before the
#    restoration phase is left. If the problem is square, this option is
#    enabled automatically.
#    Possible values:
#     - no   [the problem is probably feasible]
#     - yes  [the problem has a good chance to be infeasible]
#
# expect_infeasible_problem_ctol 0 <= (0.001) < +inf
#    Threshold for disabling the "expect_infeasible_problem" option.
#    If the constraint violation becomes smaller than this threshold, the
#    "expect_infeasible_problem" heuristics in the filter line search are
#    disabled. If the problem is square, this option is set to 0.
#
# expect_infeasible_problem_ytol 0 < (1e+08) < +inf
#    Multiplier threshold for activating the "expect_infeasible_problem"
#    option.
#    If the max norm of the constraint multipliers becomes larger than this
#    value and "expect_infeasible_problem" is chosen, then the restoration
#    phase is entered.
#
# start_with_resto ("no")
#    Tells the algorithm to switch to the restoration phase in the first
#    iteration.
#    Setting this option to "yes" forces the algorithm to switch to the
#    feasibility restoration phase in the first iteration. If the initial
#    point is feasible, the algorithm will abort with a failure.
#    Possible values:
#     - no   [don't force start in restoration phase]
#     - yes  [force start in restoration phase]
#
# soft_resto_pderror_reduction_factor 0 <= (0.9999) < +inf
#    Required reduction in primal-dual error in the soft restoration phase.
#    The soft restoration phase attempts to reduce the primal-dual error
#    with regular steps. If the damped primal-dual step (damped only to
#    satisfy the fraction-to-the-boundary rule) is not decreasing the
#    primal-dual error by at least this factor, then the regular restoration
#    phase is called. Choosing "0" here disables the soft restoration phase.
#
# max_soft_resto_iters 0 <= (10) < +inf
#    Maximum number of iterations performed successively in the soft
#    restoration phase.
#    If the soft restoration phase is performed for more than this many
#    iterations in a row, the regular restoration phase is called.
#
# required_infeasibility_reduction 0 <= (0.9) < 1
#    Required reduction of infeasibility before leaving the restoration
#    phase.
#    The restoration phase algorithm is performed until a point is found
#    that is acceptable to the filter and the infeasibility has been reduced
#    by at least the fraction given by this option.
#
# max_resto_iter 0 <= (3000000) < +inf
#    Maximum number of successive iterations in the restoration phase.
#    The algorithm terminates with an error message if the number of
#    iterations successively taken in the restoration phase exceeds this
#    number.
#
# evaluate_orig_obj_at_resto_trial ("yes")
#    Determines if the original objective function should be evaluated at
#    restoration phase trial points.
#    Setting this option to "yes" makes the restoration phase algorithm
#    evaluate the objective function of the original problem at every trial
#    point encountered during the restoration phase, even if this value is
#    not required. In this way, it is guaranteed that the original objective
#    function can be evaluated without error at all accepted iterates;
#    otherwise the algorithm might fail at a point where the restoration
#    phase accepts an iterate that is good for the restoration phase
#    problem, but not the original problem. On the other hand, if the
#    evaluation of the original objective is expensive, this might be
#    costly.
#    Possible values:
#     - no   [skip evaluation]
#     - yes  [evaluate at every trial point]
#
# resto_penalty_parameter 0 < (1000) < +inf
#    Penalty parameter in the restoration phase objective function.
#    This is the parameter rho in equation (31a) in the Ipopt implementation
#    paper.
#
# bound_mult_reset_threshold 0 <= (1000) < +inf
#    Threshold for resetting bound multipliers after the restoration phase.
#    After returning from the restoration phase, the bound multipliers are
#    updated with a Newton step for complementarity. Here, the change in the
#    primal variables during the entire restoration phase is taken to be the
#    corresponding primal Newton step. However, if after the update the
#    largest bound multiplier exceeds the threshold specified by this
#    option, the multipliers are all reset to 1.
#
# constr_mult_reset_threshold 0 <= (0) < +inf
#    Threshold for resetting equality and inequality multipliers after the
#    restoration phase.
#    After returning from the restoration phase, the constraint multipliers
#    are recomputed by a least square estimate. This option triggers when
#    those least-square estimates should be ignored.
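#
# If a case is suspected to be infeasible, the restoration-phase heuristics
# above could be switched on via overrides; a sketch with assumed values:
#
#     opt = ipopt_options({'expect_infeasible_problem': 'yes',
#                          'expect_infeasible_problem_ctol': 1e-4})
#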
#
#
# ### Derivative Checker ###
#
# derivative_test ("none")
#    Enable the derivative checker.
#    If this option is enabled, a (slow!) derivative test will be performed
#    before the optimization. The test is performed at the user provided
#    starting point and marks derivative values that seem suspicious.
#    Possible values:
#     - none               [do not perform derivative test]
#     - first-order        [perform test of first derivatives at starting
#                           point]
#     - second-order       [perform test of first and second derivatives at
#                           starting point]
#     - only-second-order  [perform test of second derivatives at starting
#                           point]
#
# derivative_test_first_index -2 <= (-2) < +inf
#    Index of the first quantity to be checked by the derivative checker.
#    If this is set to -2, then all derivatives are checked. Otherwise, for
#    the first derivative test it specifies the first variable for which the
#    test is done (counting starts at 0). For second derivatives, it
#    specifies the first constraint for which the test is done; counting of
#    constraint indices starts at 0, and -1 refers to the objective function
#    Hessian.
#
# derivative_test_perturbation 0 < (1e-08) < +inf
#    Size of the finite difference perturbation in the derivative test.
#    This determines the relative perturbation of the variable entries.
#
# derivative_test_tol 0 < (0.0001) < +inf
#    Threshold for indicating a wrong derivative.
#    If the relative deviation of the estimated derivative from the given
#    one is larger than this value, the corresponding derivative is marked
#    as wrong.
#
# derivative_test_print_all ("no")
#    Indicates whether information for all estimated derivatives should be
#    printed.
#    Determines the verbosity of the derivative checker.
#    Possible values:
#     - no   [Print only suspect derivatives]
#     - yes  [Print all derivatives]
#
# jacobian_approximation ("exact")
#    Specifies the technique used to compute the constraint Jacobian.
#    Possible values:
#     - exact                    [user-provided derivatives]
#     - finite-difference-values [user-provided structure, values by finite
#                                 differences]
#
# findiff_perturbation 0 < (1e-07) < +inf
#    Size of the finite difference perturbation for derivative
#    approximation.
#    This determines the relative perturbation of the variable entries.
#
# point_perturbation_radius 0 <= (10) < +inf
#    Maximal perturbation of an evaluation point.
#    If a random perturbation of a point is required, this number indicates
#    the maximal perturbation. This is for example used when determining the
#    center point at which the finite difference derivative test is
#    executed.
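#
# A sketch of turning on the (slow) derivative checker described above when
# debugging user-supplied derivatives; the tolerance shown is an assumed,
# tighter-than-default value:
#
#     opt = ipopt_options({'derivative_test': 'second-order',
#                          'derivative_test_tol': 1e-5})
#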
#
#
# ### Hessian Approximation ###
#
# limited_memory_max_history 0 <= (6) < +inf
#    Maximum size of the history for the limited quasi-Newton Hessian
#    approximation.
#    This option determines the number of most recent iterations that are
#    taken into account for the limited-memory quasi-Newton approximation.
#
# limited_memory_update_type ("bfgs")
#    Quasi-Newton update formula for the limited memory approximation.
#    Determines which update formula is to be used for the limited-memory
#    quasi-Newton approximation.
#    Possible values:
#     - bfgs  [BFGS update (with skipping)]
#     - sr1   [SR1 (not working well)]
#
# limited_memory_initialization ("scalar1")
#    Initialization strategy for the limited memory quasi-Newton
#    approximation.
#    Determines how the diagonal matrix B_0 used as the first term in the
#    limited memory approximation should be computed.
#    Possible values:
#     - scalar1   [sigma = s^Ty/s^Ts]
#     - scalar2   [sigma = y^Ty/s^Ty]
#     - constant  [sigma = limited_memory_init_val]
#
# limited_memory_init_val 0 < (1) < +inf
#    Value for B0 in the low-rank update.
#    The starting matrix in the low rank update, B0, is chosen to be this
#    multiple of the identity in the first iteration (when no updates have
#    been performed yet), and is constantly chosen as this value, if
#    "limited_memory_initialization" is "constant".
#
# limited_memory_init_val_max 0 < (1e+08) < +inf
#    Upper bound on the value for B0 in the low-rank update.
#    The starting matrix in the low rank update, B0, is chosen to be this
#    multiple of the identity in the first iteration (when no updates have
#    been performed yet), and is constantly chosen as this value, if
#    "limited_memory_initialization" is "constant".
#
# limited_memory_init_val_min 0 < (1e-08) < +inf
#    Lower bound on the value for B0 in the low-rank update.
#    The starting matrix in the low rank update, B0, is chosen to be this
#    multiple of the identity in the first iteration (when no updates have
#    been performed yet), and is constantly chosen as this value, if
#    "limited_memory_initialization" is "constant".
#
# limited_memory_max_skipping 1 <= (2) < +inf
#    Threshold for successive iterations where the update is skipped.
#    If the update is skipped for more than this number of successive
#    iterations, the quasi-Newton approximation is reset.
#
# hessian_approximation ("exact")
#    Indicates what Hessian information is to be used.
#    This determines which kind of information for the Hessian of the
#    Lagrangian function is used by the algorithm.
#    Possible values:
#     - exact           [Use second derivatives provided by the NLP.]
#     - limited-memory  [Perform a limited-memory quasi-Newton
#                        approximation]
#
# hessian_approximation_space ("nonlinear-variables")
#    Indicates in which subspace the Hessian information is to be
#    approximated.
#    Possible values:
#     - nonlinear-variables  [only in space of nonlinear variables.]
#     - all-variables        [in space of all variables (without slacks)]
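#
# When exact second derivatives are not available, the quasi-Newton settings
# above could be combined as in the following sketch (the history length is
# an illustrative assumption):
#
#     opt = ipopt_options({'hessian_approximation': 'limited-memory',
#                          'limited_memory_max_history': 10})
#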
#
#
# ### MA27 Linear Solver ###
#
# ma27_pivtol 0 < (1e-08) < 1
#    Pivot tolerance for the linear solver MA27.
#    A smaller number pivots for sparsity, a larger number pivots for
#    stability. This option is only available if Ipopt has been compiled
#    with MA27.
#
# ma27_pivtolmax 0 < (0.0001) < 1
#    Maximum pivot tolerance for the linear solver MA27.
#    Ipopt may increase pivtol as high as pivtolmax to get a more accurate
#    solution to the linear system. This option is only available if Ipopt
#    has been compiled with MA27.
#
# ma27_liw_init_factor 1 <= (5) < +inf
#    Integer workspace memory for MA27.
#    The initial integer workspace memory = liw_init_factor * memory
#    required by the unfactored system. Ipopt will increase the workspace
#    size by meminc_factor if required. This option is only available if
#    Ipopt has been compiled with MA27.
#
# ma27_la_init_factor 1 <= (5) < +inf
#    Real workspace memory for MA27.
#    The initial real workspace memory = la_init_factor * memory required by
#    the unfactored system. Ipopt will increase the workspace size by
#    meminc_factor if required. This option is only available if Ipopt has
#    been compiled with MA27.
#
# ma27_meminc_factor 1 <= (10) < +inf
#    Increment factor for the workspace size for MA27.
#    If the integer or real workspace is not large enough, Ipopt will
#    increase its size by this factor. This option is only available if
#    Ipopt has been compiled with MA27.
#
# ma27_skip_inertia_check ("no")
#    Always pretend the inertia is correct.
#    Setting this option to "yes" essentially disables the inertia check.
#    This option makes the algorithm non-robust and prone to failure, but it
#    might give some insight into the necessity of inertia control.
#    Possible values:
#     - no   [check inertia]
#     - yes  [skip inertia check]
#
# ma27_ignore_singularity ("no")
#    Enables MA27's ability to solve a linear system even if the matrix is
#    singular.
#    Setting this option to "yes" means that Ipopt will call MA27 to compute
#    solutions for right hand sides, even if MA27 has detected that the
#    matrix is singular (but is still able to solve the linear system). In
#    some cases this might be better than using Ipopt's heuristic of small
#    perturbation of the lower diagonal of the KKT matrix.
#    Possible values:
#     - no   [Don't have MA27 solve singular systems]
#     - yes  [Have MA27 solve singular systems]
#
#
# ### MA57 Linear Solver ###
#
# ma57_pivtol 0 < (1e-08) < 1
#    Pivot tolerance for the linear solver MA57.
#    A smaller number pivots for sparsity, a larger number pivots for
#    stability. This option is only available if Ipopt has been compiled
#    with MA57.
#
# ma57_pivtolmax 0 < (0.0001) < 1
#    Maximum pivot tolerance for the linear solver MA57.
#    Ipopt may increase pivtol as high as ma57_pivtolmax to get a more
#    accurate solution to the linear system. This option is only available
#    if Ipopt has been compiled with MA57.
#
# ma57_pre_alloc 1 <= (3) < +inf
#    Safety factor for the work space memory allocation for the linear
#    solver MA57.
#    If 1 is chosen, the suggested amount of work space is used. However,
#    choosing a larger number might avoid reallocation if the suggested
#    values do not suffice. This option is only available if Ipopt has been
#    compiled with MA57.
#
# ma57_pivot_order 0 <= (5) <= 5
#    Controls the pivot order in MA57.
#    This is ICNTL(6) in MA57.
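#
# Selecting one of the Harwell solvers above and relaxing its pivot
# tolerance is again a matter of overrides; a sketch assuming an Ipopt build
# that includes MA57:
#
#     opt = ipopt_options({'linear_solver': 'ma57',
#                          'ma57_pivtol': 1e-6})
#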
#
#
# ### Pardiso Linear Solver ###
#
# pardiso_matching_strategy ("complete+2x2")
#    Matching strategy to be used by Pardiso.
#    This is IPAR(13) in the Pardiso manual. This option is only available
#    if Ipopt has been compiled with Pardiso.
#    Possible values:
#     - complete      [Match complete (IPAR(13)=1)]
#     - complete+2x2  [Match complete+2x2 (IPAR(13)=2)]
#     - constraints   [Match constraints (IPAR(13)=3)]
#
# pardiso_redo_symbolic_fact_only_if_inertia_wrong ("no")
#    Toggle for handling the case when elements were perturbed by Pardiso.
#    This option is only available if Ipopt has been compiled with Pardiso.
#    Possible values:
#     - no   [Always redo symbolic factorization when elements were
#             perturbed]
#     - yes  [Only redo symbolic factorization when elements were perturbed
#             if also the inertia was wrong]
#
# pardiso_repeated_perturbation_means_singular ("no")
#    Interpretation of perturbed elements.
#    This option is only available if Ipopt has been compiled with Pardiso.
#    Possible values:
#     - no   [Don't assume that the matrix is singular if elements were
#             perturbed after recent symbolic factorization]
#     - yes  [Assume that the matrix is singular if elements were perturbed
#             after recent symbolic factorization]
#
# pardiso_out_of_core_power 0 <= (0) < +inf
#    Enables the out-of-core variant of Pardiso.
#    Setting this option to a positive integer k makes Pardiso work in the
#    out-of-core variant where the factor is split in 2^k subdomains. This
#    is IPARM(50) in the Pardiso manual. This option is only available if
#    Ipopt has been compiled with Pardiso.
#
# pardiso_msglvl 0 <= (0) < +inf
#    Pardiso message level.
#    This determines the amount of analysis output from the Pardiso solver.
#    This is MSGLVL in the Pardiso manual.
#
# pardiso_skip_inertia_check ("no")
#    Always pretend the inertia is correct.
#    Setting this option to "yes" essentially disables the inertia check.
#    This option makes the algorithm non-robust and prone to failure, but it
#    might give some insight into the necessity of inertia control.
#    Possible values:
#     - no   [check inertia]
#     - yes  [skip inertia check]
#
# pardiso_max_iter 1 <= (500) < +inf
#    Maximum number of Krylov-subspace iterations.
#    DPARM(1)
#
# pardiso_iter_relative_tol 0 < (1e-06) < 1
#    Relative residual convergence tolerance.
#    DPARM(2)
#
# pardiso_iter_coarse_size 1 <= (5000) < +inf
#    Maximum size of the coarse grid matrix.
#    DPARM(3)
#
# pardiso_iter_max_levels 1 <= (10000) < +inf
#    Maximum number of grid levels.
#    DPARM(4)
#
# pardiso_iter_dropping_factor 0 < (0.5) < 1
#    Dropping value for the incomplete factor.
#    DPARM(5)
#
# pardiso_iter_dropping_schur 0 < (0.1) < 1
#    Dropping value for sparsifying the Schur complement factor.
#    DPARM(6)
#
# pardiso_iter_max_row_fill 1 <= (10000000) < +inf
#    Maximum fill for each row.
#    DPARM(7)
#
# pardiso_iter_inverse_norm_factor 1 < (5e+06) < +inf
#    DPARM(8)
#
# pardiso_iterative ("no")
#    Switch on the iterative solver in the Pardiso library.
#    Possible values:
#     - no   []
#     - yes  []
#
# pardiso_max_droptol_corrections 1 <= (4) < +inf
#    Maximal number of decreases of the drop tolerance during one solve.
#    This is relevant only for the iterative Pardiso options.
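#
# A sketch of switching Pardiso to its iterative variant with more analysis
# output (this assumes an Ipopt build linked against Pardiso):
#
#     opt = ipopt_options({'linear_solver': 'pardiso',
#                          'pardiso_iterative': 'yes',
#                          'pardiso_msglvl': 1})
#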
#
#
# ### Mumps Linear Solver ###
#
# mumps_pivtol 0 <= (1e-06) <= 1
#    Pivot tolerance for the linear solver MUMPS.
#    A smaller number pivots for sparsity, a larger number pivots for
#    stability. This option is only available if Ipopt has been compiled
#    with MUMPS.
#
# mumps_pivtolmax 0 <= (0.1) <= 1
#    Maximum pivot tolerance for the linear solver MUMPS.
#    Ipopt may increase pivtol as high as pivtolmax to get a more accurate
#    solution to the linear system. This option is only available if Ipopt
#    has been compiled with MUMPS.
#
# mumps_mem_percent 0 <= (1000) < +inf
#    Percentage increase in the estimated working space for MUMPS.
#    In MUMPS, when significant extra fill-in is caused by numerical
#    pivoting, larger values of mumps_mem_percent may help use the workspace
#    more efficiently. On the other hand, if the memory requirements are too
#    large at the very beginning of the optimization, choosing a much
#    smaller value for this option, such as 5, might reduce memory
#    requirements.
#
# mumps_permuting_scaling 0 <= (7) <= 7
#    Controls permuting and scaling in MUMPS.
#    This is ICNTL(6) in MUMPS.
#
# mumps_pivot_order 0 <= (7) <= 7
#    Controls the pivot order in MUMPS.
#    This is ICNTL(7) in MUMPS.
#
# mumps_scaling -2 <= (77) <= 77
#    Controls scaling in MUMPS.
#    This is ICNTL(8) in MUMPS.
#
# mumps_dep_tol -inf < (-1) < +inf
#    Pivot threshold for detection of linearly dependent constraints in
#    MUMPS.
#    When MUMPS is used to determine linearly dependent constraints, this
#    determines the threshold for a pivot to be considered zero. This is
#    CNTL(3) in MUMPS.
#
#
# ### MA28 Linear Solver ###
#
# ma28_pivtol 0 < (0.01) <= 1
#    Pivot tolerance for the linear solver MA28.
#    This is used when MA28 tries to find the dependent constraints.
#
#
# ### Uncategorized ###
#
# warm_start_target_mu -inf < (0) < +inf
#    Unsupported!
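#
# Finally, a sketch of giving MUMPS more workspace headroom and a looser
# pivot tolerance through overrides (the values are illustrative
# assumptions):
#
#     opt = ipopt_options({'mumps_mem_percent': 2000,
#                          'mumps_pivtol': 1e-4})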