diff --git a/doc/OnlineDocs/library_reference/solvers/index.rst b/doc/OnlineDocs/library_reference/solvers/index.rst index e24cfeb74ac..b2e215c3070 100644 --- a/doc/OnlineDocs/library_reference/solvers/index.rst +++ b/doc/OnlineDocs/library_reference/solvers/index.rst @@ -7,3 +7,4 @@ Solver Interfaces gams.rst cplex_persistent.rst gurobi_persistent.rst + xpress_persistent.rst diff --git a/doc/OnlineDocs/library_reference/solvers/xpress_persistent.rst b/doc/OnlineDocs/library_reference/solvers/xpress_persistent.rst new file mode 100644 index 00000000000..2a98b4a09db --- /dev/null +++ b/doc/OnlineDocs/library_reference/solvers/xpress_persistent.rst @@ -0,0 +1,7 @@ +XpressPersistent +================ + +.. autoclass:: pyomo.solvers.plugins.solvers.xpress_persistent.XpressPersistent + :members: + :inherited-members: + :show-inheritance: diff --git a/doc/OnlineDocs/modeling_extensions/gdp/modeling.rst b/doc/OnlineDocs/modeling_extensions/gdp/modeling.rst index 652eb334367..d61728fc1c9 100644 --- a/doc/OnlineDocs/modeling_extensions/gdp/modeling.rst +++ b/doc/OnlineDocs/modeling_extensions/gdp/modeling.rst @@ -254,7 +254,7 @@ Here, we demonstrate this capability with a toy example: >>> # Before solve, Boolean vars have no value >>> Reference(m.d[:].indicator_var).display() - IndexedBooleanVar : Size=4, Index=s + IndexedBooleanVar : Size=4, Index=s, ReferenceTo=d[:].indicator_var Key : Value : Fixed : Stale 1 : None : False : True 2 : None : False : True @@ -265,7 +265,7 @@ Here, we demonstrate this capability with a toy example: >>> # based on the algebraic model results >>> run_data = SolverFactory('glpk').solve(m) >>> Reference(m.d[:].indicator_var).display() - IndexedBooleanVar : Size=4, Index=s + IndexedBooleanVar : Size=4, Index=s, ReferenceTo=d[:].indicator_var Key : Value : Fixed : Stale 1 : True : False : False 2 : False : False : False diff --git a/doc/OnlineDocs/related_packages.rst b/doc/OnlineDocs/related_packages.rst index 0d99989c4af..1861b98ba6e 100644 --- 
a/doc/OnlineDocs/related_packages.rst +++ b/doc/OnlineDocs/related_packages.rst @@ -44,14 +44,14 @@ Domain-Specific Applications +==========================+=========================================================+=============================================+ | Chama | https://github.com/sandialabs/chama | Sensor placement optimization | +--------------------------+---------------------------------------------------------+---------------------------------------------+ -| Egret | https://github.com/grid-parity-exchange/egret | Formulation and solution of Unit Commitment| +| Egret | https://github.com/grid-parity-exchange/egret | Formulation and solution of unit commitment| | | | and optimal power flow problems | +--------------------------+---------------------------------------------------------+---------------------------------------------+ | IDAES | https://github.com/idaes/idaes-pse | Institute for the Design of Advanced | | | | Energy Systems | +--------------------------+---------------------------------------------------------+---------------------------------------------+ -| Prescient | https://github.com/grid-parity-exchange/prescient | Parallel solution of structured | -| | | NLPs. 
| +| Prescient | https://github.com/grid-parity-exchange/prescient | Production Cost Model for power systems | +| | | simulation and analysis | +--------------------------+---------------------------------------------------------+---------------------------------------------+ | PyPSA | https://github.com/pypsa/pypsa | Python for Power system Analysis | +--------------------------+---------------------------------------------------------+---------------------------------------------+ diff --git a/examples/pyomobook/test_book_examples.py b/examples/pyomobook/test_book_examples.py index 2ae1b054953..8be6740cc9d 100644 --- a/examples/pyomobook/test_book_examples.py +++ b/examples/pyomobook/test_book_examples.py @@ -58,6 +58,7 @@ 'test_dae_ch_run_path_constraint_tester': ['ipopt'], # gdp_ch + 'test_gdp_ch_pyomo_gdp_uc_sh': ['glpk'], 'test_gdp_ch_pyomo_scont': ['glpk'], 'test_gdp_ch_pyomo_scont2': ['glpk'], 'test_gdp_ch_scont_script': ['glpk'], @@ -386,6 +387,8 @@ def compare_files(out_file, base_file, abstol, reltol, else: extra = out_filtered n = index_of_base_i_in_out + if n == float('inf'): + n = None extra_terms = extra[i:n] try: assert len(extra_terms) % 3 == 0 diff --git a/pyomo/contrib/gdpopt/GDPopt.py b/pyomo/contrib/gdpopt/GDPopt.py index 5f5a6c7e0ea..ec81f4cad73 100644 --- a/pyomo/contrib/gdpopt/GDPopt.py +++ b/pyomo/contrib/gdpopt/GDPopt.py @@ -57,7 +57,8 @@ doc='The GDPopt decomposition-based ' 'Generalized Disjunctive Programming (GDP) solver') class GDPoptSolver(object): - """Decomposition solver for Generalized Disjunctive Programming (GDP) problems. + """Decomposition solver for Generalized Disjunctive Programming (GDP) + problems. 
The GDPopt (Generalized Disjunctive Programming optimizer) solver applies a variety of decomposition-based approaches to solve Generalized Disjunctive @@ -71,8 +72,8 @@ class GDPoptSolver(object): - Partial surrogate cuts [pending] - Generalized Bender decomposition [pending] - This solver implementation was developed by Carnegie Mellon University in the - research group of Ignacio Grossmann. + This solver implementation was developed by Carnegie Mellon University in + the research group of Ignacio Grossmann. For nonconvex problems, LOA may not report rigorous lower/upper bounds. @@ -179,14 +180,22 @@ def _log_solver_intro_message(self, config): config.nlp_solver_args.display(ostream=nlp_args_output) config.minlp_solver_args.display(ostream=minlp_args_output) config.local_minlp_solver_args.display(ostream=lminlp_args_output) - mip_args_text = indent(mip_args_output.getvalue().rstrip(), prefix=" " * 2 + " - ") - nlp_args_text = indent(nlp_args_output.getvalue().rstrip(), prefix=" " * 2 + " - ") - minlp_args_text = indent(minlp_args_output.getvalue().rstrip(), prefix=" " * 2 + " - ") - lminlp_args_text = indent(lminlp_args_output.getvalue().rstrip(), prefix=" " * 2 + " - ") - mip_args_text = "" if len(mip_args_text.strip()) == 0 else "\n" + mip_args_text - nlp_args_text = "" if len(nlp_args_text.strip()) == 0 else "\n" + nlp_args_text - minlp_args_text = "" if len(minlp_args_text.strip()) == 0 else "\n" + minlp_args_text - lminlp_args_text = "" if len(lminlp_args_text.strip()) == 0 else "\n" + lminlp_args_text + mip_args_text = indent(mip_args_output.getvalue().rstrip(), prefix=" " * + 2 + " - ") + nlp_args_text = indent(nlp_args_output.getvalue().rstrip(), prefix=" " * + 2 + " - ") + minlp_args_text = indent(minlp_args_output.getvalue().rstrip(), + prefix=" " * 2 + " - ") + lminlp_args_text = indent(lminlp_args_output.getvalue().rstrip(), + prefix=" " * 2 + " - ") + mip_args_text = "" if len(mip_args_text.strip()) == 0 else \ + "\n" + mip_args_text + nlp_args_text = 
"" if len(nlp_args_text.strip()) == 0 else \ + "\n" + nlp_args_text + minlp_args_text = "" if len(minlp_args_text.strip()) == 0 else \ + "\n" + minlp_args_text + lminlp_args_text = "" if len(lminlp_args_text.strip()) == 0 else \ + "\n" + lminlp_args_text config.logger.info( """ Subsolvers: @@ -209,7 +218,7 @@ def _log_solver_intro_message(self, config): If you use this software, you may cite the following: - Implementation: Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE. -Pyomo.GDP: Disjunctive Models in Python. +Pyomo.GDP: Disjunctive Models in Python. Proc. of the 13th Intl. Symposium on Process Systems Eng. San Diego, 2018. """.strip() @@ -227,7 +236,8 @@ def _log_solver_intro_message(self, config): to_cite_text += """ - GLOA algorithm: Lee, S; Grossmann, IE. -A Global Optimization Algorithm for Nonconvex Generalized Disjunctive Programming and Applications to Process Systems. +A Global Optimization Algorithm for Nonconvex Generalized Disjunctive +Programming and Applications to Process Systems. Comp. and Chem. Eng. 2001, 25, 1675-1697. DOI: 10.1016/S0098-1354(01)00732-3. 
""".strip() diff --git a/pyomo/contrib/gdpopt/branch_and_bound.py b/pyomo/contrib/gdpopt/branch_and_bound.py index d1ce391cdbf..346f08f93e1 100644 --- a/pyomo/contrib/gdpopt/branch_and_bound.py +++ b/pyomo/contrib/gdpopt/branch_and_bound.py @@ -15,7 +15,8 @@ from pyomo.common.collections import ComponentMap from pyomo.common.errors import InfeasibleConstraintException from pyomo.contrib.fbbt.fbbt import fbbt -from pyomo.contrib.gdpopt.util import copy_var_list_values, SuppressInfeasibleWarning, get_main_elapsed_time +from pyomo.contrib.gdpopt.util import ( + copy_var_list_values, SuppressInfeasibleWarning, get_main_elapsed_time) from pyomo.contrib.satsolver.satsolver import satisfiable from pyomo.core import minimize, Suffix, Constraint, TransformationFactory from pyomo.opt import SolverFactory, SolverStatus @@ -76,7 +77,9 @@ def _perform_branch_and_bound(solve_data): unfixed_disjuncts = [] disjuncts_fixed_True[0].indicator_var.fix(True) elif disjuncts_fixed_True and disjunction.xor: - assert len(disjuncts_fixed_True) == 1, "XOR (only one True) violated: %s" % disjunction.name + assert len(disjuncts_fixed_True) == 1, ("XOR (only one True) " + "violated: %s" % + disjunction.name) disjuncts_fixed_False.extend(unfixed_disjuncts) unfixed_disjuncts = [] @@ -87,19 +90,23 @@ def _perform_branch_and_bound(solve_data): # Deactivate nonlinear constraints in unfixed disjuncts for disjunct in unfixed_disjuncts: nonlinear_constraints_in_disjunct = [ - constr for constr in disjunct.component_data_objects(Constraint, active=True) + constr for constr in disjunct.component_data_objects( + Constraint, active=True) if constr.body.polynomial_degree() not in _linear_degrees] for constraint in nonlinear_constraints_in_disjunct: constraint.deactivate() if nonlinear_constraints_in_disjunct: - # TODO might be worthwhile to log number of nonlinear constraints in each disjunction - # for later branching purposes - root_util_blk.disjunct_to_nonlinear_constraints[disjunct] = 
nonlinear_constraints_in_disjunct + # TODO might be worthwhile to log number of nonlinear + # constraints in each disjunction for later branching purposes + root_util_blk.disjunct_to_nonlinear_constraints[ + disjunct] = nonlinear_constraints_in_disjunct - root_util_blk.disjunction_to_unfixed_disjuncts[disjunction] = unfixed_disjuncts + root_util_blk.disjunction_to_unfixed_disjuncts[ + disjunction] = unfixed_disjuncts pass - # Add the BigM suffix if it does not already exist. Used later during nonlinear constraint activation. + # Add the BigM suffix if it does not already exist. Used later during + # nonlinear constraint activation. # TODO is this still necessary? if not hasattr(root_node, 'BigM'): root_node.BigM = Suffix() @@ -107,8 +114,9 @@ def _perform_branch_and_bound(solve_data): # Set up the priority queue queue = solve_data.bb_queue = [] solve_data.created_nodes = 0 - unbranched_disjunction_indices = [i for i, disjunction in enumerate(root_util_blk.disjunction_list) - if disjunction in root_util_blk.disjunction_to_unfixed_disjuncts] + unbranched_disjunction_indices = [ + i for i, disjunction in enumerate(root_util_blk.disjunction_list) + if disjunction in root_util_blk.disjunction_to_unfixed_disjuncts] sort_tuple = BBNodeData( obj_lb=float('-inf'), obj_ub=float('inf'), @@ -129,7 +137,8 @@ def _perform_branch_and_bound(solve_data): # ) for x in sorted(queue)]) node_data, node_model = heappop(queue) config.logger.info("Nodes: %s LB %.10g Unbranched %s" % ( - solve_data.explored_nodes, node_data.obj_lb, node_data.num_unbranched_disjunctions)) + solve_data.explored_nodes, node_data.obj_lb, + node_data.num_unbranched_disjunctions)) # Check time limit elapsed = get_main_elapsed_time(solve_data.timing) @@ -140,8 +149,12 @@ def _perform_branch_and_bound(solve_data): 'Elapsed: {} seconds' .format(config.time_limit, elapsed)) no_feasible_soln = float('inf') - solve_data.LB = node_data.obj_lb if solve_data.objective_sense == minimize else -no_feasible_soln - 
solve_data.UB = no_feasible_soln if solve_data.objective_sense == minimize else -node_data.obj_lb + solve_data.LB = node_data.obj_lb if \ + solve_data.objective_sense == minimize else \ + -no_feasible_soln + solve_data.UB = no_feasible_soln if \ + solve_data.objective_sense == minimize else \ + -node_data.obj_lb config.logger.info( 'Final bound values: LB: {} UB: {}'. format(solve_data.LB, solve_data.UB)) @@ -153,14 +166,20 @@ def _perform_branch_and_bound(solve_data): # Node has not been evaluated. solve_data.explored_nodes += 1 new_node_data = _prescreen_node(node_data, node_model, solve_data) - heappush(queue, (new_node_data, node_model)) # replace with updated node data - elif node_data.obj_lb < node_data.obj_ub - config.bound_tolerance and not node_data.is_evaluated: + heappush(queue, (new_node_data, node_model)) # replace with updated + # node data + elif node_data.obj_lb < node_data.obj_ub - config.bound_tolerance and \ + not node_data.is_evaluated: # Node has not been fully evaluated. - # Note: infeasible and unbounded nodes will skip this condition, because of strict inequality + # Note: infeasible and unbounded nodes will skip this condition, + # because of strict inequality new_node_data = _evaluate_node(node_data, node_model, solve_data) - heappush(queue, (new_node_data, node_model)) # replace with updated node data - elif node_data.num_unbranched_disjunctions == 0 or node_data.obj_lb == float('inf'): - # We have reached a leaf node, or the best available node is infeasible. + heappush(queue, (new_node_data, node_model)) # replace with updated + # node data + elif node_data.num_unbranched_disjunctions == 0 or \ + node_data.obj_lb == float('inf'): + # We have reached a leaf node, or the best available node is + # infeasible. 
original_model = solve_data.original_model copy_var_list_values( from_list=node_model.GDPopt_utils.variable_list, @@ -168,8 +187,12 @@ def _perform_branch_and_bound(solve_data): config=config, ) - solve_data.LB = node_data.obj_lb if solve_data.objective_sense == minimize else -node_data.obj_ub - solve_data.UB = node_data.obj_ub if solve_data.objective_sense == minimize else -node_data.obj_lb + solve_data.LB = node_data.obj_lb if \ + solve_data.objective_sense == minimize else \ + -node_data.obj_ub + solve_data.UB = node_data.obj_ub if \ + solve_data.objective_sense == minimize else \ + -node_data.obj_lb solve_data.master_iteration = solve_data.explored_nodes if node_data.obj_lb == float('inf'): solve_data.results.solver.termination_condition = tc.infeasible @@ -186,50 +209,70 @@ def _branch_on_node(node_data, node_model, solve_data): # Keeping the naive branch selection config = solve_data.config disjunction_to_branch_idx = node_data.unbranched_disjunction_indices[0] - disjunction_to_branch = node_model.GDPopt_utils.disjunction_list[disjunction_to_branch_idx] - num_unfixed_disjuncts = len(node_model.GDPopt_utils.disjunction_to_unfixed_disjuncts[disjunction_to_branch]) - config.logger.info("Branching on disjunction %s" % disjunction_to_branch.name) + disjunction_to_branch = node_model.GDPopt_utils.disjunction_list[ + disjunction_to_branch_idx] + num_unfixed_disjuncts = len( + node_model.GDPopt_utils.disjunction_to_unfixed_disjuncts[ + disjunction_to_branch]) + config.logger.info("Branching on disjunction %s" % + disjunction_to_branch.name) node_count = solve_data.created_nodes newly_created_nodes = 0 for disjunct_index_to_fix_True in range(num_unfixed_disjuncts): # Create a new branch for each unfixed disjunct child_model = node_model.clone() - child_disjunction_to_branch = child_model.GDPopt_utils.disjunction_list[disjunction_to_branch_idx] - child_unfixed_disjuncts = child_model.GDPopt_utils.disjunction_to_unfixed_disjuncts[child_disjunction_to_branch] + 
child_disjunction_to_branch = child_model.GDPopt_utils.\ + disjunction_list[ + disjunction_to_branch_idx] + child_unfixed_disjuncts = child_model.GDPopt_utils.\ + disjunction_to_unfixed_disjuncts[ + child_disjunction_to_branch] for idx, child_disjunct in enumerate(child_unfixed_disjuncts): if idx == disjunct_index_to_fix_True: child_disjunct.indicator_var.fix(True) else: child_disjunct.deactivate() if not child_disjunction_to_branch.xor: - raise NotImplementedError("We still need to add support for non-XOR disjunctions.") - # This requires adding all combinations of activation status among unfixed_disjuncts - # Reactivate nonlinear constraints in the newly-fixed child disjunct - fixed_True_disjunct = child_unfixed_disjuncts[disjunct_index_to_fix_True] - for constr in child_model.GDPopt_utils.disjunct_to_nonlinear_constraints.get(fixed_True_disjunct, ()): + raise NotImplementedError("We still need to add support for " + "non-XOR disjunctions.") + # This requires adding all combinations of activation status among + # unfixed_disjuncts Reactivate nonlinear constraints in the newly-fixed + # child disjunct + fixed_True_disjunct = child_unfixed_disjuncts[ + disjunct_index_to_fix_True] + for constr in child_model.GDPopt_utils.\ + disjunct_to_nonlinear_constraints.get(fixed_True_disjunct, ()): constr.activate() - child_model.BigM[constr] = 1 # set arbitrary BigM (ok, because we fix corresponding Y=True) + child_model.BigM[constr] = 1 # set arbitrary BigM (ok, because we + # fix corresponding Y=True) - del child_model.GDPopt_utils.disjunction_to_unfixed_disjuncts[child_disjunction_to_branch] + del child_model.GDPopt_utils.disjunction_to_unfixed_disjuncts[ + child_disjunction_to_branch] for child_disjunct in child_unfixed_disjuncts: - child_model.GDPopt_utils.disjunct_to_nonlinear_constraints.pop(child_disjunct, None) + child_model.GDPopt_utils.disjunct_to_nonlinear_constraints.pop( + child_disjunct, None) newly_created_nodes += 1 child_node_data = node_data._replace( 
is_screened=False, is_evaluated=False, - num_unbranched_disjunctions=node_data.num_unbranched_disjunctions - 1, + num_unbranched_disjunctions=node_data.\ + num_unbranched_disjunctions - 1, node_count=node_count + newly_created_nodes, - unbranched_disjunction_indices=node_data.unbranched_disjunction_indices[1:], + unbranched_disjunction_indices=node_data.\ + unbranched_disjunction_indices[1:], obj_ub=float('inf'), ) heappush(solve_data.bb_queue, (child_node_data, child_model)) solve_data.created_nodes += newly_created_nodes - config.logger.info("Added %s new nodes with %s relaxed disjunctions to the heap. Size now %s." % ( - num_unfixed_disjuncts, node_data.num_unbranched_disjunctions - 1, len(solve_data.bb_queue))) + config.logger.info("Added %s new nodes with %s relaxed disjunctions to " + "the heap. Size now %s." % + ( num_unfixed_disjuncts, + node_data.num_unbranched_disjunctions - 1, + len(solve_data.bb_queue))) def _prescreen_node(node_data, node_model, solve_data): @@ -237,23 +280,27 @@ def _prescreen_node(node_data, node_model, solve_data): # Check node for satisfiability if sat-solver is enabled if config.check_sat and satisfiable(node_model, config.logger) is False: if node_data.node_count == 0: - config.logger.info("Root node is not satisfiable. Problem is infeasible.") + config.logger.info("Root node is not satisfiable. Problem is " + "infeasible.") else: - config.logger.info("SAT solver pruned node %s" % node_data.node_count) + config.logger.info("SAT solver pruned node %s" % + node_data.node_count) new_lb = new_ub = float('inf') else: # Solve model subproblem if config.solve_local_rnGDP: solve_data.config.logger.debug( - "Screening node %s with LB %.10g and %s inactive disjunctions." % ( - node_data.node_count, node_data.obj_lb, node_data.num_unbranched_disjunctions - )) - new_lb, new_ub = _solve_local_rnGDP_subproblem(node_model, solve_data) + "Screening node %s with LB %.10g and %s inactive " + "disjunctions." 
% (node_data.node_count, node_data.obj_lb, + node_data.num_unbranched_disjunctions)) + new_lb, new_ub = _solve_local_rnGDP_subproblem(node_model, + solve_data) else: new_lb, new_ub = float('-inf'), float('inf') new_lb = max(node_data.obj_lb, new_lb) - new_node_data = node_data._replace(obj_lb=new_lb, obj_ub=new_ub, is_screened=True) + new_node_data = node_data._replace(obj_lb=new_lb, obj_ub=new_ub, + is_screened=True) return new_node_data @@ -261,11 +308,13 @@ def _evaluate_node(node_data, node_model, solve_data): config = solve_data.config # Solve model subproblem solve_data.config.logger.info( - "Exploring node %s with LB %.10g UB %.10g and %s inactive disjunctions." % ( - node_data.node_count, node_data.obj_lb, node_data.obj_ub, node_data.num_unbranched_disjunctions - )) + "Exploring node %s with LB %.10g UB %.10g and %s inactive " + "disjunctions." % (node_data.node_count, node_data.obj_lb, + node_data.obj_ub, + node_data.num_unbranched_disjunctions)) new_lb, new_ub = _solve_rnGDP_subproblem(node_model, solve_data) - new_node_data = node_data._replace(obj_lb=new_lb, obj_ub=new_ub, is_evaluated=True) + new_node_data = node_data._replace(obj_lb=new_lb, obj_ub=new_ub, + is_evaluated=True) return new_node_data @@ -290,8 +339,10 @@ def _solve_rnGDP_subproblem(model, solve_data): elapsed = get_main_elapsed_time(solve_data.timing) remaining = max(config.time_limit - elapsed, 1) minlp_args['add_options'] = minlp_args.get('add_options', []) - minlp_args['add_options'].append('option reslim=%s;' % remaining) - result = SolverFactory(config.minlp_solver).solve(subproblem, **minlp_args) + minlp_args['add_options'].append('option reslim=%s;' % + remaining) + result = SolverFactory(config.minlp_solver).solve(subproblem, + **minlp_args) except RuntimeError as e: config.logger.warning( "Solver encountered RuntimeError. Treating as infeasible. 
" @@ -306,8 +357,10 @@ def _solve_rnGDP_subproblem(model, solve_data): term_cond = result.solver.termination_condition if term_cond == tc.optimal: assert result.solver.status is SolverStatus.ok - lb = result.problem.lower_bound if not obj_sense_correction else -result.problem.upper_bound - ub = result.problem.upper_bound if not obj_sense_correction else -result.problem.lower_bound + lb = result.problem.lower_bound if not obj_sense_correction else \ + -result.problem.upper_bound + ub = result.problem.upper_bound if not obj_sense_correction else \ + -result.problem.lower_bound copy_var_list_values( from_list=subproblem.GDPopt_utils.variable_list, to_list=model.GDPopt_utils.variable_list, @@ -316,8 +369,10 @@ def _solve_rnGDP_subproblem(model, solve_data): return lb, ub elif term_cond == tc.locallyOptimal or term_cond == tc.feasible: assert result.solver.status is SolverStatus.ok - lb = result.problem.lower_bound if not obj_sense_correction else -result.problem.upper_bound - ub = result.problem.upper_bound if not obj_sense_correction else -result.problem.lower_bound + lb = result.problem.lower_bound if not obj_sense_correction else \ + -result.problem.upper_bound + ub = result.problem.upper_bound if not obj_sense_correction else \ + -result.problem.lower_bound # TODO handle LB absent copy_var_list_values( from_list=subproblem.GDPopt_utils.variable_list, @@ -340,7 +395,8 @@ def _solve_rnGDP_subproblem(model, solve_data): ) return float('inf'), float('inf') else: - config.logger.warning("Unknown termination condition of %s. Treating as infeasible." % term_cond) + config.logger.warning("Unknown termination condition of %s. " + "Treating as infeasible." 
% term_cond) copy_var_list_values( from_list=subproblem.GDPopt_utils.variable_list, to_list=model.GDPopt_utils.variable_list, @@ -357,7 +413,8 @@ def _solve_local_rnGDP_subproblem(model, solve_data): try: with SuppressInfeasibleWarning(): - result = SolverFactory(config.local_minlp_solver).solve(subproblem, **config.local_minlp_solver_args) + result = SolverFactory(config.local_minlp_solver).solve( + subproblem, **config.local_minlp_solver_args) except RuntimeError as e: config.logger.warning( "Solver encountered RuntimeError. Treating as infeasible. " @@ -372,8 +429,10 @@ def _solve_local_rnGDP_subproblem(model, solve_data): term_cond = result.solver.termination_condition if term_cond == tc.optimal: assert result.solver.status is SolverStatus.ok - lb = result.problem.lower_bound if not obj_sense_correction else -result.problem.upper_bound - ub = result.problem.upper_bound if not obj_sense_correction else -result.problem.lower_bound + lb = result.problem.lower_bound if not obj_sense_correction else \ + -result.problem.upper_bound + ub = result.problem.upper_bound if not obj_sense_correction else \ + -result.problem.lower_bound copy_var_list_values( from_list=subproblem.GDPopt_utils.variable_list, to_list=model.GDPopt_utils.variable_list, @@ -382,8 +441,10 @@ def _solve_local_rnGDP_subproblem(model, solve_data): return float('-inf'), ub elif term_cond == tc.locallyOptimal or term_cond == tc.feasible: assert result.solver.status is SolverStatus.ok - lb = result.problem.lower_bound if not obj_sense_correction else -result.problem.upper_bound - ub = result.problem.upper_bound if not obj_sense_correction else -result.problem.lower_bound + lb = result.problem.lower_bound if not obj_sense_correction else \ + -result.problem.upper_bound + ub = result.problem.upper_bound if not obj_sense_correction else \ + -result.problem.lower_bound # TODO handle LB absent copy_var_list_values( from_list=subproblem.GDPopt_utils.variable_list, @@ -406,7 +467,8 @@ def 
_solve_local_rnGDP_subproblem(model, solve_data): ) return float('-inf'), float('inf') else: - config.logger.warning("Unknown termination condition of %s. Treating as infeasible." % term_cond) + config.logger.warning("Unknown termination condition of %s. " + "Treating as infeasible." % term_cond) copy_var_list_values( from_list=subproblem.GDPopt_utils.variable_list, to_list=model.GDPopt_utils.variable_list, diff --git a/pyomo/contrib/gdpopt/config_options.py b/pyomo/contrib/gdpopt/config_options.py index 368b14b37a9..64f0cd159dc 100644 --- a/pyomo/contrib/gdpopt/config_options.py +++ b/pyomo/contrib/gdpopt/config_options.py @@ -110,32 +110,37 @@ def _add_OA_configs(CONFIG): )) CONFIG.declare("round_discrete_vars", ConfigValue( default=True, - description="flag to round subproblem discrete variable values to the nearest integer. " - "Rounding is done before fixing disjuncts." + description="flag to round subproblem discrete variable values to the " + "nearest integer. Rounding is done before fixing disjuncts." )) CONFIG.declare("force_subproblem_nlp", ConfigValue( default=False, - description="Force subproblems to be NLP, even if discrete variables exist." + description="Force subproblems to be NLP, even if discrete variables " + "exist." )) CONFIG.declare("mip_presolve", ConfigValue( default=True, - description="Flag to enable or diable GDPopt MIP presolve. Default=True.", + description="Flag to enable or diable GDPopt MIP presolve. " + "Default=True.", domain=bool )) CONFIG.declare("subproblem_presolve", ConfigValue( default=True, - description="Flag to enable or disable subproblem presolve. Default=True.", + description="Flag to enable or disable subproblem presolve. " + "Default=True.", domain=bool )) CONFIG.declare("calc_disjunctive_bounds", ConfigValue( default=False, - description="Calculate special disjunctive variable bounds for GLOA. False by default.", + description="Calculate special disjunctive variable bounds for GLOA. 
" + "False by default.", domain=bool )) CONFIG.declare("obbt_disjunctive_bounds", ConfigValue( default=False, - description="Use optimality-based bounds tightening rather than feasibility-based bounds tightening " - "to compute disjunctive variable bounds. False by default.", + description="Use optimality-based bounds tightening rather than " + "feasibility-based bounds tightening to compute disjunctive variable " + "bounds. False by default.", domain=bool )) return CONFIG @@ -151,7 +156,8 @@ def _add_BB_configs(CONFIG): CONFIG.declare("solve_local_rnGDP", ConfigValue( default=False, domain=bool, - description="When True, GDPopt-LBB will solve a local MINLP at each node." + description="When True, GDPopt-LBB will solve a local MINLP at each " + "node." )) diff --git a/pyomo/contrib/gdpopt/cut_generation.py b/pyomo/contrib/gdpopt/cut_generation.py index 49b7db40cd2..bc4f8fdb602 100644 --- a/pyomo/contrib/gdpopt/cut_generation.py +++ b/pyomo/contrib/gdpopt/cut_generation.py @@ -114,7 +114,8 @@ def add_outer_approximation_cuts(nlp_result, solve_data, config): oa_cuts = oa_utils.GDPopt_OA_cuts slack_var = oa_utils.GDPopt_OA_slacks.add() - rhs = value(constr.lower) if constr.has_lb() else value(constr.upper) + rhs = value(constr.lower) if constr.has_lb() else value( + constr.upper) try: new_oa_cut = ( copysign(1, sign_adjust * dual_value) * ( @@ -129,7 +130,8 @@ def add_outer_approximation_cuts(nlp_result, solve_data, config): counter += 1 except ZeroDivisionError: config.logger.warning( - "Zero division occured attempting to generate OA cut for constraint %s.\n" + "Zero division occured attempting to generate OA cut for " + "constraint %s.\n" "Skipping OA cut generation for this constraint." 
% (constr.name,) ) @@ -146,10 +148,9 @@ def add_affine_cuts(nlp_result, solve_data, config): m = solve_data.linear_GDP if config.calc_disjunctive_bounds: with time_code(solve_data.timing, "disjunctive variable bounding"): - TransformationFactory('contrib.compute_disj_var_bounds').apply_to( - m, - solver=config.mip_solver if config.obbt_disjunctive_bounds else None - ) + TransformationFactory('contrib.compute_disj_var_bounds').\ + apply_to( m, solver=config.mip_solver if + config.obbt_disjunctive_bounds else None ) config.logger.info("Adding affine cuts.") GDPopt = m.GDPopt_utils counter = 0 @@ -158,7 +159,8 @@ def add_affine_cuts(nlp_result, solve_data, config): var.value = val for constr in constraints_in_True_disjuncts(m, config): - # Note: this includes constraints that are deactivated in the current model (linear_GDP) + # Note: this includes constraints that are deactivated in the + # current model (linear_GDP) disjunctive_var_bounds = disjunctive_bounds(constr.parent_block()) @@ -174,14 +176,17 @@ def add_affine_cuts(nlp_result, solve_data, config): try: mc_eqn = mc(constr.body, disjunctive_var_bounds) except MCPP_Error as e: - config.logger.debug("Skipping constraint %s due to MCPP error %s" % (constr.name, str(e))) + config.logger.debug("Skipping constraint %s due to MCPP " + "error %s" % (constr.name, str(e))) continue # skip to the next constraint ccSlope = mc_eqn.subcc() cvSlope = mc_eqn.subcv() ccStart = mc_eqn.concave() cvStart = mc_eqn.convex() - ub_int = min(constr.upper, mc_eqn.upper()) if constr.has_ub() else mc_eqn.upper() - lb_int = max(constr.lower, mc_eqn.lower()) if constr.has_lb() else mc_eqn.lower() + ub_int = min(constr.upper, mc_eqn.upper()) if constr.has_ub() \ + else mc_eqn.upper() + lb_int = max(constr.lower, mc_eqn.lower()) if constr.has_lb() \ + else mc_eqn.lower() parent_block = constr.parent_block() # Create a block on which to put outer approximation cuts. 
@@ -204,7 +209,8 @@ def add_affine_cuts(nlp_result, solve_data, config): config.logger.info("Added %s affine cuts" % counter) -def add_integer_cut(var_values, target_model, solve_data, config, feasible=False): +def add_integer_cut(var_values, target_model, solve_data, config, + feasible=False): """Add an integer cut to the target GDP model.""" with time_code(solve_data.timing, 'integer cut generation'): m = target_model @@ -266,6 +272,5 @@ def add_integer_cut(var_values, target_model, solve_data, config, feasible=False if config.calc_disjunctive_bounds: with time_code(solve_data.timing, "disjunctive variable bounding"): TransformationFactory('contrib.compute_disj_var_bounds').apply_to( - m, - solver=config.mip_solver if config.obbt_disjunctive_bounds else None - ) + m, solver=config.mip_solver if config.obbt_disjunctive_bounds + else None ) diff --git a/pyomo/contrib/gdpopt/data_class.py b/pyomo/contrib/gdpopt/data_class.py index 3ee21fb616c..905dbde0a97 100644 --- a/pyomo/contrib/gdpopt/data_class.py +++ b/pyomo/contrib/gdpopt/data_class.py @@ -5,7 +5,8 @@ class GDPoptSolveData(object): """Data container to hold solve-instance data. 
Attributes: - - original_model (ConcreteModel): the original model that the user gave us to solve + - original_model (ConcreteModel): the original model that the user gave + us to solve - working_model (ConcreteModel): the original model after preprocessing - results (SolverResults): Pyomo results objective - timing (Bunch): dictionary of time elapsed for solver functions diff --git a/pyomo/contrib/gdpopt/iterate.py b/pyomo/contrib/gdpopt/iterate.py index 83d3d697fb8..0f3a7e6308a 100644 --- a/pyomo/contrib/gdpopt/iterate.py +++ b/pyomo/contrib/gdpopt/iterate.py @@ -5,7 +5,8 @@ add_outer_approximation_cuts, add_affine_cuts) from pyomo.contrib.gdpopt.mip_solve import solve_LOA_master -from pyomo.contrib.gdpopt.nlp_solve import (solve_global_subproblem, solve_local_subproblem) +from pyomo.contrib.gdpopt.nlp_solve import (solve_global_subproblem, + solve_local_subproblem) from pyomo.opt import TerminationCondition as tc from pyomo.contrib.gdpopt.util import time_code, get_main_elapsed_time @@ -38,19 +39,23 @@ def GDPopt_iteration_loop(solve_data, config): # Solve NLP subproblem if solve_data.active_strategy == 'LOA': with time_code(solve_data.timing, 'nlp'): - nlp_result = solve_local_subproblem(mip_result, solve_data, config) + nlp_result = solve_local_subproblem(mip_result, solve_data, + config) if nlp_result.feasible: add_outer_approximation_cuts(nlp_result, solve_data, config) elif solve_data.active_strategy == 'GLOA': with time_code(solve_data.timing, 'nlp'): - nlp_result = solve_global_subproblem(mip_result, solve_data, config) + nlp_result = solve_global_subproblem(mip_result, solve_data, + config) if nlp_result.feasible: add_affine_cuts(nlp_result, solve_data, config) elif solve_data.active_strategy == 'RIC': with time_code(solve_data.timing, 'nlp'): - nlp_result = solve_local_subproblem(mip_result, solve_data, config) + nlp_result = solve_local_subproblem(mip_result, solve_data, + config) else: - raise ValueError('Unrecognized strategy: ' + 
solve_data.active_strategy) + raise ValueError('Unrecognized strategy: ' + + solve_data.active_strategy) # Add integer cut add_integer_cut( diff --git a/pyomo/contrib/gdpopt/master_initialize.py b/pyomo/contrib/gdpopt/master_initialize.py index 2bc59a8f2d4..3575760972d 100644 --- a/pyomo/contrib/gdpopt/master_initialize.py +++ b/pyomo/contrib/gdpopt/master_initialize.py @@ -73,7 +73,8 @@ def init_custom_disjuncts(solve_data, config): clone_disj.indicator_var.fix(True) mip_result = solve_linear_GDP(linear_GDP, solve_data, config) if mip_result.feasible: - nlp_result = solve_disjunctive_subproblem(mip_result, solve_data, config) + nlp_result = solve_disjunctive_subproblem(mip_result, solve_data, + config) if nlp_result.feasible: add_subproblem_cuts(nlp_result, solve_data, config) add_integer_cut( @@ -100,7 +101,8 @@ def init_fixed_disjuncts(solve_data, config): TransformationFactory('gdp.fix_disjuncts').apply_to(linear_GDP) mip_result = solve_linear_GDP(linear_GDP, solve_data, config) if mip_result.feasible: - nlp_result = solve_disjunctive_subproblem(mip_result, solve_data, config) + nlp_result = solve_disjunctive_subproblem(mip_result, solve_data, + config) if nlp_result.feasible: add_subproblem_cuts(nlp_result, solve_data, config) add_integer_cut( @@ -138,11 +140,12 @@ def init_max_binaries(solve_data, config): # Solve mip_results = solve_linear_GDP(linear_GDP, solve_data, config) if mip_results.feasible: - nlp_result = solve_disjunctive_subproblem(mip_results, solve_data, config) + nlp_result = solve_disjunctive_subproblem(mip_results, solve_data, + config) if nlp_result.feasible: add_subproblem_cuts(nlp_result, solve_data, config) - add_integer_cut(mip_results.var_values, solve_data.linear_GDP, solve_data, config, - feasible=nlp_result.feasible) + add_integer_cut(mip_results.var_values, solve_data.linear_GDP, + solve_data, config, feasible=nlp_result.feasible) else: config.logger.info( "Linear relaxation for initialization was infeasible. 
" @@ -187,7 +190,8 @@ def init_set_covering(solve_data, config): # problem is infeasible. break return False # solve local NLP - subprob_result = solve_disjunctive_subproblem(mip_result, solve_data, config) + subprob_result = solve_disjunctive_subproblem(mip_result, solve_data, + config) if subprob_result.feasible: # if successful, updated sets active_disjuncts = list( diff --git a/pyomo/contrib/gdpopt/mip_solve.py b/pyomo/contrib/gdpopt/mip_solve.py index 8789f044b40..58b8f0b1b74 100644 --- a/pyomo/contrib/gdpopt/mip_solve.py +++ b/pyomo/contrib/gdpopt/mip_solve.py @@ -7,7 +7,8 @@ from pyomo.common.errors import InfeasibleConstraintException from pyomo.contrib.fbbt.fbbt import fbbt from pyomo.contrib.gdpopt.data_class import MasterProblemResult -from pyomo.contrib.gdpopt.util import SuppressInfeasibleWarning, _DoNothing, get_main_elapsed_time +from pyomo.contrib.gdpopt.util import (SuppressInfeasibleWarning, _DoNothing, + get_main_elapsed_time) from pyomo.core import (Block, Expression, Objective, TransformationFactory, Var, minimize, value, Constraint) from pyomo.gdp import Disjunct @@ -111,13 +112,15 @@ def solve_linear_GDP(linear_GDP_model, solve_data, config): results, terminate_cond = distinguish_mip_infeasible_or_unbounded( m, config) if terminate_cond is tc.unbounded: - # Solution is unbounded. Add an arbitrary bound to the objective and resolve. - # This occurs when the objective is nonlinear. The nonlinear objective is moved - # to the constraints, and deactivated for the linear master problem. + # Solution is unbounded. Add an arbitrary bound to the objective and + # resolve. This occurs when the objective is nonlinear. The nonlinear + # objective is moved to the constraints, and deactivated for the linear + # master problem. obj_bound = 1E15 config.logger.warning( 'Linear GDP was unbounded. ' - 'Resolving with arbitrary bound values of (-{0:.10g}, {0:.10g}) on the objective. 
' + 'Resolving with arbitrary bound values of (-{0:.10g}, {0:.10g}) ' + 'on the objective. ' 'Check your initialization routine.'.format(obj_bound)) main_objective = next(m.component_data_objects(Objective, active=True)) GDPopt.objective_bound = Constraint( @@ -229,7 +232,8 @@ def solve_LOA_master(solve_data, config): mip_result.var_values ) config.logger.info( - 'ITER {:d}.{:d}.{:d}-MIP: OBJ: {:.10g} LB: {:.10g} UB: {:.10g}'.format( + 'ITER {:d}.{:d}.{:d}-MIP: OBJ: {:.10g} LB: {:.10g} UB: {:.10g}'.\ + format( solve_data.master_iteration, solve_data.mip_iteration, solve_data.nlp_iteration, diff --git a/pyomo/contrib/gdpopt/nlp_solve.py b/pyomo/contrib/gdpopt/nlp_solve.py index 55b6df81660..9a95b2fe0fe 100644 --- a/pyomo/contrib/gdpopt/nlp_solve.py +++ b/pyomo/contrib/gdpopt/nlp_solve.py @@ -18,7 +18,8 @@ from pyomo.contrib.gdpopt.data_class import SubproblemResult from pyomo.contrib.gdpopt.util import (SuppressInfeasibleWarning, is_feasible, get_main_elapsed_time) -from pyomo.core import Constraint, TransformationFactory, minimize, value, Objective +from pyomo.core import (Constraint, TransformationFactory, minimize, value, + Objective) from pyomo.core.expr import current as EXPR from pyomo.opt import SolverFactory, SolverResults from pyomo.opt import TerminationCondition as tc @@ -52,7 +53,8 @@ def solve_linear_subproblem(mip_model, solve_data, config): mip_solver = SolverFactory(config.mip_solver) if not mip_solver.available(): - raise RuntimeError("MIP solver %s is not available." % config.mip_solver) + raise RuntimeError("MIP solver %s is not available." 
% + config.mip_solver) with SuppressInfeasibleWarning(): mip_args = dict(config.mip_solver_args) elapsed = get_main_elapsed_time(solve_data.timing) @@ -61,14 +63,16 @@ def solve_linear_subproblem(mip_model, solve_data, config): mip_args['add_options'] = mip_args.get('add_options', []) mip_args['add_options'].append('option reslim=%s;' % remaining) elif config.mip_solver == 'multisolve': - mip_args['time_limit'] = min(mip_args.get('time_limit', float('inf')), remaining) + mip_args['time_limit'] = min(mip_args.get('time_limit', + float('inf')), remaining) results = mip_solver.solve(mip_model, **mip_args) subprob_result = SubproblemResult() subprob_result.feasible = True subprob_result.var_values = list(v.value for v in GDPopt.variable_list) subprob_result.pyomo_results = results - subprob_result.dual_values = list(mip_model.dual.get(c, None) for c in GDPopt.constraint_list) + subprob_result.dual_values = list(mip_model.dual.get(c, None) for c in + GDPopt.constraint_list) subprob_terminate_cond = results.solver.termination_condition if subprob_terminate_cond is tc.optimal: @@ -124,10 +128,13 @@ def solve_NLP(nlp_model, solve_data, config): nlp_args['add_options'] = nlp_args.get('add_options', []) nlp_args['add_options'].append('option reslim=%s;' % remaining) elif config.nlp_solver == 'multisolve': - nlp_args['time_limit'] = min(nlp_args.get('time_limit', float('inf')), remaining) + nlp_args['time_limit'] = min(nlp_args.get('time_limit', + float('inf')), + remaining) results = nlp_solver.solve(nlp_model, **nlp_args) except ValueError as err: - if 'Cannot load a SolverResults object with bad status: error' in str(err): + if 'Cannot load a SolverResults object with bad status: error' in \ + str(err): results = SolverResults() results.solver.termination_condition = tc.error results.solver.message = str(err) @@ -143,7 +150,8 @@ def solve_NLP(nlp_model, solve_data, config): for c in GDPopt.constraint_list) term_cond = results.solver.termination_condition - if any(term_cond 
== cond for cond in (tc.optimal, tc.locallyOptimal, tc.feasible)): + if any(term_cond == cond for cond in (tc.optimal, tc.locallyOptimal, + tc.feasible)): pass elif term_cond == tc.infeasible: config.logger.info('NLP subproblem was infeasible.') @@ -177,10 +185,12 @@ def solve_NLP(nlp_model, solve_data, config): results.solver.message) nlp_result.feasible = False elif term_cond == tc.error: - config.logger.info("NLP solver had a termination condition of 'error': %s" % results.solver.message) + config.logger.info("NLP solver had a termination condition of 'error': " + "%s" % results.solver.message) nlp_result.feasible = False elif term_cond == tc.maxTimeLimit: - config.logger.info("NLP solver ran out of time. Assuming infeasible for now.") + config.logger.info("NLP solver ran out of time. Assuming infeasible " + "for now.") nlp_result.feasible = False else: raise ValueError( @@ -223,7 +233,9 @@ def solve_MINLP(model, solve_data, config): minlp_args['add_options'] = minlp_args.get('add_options', []) minlp_args['add_options'].append('option reslim=%s;' % remaining) elif config.minlp_solver == 'multisolve': - minlp_args['time_limit'] = min(minlp_args.get('time_limit', float('inf')), remaining) + minlp_args['time_limit'] = min(minlp_args.get('time_limit', + float('inf')), + remaining) results = minlp_solver.solve(model, **minlp_args) subprob_result = SubproblemResult() @@ -235,7 +247,8 @@ def solve_MINLP(model, solve_data, config): for c in GDPopt.constraint_list) term_cond = results.solver.termination_condition - if any(term_cond == cond for cond in (tc.optimal, tc.locallyOptimal, tc.feasible)): + if any(term_cond == cond for cond in (tc.optimal, tc.locallyOptimal, + tc.feasible)): pass elif term_cond == tc.infeasible: config.logger.info('MINLP subproblem was infeasible.') @@ -252,7 +265,8 @@ def solve_MINLP(model, solve_data, config): else: subprob_result.feasible = False elif term_cond == tc.maxTimeLimit: - config.logger.info('MINLP subproblem failed to converge 
within time limit.') + config.logger.info('MINLP subproblem failed to converge within time ' + 'limit.') if is_feasible(model, config): config.logger.info( 'MINLP solution is still feasible. ' @@ -260,8 +274,8 @@ def solve_MINLP(model, solve_data, config): else: subprob_result.feasible = False elif term_cond == tc.intermediateNonInteger: - config.logger.info( - "MINLP solver could not find feasible integer solution: %s" % results.solver.message) + config.logger.info( "MINLP solver could not find feasible integer" + " solution: %s" % results.solver.message) subprob_result.feasible = False else: raise ValueError( @@ -289,7 +303,8 @@ def detect_unfixed_discrete_vars(model): constr.body, include_fixed=False) if not v.is_continuous()) for obj in model.component_data_objects(Objective, active=True): - var_set.update(v for v in EXPR.identify_variables(obj.expr, include_fixed=False) + var_set.update(v for v in EXPR.identify_variables(obj.expr, + include_fixed=False) if not v.is_continuous()) return var_set @@ -316,7 +331,8 @@ def preprocess_subproblem(m, config): def initialize_subproblem(model, solve_data): """Perform initialization of the subproblem. - Presently, this just restores the continuous variables to the original model values. + Presently, this just restores the continuous variables to the original + model values. 
""" # restore original continuous variable values @@ -336,7 +352,8 @@ def initialize_subproblem(model, solve_data): def update_subproblem_progress_indicators(solved_model, solve_data, config): """Update the progress indicators for the subproblem.""" GDPopt = solved_model.GDPopt_utils - objective = next(solved_model.component_data_objects(Objective, active=True)) + objective = next(solved_model.component_data_objects(Objective, + active=True)) if objective.sense == minimize: old_UB = solve_data.UB solve_data.UB = min(value(objective.expr), solve_data.UB) @@ -365,7 +382,8 @@ def update_subproblem_progress_indicators(solved_model, solve_data, config): if objective.sense == minimize else (improvement_tag, "")) config.logger.info( - 'ITER {:d}.{:d}.{:d}-NLP: OBJ: {:.10g} LB: {:.10g} {:s} UB: {:.10g} {:s}'.format( + 'ITER {:d}.{:d}.{:d}-NLP: OBJ: {:.10g} LB: {:.10g} {:s} UB: {:.10g} ' + '{:s}'.format( solve_data.master_iteration, solve_data.mip_iteration, solve_data.nlp_iteration, @@ -435,10 +453,10 @@ def solve_local_subproblem(mip_result, solve_data, config): continue rounded_val = int(round(val)) if fabs(val - rounded_val) > config.integer_tolerance: - raise ValueError( - "Discrete variable %s value %s is not " - "within tolerance %s of %s." % - (var.name, var.value, config.integer_tolerance, rounded_val)) + raise ValueError( "Discrete variable %s value %s is not " + "within tolerance %s of %s." 
% + (var.name, var.value, + config.integer_tolerance, rounded_val)) else: # variable is binary and within tolerances if config.round_discrete_vars: @@ -458,13 +476,14 @@ def solve_local_subproblem(mip_result, solve_data, config): return get_infeasible_result_object( subprob, "Preprocessing determined problem to be infeasible.") - if not any(constr.body.polynomial_degree() not in (1, 0) - for constr in subprob.component_data_objects(Constraint, active=True)): + if not any(constr.body.polynomial_degree() not in (1, 0) for constr in + subprob.component_data_objects(Constraint, active=True)): subprob_result = solve_linear_subproblem(subprob, solve_data, config) else: unfixed_discrete_vars = detect_unfixed_discrete_vars(subprob) if config.force_subproblem_nlp and len(unfixed_discrete_vars) > 0: - raise RuntimeError("Unfixed discrete variables found on the NLP subproblem.") + raise RuntimeError("Unfixed discrete variables found on the NLP " + "subproblem.") elif len(unfixed_discrete_vars) == 0: subprob_result = solve_NLP(subprob, solve_data, config) else: @@ -504,10 +523,10 @@ def solve_global_subproblem(mip_result, solve_data, config): continue rounded_val = int(round(val)) if fabs(val - rounded_val) > config.integer_tolerance: - raise ValueError( - "Discrete variable %s value %s is not " - "within tolerance %s of %s." % - (var.name, var.value, config.integer_tolerance, rounded_val)) + raise ValueError( "Discrete variable %s value %s is not " + "within tolerance %s of %s." 
% + (var.name, var.value, + config.integer_tolerance, rounded_val)) else: # variable is binary and within tolerances if config.round_discrete_vars: @@ -528,7 +547,8 @@ def solve_global_subproblem(mip_result, solve_data, config): unfixed_discrete_vars = detect_unfixed_discrete_vars(subprob) if config.force_subproblem_nlp and len(unfixed_discrete_vars) > 0: - raise RuntimeError("Unfixed discrete variables found on the NLP subproblem.") + raise RuntimeError("Unfixed discrete variables found on the NLP " + "subproblem.") elif len(unfixed_discrete_vars) == 0: subprob_result = solve_NLP(subprob, solve_data, config) else: @@ -541,9 +561,11 @@ def solve_global_subproblem(mip_result, solve_data, config): def get_infeasible_result_object(model, message=""): infeas_result = SubproblemResult() infeas_result.feasible = False - infeas_result.var_values = list(v.value for v in model.GDPopt_utils.variable_list) + infeas_result.var_values = list(v.value for v in + model.GDPopt_utils.variable_list) infeas_result.pyomo_results = SolverResults() infeas_result.pyomo_results.solver.termination_condition = tc.infeasible infeas_result.pyomo_results.message = message - infeas_result.dual_values = list(None for _ in model.GDPopt_utils.constraint_list) + infeas_result.dual_values = list(None for _ in + model.GDPopt_utils.constraint_list) return infeas_result diff --git a/pyomo/contrib/gdpopt/tests/test_LBB.py b/pyomo/contrib/gdpopt/tests/test_LBB.py index 451a41580bc..5cb093714a8 100644 --- a/pyomo/contrib/gdpopt/tests/test_LBB.py +++ b/pyomo/contrib/gdpopt/tests/test_LBB.py @@ -7,7 +7,8 @@ from pyomo.common.fileutils import import_file from pyomo.contrib.satsolver.satsolver import z3_available -from pyomo.environ import SolverFactory, value, ConcreteModel, Var, Objective, maximize +from pyomo.environ import (SolverFactory, value, ConcreteModel, Var, Objective, + maximize) from pyomo.gdp import Disjunction from pyomo.opt import TerminationCondition @@ -17,10 +18,12 @@ minlp_solver = 'baron' 
minlp_args = dict() solver_available = SolverFactory(minlp_solver).available() -license_available = SolverFactory(minlp_solver).license_is_valid() if solver_available else False +license_available = SolverFactory(minlp_solver).license_is_valid() if \ + solver_available else False -@unittest.skipUnless(solver_available, "Required subsolver %s is not available" % (minlp_solver,)) +@unittest.skipUnless(solver_available, + "Required subsolver %s is not available" % (minlp_solver,)) class TestGDPopt_LBB(unittest.TestCase): """Tests for logic-based branch and bound.""" @@ -38,9 +41,11 @@ def test_infeasible_GDP(self): minlp_solver=minlp_solver, minlp_solver_args=minlp_args, ) - self.assertEqual(result.solver.termination_condition, TerminationCondition.infeasible) + self.assertEqual(result.solver.termination_condition, + TerminationCondition.infeasible) - @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, + "Problem is too big for unlicensed BARON.") def test_LBB_8PP(self): """Test the logic-based branch and bound algorithm.""" exfile = import_file( @@ -54,7 +59,8 @@ def test_LBB_8PP(self): ) self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) - @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, + "Problem is too big for unlicensed BARON.") def test_LBB_8PP_max(self): """Test the logic-based branch and bound algorithm.""" exfile = import_file( @@ -71,7 +77,8 @@ def test_LBB_8PP_max(self): ) self.assertAlmostEqual(value(eight_process.profit.expr), -68, places=1) - @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, + "Problem is too big for unlicensed BARON.") def test_LBB_strip_pack(self): """Test logic-based branch and bound with strip packing.""" exfile = import_file( @@ -86,7 +93,8 @@ def test_LBB_strip_pack(self): self.assertTrue( 
fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) - @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, + "Problem is too big for unlicensed BARON.") @unittest.category('expensive') def test_LBB_constrained_layout(self): """Test LBB with constrained layout.""" @@ -117,8 +125,10 @@ def test_LBB_ex_633_trespalacios(self): objective_value = value(model.obj.expr) self.assertAlmostEqual(objective_value, 4.46, 2) - @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") - @unittest.skipUnless(SolverFactory('bonmin').available(exception_flag=False), "Bonmin is not avaialable") + @unittest.skipUnless(license_available, + "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(SolverFactory('bonmin').available( + exception_flag=False), "Bonmin is not avaialable") def test_LBB_8PP_with_screening(self): """Test the logic-based branch and bound algorithm.""" exfile = import_file( @@ -136,7 +146,8 @@ def test_LBB_8PP_with_screening(self): self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) -@unittest.skipUnless(solver_available, "Required subsolver %s is not available" % (minlp_solver,)) +@unittest.skipUnless(solver_available, + "Required subsolver %s is not available" % (minlp_solver,)) @unittest.skipUnless(z3_available, "Z3 SAT solver is not available.") class TestGDPopt_LBB_Z3(unittest.TestCase): """Tests for logic-based branch and bound with Z3 SAT solver integration.""" @@ -155,9 +166,11 @@ def test_infeasible_GDP(self): minlp_solver=minlp_solver, minlp_solver_args=minlp_args, ) - self.assertEqual(result.solver.termination_condition, TerminationCondition.infeasible) + self.assertEqual(result.solver.termination_condition, + TerminationCondition.infeasible) - @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, + "Problem is too big for unlicensed BARON.") def 
test_LBB_8PP(self): """Test the logic-based branch and bound algorithm.""" exfile = import_file( @@ -171,7 +184,8 @@ def test_LBB_8PP(self): ) self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) - @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, + "Problem is too big for unlicensed BARON.") def test_LBB_strip_pack(self): """Test logic-based branch and bound with strip packing.""" exfile = import_file( @@ -186,7 +200,8 @@ def test_LBB_strip_pack(self): self.assertTrue( fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) - @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, + "Problem is too big for unlicensed BARON.") @unittest.category('expensive') def test_LBB_constrained_layout(self): """Test LBB with constrained layout.""" diff --git a/pyomo/contrib/gdpopt/tests/test_gdpopt.py b/pyomo/contrib/gdpopt/tests/test_gdpopt.py index 300b0d5a657..2f711a2e70e 100644 --- a/pyomo/contrib/gdpopt/tests/test_gdpopt.py +++ b/pyomo/contrib/gdpopt/tests/test_gdpopt.py @@ -23,7 +23,8 @@ from pyomo.contrib.gdpopt.data_class import GDPoptSolveData from pyomo.contrib.gdpopt.mip_solve import solve_linear_GDP from pyomo.contrib.gdpopt.util import is_feasible, time_code -from pyomo.environ import ConcreteModel, Objective, SolverFactory, Var, value, Integers, Block, Constraint, maximize +from pyomo.environ import ( ConcreteModel, Objective, SolverFactory, Var, value, + Integers, Block, Constraint, maximize) from pyomo.gdp import Disjunct, Disjunction from pyomo.contrib.mcpp.pyomo_mcpp import mcpp_available from pyomo.opt import TerminationCondition @@ -40,13 +41,15 @@ GLOA_solvers = (mip_solver, global_nlp_solver, minlp_solver) LOA_solvers_available = all(SolverFactory(s).available() for s in LOA_solvers) GLOA_solvers_available = all(SolverFactory(s).available() for s in GLOA_solvers) -license_available = 
SolverFactory(global_nlp_solver).license_is_valid() if GLOA_solvers_available else False +license_available = SolverFactory(global_nlp_solver).license_is_valid() if \ + GLOA_solvers_available else False class TestGDPoptUnit(unittest.TestCase): """Real unit tests for GDPopt""" - @unittest.skipUnless(SolverFactory(mip_solver).available(), "MIP solver not available") + @unittest.skipUnless(SolverFactory(mip_solver).available(), + "MIP solver not available") def test_solve_linear_GDP_unbounded(self): m = ConcreteModel() m.GDPopt_utils = Block() @@ -58,17 +61,21 @@ def test_solve_linear_GDP_unbounded(self): ]) m.o = Objective(expr=m.z) m.GDPopt_utils.variable_list = [m.x, m.y, m.z] - m.GDPopt_utils.disjunct_list = [m.d._autodisjuncts[0], m.d._autodisjuncts[1]] + m.GDPopt_utils.disjunct_list = [m.d._autodisjuncts[0], + m.d._autodisjuncts[1]] output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING): solver_data = GDPoptSolveData() solver_data.timing = Bunch() with time_code(solver_data.timing, 'main', is_main_timer=True): - solve_linear_GDP(m, solver_data, GDPoptSolver.CONFIG(dict(mip_solver=mip_solver, strategy='LOA'))) - self.assertIn("Linear GDP was unbounded. Resolving with arbitrary bound values", - output.getvalue().strip()) - - @unittest.skipUnless(SolverFactory(mip_solver).available(), "MIP solver not available") + solve_linear_GDP(m, solver_data, + GDPoptSolver.CONFIG(dict(mip_solver=mip_solver, + strategy='LOA'))) + self.assertIn("Linear GDP was unbounded. 
Resolving with arbitrary " + "bound values", output.getvalue().strip()) + + @unittest.skipUnless(SolverFactory(mip_solver).available(), + "MIP solver not available") def test_solve_lp(self): m = ConcreteModel() m.x = Var(bounds=(-5, 5)) @@ -76,12 +83,14 @@ def test_solve_lp(self): m.o = Objective(expr=m.x) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): - SolverFactory('gdpopt').solve(m, mip_solver=mip_solver, strategy='LOA') + SolverFactory('gdpopt').solve(m, mip_solver=mip_solver, + strategy='LOA') self.assertIn("Your model is an LP (linear program).", output.getvalue().strip()) self.assertAlmostEqual(value(m.o.expr), 1) - @unittest.skipUnless(SolverFactory(nlp_solver).available(), 'NLP solver not available') + @unittest.skipUnless(SolverFactory(nlp_solver).available(), + 'NLP solver not available') def test_solve_nlp(self): m = ConcreteModel() m.x = Var(bounds=(-5, 5)) @@ -89,12 +98,14 @@ def test_solve_nlp(self): m.o = Objective(expr=m.x ** 2) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): - SolverFactory('gdpopt').solve(m, nlp_solver=nlp_solver, strategy='LOA') + SolverFactory('gdpopt').solve(m, nlp_solver=nlp_solver, + strategy='LOA') self.assertIn("Your model is an NLP (nonlinear program).", output.getvalue().strip()) self.assertAlmostEqual(value(m.o.expr), 1) - @unittest.skipUnless(SolverFactory(mip_solver).available(), "MIP solver not available") + @unittest.skipUnless(SolverFactory(mip_solver).available(), + "MIP solver not available") def test_solve_constant_obj(self): m = ConcreteModel() m.x = Var(bounds=(-5, 5)) @@ -102,56 +113,66 @@ def test_solve_constant_obj(self): m.o = Objective(expr=1) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): - SolverFactory('gdpopt').solve(m, mip_solver=mip_solver, strategy='LOA') + SolverFactory('gdpopt').solve(m, mip_solver=mip_solver, + strategy='LOA') self.assertIn("Your model is an LP (linear 
program).", output.getvalue().strip()) self.assertAlmostEqual(value(m.o.expr), 1) - @unittest.skipUnless(SolverFactory(nlp_solver).available(), 'NLP solver not available') + @unittest.skipUnless(SolverFactory(nlp_solver).available(), + 'NLP solver not available') def test_no_objective(self): m = ConcreteModel() m.x = Var(bounds=(-5, 5)) m.c = Constraint(expr=m.x ** 2 >= 1) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING): - SolverFactory('gdpopt').solve(m, nlp_solver=nlp_solver, strategy='LOA') - self.assertIn("Model has no active objectives. Adding dummy objective.", - output.getvalue().strip()) + SolverFactory('gdpopt').solve(m, nlp_solver=nlp_solver, + strategy='LOA') + self.assertIn("Model has no active objectives. Adding dummy " + "objective.", output.getvalue().strip()) def test_multiple_objectives(self): m = ConcreteModel() m.x = Var() m.o = Objective(expr=m.x) m.o2 = Objective(expr=m.x + 1) - with self.assertRaisesRegex(ValueError, "Model has multiple active objectives"): + with self.assertRaisesRegex(ValueError, "Model has multiple active " + "objectives"): SolverFactory('gdpopt').solve(m, strategy='LOA') def test_is_feasible_function(self): m = ConcreteModel() m.x = Var(bounds=(0, 3), initialize=2) m.c = Constraint(expr=m.x == 2) - self.assertTrue(is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA')))) + self.assertTrue( + is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA')))) m.c2 = Constraint(expr=m.x <= 1) - self.assertFalse(is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA')))) + self.assertFalse( + is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA')))) m = ConcreteModel() m.x = Var(bounds=(0, 3), initialize=2) m.c = Constraint(expr=m.x >= 5) - self.assertFalse(is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA')))) + self.assertFalse( + is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA')))) m = ConcreteModel() m.x = Var(bounds=(3, 3), initialize=2) - self.assertFalse(is_feasible(m, 
GDPoptSolver.CONFIG(dict(strategy='LOA')))) + self.assertFalse( + is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA')))) m = ConcreteModel() m.x = Var(bounds=(0, 1), initialize=2) - self.assertFalse(is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA')))) + self.assertFalse( + is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA')))) m = ConcreteModel() m.x = Var(bounds=(0, 1), initialize=2) m.d = Disjunct() - with self.assertRaisesRegex(NotImplementedError, "Found active disjunct"): + with self.assertRaisesRegex(NotImplementedError, + "Found active disjunct"): is_feasible(m, GDPoptSolver.CONFIG(dict(strategy='LOA'))) @@ -219,7 +240,8 @@ def test_LOA_8PP_default_init(self): tee=False) self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) - @unittest.skipUnless(SolverFactory('gams').available(exception_flag=False), 'GAMS solver not available') + @unittest.skipUnless(SolverFactory('gams').available(exception_flag=False), + 'GAMS solver not available') def test_LOA_8PP_gams_solver(self): # Make sure that the duals are still correct exfile = import_file( @@ -430,7 +452,8 @@ def test_RIC_8PP_default_init(self): tee=False) self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) - @unittest.skipUnless(SolverFactory('gams').available(exception_flag=False), 'GAMS solver not available') + @unittest.skipUnless(SolverFactory('gams').available(exception_flag=False), + 'GAMS solver not available') def test_RIC_8PP_gams_solver(self): # Make sure that the duals are still correct exfile = import_file( @@ -620,9 +643,11 @@ def test_GDP_integer_vars_infeasible(self): nlp_solver=nlp_solver, minlp_solver=minlp_solver ) - self.assertEqual(res.solver.termination_condition, TerminationCondition.infeasible) + self.assertEqual(res.solver.termination_condition, + TerminationCondition.infeasible) - @unittest.skipUnless(license_available, "Global NLP solver license not available.") + @unittest.skipUnless(license_available, + "Global NLP solver license not 
available.") def test_GLOA_8PP(self): """Test the global logic-based outer approximation algorithm.""" exfile = import_file( @@ -636,7 +661,8 @@ def test_GLOA_8PP(self): ) self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) - @unittest.skipUnless(license_available, "Global NLP solver license not available.") + @unittest.skipUnless(license_available, + "Global NLP solver license not available.") def test_GLOA_8PP_force_NLP(self): """Test the global logic-based outer approximation algorithm.""" exfile = import_file( @@ -651,7 +677,8 @@ def test_GLOA_8PP_force_NLP(self): ) self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) - @unittest.skipUnless(license_available, "Global NLP solver license not available.") + @unittest.skipUnless(license_available, + "Global NLP solver license not available.") def test_GLOA_strip_pack_default_init(self): """Test logic-based outer approximation with strip packing.""" exfile = import_file( @@ -665,7 +692,8 @@ def test_GLOA_strip_pack_default_init(self): self.assertTrue( fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) - @unittest.skipUnless(license_available, "Global NLP solver license not available.") + @unittest.skipUnless(license_available, + "Global NLP solver license not available.") @unittest.category('expensive') def test_GLOA_constrained_layout_default_init(self): """Test LOA with constrained layout.""" diff --git a/pyomo/contrib/gdpopt/util.py b/pyomo/contrib/gdpopt/util.py index 0d71a10e504..3100bedec85 100644 --- a/pyomo/contrib/gdpopt/util.py +++ b/pyomo/contrib/gdpopt/util.py @@ -63,7 +63,8 @@ class SuppressInfeasibleWarning(object): class InfeasibleWarningFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith( - "Loading a SolverResults object with a warning status into model=") + "Loading a SolverResults object with a warning status into " + "model=") warning_filter = InfeasibleWarningFilter() @@ -90,8 +91,9 @@ def presolve_lp_nlp(solve_data, 
config): prob.number_of_disjunctions == 0): config.logger.info('Problem has no discrete decisions.') obj = next(m.component_data_objects(Objective, active=True)) - if (any(c.body.polynomial_degree() not in (1, 0) for c in GDPopt.constraint_list) or - obj.expr.polynomial_degree() not in (1, 0)): + if (any(c.body.polynomial_degree() not in (1, 0) for c in + GDPopt.constraint_list) or obj.expr.polynomial_degree() not in + (1, 0)): config.logger.info( "Your model is an NLP (nonlinear program). " "Using NLP solver %s to solve." % config.nlp_solver) @@ -111,12 +113,14 @@ def presolve_lp_nlp(solve_data, config): return False, None -def process_objective(solve_data, config, move_linear_objective=False, use_mcpp=True, updata_var_con_list=True): +def process_objective(solve_data, config, move_linear_objective=False, + use_mcpp=True, updata_var_con_list=True): """Process model objective function. Check that the model has only 1 valid objective. If the objective is nonlinear, move it into the constraints. - If no objective function exists, emit a warning and create a dummy objective. + If no objective function exists, emit a warning and create a dummy + objective. 
Parameters ---------- @@ -141,7 +145,9 @@ def process_objective(solve_data, config, move_linear_objective=False, use_mcpp= raise ValueError('Model has multiple active objectives.') else: main_obj = active_objectives[0] - solve_data.results.problem.sense = ProblemSense.minimize if main_obj.sense == 1 else ProblemSense.maximize + solve_data.results.problem.sense = ProblemSense.minimize if \ + main_obj.sense == 1 else \ + ProblemSense.maximize solve_data.objective_sense = main_obj.sense # Move the objective to the constraints if it is nonlinear @@ -177,13 +183,15 @@ def process_objective(solve_data, config, move_linear_objective=False, use_mcpp= util_blk.objective = Objective( expr=util_blk.objective_value, sense=main_obj.sense) # Add the new variable and constraint to the working lists - if main_obj.expr.polynomial_degree() not in (1, 0) or (move_linear_objective and updata_var_con_list): + if main_obj.expr.polynomial_degree() not in (1, 0) or \ + (move_linear_objective and updata_var_con_list): util_blk.variable_list.append(util_blk.objective_value) util_blk.continuous_variable_list.append(util_blk.objective_value) util_blk.constraint_list.append(util_blk.objective_constr) util_blk.objective_list.append(util_blk.objective) if util_blk.objective_constr.body.polynomial_degree() in (0, 1): - util_blk.linear_constraint_list.append(util_blk.objective_constr) + util_blk.linear_constraint_list.append( + util_blk.objective_constr) else: util_blk.nonlinear_constraint_list.append( util_blk.objective_constr) @@ -202,8 +210,8 @@ def copy_var_list_values(from_list, to_list, config, ignore_integrality=False): """Copy variable values from one list to another. 
- Rounds to Binary/Integer if neccessary - Sets to zero for NonNegativeReals if neccessary + Rounds to Binary/Integer if necessary + Sets to zero for NonNegativeReals if necessary """ for v_from, v_to in zip(from_list, to_list): if skip_stale and v_from.stale: @@ -222,12 +230,14 @@ def copy_var_list_values(from_list, to_list, config, if ignore_integrality \ and v_to.is_integer(): # not v_to.is_continuous() v_to.value = value(v_from, exception=False) - elif v_to.is_integer() and (fabs(var_val - rounded_val) <= config.integer_tolerance): # not v_to.is_continuous() + elif v_to.is_integer() and (fabs(var_val - rounded_val) <= + config.integer_tolerance): # not v_to.is_continuous() v_to.set_value(rounded_val) - elif 'is not in domain NonNegativeReals' in err_msg and ( - fabs(var_val) <= config.zero_tolerance): + elif abs(var_val) <= config.zero_tolerance and 0 in v_to.domain: v_to.set_value(0) else: + config.logger.error( + 'Unknown validation domain error setting variable %s', (v_to.name,)) raise @@ -298,17 +308,16 @@ def build_ordered_component_lists(model, solve_data): model.component_data_objects( ctype=Constraint, active=True, descend_into=(Block, Disjunct)))) - # print(util_blk.constraint_list) setattr( - util_blk, 'linear_constraint_list', list(c for c in model.component_data_objects( + util_blk, 'linear_constraint_list', list( + c for c in model.component_data_objects( ctype=Constraint, active=True, descend_into=(Block, Disjunct)) if c.body.polynomial_degree() in (0, 1))) - # print(util_blk.linear_constraint_list) setattr( - util_blk, 'nonlinear_constraint_list', list(c for c in model.component_data_objects( + util_blk, 'nonlinear_constraint_list', list( + c for c in model.component_data_objects( ctype=Constraint, active=True, descend_into=(Block, Disjunct)) if c.body.polynomial_degree() not in (0, 1))) - # print(util_blk.nonlinear_constraint_list) setattr( util_blk, 'disjunct_list', list( model.component_data_objects( @@ -419,7 +428,8 @@ def 
setup_results_object(solve_data, config): def constraints_in_True_disjuncts(model, config): - """Yield constraints in disjuncts where the indicator value is set or fixed to True.""" + """Yield constraints in disjuncts where the indicator value is set or + fixed to True.""" for constr in model.component_data_objects(Constraint): yield constr observed_disjuncts = ComponentSet() @@ -460,7 +470,8 @@ def get_main_elapsed_time(timing_data_obj): except AttributeError as e: if 'main_timer_start_time' in str(e): raise e from AttributeError( - "You need to be in a 'time_code' context to use `get_main_elapsed_time()`." + "You need to be in a 'time_code' context to use " + "`get_main_elapsed_time()`." ) @@ -555,11 +566,12 @@ def setup_solver_environment(model, config): yield solve_data # yield setup solver environment - if (solve_data.best_solution_found is not None - and solve_data.best_solution_found is not solve_data.original_model): + if (solve_data.best_solution_found is not None and + solve_data.best_solution_found is not solve_data.original_model): # Update values on the original model copy_var_list_values( - from_list=solve_data.best_solution_found.GDPopt_utils.variable_list, + from_list=solve_data.best_solution_found.GDPopt_utils.\ + variable_list, to_list=solve_data.original_model.GDPopt_utils.variable_list, config=config) @@ -573,5 +585,6 @@ def setup_solver_environment(model, config): def indent(text, prefix): - """This should be replaced with textwrap.indent when we stop supporting python 2.7.""" + """This should be replaced with textwrap.indent when we stop supporting + python 2.7.""" return ''.join(prefix + line for line in text.splitlines(True)) diff --git a/pyomo/contrib/incidence_analysis/interface.py b/pyomo/contrib/incidence_analysis/interface.py index 77cbbaf16e4..b8b4a741e32 100644 --- a/pyomo/contrib/incidence_analysis/interface.py +++ b/pyomo/contrib/incidence_analysis/interface.py @@ -15,13 +15,20 @@ from pyomo.core.base.objective import Objective from 
pyomo.core.base.reference import Reference from pyomo.core.expr.visitor import identify_variables +from pyomo.util.subsystems import create_subsystem_block from pyomo.common.collections import ComponentSet, ComponentMap from pyomo.common.dependencies import scipy_available from pyomo.common.dependencies import networkx as nx from pyomo.contrib.incidence_analysis.matching import maximum_matching -from pyomo.contrib.incidence_analysis.triangularize import block_triangularize +from pyomo.contrib.incidence_analysis.triangularize import ( + block_triangularize, + get_diagonal_blocks, + get_blocks_from_maps, + ) from pyomo.contrib.incidence_analysis.dulmage_mendelsohn import ( dulmage_mendelsohn, + RowPartition, + ColPartition, ) if scipy_available: from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP @@ -69,12 +76,21 @@ def get_incidence_graph(variables, constraints, include_fixed=True): graph.add_nodes_from(range(M, M+N), bipartite=1) var_node_map = ComponentMap((v, M+i) for i, v in enumerate(variables)) for i, con in enumerate(constraints): - for var in identify_variables(con.body, include_fixed=include_fixed): + for var in identify_variables(con.expr, include_fixed=include_fixed): if var in var_node_map: graph.add_edge(i, var_node_map[var]) return graph +def _generate_variables_in_constraints(constraints, include_fixed=False): + known_vars = ComponentSet() + for con in constraints: + for var in identify_variables(con.expr, include_fixed=include_fixed): + if var not in known_vars: + known_vars.add(var) + yield var + + def get_structural_incidence_matrix(variables, constraints, include_fixed=True): """ This function gets the incidence matrix of Pyomo constraints and variables. 
@@ -101,7 +117,7 @@ def get_structural_incidence_matrix(variables, constraints, include_fixed=True): cols = [] for i, con in enumerate(constraints): cols.extend(var_idx_map[v] for v in - identify_variables(con.body, include_fixed=include_fixed) + identify_variables(con.expr, include_fixed=include_fixed) if v in var_idx_map) rows.extend([i]*(len(cols) - len(rows))) assert len(rows) == len(cols) @@ -116,39 +132,14 @@ def get_numeric_incidence_matrix(variables, constraints): constraints with respect to variables. """ # NOTE: There are several ways to get a numeric incidence matrix - # from a Pyomo model. This function implements a somewhat roundabout - # method, which is to construct a dummy Block with the necessary - # variables and constraints, then construct a PyNumero PyomoNLP - # from the block and have PyNumero evaluate the desired Jacobian - # via ASL. + # from a Pyomo model. Here we get the numeric incidence matrix by + # creating a temporary block and using the PyNumero ASL interface. comps = list(variables) + list(constraints) _check_unindexed(comps) - M, N = len(constraints), len(variables) - _block = Block() - _block.construct() - _block.obj = Objective(expr=0) - _block.vars = Reference(variables) - _block.cons = Reference(constraints) - var_set = ComponentSet(variables) - other_vars = [] - for con in constraints: - for var in identify_variables(con.body, include_fixed=False): - # Fixed vars will be ignored by the nl file write, so - # there is no point to including them here. - # A different method of assembling this matrix, e.g. - # Pyomo's automatic differentiation, could support taking - # derivatives with respect to fixed variables. - if var not in var_set: - other_vars.append(var) - var_set.add(var) - # These variables are necessary due to the nl writer's philosophy - # about what constitutes a model. Note that we take derivatives with - # respect to them even though this is not necessary. 
We could fix them - # here to avoid doing this extra work, but that would alter the user's - # model, which we would rather not do. - _block.other_vars = Reference(other_vars) - _nlp = PyomoNLP(_block) - return _nlp.extract_submatrix_jacobian(variables, constraints) + block = create_subsystem_block(constraints, variables) + block._obj = Objective(expr=0) + nlp = PyomoNLP(block) + return nlp.extract_submatrix_jacobian(variables, constraints) class IncidenceGraphInterface(object): @@ -158,7 +149,7 @@ class IncidenceGraphInterface(object): model without constructing multiple PyomoNLPs. """ - def __init__(self, model=None): + def __init__(self, model=None, active=True, include_fixed=False): """ """ # If the user gives us a model or an NLP, we assume they want us @@ -169,6 +160,18 @@ def __init__(self, model=None): if model is None: self.cached = IncidenceMatrixType.NONE elif isinstance(model, PyomoNLP): + if not active: + raise ValueError( + "Cannot get the Jacobian of inactive constraints from the " + "nl interface (PyomoNLP).\nPlease set the `active` flag " + "to True." + ) + if include_fixed: + raise ValueError( + "Cannot get the Jacobian with respect to fixed variables " + "from the nl interface (PyomoNLP).\nPlease set the " + "`include_fixed` flag to False." 
+ ) nlp = model self.cached = IncidenceMatrixType.NUMERIC self.variables = nlp.get_pyomo_variables() @@ -180,8 +183,14 @@ def __init__(self, model=None): self.incidence_matrix = nlp.evaluate_jacobian_eq() elif isinstance(model, Block): self.cached = IncidenceMatrixType.STRUCTURAL - self.variables = list(model.component_data_objects(Var)) - self.constraints = list(model.component_data_objects(Constraint)) + self.constraints = list( + model.component_data_objects(Constraint, active=active) + ) + self.variables = list( + _generate_variables_in_constraints( + self.constraints, include_fixed=include_fixed + ) + ) self.var_index_map = ComponentMap( (var, i) for i, var in enumerate(self.variables)) self.con_index_map = ComponentMap( @@ -197,6 +206,9 @@ def __init__(self, model=None): % (PyomoNLP, Block, type(model)) ) + self.row_block_map = None + self.col_block_map = None + def _validate_input(self, variables, constraints): if variables is None: if self.cached is IncidenceMatrixType.NONE: @@ -269,6 +281,10 @@ def block_triangularize(self, variables=None, constraints=None): matrix = self._extract_submatrix(variables, constraints) row_block_map, col_block_map = block_triangularize(matrix.tocoo()) + # Cache maps in case we want to get diagonal blocks quickly in the + # future. + self.row_block_map = row_block_map + self.col_block_map = col_block_map con_block_map = ComponentMap((constraints[i], idx) for i, idx in row_block_map.items()) var_block_map = ComponentMap((variables[j], idx) @@ -277,6 +293,32 @@ def block_triangularize(self, variables=None, constraints=None): # Hopefully this does not get too confusing... return var_block_map, con_block_map + def get_diagonal_blocks(self, variables=None, constraints=None): + """ + Returns the diagonal blocks in a block triangularization of the + incidence matrix of the provided constraints with respect to the + provided variables. 
+ + Returns + ------- + tuple of lists + The first list contains lists that partition the variables, + the second lists contains lists that partition the constraints. + + """ + variables, constraints = self._validate_input(variables, constraints) + matrix = self._extract_submatrix(variables, constraints) + + if self.row_block_map is None or self.col_block_map is None: + block_rows, block_cols = get_diagonal_blocks(matrix) + else: + block_rows, block_cols = get_blocks_from_maps( + self.row_block_map, self.col_block_map + ) + block_cons = [[constraints[i] for i in block] for block in block_rows] + block_vars = [[variables[i] for i in block] for block in block_cols] + return block_vars, block_cons + def dulmage_mendelsohn(self, variables=None, constraints=None): """ Returns the Dulmage-Mendelsohn partition of the incidence graph @@ -295,11 +337,11 @@ def dulmage_mendelsohn(self, variables=None, constraints=None): matrix = self._extract_submatrix(variables, constraints) row_partition, col_partition = dulmage_mendelsohn(matrix.tocoo()) - con_partition = tuple( - [constraints[i] for i in subset] for subset in row_partition + con_partition = RowPartition( + *[[constraints[i] for i in subset] for subset in row_partition] ) - var_partition = tuple( - [variables[i] for i in subset] for subset in col_partition + var_partition = ColPartition( + *[[variables[i] for i in subset] for subset in col_partition] ) # Switch the order of the maps here to match the method call. # Hopefully this does not get too confusing... 
diff --git a/pyomo/contrib/incidence_analysis/tests/test_interface.py b/pyomo/contrib/incidence_analysis/tests/test_interface.py index bd5b284e760..ecacc4e67af 100644 --- a/pyomo/contrib/incidence_analysis/tests/test_interface.py +++ b/pyomo/contrib/incidence_analysis/tests/test_interface.py @@ -761,6 +761,105 @@ def test_triangularize(self): self.assertEqual(con_block_map[model.mbal[i]], i) self.assertEqual(con_block_map[model.ebal[i]], i) + def test_diagonal_blocks(self): + N = 5 + model = make_gas_expansion_model(N) + igraph = IncidenceGraphInterface() + + # These are the variables and constraints of the square, + # nonsingular subsystem + variables = [] + variables.extend(model.P.values()) + variables.extend(model.T[i] for i in model.streams + if i != model.streams.first()) + variables.extend(model.rho[i] for i in model.streams + if i != model.streams.first()) + variables.extend(model.F[i] for i in model.streams + if i != model.streams.first()) + + constraints = list(model.component_data_objects(pyo.Constraint)) + + var_blocks, con_blocks = igraph.get_diagonal_blocks( + variables, constraints + ) + self.assertIs(igraph.row_block_map, None) + self.assertIs(igraph.col_block_map, None) + self.assertEqual(len(var_blocks), N+1) + self.assertEqual(len(con_blocks), N+1) + + for i, (vars, cons) in enumerate(zip(var_blocks, con_blocks)): + var_set = ComponentSet(vars) + con_set = ComponentSet(cons) + + if i == 0: + pred_var_set = ComponentSet([model.P[0]]) + self.assertEqual(pred_var_set, var_set) + pred_con_set = ComponentSet([model.ideal_gas[0]]) + self.assertEqual(pred_con_set, con_set) + + else: + pred_var_set = ComponentSet([ + model.rho[i], model.T[i], model.P[i], model.F[i] + ]) + pred_con_set = ComponentSet([ + model.ideal_gas[i], + model.expansion[i], + model.mbal[i], + model.ebal[i], + ]) + self.assertEqual(pred_var_set, var_set) + self.assertEqual(pred_con_set, con_set) + + def test_diagonal_blocks_with_cached_maps(self): + N = 5 + model = 
make_gas_expansion_model(N) + igraph = IncidenceGraphInterface() + + # These are the variables and constraints of the square, + # nonsingular subsystem + variables = [] + variables.extend(model.P.values()) + variables.extend(model.T[i] for i in model.streams + if i != model.streams.first()) + variables.extend(model.rho[i] for i in model.streams + if i != model.streams.first()) + variables.extend(model.F[i] for i in model.streams + if i != model.streams.first()) + + constraints = list(model.component_data_objects(pyo.Constraint)) + + igraph.block_triangularize(variables, constraints) + var_blocks, con_blocks = igraph.get_diagonal_blocks( + variables, constraints + ) + self.assertIsNot(igraph.row_block_map, None) + self.assertIsNot(igraph.col_block_map, None) + self.assertEqual(len(var_blocks), N+1) + self.assertEqual(len(con_blocks), N+1) + + for i, (vars, cons) in enumerate(zip(var_blocks, con_blocks)): + var_set = ComponentSet(vars) + con_set = ComponentSet(cons) + + if i == 0: + pred_var_set = ComponentSet([model.P[0]]) + self.assertEqual(pred_var_set, var_set) + pred_con_set = ComponentSet([model.ideal_gas[0]]) + self.assertEqual(pred_con_set, con_set) + + else: + pred_var_set = ComponentSet([ + model.rho[i], model.T[i], model.P[i], model.F[i] + ]) + pred_con_set = ComponentSet([ + model.ideal_gas[i], + model.expansion[i], + model.mbal[i], + model.ebal[i], + ]) + self.assertEqual(pred_var_set, var_set) + self.assertEqual(pred_con_set, con_set) + @unittest.skipUnless(networkx_available, "networkx is not available.") @unittest.skipUnless(scipy_available, "scipy is not available.") @@ -800,6 +899,45 @@ def test_degenerate_solid_phase_model(self): for con in con_dmp[0]+con_dmp[1]: self.assertIn(con, overconstrained_cons) + def test_named_tuple(self): + m = make_degenerate_solid_phase_model() + variables = list(m.component_data_objects(pyo.Var)) + constraints = list(m.component_data_objects(pyo.Constraint)) + + igraph = IncidenceGraphInterface() + var_dmp, con_dmp = 
igraph.dulmage_mendelsohn(variables, constraints) + + underconstrained_vars = ComponentSet(m.flow_comp.values()) + underconstrained_vars.add(m.flow) + underconstrained_cons = ComponentSet(m.flow_eqn.values()) + + dmp_vars_under = var_dmp.unmatched + var_dmp.underconstrained + dmp_vars_over = var_dmp.overconstrained + dmp_cons_under = con_dmp.underconstrained + dmp_cons_over = con_dmp.unmatched + con_dmp.overconstrained + + self.assertEqual(len(dmp_vars_under), len(underconstrained_vars)) + for var in dmp_vars_under: + self.assertIn(var, underconstrained_vars) + + self.assertEqual(len(dmp_cons_under), len(underconstrained_cons)) + for con in dmp_cons_under: + self.assertIn(con, underconstrained_cons) + + overconstrained_cons = ComponentSet(m.holdup_eqn.values()) + overconstrained_cons.add(m.density_eqn) + overconstrained_cons.add(m.sum_eqn) + overconstrained_vars = ComponentSet(m.x.values()) + overconstrained_vars.add(m.rho) + + self.assertEqual(len(dmp_vars_over), len(overconstrained_vars)) + for var in dmp_vars_over: + self.assertIn(var, overconstrained_vars) + + self.assertEqual(len(dmp_cons_over), len(overconstrained_cons)) + for con in dmp_cons_over: + self.assertIn(con, overconstrained_cons) + def test_incidence_graph(self): m = make_degenerate_solid_phase_model() variables = list(m.component_data_objects(pyo.Var)) @@ -851,5 +989,53 @@ def test_dm_graph_interface(self): self.assertIn(con, overconstrained_cons) +@unittest.skipUnless(networkx_available, "networkx is not available.") +@unittest.skipUnless(scipy_available, "scipy is not available.") +class TestExtraVars(unittest.TestCase): + + def test_unused_var(self): + m = pyo.ConcreteModel() + m.v1 = pyo.Var() + m.v2 = pyo.Var() + m.c1 = pyo.Constraint(expr=m.v1 == 1.0) + igraph = IncidenceGraphInterface(m) + self.assertEqual(igraph.incidence_matrix.shape, (1, 1)) + + def test_reference(self): + m = pyo.ConcreteModel() + m.v1 = pyo.Var() + m.ref = pyo.Reference(m.v1) + m.c1 = pyo.Constraint(expr=m.v1 == 1.0) + 
igraph = IncidenceGraphInterface(m) + self.assertEqual(igraph.incidence_matrix.shape, (1, 1)) + + +@unittest.skipUnless(networkx_available, "networkx is not available.") +@unittest.skipUnless(scipy_available, "scipy is not available.") +@unittest.skipUnless(AmplInterface.available(), "pynumero_ASL is not available") +class TestExceptions(unittest.TestCase): + + def test_nlp_fixed_error(self): + m = pyo.ConcreteModel() + m.v1 = pyo.Var() + m.v2 = pyo.Var() + m.c1 = pyo.Constraint(expr=m.v1 + m.v2 == 1.0) + m.v2.fix(2.0) + m._obj = pyo.Objective(expr=0.0) + nlp = PyomoNLP(m) + with self.assertRaisesRegex(ValueError, "fixed variables"): + igraph = IncidenceGraphInterface(nlp, include_fixed=True) + + def test_nlp_active_error(self): + m = pyo.ConcreteModel() + m.v1 = pyo.Var() + m.c1 = pyo.Constraint(expr=m.v1 == 1.0) + m.c2 = pyo.Constraint(expr=m.v1 == 2.0) + m._obj = pyo.Objective(expr=0.0) + nlp = PyomoNLP(m) + with self.assertRaisesRegex(ValueError, "inactive constraints"): + igraph = IncidenceGraphInterface(nlp, active=False) + + if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/incidence_analysis/tests/test_triangularize.py b/pyomo/contrib/incidence_analysis/tests/test_triangularize.py index 4618e96417c..66946889bbc 100644 --- a/pyomo/contrib/incidence_analysis/tests/test_triangularize.py +++ b/pyomo/contrib/incidence_analysis/tests/test_triangularize.py @@ -10,7 +10,10 @@ import random from pyomo.contrib.incidence_analysis.matching import maximum_matching -from pyomo.contrib.incidence_analysis.triangularize import block_triangularize +from pyomo.contrib.incidence_analysis.triangularize import ( + block_triangularize, + get_diagonal_blocks, + ) from pyomo.common.dependencies import ( scipy, scipy_available, @@ -348,5 +351,53 @@ def test_decomposable_tridiagonal_shuffled(self): self.assertEqual(row_block_map[row_idx], i) self.assertEqual(col_block_map[col_idx], i) + def test_decomposable_tridiagonal_diagonal_blocks(self): + """ + This matrix 
decomposes into 2x2 blocks + |x x | + |x x | + | x x x | + | x x | + | x x| + """ + N = 5 + row = [] + col = [] + data = [] + + # Diagonal + row.extend(range(N)) + col.extend(range(N)) + data.extend(1 for _ in range(N)) + + # Below diagonal + row.extend(range(1, N)) + col.extend(range(N-1)) + data.extend(1 for _ in range(N-1)) + + # Above diagonal + row.extend(i for i in range(N-1) if not i%2) + col.extend(i+1 for i in range(N-1) if not i%2) + data.extend(1 for i in range(N-1) if not i%2) + + matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) + + row_blocks, col_blocks = get_diagonal_blocks(matrix) + + self.assertEqual(len(row_blocks), (N+1)//2) + self.assertEqual(len(col_blocks), (N+1)//2) + + for i in range((N+1)//2): + rows = row_blocks[i] + cols = col_blocks[i] + + if 2*i+1 < N: + self.assertEqual(set(rows), {2*i, 2*i+1}) + self.assertEqual(set(cols), {2*i, 2*i+1}) + else: + self.assertEqual(set(rows), {2*i}) + self.assertEqual(set(cols), {2*i}) + + if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/incidence_analysis/triangularize.py b/pyomo/contrib/incidence_analysis/triangularize.py index 088c296021b..32ef28a0403 100644 --- a/pyomo/contrib/incidence_analysis/triangularize.py +++ b/pyomo/contrib/incidence_analysis/triangularize.py @@ -84,7 +84,7 @@ def block_triangularize(matrix, matching=None): # Reverse direction of edge. This corresponds to creating # a block lower triangular matrix. 
- scc_order = list(nxd.topological_sort(dag)) + scc_order = list(nxd.lexicographical_topological_sort(dag)) scc_block_map = {c: i for i, c in enumerate(scc_order)} row_block_map = {n: scc_block_map[c] for n, c in node_scc_map.items()} @@ -97,3 +97,70 @@ def block_triangularize(matrix, matching=None): col_block_map = {c: row_block_map[col_row_map[c]] for c in range(N)} return row_block_map, col_block_map + + +def get_blocks_from_maps(row_block_map, col_block_map): + """ + Gets the row and column coordinates of each diagonal block in a + block triangularization from maps of row/column coordinates to + block indices. + + Arguments + --------- + row_block_map: dict + Dict mapping each row coordinate to the coordinate of the + block it belongs to + + col_block_map: dict + Dict mapping each column coordinate to the coordinate of the + block it belongs to + + Returns + ------- + tuple of lists + The first list is a list-of-lists of row indices that partitions + the indices into diagonal blocks. The second list is a + list-of-lists of column indices that partitions the indices into + diagonal blocks. + + """ + blocks = set(row_block_map.values()) + assert blocks == set(col_block_map.values()) + n_blocks = len(blocks) + block_rows = [[] for _ in range(n_blocks)] + block_cols = [[] for _ in range(n_blocks)] + for r, b in row_block_map.items(): + block_rows[b].append(r) + for c, b in col_block_map.items(): + block_cols[b].append(c) + return block_rows, block_cols + + +def get_diagonal_blocks(matrix, matching=None): + """ + Gets the diagonal blocks of a block triangularization of the provided + matrix. + + Arguments + --------- + coo_matrix + Matrix to get the diagonal blocks of + + matching + Dict mapping row indices to column indices in the perfect matching + to be used by the block triangularization. + + Returns + ------- + tuple of lists + The first list is a list-of-lists of row indices that partitions + the indices into diagonal blocks. 
The second list is a + list-of-lists of column indices that partitions the indices into + diagonal blocks. + + """ + row_block_map, col_block_map = block_triangularize( + matrix, matching=matching + ) + block_rows, block_cols = get_blocks_from_maps(row_block_map, col_block_map) + return block_rows, block_cols diff --git a/pyomo/contrib/incidence_analysis/util.py b/pyomo/contrib/incidence_analysis/util.py index 51f66096994..aff78c829ef 100644 --- a/pyomo/contrib/incidence_analysis/util.py +++ b/pyomo/contrib/incidence_analysis/util.py @@ -54,7 +54,7 @@ def generate_strongly_connected_components( variables = [] for con in constraints: for var in identify_variables( - con.body, + con.expr, include_fixed=include_fixed, ): if var not in var_set: @@ -117,7 +117,7 @@ def solve_strongly_connected_components(block, solver=None, solve_kwds=None): var_set = ComponentSet() variables = [] for con in constraints: - for var in identify_variables(con.body, include_fixed=False): + for var in identify_variables(con.expr, include_fixed=False): # Because we are solving, we do not want to include fixed variables if var not in var_set: variables.append(var) diff --git a/pyomo/contrib/mindtpy/config_options.py b/pyomo/contrib/mindtpy/config_options.py index b12ddc5799b..685c001962f 100644 --- a/pyomo/contrib/mindtpy/config_options.py +++ b/pyomo/contrib/mindtpy/config_options.py @@ -127,6 +127,11 @@ def _get_MindtPy_config(): description='Use solution pool in solving the MILP main problem.', domain=bool )) + CONFIG.declare('num_solution_iteration', ConfigValue( + default=5, + description='The number of MIP solutions (from the solution pool) used to generate the fixed NLP subproblem in each iteration.', + domain=PositiveInt + )) CONFIG.declare('add_slack', ConfigValue( default=False, description='whether add slack variable here.' 
@@ -488,3 +493,6 @@ def check_config(config): config.threads = 1 config.logger.info( 'The threads parameter is corrected to 1 since incumbent callback conflicts with multi-threads mode.') + if config.solution_pool: + if config.mip_solver not in {'cplex_persistent', 'gurobi_persistent'}: + config.mip_solver = 'cplex_persistent' diff --git a/pyomo/contrib/mindtpy/iterate.py b/pyomo/contrib/mindtpy/iterate.py index a811a39c846..01169a8c532 100644 --- a/pyomo/contrib/mindtpy/iterate.py +++ b/pyomo/contrib/mindtpy/iterate.py @@ -11,7 +11,7 @@ """Iteration loop for MindtPy.""" from __future__ import division import logging -from pyomo.contrib.mindtpy.util import set_solver_options, get_integer_solution +from pyomo.contrib.mindtpy.util import set_solver_options, get_integer_solution, copy_var_list_values_from_solution_pool from pyomo.contrib.mindtpy.cut_generation import add_ecp_cuts from pyomo.contrib.mindtpy.mip_solve import solve_main, handle_main_optimal, handle_main_infeasible, handle_main_other_conditions, handle_regularization_main_tc @@ -23,6 +23,8 @@ from pyomo.opt import SolverFactory from pyomo.common.dependencies import attempt_import from pyomo.contrib.gdpopt.util import copy_var_list_values +from pyomo.solvers.plugins.solvers.gurobi_direct import gurobipy +from operator import itemgetter tabu_list, tabu_list_available = attempt_import( 'pyomo.contrib.mindtpy.tabu_list') @@ -97,7 +99,6 @@ def MindtPy_iteration_loop(solve_data, config): solve_data, config) handle_nlp_subproblem_tc( fixed_nlp, fixed_nlp_result, solve_data, config) - if algorithm_should_terminate(solve_data, config, check_cycling=True): last_iter_cuts = False break @@ -105,17 +106,74 @@ def MindtPy_iteration_loop(solve_data, config): if not config.single_tree and config.strategy != 'ECP': # if we don't use lazy callback, i.e. 
LP_NLP # Solve NLP subproblem # The constraint linearization happens in the handlers - fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config) - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_result, solve_data, config) + if not config.solution_pool: + fixed_nlp, fixed_nlp_result = solve_subproblem( + solve_data, config) + handle_nlp_subproblem_tc( + fixed_nlp, fixed_nlp_result, solve_data, config) - # Call the NLP post-solve callback - with time_code(solve_data.timing, 'Call after subproblem solve'): - config.call_after_subproblem_solve(fixed_nlp, solve_data) + # Call the NLP post-solve callback + with time_code(solve_data.timing, 'Call after subproblem solve'): + config.call_after_subproblem_solve(fixed_nlp, solve_data) - if algorithm_should_terminate(solve_data, config, check_cycling=False): - last_iter_cuts = True - break + if algorithm_should_terminate(solve_data, config, check_cycling=False): + last_iter_cuts = True + break + else: + if config.mip_solver == 'cplex_persistent': + solution_pool_names = main_mip_results._solver_model.solution.pool.get_names() + elif config.mip_solver == 'gurobi_persistent': + solution_pool_names = list( + range(main_mip_results._solver_model.SolCount)) + # list to store the name and objective value of the solutions in the solution pool + solution_name_obj = [] + for name in solution_pool_names: + if config.mip_solver == 'cplex_persistent': + obj = main_mip_results._solver_model.solution.pool.get_objective_value( + name) + elif config.mip_solver == 'gurobi_persistent': + main_mip_results._solver_model.setParam( + gurobipy.GRB.Param.SolutionNumber, name) + obj = main_mip_results._solver_model.PoolObjVal + solution_name_obj.append([name, obj]) + solution_name_obj.sort( + key=itemgetter(1), reverse=solve_data.objective_sense == maximize) + counter = 0 + for name, _ in solution_name_obj: + # the optimal solution of the main problem has been added to integer_list above + # so we should skip checking cycling for the first 
solution in the solution pool + if counter >= 1: + copy_var_list_values_from_solution_pool(solve_data.mip.MindtPy_utils.variable_list, + solve_data.working_model.MindtPy_utils.variable_list, + config, solver_model=main_mip_results._solver_model, + var_map=main_mip_results._pyomo_var_to_solver_var_map, + solution_name=name) + solve_data.curr_int_sol = get_integer_solution( + solve_data.working_model) + if solve_data.curr_int_sol in set(solve_data.integer_list): + config.logger.info( + 'The same combination has been explored and will be skipped here.') + continue + else: + solve_data.integer_list.append( + solve_data.curr_int_sol) + counter += 1 + fixed_nlp, fixed_nlp_result = solve_subproblem( + solve_data, config) + handle_nlp_subproblem_tc( + fixed_nlp, fixed_nlp_result, solve_data, config) + + # Call the NLP post-solve callback + with time_code(solve_data.timing, 'Call after subproblem solve'): + config.call_after_subproblem_solve( + fixed_nlp, solve_data) + + if algorithm_should_terminate(solve_data, config, check_cycling=False): + last_iter_cuts = True + break + + if counter >= config.num_solution_iteration: + break if config.strategy == 'ECP': add_ecp_cuts(solve_data.mip, solve_data, config) @@ -133,7 +191,7 @@ def MindtPy_iteration_loop(solve_data, config): # # bound does not improve before switching to OA # max_nonimprove_iter = 5 # making_progress = True - # # TODO-romeo Unneccesary for OA and ROA, right? + # # TODO-romeo Unnecessary for OA and ROA, right? # for i in range(1, max_nonimprove_iter + 1): # try: # if (sign_adjust * log[-i] diff --git a/pyomo/contrib/mindtpy/mip_solve.py b/pyomo/contrib/mindtpy/mip_solve.py index 26c96d8b2ba..99290567ed6 100644 --- a/pyomo/contrib/mindtpy/mip_solve.py +++ b/pyomo/contrib/mindtpy/mip_solve.py @@ -88,6 +88,9 @@ def solve_main(solve_data, config, fp=False, regularization_problem=False): "No-good cuts are added and GOA algorithm doesn't converge within the time limit. 
" 'No integer solution is found, so the cplex solver will report an error status. ') return None, None + if config.solution_pool: + main_mip_results._solver_model = mainopt._solver_model + main_mip_results._pyomo_var_to_solver_var_map = mainopt._pyomo_var_to_solver_var_map if main_mip_results.solver.termination_condition is tc.optimal: if config.single_tree and not config.add_no_good_cuts and not regularization_problem: if solve_data.objective_sense == minimize: diff --git a/pyomo/contrib/mindtpy/single_tree.py b/pyomo/contrib/mindtpy/single_tree.py index 1d59df20eb4..2f4262d0858 100644 --- a/pyomo/contrib/mindtpy/single_tree.py +++ b/pyomo/contrib/mindtpy/single_tree.py @@ -17,7 +17,6 @@ from pyomo.contrib.mcpp.pyomo_mcpp import McCormick as mc, MCPP_Error import logging from pyomo.repn import generate_standard_repn -from math import fabs from pyomo.core.expr import current as EXPR import pyomo.environ as pyo from math import copysign @@ -38,8 +37,8 @@ def copy_lazy_var_list_values(self, opt, from_list, to_list, config, skip_stale=False, skip_fixed=True, ignore_integrality=False): """This function copies variable values from one list to another. - Rounds to Binary/Integer if neccessary - Sets to zero for NonNegativeReals if neccessary + Rounds to Binary/Integer if necessary + Sets to zero for NonNegativeReals if necessary Parameters ---------- @@ -72,7 +71,7 @@ def copy_lazy_var_list_values(self, opt, from_list, to_list, config, # ... 
or the nearest integer elif v_to.is_integer(): rounded_val = int(round(v_val)) - if (ignore_integrality or fabs(v_val - rounded_val) <= config.integer_tolerance) \ + if (ignore_integrality or abs(v_val - rounded_val) <= config.integer_tolerance) \ and rounded_val in v_to.domain: v_to.set_value(rounded_val) else: diff --git a/pyomo/contrib/mindtpy/tests/eight_process_problem.py b/pyomo/contrib/mindtpy/tests/eight_process_problem.py index 321e3b90d23..dcf7229b8eb 100644 --- a/pyomo/contrib/mindtpy/tests/eight_process_problem.py +++ b/pyomo/contrib/mindtpy/tests/eight_process_problem.py @@ -155,4 +155,4 @@ def __init__(self, convex=True, *args, **kwargs): 20: 10, 21: 2, 22: 10, 25: 3} # add bounds for variables in nonlinear constraints for i, x_ub in x_ubs.items(): X[i].setub(x_ub) - m.optimal_value = 68 + m.optimal_value = 68.0097 diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy.py b/pyomo/contrib/mindtpy/tests/test_mindtpy.py index 990bcb5fec0..d2c49582a71 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy.py @@ -9,7 +9,7 @@ # ___________________________________________________________________________ """Tests for the MindtPy solver.""" -import pyomo.core.base.symbolic +from pyomo.core.expr.calculus.diff_with_sympy import differentiate_available import pyomo.common.unittest as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import \ EightProcessFlowsheet @@ -40,12 +40,6 @@ model_list = [EightProcessFlowsheet(convex=True), ConstraintQualificationExample(), SimpleMINLP2(), - # SimpleMINLP(), - # SimpleMINLP3(), - # SimpleMINLP4(), - # SimpleMINLP5(), - # ProposalModel(), - # OnlineDocExample() ] nonconvex_model_list = [EightProcessFlowsheet(convex=False)] @@ -57,7 +51,6 @@ extreme_model_list = [LP_model.model, QCP_model.model] required_solvers = ('ipopt', 'glpk') -# required_solvers = ('gams', 'gams') if all(SolverFactory(s).available() for s in required_solvers): subsolvers_available = True 
else: @@ -67,7 +60,7 @@ @unittest.skipIf(not subsolvers_available, 'Required subsolvers %s are not available' % (required_solvers,)) -@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, +@unittest.skipIf(not differentiate_available, 'Symbolic differentiation is not available') class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py index 6e9b5cd76fe..cab15bdf628 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- """Tests for the MindtPy solver.""" -from math import fabs -import pyomo.core.base.symbolic import pyomo.common.unittest as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import \ EightProcessFlowsheet @@ -10,12 +8,7 @@ from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample -from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample from pyomo.environ import SolverFactory, value -from pyomo.environ import * -from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded -from pyomo.solvers.tests.models.QCP_simple import QCP_simple -from pyomo.solvers.tests.models.MIQCP_simple import MIQCP_simple from pyomo.opt import TerminationCondition required_solvers = ('ipopt', 'glpk') @@ -37,8 +30,6 @@ @unittest.skipIf(not subsolvers_available, 'Required subsolvers %s are not available' % (required_solvers,)) -@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, - 'Symbolic differentiation is not available') class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py 
b/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py index ba68cfbb1cb..777091a9db1 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- """Tests for the MindtPy solver.""" -from math import fabs -import pyomo.core.base.symbolic -from pyomo.core.expr import template_expr import pyomo.common.unittest as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import \ EightProcessFlowsheet @@ -13,10 +10,6 @@ from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample from pyomo.environ import SolverFactory, value -from pyomo.environ import * -from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded -from pyomo.solvers.tests.models.QCP_simple import QCP_simple -from pyomo.solvers.tests.models.MIQCP_simple import MIQCP_simple from pyomo.opt import TerminationCondition from pyomo.contrib.gdpopt.util import is_feasible from pyomo.util.infeasible import log_infeasible_constraints @@ -44,8 +37,6 @@ @unittest.skipIf(not subsolvers_available, 'Required subsolvers %s are not available' % (required_solvers,)) -@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, - 'Symbolic differentiation is not available') class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver.""" diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py index df932078e30..decf6eff52c 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py @@ -1,16 +1,8 @@ # -*- coding: utf-8 -*- """Tests for the MindtPy solver.""" -from math import fabs -import pyomo.core.base.symbolic import pyomo.common.unittest as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import \ EightProcessFlowsheet -from 
pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP -from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 -from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 -from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel -from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample -from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample from pyomo.contrib.mindtpy.tests.nonconvex1 import Nonconvex1 from pyomo.contrib.mindtpy.tests.nonconvex2 import Nonconvex2 from pyomo.contrib.mindtpy.tests.nonconvex3 import Nonconvex3 @@ -38,8 +30,6 @@ @unittest.skipIf(not subsolvers_available, 'Required subsolvers %s are not available' % (required_solvers,)) -@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, - 'Symbolic differentiation is not available') @unittest.skipIf(not pyomo.contrib.mcpp.pyomo_mcpp.mcpp_available(), 'MC++ is not available') class TestMindtPy(unittest.TestCase): diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py index 8db0dea7b1b..0df2fe6fd01 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py @@ -1,27 +1,15 @@ # -*- coding: utf-8 -*- -"""Tests for the MindtPy solver.""" -from math import fabs -import pyomo.core.base.symbolic +"""Tests for global LP/NLP in the MindtPy solver.""" import pyomo.common.unittest as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import \ EightProcessFlowsheet -from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP -from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 -from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 -from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel -from 
pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample -from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample from pyomo.contrib.mindtpy.tests.nonconvex1 import Nonconvex1 from pyomo.contrib.mindtpy.tests.nonconvex2 import Nonconvex2 from pyomo.contrib.mindtpy.tests.nonconvex3 import Nonconvex3 from pyomo.contrib.mindtpy.tests.nonconvex4 import Nonconvex4 from pyomo.environ import SolverFactory, value -from pyomo.environ import * -from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded -from pyomo.solvers.tests.models.QCP_simple import QCP_simple -from pyomo.solvers.tests.models.MIQCP_simple import MIQCP_simple - from pyomo.opt import TerminationCondition +from pyomo.contrib.mcpp import pyomo_mcpp required_solvers = ('baron', 'cplex_persistent') if not all(SolverFactory(s).available(False) for s in required_solvers): @@ -43,9 +31,7 @@ @unittest.skipIf(not subsolvers_available, 'Required subsolvers %s are not available' % (required_solvers,)) -@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, - 'Symbolic differentiation is not available') -@unittest.skipIf(not pyomo.contrib.mcpp.pyomo_mcpp.mcpp_available(), +@unittest.skipIf(not pyomo_mcpp.mcpp_available(), 'MC++ is not available') class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py index c40e35c52ad..be68d482988 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py @@ -9,17 +9,11 @@ # ___________________________________________________________________________ """Tests for the MindtPy solver.""" -import pyomo.core.base.symbolic import pyomo.common.unittest as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import \ EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.MINLP_simple import 
SimpleMINLP as SimpleMINLP -from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 -from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 -from pyomo.contrib.mindtpy.tests.MINLP5_simple import SimpleMINLP5 -from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample -from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample from pyomo.environ import SolverFactory, value from pyomo.opt import TerminationCondition @@ -43,8 +37,6 @@ @unittest.skipIf(not subsolvers_available, 'Required subsolvers %s are not available' % ([required_nlp_solvers] + required_mip_solvers)) -@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, - 'Symbolic differentiation is not available') class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py index fd8d86683cb..6d81dbf2b9f 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py @@ -1,24 +1,11 @@ # -*- coding: utf-8 -*- """Tests for the MindtPy solver.""" -from math import fabs -import pyomo.core.base.symbolic import pyomo.common.unittest as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import \ EightProcessFlowsheet -from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP -from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 -from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 -from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample -from pyomo.contrib.mindtpy.tests.online_doc_example import 
OnlineDocExample from pyomo.environ import SolverFactory, value -from pyomo.environ import * -from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded -from pyomo.solvers.tests.models.QCP_simple import QCP_simple -from pyomo.solvers.tests.models.MIQCP_simple import MIQCP_simple from pyomo.opt import TerminationCondition -from pyomo.contrib.mindtpy.tests.MINLP4_simple import SimpleMINLP4 -from pyomo.contrib.mindtpy.tests.MINLP5_simple import SimpleMINLP5 required_solvers = ('ipopt', 'cplex') # required_solvers = ('gams', 'gams') @@ -29,22 +16,13 @@ model_list = [EightProcessFlowsheet(convex=True), - ConstraintQualificationExample(), - # SimpleMINLP(), - # SimpleMINLP2(), - # SimpleMINLP3(), - # SimpleMINLP4(), - # SimpleMINLP5(), - # ProposalModel(), - # OnlineDocExample() + ConstraintQualificationExample() ] @unittest.skipIf(not subsolvers_available, 'Required subsolvers %s are not available' % (required_solvers,)) -@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, - 'Symbolic differentiation is not available') class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py new file mode 100644 index 00000000000..ca3f196d038 --- /dev/null +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py @@ -0,0 +1,94 @@ +"""Tests for solution pool in the MindtPy solver.""" +from pyomo.core.expr.calculus.diff_with_sympy import differentiate_available +import pyomo.common.unittest as unittest +from pyomo.contrib.mindtpy.tests.eight_process_problem import \ + EightProcessFlowsheet +from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 +from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample +from pyomo.environ import SolverFactory, value, maximize +from pyomo.opt import TerminationCondition + + +model_list = 
[EightProcessFlowsheet(convex=True), + ConstraintQualificationExample(), + SimpleMINLP2(), + ] + + +try: + import cplex + cplexpy_available = True +except ImportError: + cplexpy_available = False + +required_solvers = ('ipopt', 'cplex_persistent', 'gurobi_persistent') +ipopt_available = SolverFactory('ipopt').available() +cplex_persistent_available = SolverFactory( + 'cplex_persistent').available(exception_flag=False) +gurobi_persistent_available = SolverFactory( + 'gurobi_persistent').available(exception_flag=False) + + +@unittest.skipIf(not differentiate_available, + 'Symbolic differentiation is not available') +class TestMindtPy(unittest.TestCase): + """Tests for the MindtPy solver plugin.""" + @unittest.skipIf(not(ipopt_available and cplex_persistent_available and cplexpy_available), + 'Required subsolvers are not available') + def test_OA_solution_pool_cplex(self): + """Test the outer approximation decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + print('\n Solving 8PP problem with Outer Approximation') + for model in model_list: + results = opt.solve(model, strategy='OA', + init_strategy='rNLP', + solution_pool=True, + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + self.assertIn(results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertAlmostEqual( + value(model.objective.expr), model.optimal_value, places=2) + + @unittest.skipIf(not(ipopt_available and gurobi_persistent_available), + 'Required subsolvers are not available') + def test_OA_solution_pool_gurobi(self): + """Test the outer approximation decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + print('\n Solving 8PP problem with Outer Approximation') + for model in model_list: + results = opt.solve(model, strategy='OA', + init_strategy='rNLP', + solution_pool=True, + mip_solver=required_solvers[2], + nlp_solver=required_solvers[0], + ) + 
self.assertIn(results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertAlmostEqual( + value(model.objective.expr), model.optimal_value, places=2) + + # the following tests are used to increase the code coverage + @unittest.skipIf(not(ipopt_available and cplex_persistent_available), + 'Required subsolvers are not available') + def test_OA_solution_pool_coverage1(self): + """Test the outer approximation decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + print('\n Solving 8PP problem with Outer Approximation') + for model in model_list: + results = opt.solve(model, strategy='OA', + init_strategy='rNLP', + solution_pool=True, + mip_solver='glpk', + nlp_solver=required_solvers[0], + num_solution_iteration=1 + ) + self.assertIn(results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertAlmostEqual( + value(model.objective.expr), model.optimal_value, places=2) + + +if __name__ == '__main__': + unittest.main() diff --git a/pyomo/contrib/mindtpy/tests/unit_test.py b/pyomo/contrib/mindtpy/tests/unit_test.py index b2ad4ddc20e..61ddf88aa84 100644 --- a/pyomo/contrib/mindtpy/tests/unit_test.py +++ b/pyomo/contrib/mindtpy/tests/unit_test.py @@ -8,23 +8,14 @@ # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ -"""Tests for the MindtPy solver.""" -import pyomo.core.base.symbolic +"""Unit tests for the MindtPy solver.""" import pyomo.common.unittest as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import \ EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP -from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 -from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 -from pyomo.contrib.mindtpy.tests.MINLP4_simple import SimpleMINLP4 -from pyomo.contrib.mindtpy.tests.MINLP5_simple import SimpleMINLP5 -from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel -from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample -from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample -from pyomo.environ import SolverFactory, value, maximize +from pyomo.environ import SolverFactory, maximize from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded from pyomo.solvers.tests.models.QCP_simple import QCP_simple -from pyomo.opt import TerminationCondition from pyomo.contrib.mindtpy.config_options import _get_MindtPy_config from pyomo.contrib.mindtpy.util import setup_solve_data, add_feas_slacks, set_solver_options from pyomo.contrib.mindtpy.nlp_solve import handle_subproblem_other_termination, handle_feasibility_subproblem_tc, solve_subproblem, handle_nlp_subproblem_tc @@ -34,10 +25,10 @@ from pyomo.contrib.mindtpy.initialization import MindtPy_initialize_main, init_rNLP from pyomo.contrib.mindtpy.feasibility_pump import generate_norm_constraint, handle_feas_main_tc from pyomo.core import Block, ConstraintList -from pyomo.contrib.mindtpy.mip_solve import solve_main, handle_main_optimal, handle_main_infeasible, handle_main_other_conditions, handle_regularization_main_tc +from pyomo.contrib.mindtpy.mip_solve 
import solve_main, handle_main_other_conditions from pyomo.opt import SolutionStatus, SolverStatus from pyomo.core import (Constraint, Objective, - TransformationFactory, minimize, value, Var, RangeSet, NonNegativeReals) + TransformationFactory, minimize, Var, RangeSet, NonNegativeReals) from pyomo.contrib.mindtpy.iterate import algorithm_should_terminate nonconvex_model_list = [EightProcessFlowsheet(convex=False)] @@ -60,8 +51,6 @@ @unittest.skipIf(not subsolvers_available, 'Required subsolvers %s are not available' % (required_solvers,)) -@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, - 'Symbolic differentiation is not available') class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" diff --git a/pyomo/contrib/mindtpy/util.py b/pyomo/contrib/mindtpy/util.py index ee0e001c93e..af0140eb2ea 100644 --- a/pyomo/contrib/mindtpy/util.py +++ b/pyomo/contrib/mindtpy/util.py @@ -22,6 +22,7 @@ from pyomo.core.expr.calculus.derivatives import differentiate from pyomo.common.dependencies import attempt_import from pyomo.contrib.fbbt.fbbt import fbbt +from pyomo.solvers.plugins.solvers.gurobi_direct import gurobipy from pyomo.solvers.plugins.solvers.gurobi_persistent import GurobiPersistent pyomo_nlp = attempt_import('pyomo.contrib.pynumero.interfaces.pyomo_nlp')[0] @@ -554,6 +555,14 @@ def get_integer_solution(model, string_zero=False): def setup_solve_data(model, config): + """ define and initialize solve_data for MindtPy + + Args: + model: Pyomo model + the model to extract value of integer variables + config: MindtPy configurations + contains the specific configurations for the algorithm + """ solve_data = MindtPySolveData() solve_data.results = SolverResults() solve_data.timing = Bunch() @@ -609,6 +618,54 @@ def setup_solve_data(model, config): return solve_data +def copy_var_list_values_from_solution_pool(from_list, to_list, config, solver_model, var_map, solution_name, + ignore_integrality=False): + """Copy variable 
values from one list to another. + + Rounds to Binary/Integer if necessary + Sets to zero for NonNegativeReals if necessary + + Args: + from_list: variable list + contains variables and their values + to_list: variable list + contains the variables that need to set value + config: ConfigBlock + contains the specific configurations for the algorithm + solver_model: solver model + the solver model + var_map: dict + the map of pyomo variables to solver variables + solution_name: int or str + the name of the solution in the solution pool + """ + for v_from, v_to in zip(from_list, to_list): + try: + if config.mip_solver == 'cplex_persistent': + var_val = solver_model.solution.pool.get_values( + solution_name, var_map[v_from]) + elif config.mip_solver == 'gurobi_persistent': + solver_model.setParam( + gurobipy.GRB.Param.SolutionNumber, solution_name) + var_val = var_map[v_from].Xn + v_to.set_value(var_val) + except ValueError as err: + err_msg = getattr(err, 'message', str(err)) + rounded_val = int(round(var_val)) + # Check to see if this is just a tolerance issue + if ignore_integrality \ + and v_to.is_integer(): + v_to.value = var_val + elif v_to.is_integer() and (abs(var_val - rounded_val) <= config.integer_tolerance): + v_to.set_value(rounded_val) + elif abs(var_val) <= config.zero_tolerance and 0 in v_to.domain: + v_to.set_value(0) + else: + config.logger.error( + 'Unknown validation domain error setting variable %s', (v_to.name,)) + raise + + class GurobiPersistent4MindtPy(GurobiPersistent): def _intermediate_callback(self): diff --git a/pyomo/contrib/parmest/examples/reaction_kinetics/__init__.py b/pyomo/contrib/parmest/examples/reaction_kinetics/__init__.py new file mode 100644 index 00000000000..cd6b0b75748 --- /dev/null +++ b/pyomo/contrib/parmest/examples/reaction_kinetics/__init__.py @@ -0,0 +1,9 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National 
Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ diff --git a/pyomo/contrib/parmest/examples/reaction_kinetics/simple_reaction_parmest_example.py b/pyomo/contrib/parmest/examples/reaction_kinetics/simple_reaction_parmest_example.py new file mode 100644 index 00000000000..c17cd7fc22e --- /dev/null +++ b/pyomo/contrib/parmest/examples/reaction_kinetics/simple_reaction_parmest_example.py @@ -0,0 +1,108 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +''' +Example from Y. Bard, "Nonlinear Parameter Estimation", (pg. 124) + +This example shows: +1. How to define the unknown (to be regressed parameters) with an index +2. How to call parmest to only estimate some of the parameters (and fix the rest) + +Code provided by Paul Akula. +''' +import pandas as pd +from pandas import DataFrame +from os import path + +from pyomo.environ import (ConcreteModel, Param, Var, PositiveReals, Objective, + Constraint, RangeSet, Expression, minimize, exp, value) + +#from idaes.core.util import get_default_solver +import pyomo.contrib.parmest.parmest as parmest + +# ======================================================================= +''' Data from Table 5.2 in Y. 
Bard, "Nonlinear Parameter Estimation", (pg. 124) +''' + +data = [{'experiment': 1, 'x1': 0.1, 'x2': 100, 'y': 0.98}, + {'experiment': 2, 'x1': 0.2, 'x2': 100, 'y': 0.983}, + {'experiment': 3, 'x1': 0.3, 'x2': 100, 'y': 0.955}, + {'experiment': 4, 'x1': 0.4, 'x2': 100, 'y': 0.979}, + {'experiment': 5, 'x1': 0.5, 'x2': 100, 'y': 0.993}, + {'experiment': 6, 'x1': 0.05, 'x2': 200, 'y': 0.626}, + {'experiment': 7, 'x1': 0.1, 'x2': 200, 'y': 0.544}, + {'experiment': 8, 'x1': 0.15, 'x2': 200, 'y': 0.455}, + {'experiment': 9, 'x1': 0.2, 'x2': 200, 'y': 0.225}, + {'experiment': 10, 'x1': 0.25, 'x2': 200, 'y': 0.167}, + {'experiment': 11, 'x1': 0.02, 'x2': 300, 'y': 0.566}, + {'experiment': 12, 'x1': 0.04, 'x2': 300, 'y': 0.317}, + {'experiment': 13, 'x1': 0.06, 'x2': 300, 'y': 0.034}, + {'experiment': 14, 'x1': 0.08, 'x2': 300, 'y': 0.016}, + {'experiment': 15, 'x1': 0.1, 'x2': 300, 'y': 0.006}] + +# ======================================================================= + +def simple_reaction_model(data): + + # Create the concrete model + model = ConcreteModel() + + model.x1 = Param(initialize=float(data['x1'])) + model.x2 = Param(initialize=float(data['x2'])) + + # Rate constants + model.rxn = RangeSet(2) + initial_guess = {1: 750, 2: 1200} + model.k = Var(model.rxn, initialize=initial_guess, within=PositiveReals) + + # reaction product + model.y = Expression(expr=exp(-model.k[1] * + model.x1 * exp(-model.k[2] / model.x2))) + + # fix all of the regressed parameters + model.k.fix() + + + #=================================================================== + # Stage-specific cost computations + def ComputeFirstStageCost_rule(model): + return 0 + model.FirstStageCost = Expression(rule=ComputeFirstStageCost_rule) + + def AllMeasurements(m): + return (float(data['y']) - m.y) ** 2 + model.SecondStageCost = Expression(rule=AllMeasurements) + + def total_cost_rule(m): + return m.FirstStageCost + m.SecondStageCost + model.Total_Cost_Objective = Objective(rule=total_cost_rule, + 
sense=minimize) + + return model + +if __name__ == "__main__": + + # ======================================================================= + # Parameter estimation without covariance estimate + # Only estimate the parameter k[1]. The parameter k[2] will remain fixed + # at its initial value + theta_names = ['k[1]'] + pest = parmest.Estimator(simple_reaction_model, data, theta_names) + obj, theta = pest.theta_est() + print(obj) + print(theta) + print() + #======================================================================= + # Estimate both k1 and k2 and compute the covariance matrix + theta_names = ['k'] + pest = parmest.Estimator(simple_reaction_model, data, theta_names) + obj, theta, cov = pest.theta_est(calc_cov=True) + print(obj) + print(theta) + print(cov) diff --git a/pyomo/contrib/parmest/examples/reactor_design/__init__.py b/pyomo/contrib/parmest/examples/reactor_design/__init__.py index 8d1c8b69c3f..6b39dd18d6a 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/__init__.py +++ b/pyomo/contrib/parmest/examples/reactor_design/__init__.py @@ -1 +1,10 @@ - +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/__init__.py b/pyomo/contrib/parmest/examples/rooney_biegler/__init__.py index d9f70706c29..cd6b0b75748 100644 --- a/pyomo/contrib/parmest/examples/rooney_biegler/__init__.py +++ b/pyomo/contrib/parmest/examples/rooney_biegler/__init__.py @@ -1,4 +1,4 @@ - # ___________________________________________________________________________ +# ___________________________________________________________________________ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC diff --git a/pyomo/contrib/parmest/examples/semibatch/__init__.py b/pyomo/contrib/parmest/examples/semibatch/__init__.py index 5296dafcc78..6b39dd18d6a 100644 --- a/pyomo/contrib/parmest/examples/semibatch/__init__.py +++ b/pyomo/contrib/parmest/examples/semibatch/__init__.py @@ -1,4 +1,4 @@ - # ___________________________________________________________________________ +# ___________________________________________________________________________ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC diff --git a/pyomo/contrib/parmest/parmest.py b/pyomo/contrib/parmest/parmest.py index 899d04eb2ab..127b7faa39d 100644 --- a/pyomo/contrib/parmest/parmest.py +++ b/pyomo/contrib/parmest/parmest.py @@ -273,7 +273,7 @@ class Estimator(object): model_function: function Function that generates an instance of the Pyomo model using 'data' as the input argument - data: pandas DataFrame, list of dictionaries, or list of json file names + data: pd.DataFrame, list of dictionaries, or list of json file names Data that is used to build an instance of the Pyomo model and build the objective function theta_names: list of strings @@ -337,7 +337,7 @@ def _create_parmest_model(self, data): # If the component that was found is not a variable, # this 
will generate an exception (and the warning # in the 'except') - var_validate.fixed = False + var_validate.unfix() # We want to standardize on the CUID string # representation self.theta_names[i] = repr(var_cuid) @@ -502,6 +502,8 @@ def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", cov = 2 * sse / (n - l) * inv_red_hes cov = pd.DataFrame(cov, index=thetavals.keys(), columns=thetavals.keys()) + thetavals = pd.Series(thetavals) + if len(return_values) > 0: var_values = [] for exp_i in self.ef_instance.component_objects(Block, descend_into=False): @@ -646,7 +648,7 @@ def theta_est(self, solver="ef_ipopt", return_values=[], bootlist=None, calc_cov Parameters ---------- solver: string, optional - "ef_ipopt" or "k_aug". Default is "ef_ipopt". + Currently only "ef_ipopt" is supported. Default is "ef_ipopt". return_values: list, optional List of Variable names used to return values from the model bootlist: list, optional @@ -658,12 +660,10 @@ def theta_est(self, solver="ef_ipopt", return_values=[], bootlist=None, calc_cov ------- objectiveval: float The objective function value - thetavals: dict - A dictionary of all values for theta + thetavals: pd.Series + Estimated values for theta variable values: pd.DataFrame Variable values for each variable name in return_values (only for solver='ef_ipopt') - Hessian: dict - A dictionary of dictionaries for the Hessian (only for solver='k_aug') cov: pd.DataFrame Covariance matrix of the fitted parameters (only for solver='ef_ipopt') """ @@ -696,7 +696,7 @@ def theta_est_bootstrap(self, bootstrap_samples, samplesize=None, Returns ------- - bootstrap_theta: DataFrame + bootstrap_theta: pd.DataFrame Theta values for each sample and (if return_samples = True) the sample numbers used in each estimation """ @@ -758,7 +758,7 @@ def theta_est_leaveNout(self, lNo, lNo_samples=None, seed=None, Returns ------- - lNo_theta: DataFrame + lNo_theta: pd.DataFrame Theta values for each sample and (if return_samples = True) the sample numbers left 
out of each estimation """ @@ -887,12 +887,12 @@ def objective_at_theta(self, theta_values): Parameters ---------- - theta_values: DataFrame, columns=theta_names + theta_values: pd.DataFrame, columns=theta_names Values of theta used to compute the objective Returns ------- - obj_at_theta: DataFrame + obj_at_theta: pd.DataFrame Objective value for each theta (infeasible solutions are omitted). """ @@ -927,7 +927,7 @@ def likelihood_ratio_test(self, obj_at_theta, obj_value, alphas, Parameters ---------- - obj_at_theta: DataFrame, columns = theta_names + 'obj' + obj_at_theta: pd.DataFrame, columns = theta_names + 'obj' Objective values for each theta value (returned by objective_at_theta) obj_value: int or float @@ -939,10 +939,10 @@ def likelihood_ratio_test(self, obj_at_theta, obj_value, alphas, Returns ------- - LR: DataFrame + LR: pd.DataFrame Objective values for each theta value along with True or False for each alpha - thresholds: dictionary + thresholds: pd.Series If return_threshold = True, the thresholds are also returned. """ assert isinstance(obj_at_theta, pd.DataFrame) @@ -958,6 +958,8 @@ def likelihood_ratio_test(self, obj_at_theta, obj_value, alphas, thresholds[a] = obj_value * ((chi2_val / (S - 2)) + 1) LR[a] = LR['obj'] < thresholds[a] + thresholds = pd.Series(thresholds) + if return_thresholds: return LR, thresholds else: @@ -972,7 +974,7 @@ def confidence_region_test(self, theta_values, distribution, alphas, Parameters ---------- - theta_values: DataFrame, columns = theta_names + theta_values: pd.DataFrame, columns = theta_names Theta values used to generate a confidence region (generally returned by theta_est_bootstrap) distribution: string @@ -982,25 +984,24 @@ def confidence_region_test(self, theta_values, distribution, alphas, alphas: list List of alpha values used to determine if theta values are inside or outside the region. 
- test_theta_values: dictionary or DataFrame, keys/columns = theta_names, optional + test_theta_values: pd.Series or pd.DataFrame, keys/columns = theta_names, optional Additional theta values that are compared to the confidence region to determine if they are inside or outside. Returns - ------- - training_results: DataFrame + training_results: pd.DataFrame Theta value used to generate the confidence region along with True (inside) or False (outside) for each alpha - test_results: DataFrame + test_results: pd.DataFrame If test_theta_values is not None, returns test theta value along with True (inside) or False (outside) for each alpha """ assert isinstance(theta_values, pd.DataFrame) assert distribution in ['Rect', 'MVN', 'KDE'] assert isinstance(alphas, list) - assert isinstance(test_theta_values, (type(None), dict, pd.DataFrame)) + assert isinstance(test_theta_values, (type(None), dict, pd.Series, pd.DataFrame)) - if isinstance(test_theta_values, dict): + if isinstance(test_theta_values, (dict, pd.Series)): test_theta_values = pd.Series(test_theta_values).to_frame().transpose() training_results = theta_values.copy() diff --git a/pyomo/contrib/parmest/tests/test_parmest.py b/pyomo/contrib/parmest/tests/test_parmest.py index bac451ee419..c424c9303d2 100644 --- a/pyomo/contrib/parmest/tests/test_parmest.py +++ b/pyomo/contrib/parmest/tests/test_parmest.py @@ -41,13 +41,14 @@ @unittest.skipIf(not parmest.parmest_available, "Cannot test parmest: required dependencies are missing") @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") -class parmest_object_Tester_RB(unittest.TestCase): +class TestRooneyBiegler(unittest.TestCase): def setUp(self): from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import rooney_biegler_model + # Note, the data used in this test has been corrected to use data.loc[5,'hour'] = 7 (instead of 6) data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0], - [4,16.0],[5,15.6],[6,19.8]], columns=['hour', 'y']) + 
[4,16.0],[5,15.6],[7,19.8]], columns=['hour', 'y']) theta_names = ['asymptote', 'rate_constant'] @@ -65,9 +66,9 @@ def SSE(model, data): def test_theta_est(self): objval, thetavals = self.pest.theta_est() - self.assertAlmostEqual(objval, 4.4675, places=2) - self.assertAlmostEqual(thetavals['asymptote'], 19.2189, places=2) # 19.1426 from the paper - self.assertAlmostEqual(thetavals['rate_constant'], 0.5312, places=2) # 0.5311 from the paper + self.assertAlmostEqual(objval, 4.3317112, places=2) + self.assertAlmostEqual(thetavals['asymptote'], 19.1426, places=2) # 19.1426 from the paper + self.assertAlmostEqual(thetavals['rate_constant'], 0.5311, places=2) # 0.5311 from the paper @unittest.skipIf(not graphics.imports_available, "parmest.graphics imports are unavailable") @@ -109,8 +110,8 @@ def test_likelihood_ratio(self): LR = self.pest.likelihood_ratio_test(obj_at_theta, objval, [0.8, 0.9, 1.0]) self.assertTrue(set(LR.columns) >= set([0.8, 0.9, 1.0])) - self.assertTrue(LR[0.8].sum() == 7) - self.assertTrue(LR[0.9].sum() == 11) + self.assertTrue(LR[0.8].sum() == 6) + self.assertTrue(LR[0.9].sum() == 10) self.assertTrue(LR[1.0].sum() == 60) # all true graphics.pairwise_plot(LR, thetavals, 0.8) @@ -185,37 +186,6 @@ def test_theta_k_aug_for_Hessian(self): objval, thetavals, Hessian = self.pest.theta_est(solver="k_aug") self.assertAlmostEqual(objval, 4.4675, places=2) - -''' -The test cases above were developed with a transcription mistake in the dataset. -This test works with the correct dataset. 
-''' -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") -@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") -class parmest_object_Tester_RB_match_paper(unittest.TestCase): - - def setUp(self): - from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import rooney_biegler_model - - data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0], - [4,16.0],[5,15.6],[7,19.8]], columns=['hour', 'y']) - - theta_names = ['asymptote', 'rate_constant'] - - def SSE(model, data): - expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index) - return expr - - self.pest = parmest.Estimator(rooney_biegler_model, data, theta_names, SSE) - - def test_theta_est(self): - objval, thetavals = self.pest.theta_est(calc_cov=False) - - self.assertAlmostEqual(objval, 4.3317112, places=2) - self.assertAlmostEqual(thetavals['asymptote'], 19.1426, places=2) # 19.1426 from the paper - self.assertAlmostEqual(thetavals['rate_constant'], 0.5311, places=2) # 0.5311 from the paper - @unittest.skipIf(not pynumero_ASL_available, "pynumero ASL is not available") @unittest.skipIf(not parmest.inverse_reduced_hessian_available, "Cannot test covariance matrix: required ASL dependency is missing") @@ -244,7 +214,7 @@ def test_theta_est_cov(self): @unittest.skipIf(not parmest.parmest_available, "Cannot test parmest: required dependencies are missing") @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") -class Test_parmest_indexed_variables(unittest.TestCase): +class TestIndexedVariables(unittest.TestCase): def make_model(self, theta_names): @@ -262,7 +232,9 @@ def rooney_biegler_model_alternate(data): model.var_names = pyo.Set(initialize=['asymptote','rate_constant']) model.theta = pyo.Var(model.var_names, initialize={'asymptote':15, 'rate_constant':0.5}) - + model.theta['asymptote'].fixed = True # parmest will unfix theta variables, even when they are indexed + 
model.theta['rate_constant'].fixed = True + def response_rule(m, h): expr = m.theta['asymptote'] * (1 - pyo.exp(-m.theta['rate_constant'] * h)) return expr @@ -279,7 +251,18 @@ def SSE(model, data): return expr return parmest.Estimator(rooney_biegler_model_alternate, data, theta_names, SSE) + + def test_theta_est(self): + theta_names = ["theta"] + + pest = self.make_model(theta_names) + objval, thetavals = pest.theta_est(calc_cov=False) + + self.assertAlmostEqual(objval, 4.3317112, places=2) + self.assertAlmostEqual(thetavals["theta[asymptote]"], 19.1426, places=2) + self.assertAlmostEqual(thetavals["theta[rate_constant]"], 0.5311, places=2) + def test_theta_est_quotedIndex(self): theta_names = ["theta['asymptote']", "theta['rate_constant']"] @@ -288,8 +271,8 @@ def test_theta_est_quotedIndex(self): objval, thetavals = pest.theta_est(calc_cov=False) self.assertAlmostEqual(objval, 4.3317112, places=2) - self.assertAlmostEqual(thetavals["theta[asymptote]"], 19.1426, places=2) # 19.1426 from the paper - self.assertAlmostEqual(thetavals['theta[rate_constant]'], 0.5311, places=2) # 0.5311 from the paper + self.assertAlmostEqual(thetavals["theta[asymptote]"], 19.1426, places=2) + self.assertAlmostEqual(thetavals["theta[rate_constant]"], 0.5311, places=2) def test_theta_est_impliedStrIndex(self): @@ -300,7 +283,7 @@ def test_theta_est_impliedStrIndex(self): self.assertAlmostEqual(objval, 4.3317112, places=2) self.assertAlmostEqual(thetavals["theta[asymptote]"], 19.1426, places=2) # 19.1426 from the paper - self.assertAlmostEqual(thetavals['theta[rate_constant]'], 0.5311, places=2) # 0.5311 from the paper + self.assertAlmostEqual(thetavals["theta[rate_constant]"], 0.5311, places=2) # 0.5311 from the paper @unittest.skipIf(not pynumero_ASL_available, "pynumero ASL is not available") @@ -308,7 +291,7 @@ def test_theta_est_impliedStrIndex(self): not parmest.inverse_reduced_hessian_available, "Cannot test covariance matrix: required ASL dependency is missing") def 
test_theta_est_cov(self): - theta_names = ["theta[asymptote]", "theta[rate_constant]"] + theta_names = ["theta"] pest = self.make_model(theta_names) objval, thetavals, cov = pest.theta_est(calc_cov=True) @@ -329,7 +312,7 @@ def test_theta_est_cov(self): "Cannot test parmest: required dependencies are missing") @unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") -class parmest_object_Tester_reactor_design(unittest.TestCase): +class TestReactorDesign(unittest.TestCase): def setUp(self): from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model @@ -389,7 +372,7 @@ def test_return_values(self): @unittest.skipIf(not graphics.imports_available, "parmest.graphics imports are unavailable") @unittest.skipIf(is_osx, "Disabling graphics tests on OSX due to issue in Matplotlib, see Pyomo PR #1337") -class parmest_graphics(unittest.TestCase): +class TestGraphics(unittest.TestCase): def setUp(self): self.A = pd.DataFrame(np.random.randint(0,100,size=(100,4)), columns=list('ABCD')) diff --git a/pyomo/contrib/parmest/tests/test_scenariocreator.py b/pyomo/contrib/parmest/tests/test_scenariocreator.py index ee58ebb8f4b..92794d334f4 100644 --- a/pyomo/contrib/parmest/tests/test_scenariocreator.py +++ b/pyomo/contrib/parmest/tests/test_scenariocreator.py @@ -31,7 +31,7 @@ @unittest.skipIf(not parmest.parmest_available, "Cannot test parmest: required dependencies are missing") @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") -class pamest_Scenario_creator_reactor_design(unittest.TestCase): +class TestScenarioReactorDesign(unittest.TestCase): def setUp(self): from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model @@ -99,7 +99,7 @@ def test_no_csv_if_empty(self): @unittest.skipIf(not parmest.parmest_available, "Cannot test parmest: required dependencies are missing") @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") -class 
pamest_Scenario_creator_semibatch(unittest.TestCase): +class TestScenarioSemibatch(unittest.TestCase): def setUp(self): import pyomo.contrib.parmest.examples.semibatch.semibatch as sb diff --git a/pyomo/contrib/preprocessing/plugins/var_aggregator.py b/pyomo/contrib/preprocessing/plugins/var_aggregator.py index 80849e97ec5..e04e36be440 100644 --- a/pyomo/contrib/preprocessing/plugins/var_aggregator.py +++ b/pyomo/contrib/preprocessing/plugins/var_aggregator.py @@ -281,21 +281,26 @@ def _apply_to(self, model, detect_fixed_vars=True): # Do the substitution substitution_map = {id(var): z_var for var, z_var in var_to_z.items()} + visitor = ExpressionReplacementVisitor( + substitute=substitution_map, + descend_into_named_expressions=True, + remove_named_expressions=False, + ) for constr in model.component_data_objects( ctype=Constraint, active=True ): - new_body = ExpressionReplacementVisitor( - substitute=substitution_map - ).dfs_postorder_stack(constr.body) - constr.set_value((constr.lower, new_body, constr.upper)) + orig_body = constr.body + new_body = visitor.walk_expression(constr.body) + if orig_body is not new_body: + constr.set_value((constr.lower, new_body, constr.upper)) for objective in model.component_data_objects( ctype=Objective, active=True ): - new_expr = ExpressionReplacementVisitor( - substitute=substitution_map - ).dfs_postorder_stack(objective.expr) - objective.set_value(new_expr) + orig_expr = objective.expr + new_expr = visitor.walk_expression(objective.expr) + if orig_expr is not new_expr: + objective.set_value(new_expr) def update_variables(self, model): """Update the values of the variables that were replaced by aggregates. diff --git a/pyomo/contrib/pynumero/asl.py b/pyomo/contrib/pynumero/asl.py index 7e55f92ea54..f7b25e5b0a0 100644 --- a/pyomo/contrib/pynumero/asl.py +++ b/pyomo/contrib/pynumero/asl.py @@ -223,7 +223,7 @@ def __init__(self, filename=None, nl_buffer=None): # the ASL. 
This should prevent it from potentially caching an # AMPLFUNC from the initial load and letting it bleed into # (potentially unrelated) subsequent instances - b_amplfunc = os.environ.pop('AMPLFUNC', '').encode('utf-8') + amplfunc = os.environ.pop('AMPLFUNC', '') if AmplInterface.ASLib is None: AmplInterface.ASLib, AmplInterface.interface_version \ @@ -235,8 +235,15 @@ def __init__(self, filename=None, nl_buffer=None): b_data = filename.encode('utf-8') if self.interface_version >= 2: - args = (b_data, b_amplfunc) + args = (b_data, amplfunc.encode('utf-8')) else: + # Old ASL interface library. + if amplfunc: + # we need to put AMPLFUNC back into the environment, + # as old versions of the library rely on ONLY the + # environment variable for passing the library(ies) + # locations to the ASL + os.environ['AMPLFUNC'] = amplfunc args = (b_data,) self._obj = self.ASLib.EXTERNAL_AmplInterface_new_file(*args) elif nl_buffer is not None: diff --git a/pyomo/contrib/pynumero/interfaces/ampl_nlp.py b/pyomo/contrib/pynumero/interfaces/ampl_nlp.py index 5958d1bf31b..4fd540016c9 100644 --- a/pyomo/contrib/pynumero/interfaces/ampl_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/ampl_nlp.py @@ -135,11 +135,6 @@ def _collect_nlp_structure(self): print(np.where(bounds_difference < 0)) raise RuntimeError("Some variables have lower bounds that are larger than the upper bounds.") - inconsistent_bounds = np.any(bounds_difference < 0.0) - if inconsistent_bounds: - # TODO: improve error message - raise RuntimeError("Variables found with upper bounds set below the lower bounds.") - # Build the maps for converting from the full constraint # vector (which includes all equality and inequality constraints) # to separate vectors of equality and inequality constraints. 
diff --git a/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py b/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py index 02c4e6ae294..a5e3e12ec1f 100644 --- a/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py +++ b/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py @@ -15,14 +15,18 @@ from pyomo.core.base.objective import Objective from pyomo.core.expr.visitor import identify_variables from pyomo.common.collections import ComponentSet +from pyomo.util.calc_var_value import calculate_variable_from_constraint from pyomo.util.subsystems import ( - create_subsystem_block, - TemporarySubsystemManager, - ) + create_subsystem_block, + TemporarySubsystemManager, +) from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP from pyomo.contrib.pynumero.interfaces.external_grey_box import ( - ExternalGreyBoxModel, - ) + ExternalGreyBoxModel, +) +from pyomo.contrib.incidence_analysis.util import ( + generate_strongly_connected_components, +) import numpy as np import scipy.sparse as sps @@ -114,7 +118,7 @@ def get_hessian_of_constraint(constraint, wrt1=None, wrt2=None, nlp=None): class ExternalPyomoModel(ExternalGreyBoxModel): """ - This is an ExternalGreyBoxModel used to create an exteral model + This is an ExternalGreyBoxModel used to create an external model from existing Pyomo components. 
Given a system of variables and equations partitioned into "input" and "external" variables and "residual" and "external" equations, this class computes the @@ -151,6 +155,10 @@ def __init__(self, self._block._obj = Objective(expr=0.0) self._nlp = PyomoNLP(self._block) + self._scc_list = list(generate_strongly_connected_components( + external_cons, variables=external_vars + )) + assert len(external_vars) == len(external_cons) self.input_vars = input_vars @@ -181,24 +189,110 @@ def set_input_values(self, input_values): for var, val in zip(input_vars, input_values): var.set_value(val) - _temp = create_subsystem_block(external_cons, variables=external_vars) - possible_input_vars = ComponentSet(input_vars) - #for var in _temp.input_vars.values(): - # # TODO: Is this check necessary? - # assert var in possible_input_vars - - with TemporarySubsystemManager(to_fix=list(_temp.input_vars.values())): - solver.solve(_temp) - - # Should we create the NLP from the original block or the temp block? - # Need to create it from the original block because temp block won't - # have residual constraints, whose derivatives are necessary. - self._nlp = PyomoNLP(self._block) + for block, inputs in self._scc_list: + if len(block.vars) == 1: + calculate_variable_from_constraint( + block.vars[0], block.cons[0] + ) + else: + with TemporarySubsystemManager(to_fix=inputs): + solver.solve(block) + + # Send updated variable values to NLP for dervative evaluation + primals = self._nlp.get_primals() + to_update = input_vars + external_vars + indices = self._nlp.get_primal_indices(to_update) + values = np.fromiter((var.value for var in to_update), float) + primals[indices] = values + self._nlp.set_primals(primals) def set_equality_constraint_multipliers(self, eq_con_multipliers): + """ + Sets multipliers for residual equality constraints seen by the + outer solver. 
+ + """ for i, val in enumerate(eq_con_multipliers): self.residual_con_multipliers[i] = val + def set_external_constraint_multipliers(self, eq_con_multipliers): + eq_con_multipliers = np.array(eq_con_multipliers) + external_multipliers = self.calculate_external_constraint_multipliers( + eq_con_multipliers, + ) + multipliers = np.concatenate((eq_con_multipliers, external_multipliers)) + cons = self.residual_cons + self.external_cons + n_con = len(cons) + assert n_con == self._nlp.n_constraints() + duals = np.zeros(n_con) + indices = self._nlp.get_constraint_indices(cons) + duals[indices] = multipliers + self._nlp.set_duals(duals) + + def calculate_external_constraint_multipliers(self, resid_multipliers): + """ + Calculates the multipliers of the external constraints from the + multipliers of the residual constraints (which are provided by + the "outer" solver). + + """ + # NOTE: This method implicitly relies on the value of inputs stored + # in the nlp. Should we also rely on the multiplier that are in + # the nlp? + # We would then need to call nlp.set_duals twice. Once with the + # residual multipliers and once with the full multipliers. + # I like the current approach better for now. + nlp = self._nlp + y = self.external_vars + f = self.residual_cons + g = self.external_cons + jfy = nlp.extract_submatrix_jacobian(y, f) + jgy = nlp.extract_submatrix_jacobian(y, g) + + jgy_t = jgy.transpose() + jfy_t = jfy.transpose() + dfdg = - sps.linalg.splu(jgy_t.tocsc()).solve(jfy_t.toarray()) + resid_multipliers = np.array(resid_multipliers) + external_multipliers = dfdg.dot(resid_multipliers) + return external_multipliers + + def get_full_space_lagrangian_hessians(self): + """ + Calculates terms of Hessian of full-space Lagrangian due to + external and residual constraints. Note that multipliers are + set by set_equality_constraint_multipliers. These matrices + are used to calculate the Hessian of the reduced-space + Lagrangian. 
+ + """ + nlp = self._nlp + x = self.input_vars + y = self.external_vars + hlxx = nlp.extract_submatrix_hessian_lag(x, x) + hlxy = nlp.extract_submatrix_hessian_lag(x, y) + hlyy = nlp.extract_submatrix_hessian_lag(y, y) + return hlxx, hlxy, hlyy + + def calculate_reduced_hessian_lagrangian(self, hlxx, hlxy, hlyy): + """ + Performs the matrix multiplications necessary to get the + reduced space Hessian-of-Lagrangian term from the full-space + terms. + + """ + # Converting to dense is faster for the distillation + # example. Does this make sense? + hlxx = hlxx.toarray() + hlxy = hlxy.toarray() + hlyy = hlyy.toarray() + dydx = self.evaluate_jacobian_external_variables() + term1 = hlxx + prod = hlxy.dot(dydx) + term2 = prod + prod.transpose() + term3 = hlyy.dot(dydx).transpose().dot(dydx) + hess_lag = term1 + term2 + term3 + return hess_lag + def evaluate_equality_constraints(self): return self._nlp.extract_subvector_constraints(self.residual_cons) @@ -336,13 +430,21 @@ def evaluate_hessian_equality_constraints(self): This method actually evaluates the sum of Hessians times multipliers, i.e. the term in the Hessian of the Lagrangian due to these equality constraints. + """ - d2fdx2 = self.evaluate_hessians_of_residuals() - multipliers = self.residual_con_multipliers - - sum_ = sum(mult*matrix for mult, matrix in zip(multipliers, d2fdx2)) - # Return a sparse matrix with every entry accounted for because it - # is difficult to determine rigorously which coordinates - # _could possibly_ be nonzero. - sparse = _dense_to_full_sparse(sum_) + # External multipliers must be calculated after both primals and duals + # are set, and are only necessary for this Hessian calculation. + # We know this Hessian calculation wants to use the most recently + # set primals and duals, so we can safely calculate external + # multipliers here. 
+ eq_con_multipliers = self.residual_con_multipliers + self.set_external_constraint_multipliers(eq_con_multipliers) + + # These are full-space Hessian-of-Lagrangian terms + hlxx, hlxy, hlyy = self.get_full_space_lagrangian_hessians() + + # These terms can be used to calculate the corresponding + # Hessian-of-Lagrangian term in the full space. + hess_lag = self.calculate_reduced_hessian_lagrangian(hlxx, hlxy, hlyy) + sparse = _dense_to_full_sparse(hess_lag) return sps.tril(sparse) diff --git a/pyomo/contrib/pynumero/interfaces/pyomo_grey_box_nlp.py b/pyomo/contrib/pynumero/interfaces/pyomo_grey_box_nlp.py index b4bfe235112..99866477414 100644 --- a/pyomo/contrib/pynumero/interfaces/pyomo_grey_box_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/pyomo_grey_box_nlp.py @@ -39,6 +39,7 @@ def __init__(self, pyomo_model): # this is done over *all* variables in active blocks, even # if they are not included in this model self._pyomo_model_var_names_to_datas = None + _name_buffer = {} try: # We support Pynumero's ExternalGreyBoxBlock modeling # objects that are provided through ExternalGreyBoxBlock objects @@ -56,10 +57,22 @@ def __init__(self, pyomo_model): # build a PyomoNLP object (will include the "pyomo" # part of the model only) self._pyomo_nlp = PyomoNLP(pyomo_model) - self._pyomo_model_var_names_to_datas = \ - {v.getname(fully_qualified=True):v for v in pyomo_model.component_data_objects(ctype=pyo.Var, descend_into=True)} - self._pyomo_model_constraint_names_to_datas = \ - {c.getname(fully_qualified=True):c for c in pyomo_model.component_data_objects(ctype=pyo.Constraint, descend_into=True)} + self._pyomo_model_var_names_to_datas = { + v.getname( + fully_qualified=True, name_buffer=_name_buffer + ): v + for v in pyomo_model.component_data_objects( + ctype=pyo.Var, descend_into=True + ) + } + self._pyomo_model_constraint_names_to_datas = { + c.getname( + fully_qualified=True, name_buffer=_name_buffer + ): c + for c in pyomo_model.component_data_objects( + 
ctype=pyo.Constraint, descend_into=True + ) + } finally: # Restore the ctypes of the ExternalGreyBoxBlock components @@ -448,22 +461,31 @@ def __init__(self, external_grey_box_block): # create the list of primals and constraint names # primals will be ordered inputs, followed by outputs - self._primals_names = \ - [self._block.inputs[k].getname(fully_qualified=True) \ - for k in self._block.inputs] + _name_buffer = dict() + self._primals_names = [ + self._block.inputs[k].getname( + fully_qualified=True, name_buffer=_name_buffer + ) for k in self._block.inputs + ] self._primals_names.extend( - [self._block.outputs[k].getname(fully_qualified=True) \ - for k in self._block.outputs] + self._block.outputs[k].getname( + fully_qualified=True, name_buffer=_name_buffer + ) + for k in self._block.outputs ) n_primals = len(self._primals_names) - prefix = self._block.getname(fully_qualified=True) + prefix = self._block.getname( + fully_qualified=True, name_buffer=_name_buffer + ) self._constraint_names = \ ['{}.{}'.format(prefix, nm) \ for nm in self._ex_model.equality_constraint_names()] - output_var_names = \ - [self._block.outputs[k].getname(fully_qualified=False) \ - for k in self._block.outputs] + output_var_names = [ + self._block.outputs[k].getname( + fully_qualified=False, name_buffer=_name_buffer + ) for k in self._block.outputs + ] self._constraint_names.extend( ['{}.output_constraints[{}]'.format(prefix, nm) \ for nm in self._ex_model.output_names()]) diff --git a/pyomo/contrib/pynumero/interfaces/pyomo_nlp.py b/pyomo/contrib/pynumero/interfaces/pyomo_nlp.py index ec7cc8d44ba..9eb5cfeaad5 100644 --- a/pyomo/contrib/pynumero/interfaces/pyomo_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/pyomo_nlp.py @@ -180,7 +180,11 @@ def primals_names(self): names in the order corresponding to the primals """ pyomo_variables = self.get_pyomo_variables() - return [v.getname(fully_qualified=True) for v in pyomo_variables] + name_buffer = {} + return [ + 
v.getname(fully_qualified=True, name_buffer=name_buffer) + for v in pyomo_variables + ] def constraint_names(self): """ @@ -188,7 +192,11 @@ def constraint_names(self): names in the order corresponding to internal constraint order """ pyomo_constraints = self.get_pyomo_constraints() - return [v.getname(fully_qualified=True) for v in pyomo_constraints] + name_buffer = {} + return [ + v.getname(fully_qualified=True, name_buffer=name_buffer) + for v in pyomo_constraints + ] def equality_constraint_names(self): """ @@ -196,7 +204,11 @@ def equality_constraint_names(self): the order corresponding to the equality constraints. """ equality_constraints = self.get_pyomo_equality_constraints() - return [v.getname(fully_qualified=True) for v in equality_constraints] + name_buffer = {} + return [ + v.getname(fully_qualified=True, name_buffer=name_buffer) + for v in equality_constraints + ] def inequality_constraint_names(self): """ @@ -204,7 +216,11 @@ def inequality_constraint_names(self): the order corresponding to the inequality constraints. 
""" inequality_constraints = self.get_pyomo_inequality_constraints() - return [v.getname(fully_qualified=True) for v in inequality_constraints] + name_buffer = {} + return [ + v.getname(fully_qualified=True, name_buffer=name_buffer) + for v in inequality_constraints + ] def get_primal_indices(self, pyomo_variables): """ diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py index 7bfad8dbdf5..cb9e6f11017 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py @@ -261,6 +261,66 @@ def evaluate_external_hessian(self, x): ]) return [d2y0dxdx, d2y1dxdx] + def calculate_external_multipliers(self, lam, x): + r""" + Calculates the multipliers of the external constraints + from the multipliers of the residual constraints, + assuming zero dual infeasibility in the coordinates of + the external variables. 
+ This is calculated analytically from: + + \nabla_y f^T \lambda_f + \nabla_y g^T \lambda_g = 0 + + """ + y = self.evaluate_external_variables(x) + lg0 = -2*y[1]*lam[0]/(x[0]**2 * x[1]**0.5 * y[0]) + lg1 = -(2*y[0]*lam[0] + x[0]**2*x[1]**0.5*y[1]*lg0)/(x[0]*x[1]) + return [lg0, lg1] + + def calculate_full_space_lagrangian_hessians(self, lam, x): + y = self.evaluate_external_variables(x) + lam_g = self.calculate_external_multipliers(lam, x) + d2fdx0dx0 = 2.0 + d2fdx1dx1 = 2.0 + d2fdy0dy0 = 2.0 + d2fdy1dy1 = 2.0 + hfxx = np.array([[d2fdx0dx0, 0], [0, d2fdx1dx1]]) + hfxy = np.array([[0, 0], [0, 0]]) + hfyy = np.array([[d2fdy0dy0, 0], [0, d2fdy1dy1]]) + + dg0dx0dx0 = 2*y[0]*x[1]**0.5*y[1] + dg0dx0dx1 = x[0]*y[0]*y[1]/x[1]**0.5 + dg0dx1dx1 = -1/4*x[0]**2*y[0]*y[1]/x[1]**(3/2) + dg0dx0dy0 = 2*x[0]*x[1]**0.5*y[1] + dg0dx0dy1 = 2*x[0]*y[0]*x[1]**0.5 + dg0dx1dy0 = 0.5*x[0]**2*y[1]/x[1]**0.5 + dg0dx1dy1 = 0.5*x[0]**2*y[0]/x[1]**0.5 + dg0dy0dy1 = x[0]**2*x[1]**0.5 + hg0xx = np.array([[dg0dx0dx0, dg0dx0dx1], [dg0dx0dx1, dg0dx1dx1]]) + hg0xy = np.array([[dg0dx0dy0, dg0dx0dy1], [dg0dx1dy0, dg0dx1dy1]]) + hg0yy = np.array([[0, dg0dy0dy1], [dg0dy0dy1, 0]]) + + dg1dx0dx1 = y[0] + dg1dx0dy0 = x[1] + dg1dx1dy0 = x[0] + hg1xx = np.array([[0, dg1dx0dx1], [dg1dx0dx1, 0]]) + hg1xy = np.array([[dg1dx0dy0, 0], [dg1dx1dy0, 0]]) + hg1yy = np.zeros((2, 2)) + + hlxx = lam[0]*hfxx + lam_g[0]*hg0xx + lam_g[1]*hg1xx + hlxy = lam[0]*hfxy + lam_g[0]*hg0xy + lam_g[1]*hg1xy + hlyy = lam[0]*hfyy + lam_g[0]*hg0yy + lam_g[1]*hg1yy + return hlxx, hlxy, hlyy + + def calculate_reduced_lagrangian_hessian(self, lam, x): + dydx = self.evaluate_external_jacobian(x) + hlxx, hlxy, hlyy = self.calculate_full_space_lagrangian_hessians(lam, x) + return ( + hlxx + + (hlxy.dot(dydx)).transpose() + hlxy.dot(dydx) + + dydx.transpose().dot(hlyy).dot(dydx) + ) + class TestGetHessianOfConstraint(unittest.TestCase): @@ -771,5 +831,207 @@ def test_evaluate_hessian_lagrangian_SimpleModel2x2_1(self): 
np.testing.assert_allclose(hess_lag, expected_hess_lag, rtol=1e-8) +class TestUpdatedHessianCalculationMethods(unittest.TestCase): + """ + These tests exercise the methods for fast Hessian-of-Lagrangian + computation. + They use Model2by2 because it has constraints that are nonlinear + in both x and y. + + """ + + def test_external_multipliers_from_residual_multipliers(self): + model = Model2by2() + m = model.make_model() + m.x[0].set_value(1.0) + m.x[1].set_value(2.0) + m.y[0].set_value(3.0) + m.y[1].set_value(4.0) + x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] + x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] + lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] + init_list = list( + itertools.product(x0_init_list, x1_init_list, lam_init_list) + ) + external_model = ExternalPyomoModel( + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) + + for x0, x1, lam in init_list: + x = [x0, x1] + lam = [lam] + external_model.set_input_values(x) + lam_g = external_model.calculate_external_constraint_multipliers(lam) + pred_lam_g = model.calculate_external_multipliers(lam, x) + np.testing.assert_allclose(lam_g, pred_lam_g, rtol=1e-8) + + def test_full_space_lagrangian_hessians(self): + model = Model2by2() + m = model.make_model() + m.x[0].set_value(1.0) + m.x[1].set_value(2.0) + m.y[0].set_value(3.0) + m.y[1].set_value(4.0) + x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] + x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] + lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] + init_list = list( + itertools.product(x0_init_list, x1_init_list, lam_init_list) + ) + external_model = ExternalPyomoModel( + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) + + for x0, x1, lam in init_list: + x = [x0, x1] + lam = [lam] + external_model.set_input_values(x) + # Note that these multiplier calculations are dependent on x, + # so if we switch their order, we will get "wrong" answers. 
+ # (This is wrong in the sense that the residual and external + # multipliers won't necessarily correspond). + external_model.set_external_constraint_multipliers(lam) + hlxx, hlxy, hlyy = \ + external_model.get_full_space_lagrangian_hessians() + pred_hlxx, pred_hlxy, pred_hlyy = \ + model.calculate_full_space_lagrangian_hessians(lam, x) + + # TODO: Is comparing the array representation sufficient here? + # Should I make sure I get the sparse representation I expect? + np.testing.assert_allclose(hlxx.toarray(), pred_hlxx, rtol=1e-8) + np.testing.assert_allclose(hlxy.toarray(), pred_hlxy, rtol=1e-8) + np.testing.assert_allclose(hlyy.toarray(), pred_hlyy, rtol=1e-8) + + def test_reduced_hessian_lagrangian(self): + model = Model2by2() + m = model.make_model() + m.x[0].set_value(1.0) + m.x[1].set_value(2.0) + m.y[0].set_value(3.0) + m.y[1].set_value(4.0) + x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] + x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] + lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] + init_list = list( + itertools.product(x0_init_list, x1_init_list, lam_init_list) + ) + external_model = ExternalPyomoModel( + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) + + for x0, x1, lam in init_list: + x = [x0, x1] + lam = [lam] + external_model.set_input_values(x) + # Same comment as previous test regarding calculation order + external_model.set_external_constraint_multipliers(lam) + hlxx, hlxy, hlyy = \ + external_model.get_full_space_lagrangian_hessians() + hess = external_model.calculate_reduced_hessian_lagrangian( + hlxx, hlxy, hlyy + ) + pred_hess = model.calculate_reduced_lagrangian_hessian(lam, x) + # This test asserts that we are doing the block reduction properly. 
+ np.testing.assert_allclose(np.array(hess), pred_hess, rtol=1e-8) + + from_individual = external_model.evaluate_hessians_of_residuals() + hl_from_individual = sum(l*h for l, h in zip(lam, from_individual)) + # This test asserts that the block reduction is correct. + np.testing.assert_allclose( + np.array(hess), hl_from_individual, rtol=1e-8 + ) + + def test_evaluate_hessian_equality_constraints(self): + model = Model2by2() + m = model.make_model() + m.x[0].set_value(1.0) + m.x[1].set_value(2.0) + m.y[0].set_value(3.0) + m.y[1].set_value(4.0) + x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] + x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] + lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] + init_list = list( + itertools.product(x0_init_list, x1_init_list, lam_init_list) + ) + external_model = ExternalPyomoModel( + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) + + for x0, x1, lam in init_list: + x = [x0, x1] + lam = [lam] + external_model.set_input_values(x) + external_model.set_equality_constraint_multipliers(lam) + hess = external_model.evaluate_hessian_equality_constraints() + pred_hess = model.calculate_reduced_lagrangian_hessian(lam, x) + # This test asserts that we are doing the block reduction properly. + np.testing.assert_allclose( + hess.toarray(), np.tril(pred_hess), rtol=1e-8 + ) + + from_individual = external_model.evaluate_hessians_of_residuals() + hl_from_individual = sum(l*h for l, h in zip(lam, from_individual)) + # This test asserts that the block reduction is correct. 
+ np.testing.assert_allclose( + hess.toarray(), np.tril(hl_from_individual), rtol=1e-8 + ) + + def test_evaluate_hessian_equality_constraints_order(self): + model = Model2by2() + m = model.make_model() + m.x[0].set_value(1.0) + m.x[1].set_value(2.0) + m.y[0].set_value(3.0) + m.y[1].set_value(4.0) + x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] + x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] + lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] + init_list = list( + itertools.product(x0_init_list, x1_init_list, lam_init_list) + ) + external_model = ExternalPyomoModel( + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) + + for x0, x1, lam in init_list: + x = [x0, x1] + lam = [lam] + external_model.set_equality_constraint_multipliers(lam) + external_model.set_input_values(x) + # Using evaluate_hessian_equality_constraints, which calculates + # external multiplier values, we can calculate the correct Hessian + # regardless of the order in which primal and dual variables are + # set. + hess = external_model.evaluate_hessian_equality_constraints() + pred_hess = model.calculate_reduced_lagrangian_hessian(lam, x) + # This test asserts that we are doing the block reduction properly. + np.testing.assert_allclose( + hess.toarray(), np.tril(pred_hess), rtol=1e-8 + ) + + from_individual = external_model.evaluate_hessians_of_residuals() + hl_from_individual = sum(l*h for l, h in zip(lam, from_individual)) + # This test asserts that the block reduction is correct. 
+ np.testing.assert_allclose( + hess.toarray(), np.tril(hl_from_individual), rtol=1e-8 + ) + + if __name__ == '__main__': unittest.main() diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py b/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py index 6c0fd9b1883..401330cd33d 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py @@ -39,7 +39,7 @@ def create_pyomo_model1(): xb = dict() xb[1] = (-1,1) - xb[2] = (-np.inf,2) + xb[2] = (2,2) xb[3] = (-3,np.inf) xb[4] = (-np.inf, np.inf) xb[5] = (-5,5) @@ -104,7 +104,7 @@ def execute_extended_nlp_interface(self, anlp): self.assertEqual(anlp.nnz_jacobian_ineq(), 7*9) self.assertEqual(anlp.nnz_hessian_lag(), 9*9) - expected_primals_lb = np.asarray([-1, -np.inf, -3, -np.inf, -5, -np.inf, -7, -np.inf, -9], dtype=np.float64) + expected_primals_lb = np.asarray([-1, 2, -3, -np.inf, -5, -np.inf, -7, -np.inf, -9], dtype=np.float64) expected_primals_ub = np.asarray([1, 2, np.inf, np.inf, 5, 6, np.inf, np.inf, 9], dtype=np.float64) self.assertTrue(np.array_equal(expected_primals_lb, anlp.primals_lb())) self.assertTrue(np.array_equal(expected_primals_ub, anlp.primals_ub())) @@ -645,7 +645,7 @@ def test_util_maps(self): # test build_bounds_mask - should be the same as above self.assertTrue(np.array_equal(full_to_compressed_mask, build_bounds_mask(anlp.primals_lb()))) - expected_compressed_primals_lb = np.asarray([-1, -3, -5, -7, -9], dtype=np.float64) + expected_compressed_primals_lb = np.asarray([-1, 2, -3, -5, -7, -9], dtype=np.float64) # test build_compression_matrix C = build_compression_matrix(full_to_compressed_mask) @@ -662,7 +662,7 @@ def test_util_maps(self): self.assertTrue(np.array_equal(expected_compressed_primals_lb, compressed_primals_lb)) # test compressed_to_full - expected_full_primals_lb = np.asarray([-1, -np.inf, -3, -np.inf, -5, -np.inf, -7, -np.inf, -9], dtype=np.float64) + expected_full_primals_lb = np.asarray([-1, 2, -3, -np.inf, 
-5, -np.inf, -7, -np.inf, -9], dtype=np.float64) full_primals_lb = compressed_to_full(compressed_primals_lb, full_to_compressed_mask, default=-np.inf) self.assertTrue(np.array_equal(expected_full_primals_lb, full_primals_lb)) # test in place @@ -672,13 +672,13 @@ def test_util_maps(self): self.assertTrue(np.array_equal(expected_full_primals_lb, full_primals_lb)) # test no default - expected_full_primals_lb = np.asarray([-1, np.nan, -3, np.nan, -5, np.nan, -7, np.nan, -9], dtype=np.float64) + expected_full_primals_lb = np.asarray([-1, 2, -3, np.nan, -5, np.nan, -7, np.nan, -9], dtype=np.float64) full_primals_lb = compressed_to_full(compressed_primals_lb, full_to_compressed_mask) print(expected_full_primals_lb) print(full_primals_lb) np.testing.assert_array_equal(expected_full_primals_lb, full_primals_lb) # test in place no default - expected_full_primals_lb = np.asarray([-1, 0.0, -3, 0.0, -5, 0.0, -7, 0.0, -9], dtype=np.float64) + expected_full_primals_lb = np.asarray([-1, 2, -3, 0.0, -5, 0.0, -7, 0.0, -9], dtype=np.float64) full_primals_lb.fill(0.0) ret = compressed_to_full(compressed_primals_lb, full_to_compressed_mask, out=full_primals_lb) self.assertTrue(ret is full_primals_lb) diff --git a/pyomo/contrib/pyros/tests/test_grcs.py b/pyomo/contrib/pyros/tests/test_grcs.py index 77d48ba6280..72005987581 100644 --- a/pyomo/contrib/pyros/tests/test_grcs.py +++ b/pyomo/contrib/pyros/tests/test_grcs.py @@ -1443,9 +1443,9 @@ def test_coefficient_matching_robust_infeasible_proof(self): self.assertEqual(robust_infeasible, True, msg="Coefficient matching should be proven robust infeasible.") # === regression test for the solver +@unittest.skipUnless(SolverFactory('baron').available(exception_flag=False), "Global NLP solver is not available.") class RegressionTest(unittest.TestCase): - @unittest.skipUnless(SolverFactory('baron').available(exception_flag=False), "Global NLP solver is not available.") def regression_test_constant_drs(self): model = m = ConcreteModel() m.name = 
"s381" @@ -1480,8 +1480,6 @@ def regression_test_constant_drs(self): self.assertTrue(results.pyros_termination_condition, pyrosTerminationCondition.robust_feasible) - @unittest.skipUnless(SolverFactory('baron').available(exception_flag=False), - "Global NLP solver is not available.") def regression_test_affine_drs(self): model = m = ConcreteModel() m.name = "s381" @@ -1517,8 +1515,6 @@ def regression_test_affine_drs(self): self.assertTrue(results.pyros_termination_condition, pyrosTerminationCondition.robust_feasible) - @unittest.skipUnless(SolverFactory('baron').available(exception_flag=False), - "Global NLP solver is not available.") def regression_test_quad_drs(self): model = m = ConcreteModel() m.name = "s381" @@ -1554,7 +1550,7 @@ def regression_test_quad_drs(self): self.assertTrue(results.pyros_termination_condition, pyrosTerminationCondition.robust_feasible) - @unittest.skipUnless(SolverFactory('baron').available(exception_flag=False) and SolverFactory('baron').license_is_valid(), + @unittest.skipUnless(SolverFactory('baron').license_is_valid(), "Global NLP solver is not available and licensed.") def test_minimize_dr_norm(self): m = ConcreteModel() @@ -1599,9 +1595,8 @@ def test_minimize_dr_norm(self): self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal, msg="Minimize dr norm did not solve to optimality.") - @unittest.skipUnless( - SolverFactory('baron').available(exception_flag=False) and SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless(SolverFactory('baron').license_is_valid(), + "Global NLP solver is not available and licensed.") def test_identifying_violating_param_realization(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -1642,9 +1637,8 @@ def test_identifying_violating_param_realization(self): self.assertGreater(results.iterations, 0, msg="Robust infeasible model terminated in 0 iterations (nominal case).") - 
@unittest.skipUnless( - SolverFactory('baron').available(exception_flag=False) and SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless(SolverFactory('baron').license_is_valid(), + "Global NLP solver is not available and licensed.") def test_terminate_with_max_iter(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -1685,9 +1679,8 @@ def test_terminate_with_max_iter(self): self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.max_iter, msg="Returned termination condition is not return max_iter.") - @unittest.skipUnless( - SolverFactory('baron').available(exception_flag=False) and SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless(SolverFactory('baron').license_is_valid(), + "Global NLP solver is not available and licensed.") def test_terminate_with_time_limit(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -1727,9 +1720,8 @@ def test_terminate_with_time_limit(self): self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.time_out, msg="Returned termination condition is not return time_out.") - @unittest.skipUnless( - SolverFactory('baron').available(exception_flag=False) and SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless(SolverFactory('baron').license_is_valid(), + "Global NLP solver is not available and licensed.") def test_discrete_separation(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -1768,9 +1760,8 @@ def test_discrete_separation(self): self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.robust_optimal, msg="Returned termination condition is not return robust_optimal.") - @unittest.skipUnless( - SolverFactory('baron').available(exception_flag=False) and SolverFactory('baron').license_is_valid(), - 
"Global NLP solver is not available and licensed.") + @unittest.skipUnless(SolverFactory('baron').license_is_valid(), + "Global NLP solver is not available and licensed.") def test_higher_order_decision_rules(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -1810,9 +1801,8 @@ def test_higher_order_decision_rules(self): self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.robust_optimal, msg="Returned termination condition is not return robust_optimal.") - @unittest.skipUnless( - SolverFactory('baron').available(exception_flag=False) and SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless(SolverFactory('baron').license_is_valid(), + "Global NLP solver is not available and licensed.") def test_coefficient_matching_solve(self): # Write the deterministic Pyomo model @@ -1909,7 +1899,9 @@ def test_coefficient_matching_nonlinear_expr(self): global_subsolver = SolverFactory("baron") # Call the PyROS solver - try: + with self.assertRaises( + ValueError, msg="ValueError should be raised for general " + "nonlinear expressions in h(x,z,q)=0 constraints."): results = pyros_solver.solve(model=m, first_stage_variables=[m.x1], second_stage_variables=[m.x2], @@ -1922,9 +1914,6 @@ def test_coefficient_matching_nonlinear_expr(self): "solve_master_globally": True, "decision_rule_order":1 }) - except: - self.assertRaises(ValueError, msg="ValueError should be " - "raised for general nonlinear expressions in h(x,z,q)=0 constraints.") diff --git a/pyomo/contrib/sensitivity_toolbox/sens.py b/pyomo/contrib/sensitivity_toolbox/sens.py index d643701ad31..01bdcf901b6 100644 --- a/pyomo/contrib/sensitivity_toolbox/sens.py +++ b/pyomo/contrib/sensitivity_toolbox/sens.py @@ -600,7 +600,7 @@ def _replace_parameters_in_constraints(self, variableSubMap): active=True, descend_into=True)): tempName = unique_component_name(block, obj.local_name) - new_expr = 
param_replacer.dfs_postorder_stack(obj.expr) + new_expr = param_replacer.walk_expression(obj.expr) block.add_component(tempName, Objective(expr=new_expr)) new_old_comp_map[block.component(tempName)] = obj obj.deactivate() @@ -615,16 +615,16 @@ def _replace_parameters_in_constraints(self, variableSubMap): last_idx = 0 for con in old_con_list: if (con.equality or con.lower is None or con.upper is None): - new_expr = param_replacer.dfs_postorder_stack(con.expr) + new_expr = param_replacer.walk_expression(con.expr) block.constList.add(expr=new_expr) last_idx += 1 new_old_comp_map[block.constList[last_idx]] = con else: # Constraint must be a ranged inequality, break into # separate constraints - new_body = param_replacer.dfs_postorder_stack(con.body) - new_lower = param_replacer.dfs_postorder_stack(con.lower) - new_upper = param_replacer.dfs_postorder_stack(con.upper) + new_body = param_replacer.walk_expression(con.body) + new_lower = param_replacer.walk_expression(con.lower) + new_upper = param_replacer.walk_expression(con.upper) # Add constraint for lower bound block.constList.add(expr=(new_lower <= new_body)) diff --git a/pyomo/contrib/trustregion/PyomoInterface.py b/pyomo/contrib/trustregion/PyomoInterface.py index 0067042196f..36db224b66b 100644 --- a/pyomo/contrib/trustregion/PyomoInterface.py +++ b/pyomo/contrib/trustregion/PyomoInterface.py @@ -41,7 +41,8 @@ def __init__(self, trf_block, efSet): self.trf = trf_block self.efSet = efSet - def visit(self, node, values): + def exitNode(self, node, values): + node = super().exitNode(node, values) if node.__class__ is not EXPR.ExternalFunctionExpression: return node if id(node._fcn) not in self.efSet: @@ -62,7 +63,7 @@ def visit(self, node, values): # PythonCallbackFunction API (that restriction leads unfortunate # things later; i.e., accessing the private _fcn attribute # below). 
- for arg in list(values)[1:]: + for arg in values[1][1:]: if type(arg) in nonpyomo_leaf_types or arg.is_fixed(): # We currently do not allow constants or parameters for # the external functions. diff --git a/pyomo/core/base/block.py b/pyomo/core/base/block.py index 53edff89996..b599cd57751 100644 --- a/pyomo/core/base/block.py +++ b/pyomo/core/base/block.py @@ -899,9 +899,26 @@ def model(self): def find_component(self, label_or_component): """ - Return a block component given a name. + Returns a component in the block given a name. + + Parameters + ---------- + label_or_component : str, Component, or ComponentUID + The name of the component to find in this block. String or + Component arguments are first converted to ComponentUID. + + Returns + ------- + Component + Component on the block identified by the ComponentUID. If + a matching component is not found, None is returned. + """ - return ComponentUID(label_or_component).find_component_on(self) + if type(label_or_component) is ComponentUID: + cuid = label_or_component + else: + cuid = ComponentUID(label_or_component) + return cuid.find_component_on(self) def add_component(self, name, val): """ diff --git a/pyomo/core/base/boolean_var.py b/pyomo/core/base/boolean_var.py index d8ee0301d22..0e8b4857ffa 100644 --- a/pyomo/core/base/boolean_var.py +++ b/pyomo/core/base/boolean_var.py @@ -9,22 +9,55 @@ # ___________________________________________________________________________ import logging -from weakref import ref as weakref_ref +from weakref import ref as weakref_ref, ReferenceType from pyomo.common.deprecation import RenamedClass from pyomo.common.log import is_debug_set from pyomo.common.timing import ConstructionTimer +from pyomo.common.modeling import unique_component_name +from pyomo.common.deprecation import deprecation_warning from pyomo.core.expr.boolean_value import BooleanValue from pyomo.core.expr.numvalue import value from pyomo.core.base.component import ComponentData, ModelComponentFactory -from 
pyomo.core.base.indexed_component import IndexedComponent, UnindexedComponent_set +from pyomo.core.base.indexed_component import (IndexedComponent, + UnindexedComponent_set) from pyomo.core.base.misc import apply_indexed_rule -from pyomo.core.base.set import Set, BooleanSet +from pyomo.core.base.set import Set, BooleanSet, Binary from pyomo.core.base.util import is_functor +from pyomo.core.base.var import Var logger = logging.getLogger('pyomo.core') +class _DeprecatedImplicitAssociatedBinaryVariable(object): + __slots__ = ('_boolvar',) + + def __init__(self, boolvar): + self._boolvar = weakref_ref(boolvar) + + def __call__(self): + deprecation_warning( + "Relying on core.logical_to_linear to transform " + "BooleanVars that do not appear in LogicalConstraints " + "is deprecated. Please associate your own binaries if " + "you have BooleanVars not used in logical expressions.", + version='TBD') + + parent_block = self._boolvar().parent_block() + new_var = Var(domain=Binary) + parent_block.add_component( + unique_component_name(parent_block, + self._boolvar().local_name + "_asbinary"), + new_var) + self._boolvar()._associated_binary = None + self._boolvar().associate_binary_var(new_var) + return new_var + + def __getstate__(self): + return {'_boolvar': self._boolvar()} + + def __setstate__(self, state): + self._boolvar = weakref_ref(state['_boolvar']) class _BooleanVarData(ComponentData, BooleanValue): """ @@ -126,17 +159,6 @@ def unfix(self): free=unfix - def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): - """Return the component name""" - if self.fixed and compute_values: - try: - return str(self()) - except: - pass - if smap: - return smap.getSymbol(self, labeler) - return self.name - class _GeneralBooleanVarData(_BooleanVarData): """ @@ -182,7 +204,7 @@ def __getstate__(self): state = super().__getstate__() for i in _GeneralBooleanVarData.__slots__: state[i] = getattr(self, i) - if self._associated_binary is not None: + if 
isinstance(self._associated_binary, ReferenceType): state['_associated_binary'] = self._associated_binary() return state @@ -193,7 +215,9 @@ def __setstate__(self, state): """ super().__setstate__(state) - if self._associated_binary is not None: + if self._associated_binary is not None and \ + type(self._associated_binary) is not \ + _DeprecatedImplicitAssociatedBinaryVariable: self._associated_binary = weakref_ref(self._associated_binary) # @@ -229,7 +253,8 @@ def fix(self, *val): if len(val) == 1: self.value = val[0] elif len(val) > 1: - raise TypeError("fix expected at most 1 arguments, got %d" % (len(val))) + raise TypeError("fix expected at most 1 arguments, got %d" % + (len(val))) def unfix(self): """Sets the fixed indicator to False.""" @@ -238,17 +263,21 @@ def unfix(self): free = unfix def get_associated_binary(self): - """Get the binary _VarData associated with this _GeneralBooleanVarData""" - return self._associated_binary() if self._associated_binary is not None else None + """Get the binary _VarData associated with this + _GeneralBooleanVarData""" + return self._associated_binary() if self._associated_binary \ + is not None else None def associate_binary_var(self, binary_var): """Associate a binary _VarData to this _GeneralBooleanVarData""" - if self._associated_binary is not None: + if self._associated_binary is not None and \ + type(self._associated_binary) is not \ + _DeprecatedImplicitAssociatedBinaryVariable: raise RuntimeError( "Reassociating BooleanVar '%s' (currently associated " "with '%s') with '%s' is not allowed" % ( self.name, - self._associated_binary.name + self._associated_binary().name if self._associated_binary is not None else None, binary_var.name if binary_var is not None else None)) if binary_var is not None: @@ -293,7 +322,8 @@ def __init__(self, *args, **kwd): self._value_init_value = None self._value_init_rule = None - if is_functor(initialize) and (not isinstance(initialize, BooleanValue)): + if is_functor(initialize) and ( + 
not isinstance(initialize, BooleanValue)): self._value_init_rule = initialize else: self._value_init_value = initialize diff --git a/pyomo/core/base/component.py b/pyomo/core/base/component.py index c3f9fc3f110..7a245b1e803 100644 --- a/pyomo/core/base/component.py +++ b/pyomo/core/base/component.py @@ -322,6 +322,10 @@ def _pprint_base_impl(self, ostream, verbose, prefix, _name, _doc, # The first line should be a hanging indent (i.e., not indented) ostream.newline = False + if self.is_reference(): + _attr = list(_attr) if _attr else [] + _attr.append(('ReferenceTo', self.referent)) + if _name: ostream.write(_name+" : ") if _doc: @@ -544,15 +548,6 @@ def __str__(self): """Return the component name""" return self.name - def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): - """Return the component name""" - if compute_values: - try: - return str(self()) - except: - pass - return self.name - def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): """Returns the component name associated with this object. @@ -854,23 +849,6 @@ def __str__(self): """Return a string with the component name and index""" return self.name - def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): - """ - Return a string representation of this component, - applying the labeler if passed one. 
- """ - if compute_values: - try: - return str(self()) - except: - pass - if smap: - return smap.getSymbol(self, labeler) - if labeler is not None: - return labeler(self) - else: - return self.__str__() - def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): """Return a string with the component name and index""" # diff --git a/pyomo/core/base/constraint.py b/pyomo/core/base/constraint.py index 8120a84d1cd..67d0001d652 100644 --- a/pyomo/core/base/constraint.py +++ b/pyomo/core/base/constraint.py @@ -496,7 +496,7 @@ def set_value(self, expr): " Inequality: (lower, expression, upper)" % (self.name, len(expr))) # - # Ignore an 'empty' constraints + # Ignore an 'empty' constraint # elif _expr_type is type: del self.parent_component()[self.index()] @@ -569,7 +569,8 @@ def set_value(self, expr): # Error check: ensure equality does not have infinite RHS raise ValueError( "Equality constraint '%s' defined with " - "non-finite term." % (self.name)) + "non-finite term (%sHS == None)." 
% ( + self.name, 'L' if args[0] is None else 'R')) if args[0].__class__ in native_numeric_types or \ not args[0].is_potentially_variable(): self._lower = self._upper = args[0] @@ -686,26 +687,17 @@ def __new__(cls, *args, **kwds): return super(Constraint, cls).__new__(IndexedConstraint) def __init__(self, *args, **kwargs): - _init = tuple( _arg for _arg in ( - kwargs.pop('rule', None), - kwargs.pop('expr', None) ) if _arg is not None ) - if len(_init) == 1: - _init = _init[0] - elif not _init: - _init = None - else: - raise ValueError("Duplicate initialization: Constraint() only " - "accepts one of 'rule=' and 'expr='") - - kwargs.setdefault('ctype', Constraint) - ActiveIndexedComponent.__init__(self, *args, **kwargs) - + _init = self._pop_from_kwargs( + 'Constraint', kwargs, ('rule', 'expr'), None) # Special case: we accept 2- and 3-tuples as constraints if type(_init) is tuple: self.rule = Initializer(_init, treat_sequences_as_mappings=False) else: self.rule = Initializer(_init) + kwargs.setdefault('ctype', Constraint) + ActiveIndexedComponent.__init__(self, *args, **kwargs) + def construct(self, data=None): """ Construct the expression(s) for this constraint. 
diff --git a/pyomo/core/base/expression.py b/pyomo/core/base/expression.py index 0a2fb4c7737..57961072345 100644 --- a/pyomo/core/base/expression.py +++ b/pyomo/core/base/expression.py @@ -269,28 +269,20 @@ def __new__(cls, *args, **kwds): return IndexedExpression.__new__(IndexedExpression) def __init__(self, *args, **kwds): - _init = tuple( - arg for arg in - (kwds.pop(_arg, None) for _arg in ('rule', 'expr', 'initialize')) - if arg is not None - ) - if len(_init) == 1: - _init = _init[0] - elif not _init: - _init = None - else: - raise ValueError( - "Duplicate initialization: Expression() only " - "accepts one of 'rule=', 'expr=', and 'initialize='") - - kwds.setdefault('ctype', Expression) - IndexedComponent.__init__(self, *args, **kwds) - + _init = self._pop_from_kwargs( + 'Expression', kwds, ('rule', 'expr', 'initialize'), None) # Historically, Expression objects were dense (but None): # setting arg_not_specified causes Initializer to recognize # _init==None as a constant initializer returning None + # + # To initialize a completely empty Expression, pass either + # initialize={} (to require explicit setitem before a getitem), + # or initialize=NOTSET (to allow getitem before setitem) self._rule = Initializer(_init, arg_not_specified=NOTSET) + kwds.setdefault('ctype', Expression) + IndexedComponent.__init__(self, *args, **kwds) + def _pprint(self): return ( [('Size', len(self)), @@ -359,10 +351,9 @@ def _getitem_when_not_present(self, idx): #raise KeyError(idx) else: _init = self._rule(self.parent_block(), idx) - obj = self._setitem_when_not_present(idx, _init) - #if obj is None: - # raise KeyError(idx) - return obj + if _init is Expression.Skip: + raise KeyError(idx) + return self._setitem_when_not_present(idx, _init) def construct(self, data=None): """ Apply the rule to construct values in this set """ diff --git a/pyomo/core/base/global_set.py b/pyomo/core/base/global_set.py index 7e22999b220..2987207759c 100644 --- a/pyomo/core/base/global_set.py +++ 
b/pyomo/core/base/global_set.py @@ -67,7 +67,7 @@ def get(self, value, default): return default def __iter__(self): return (None,).__iter__() - def subsets(self): + def subsets(self, expand_all_set_operators=None): return [ self ] def construct(self): pass diff --git a/pyomo/core/base/indexed_component.py b/pyomo/core/base/indexed_component.py index f9a7a091529..f8bf5f46953 100644 --- a/pyomo/core/base/indexed_component.py +++ b/pyomo/core/base/indexed_component.py @@ -13,6 +13,7 @@ import inspect import logging import sys +import textwrap from pyomo.core.expr.expr_errors import TemplateExpressionError from pyomo.core.expr.numvalue import native_types, NumericNDArray @@ -25,6 +26,7 @@ from pyomo.common.dependencies import numpy as np, numpy_available from pyomo.common.deprecation import deprecated, deprecation_warning from pyomo.common.modeling import NOTSET +from pyomo.common.sorting import sorted_robust from collections.abc import Sequence @@ -379,12 +381,27 @@ def __contains__(self, idx): # keys/values/items continue to work for components that implement # other definitions for __iter__ (e.g., Set) def __iter__(self): - """Return an iterator of the keys in the dictionary""" + """Return an iterator of the component data keys""" return self.keys() - def keys(self): - """Iterate over the keys in the dictionary""" + def keys(self, ordered=False): + """Return an iterator over the component data keys + This method sets the ordering of component data objects within + this IndexedComponent container. For consistency, + :py:meth:`__init__()`, :py:meth:`values`, and :py:meth:`items` + all leverage this method to ensure consistent ordering. + + Parameters + ---------- + ordered: bool + If True, then the keys are returned in a deterministic + order. If the underlying indexing set is ordered then that + ordering is used. Otherwise, the keys are sorted using + :py:func:`sorted_robust`. 
+ + """ + sort_needed = ordered if hasattr(self._index, 'isfinite') and not self._index.isfinite(): # # If the index set is virtual (e.g., Any) then return the @@ -392,14 +409,17 @@ def keys(self): # of the underlying Set, there should be no warning if the # user iterates over the set when the _data dict is empty. # - return self._data.__iter__() + ans = self._data.__iter__() elif self.is_reference(): - return self._data.__iter__() - elif len(self._data) == len(self._index): + ans = self._data.__iter__() + elif len(self) == len(self._index): # # If the data is dense then return the index iterator. # - return self._index.__iter__() + ans = self._index.__iter__() + if ordered and self._index.isordered(): + # As this iterator is ordered, we do not need to sort it + sort_needed = False else: if not self._data and self._index and PyomoOptions.paranoia_level: logger.warning( @@ -422,13 +442,14 @@ def keys(self): where it is empty. """ % (self.name,) ) - if not hasattr(self._index, 'isordered') or not self._index.isordered(): + if not hasattr(self._index, 'isordered') or \ + not self._index.isordered(): # # If the index set is not ordered, then return the # data iterator. This is in an arbitrary order, which is # fine because the data is unordered. # - return self._data.__iter__() + ans = self._data.__iter__() else: # # Test each element of a sparse data with an ordered @@ -438,19 +459,40 @@ def keys(self): # small number of indices. However, this provides a # consistent ordering that the user expects. 
# - def _sparse_iter_gen(self): - for idx in self._index.__iter__(): - if idx in self._data: - yield idx - return _sparse_iter_gen(self) - - def values(self): - """Return an iterator of the component data objects in the dictionary""" - return (self[s] for s in self.keys()) + ans = filter(self._data.__contains__, self._index) + # As the iterator is ordered, we do not need to sort it + sort_needed = False + if sort_needed: + return iter(sorted_robust(ans)) + else: + return ans - def items(self): - """Return an iterator of (index,data) tuples from the dictionary""" - return((s, self[s]) for s in self.keys()) + def values(self, ordered=False): + """Return an iterator of the component data objects + + Parameters + ---------- + ordered: bool + If True, then the values are returned in a deterministic + order. If the underlying indexing set is ordered then that + ordering is used. Otherwise, the component keys are sorted + using :py:func:`sorted_robust` and the values are returned + in that order. + """ + return map(self.__getitem__, self.keys(ordered)) + + def items(self, ordered=False): + """Return an iterator of (index,data) component data tuples + + Parameters + ---------- + ordered: bool + If True, then the items are returned in a deterministic + order. If the underlying indexing set is ordered then that + ordering is used. Otherwise, the items are sorted using + :py:func:`sorted_robust`. + """ + return((s, self[s]) for s in self.keys(ordered)) @deprecated('The iterkeys method is deprecated. 
Use dict.keys().', version='6.0') @@ -743,7 +785,7 @@ def _processUnhashableIndex(self, idx): There are three basic ways to get here: 1) the index contains one or more slices or ellipsis 2) the index contains an unhashable type (e.g., a Pyomo - (Simple)Component + (Scalar)Component 3) the index contains an IndexTemplate """ from pyomo.core.expr import current as EXPR @@ -859,6 +901,8 @@ def _processUnhashableIndex(self, idx): structurally_valid = False if slice_dim == set_dim or set_dim is None: structurally_valid = True + elif type(set_dim) is type: + pass # UnknownSetDimen elif ellipsis is not None and slice_dim < set_dim: structurally_valid = True elif set_dim == 0 and idx == (slice(None),): @@ -872,10 +916,23 @@ def _processUnhashableIndex(self, idx): structurally_valid = True if not structurally_valid: - raise IndexError( - "Index %s contains an invalid number of entries for " - "component %s. Expected %s, got %s." - % (idx, self.name, set_dim, slice_dim)) + msg = ("Index %s contains an invalid number of entries for " + "component '%s'. Expected %s, got %s.") + if type(set_dim) is type: + set_dim = set_dim.__name__ + msg += '\n ' + '\n '.join( + textwrap.wrap(textwrap.dedent(""" + Slicing components relies on knowing the + underlying set dimensionality (even if the + dimensionality is None). The underlying + component set ('%s') dimensionality has not been + determined (likely because it is an empty Set). 
+ You can avoid this error by specifying the Set + dimensionality (with the 'dimen=' keyword).""" % ( + self.index_set(), )).strip())) + raise IndexError(msg % ( + IndexedComponent_slice._getitem_args_to_str(list(idx)), + self.name, set_dim, slice_dim)) return IndexedComponent_slice(self, fixed, sliced, ellipsis) elif _found_numeric: if len(idx) == 1: diff --git a/pyomo/core/base/indexed_component_slice.py b/pyomo/core/base/indexed_component_slice.py index e77ba2202c5..67fd137bb2f 100644 --- a/pyomo/core/base/indexed_component_slice.py +++ b/pyomo/core/base/indexed_component_slice.py @@ -9,7 +9,10 @@ # ___________________________________________________________________________ import copy +import itertools + from pyomo.common import DeveloperError +from pyomo.common.collections import Sequence class IndexedComponent_slice(object): @@ -204,7 +207,7 @@ def __delitem__(self, idx): pass return None - def __call__(self, *idx, **kwds): + def __call__(self, *args, **kwds): """Special handling of the "()" operator for component slices. Creating a slice of a component returns a IndexedComponent_slice @@ -230,7 +233,7 @@ def __call__(self, *idx, **kwds): self._len -= 1 ans = IndexedComponent_slice(self, ( - IndexedComponent_slice.call, idx, kwds ) ) + IndexedComponent_slice.call, args, kwds ) ) # Because we just duplicated the slice and added a new entry, we # know that the _len == len(_call_stack) if ans._call_stack[-2][1] == 'component': @@ -240,6 +243,52 @@ def __call__(self, *idx, **kwds): # recursion in python2.6 return list( i for i in ans ) + @classmethod + def _getitem_args_to_str(cls, args): + for i, v in enumerate(args): + if v is Ellipsis: + args[i] = '...' 
+ elif type(v) is slice: + args[i] = ( + (repr(v.start) if v.start is not None else '') + ':' + + (repr(v.stop) if v.stop is not None else '') + + (':%r' % v.step if v.step is not None else '')) + else: + args[i] = repr(v) + return '[' + ', '.join(args) + ']' + + def __str__(self): + ans = '' + for level in self._call_stack: + if level[0] == IndexedComponent_slice.slice_info: + ans += level[1][0].name + tmp = dict(level[1][1]) + tmp.update(level[1][2]) + if level[1][3] is not None: + tmp[level[1][3]] = Ellipsis + ans += self._getitem_args_to_str([tmp[i] for i in sorted(tmp)]) + elif level[0] & IndexedComponent_slice.ITEM_MASK: + if isinstance(level[1], Sequence): + tmp = list(level[1]) + else: + tmp = [level[1]] + ans += self._getitem_args_to_str(tmp) + elif level[0] & IndexedComponent_slice.ATTR_MASK: + ans += '.' + level[1] + elif level[0] & IndexedComponent_slice.CALL_MASK: + ans += ( + '(' + ', '.join( + itertools.chain( + (repr(_) for _ in level[1]), + ('%s=%r' % kv for kv in level[2].items())) + ) + ')' + ) + if level[0] & IndexedComponent_slice.SET_MASK: + ans += ' = %r' % (level[2],) + elif level[0] & IndexedComponent_slice.DEL_MASK: + ans = 'del ' + ans + return ans + def __hash__(self): return hash(tuple(_freeze(x) for x in self._call_stack[:self._len])) @@ -323,7 +372,7 @@ def __init__(self, component, fixed, sliced, ellipsis, iter_over_index): or len(self.component._implicit_subsets) == 1 ) self.explicit_index_count = len(fixed) + len(sliced) - if iter_over_index: + if iter_over_index and component.index_set().isfinite(): # This should be used to iterate over all the potential # indices of a sparse IndexedComponent. 
self.component_iter = component.index_set().__iter__() diff --git a/pyomo/core/base/initializer.py b/pyomo/core/base/initializer.py index e18d322d55d..e392e52aee1 100644 --- a/pyomo/core/base/initializer.py +++ b/pyomo/core/base/initializer.py @@ -83,9 +83,9 @@ def Initializer(init, sequence_types.add(init.__class__) elif any(c.__name__ == 'Series' for c in init.__class__.__mro__): if pandas_available and isinstance(init, pandas.Series): - initializer_map[init.__class__] = ItemInitializer + sequence_types.add(init.__class__) elif any(c.__name__ == 'DataFrame' for c in init.__class__.__mro__): - if pandas_available and isinstance(init, pandas.DataFrams): + if pandas_available and isinstance(init, pandas.DataFrame): initializer_map[init.__class__] = DataFrameInitializer else: # Note: this picks up (among other things) all string instances @@ -115,6 +115,8 @@ return ScalarCallInitializer(init) else: return IndexedCallInitializer(init) + if isinstance(init, InitializerBase): + return init if isinstance(init, PyomoObject): # We re-check for PyomoObject here, as that picks up / caches # non-components like component data objects and expressions @@ -207,6 +209,31 @@ def indices(self): return range(len(self._dict)) +class DataFrameInitializer(InitializerBase): + """Initializer for a single column of a pandas DataFrame""" + __slots__ = ('_df', '_column',) + + def __init__(self, dataframe, column=None): + self._df = dataframe + if column is not None: + self._column = column + elif len(dataframe.columns) == 1: + self._column = dataframe.columns[0] + else: + raise ValueError( + "Cannot construct DataFrameInitializer for DataFrame with " + "multiple columns without also specifying the data column") + + def __call__(self, parent, idx): + return self._df.at[idx, self._column] + + def contains_indices(self): + return True + + def indices(self): + return self._df.index + + class IndexedCallInitializer(InitializerBase): """Initializer for functions and 
callable objects""" __slots__ = ('_fcn',) diff --git a/pyomo/core/base/param.py b/pyomo/core/base/param.py index edc5bc2b543..92818e88e12 100644 --- a/pyomo/core/base/param.py +++ b/pyomo/core/base/param.py @@ -268,9 +268,6 @@ def __new__(cls, *args, **kwds): def __init__(self, *args, **kwd): _init = self._pop_from_kwargs( 'Param', kwd, ('rule', 'initialize'), NOTSET) - self._rule = Initializer(_init, - treat_sequences_as_mappings=False, - arg_not_specified=NOTSET) self.domain = self._pop_from_kwargs('Param', kwd, ('domain', 'within')) if self.domain is None: self.domain = _ImplicitAny(owner=self, name='Any') @@ -287,6 +284,11 @@ def __init__(self, *args, **kwd): kwd.setdefault('ctype', Param) IndexedComponent.__init__(self, *args, **kwd) + # After IndexedComponent.__init__ so we can call is_indexed(). + self._rule = Initializer(_init, + treat_sequences_as_mappings=self.is_indexed(), + arg_not_specified=NOTSET) + def __len__(self): """ Return the number of component data objects stored by this @@ -306,14 +308,10 @@ def __contains__(self, idx): return idx in self._data return idx in self._index - def keys(self): - """ - Iterate over the keys in the dictionary. If the default value is - specified, then iterate over all keys in the component index. 
- """ - if self._default_val is Param.NoValue: - return self._data.__iter__() - return self._index.__iter__() + # We do not need to override keys(), as the __len__ override will + cause the base class keys() to correctly handle default + values + #def keys(self, ordered=False): @property def mutable(self): diff --git a/pyomo/core/base/range.py b/pyomo/core/base/range.py index 6304d97035a..922206c04c9 100644 --- a/pyomo/core/base/range.py +++ b/pyomo/core/base/range.py @@ -21,6 +21,7 @@ def remainder(a,b): return ans _inf = float('inf') +_infinite = {_inf, -_inf} class RangeDifferenceError(ValueError): pass @@ -40,9 +41,9 @@ class NumericRange(object): Parameters ---------- - start : int + start : float The starting value for this NumericRange - end : int + end : float The last value for this NumericRange step : int The interval between values in the range. 0 indicates a @@ -64,26 +65,21 @@ def __init__(self, start, end, step, closed=(True,True)): raise ValueError( "NumericRange step must be int (got %s)" % (step,)) step = int(step) - if start == -_inf: - start = None - if end == _inf: - end = None if start is None: - if step: - raise ValueError("NumericRange: start must not be None " + start = -_inf + if end is None: + end = math.copysign(_inf, step) + + if step: + if start == -_inf: + raise ValueError("NumericRange: start must not be None/-inf " "for non-continuous steps") - elif end is not None: - if step == 0 and end < start: - raise ValueError( - "NumericRange: start must be <= end for " - "continuous ranges (got %s..%s)" % (start,end) - ) - elif (end-start)*step < 0: + if (end-start)*step < 0: raise ValueError( "NumericRange: start, end ordering incompatible " "with step direction (got [%s:%s:%s])" % (start,end,step) ) - if step: + if end not in _infinite: n = int( (end - start) // step ) new_end = start + n*step assert abs(end - new_end) < abs(step) @@ -93,6 +89,11 @@ if step < 0: start, 
end = end, start step *= -1 + elif end < start: # and step == 0 + raise ValueError( + "NumericRange: start must be <= end for " + "continuous ranges (got %s..%s)" % (start,end) + ) if start == end: # If this is a scalar, we will force the step to be 0 (so that # things like [1:5:10] == [1:50:100] are easier to validate) @@ -186,21 +187,16 @@ def __contains__(self, value): if self.step: _dir = math.copysign(1, self.step) + _from_start = value - self.start return ( - (value - self.start) * math.copysign(1, self.step) >= 0 - and (self.end is None or - _dir*(self.end - self.start) >= _dir*(value - self.start)) - and abs(remainder(value - self.start, self.step)) <= self._EPS + 0 <= _dir*_from_start <= _dir*(self.end - self.start) + and abs(remainder(_from_start, self.step)) <= self._EPS ) else: return ( - self.start is None - or ( value >= self.start if self.closed[0] else - value > self.start ) + value >= self.start if self.closed[0] else value > self.start ) and ( - self.end is None - or ( value <= self.end if self.closed[1] else - value < self.end ) + value <= self.end if self.closed[1] else value < self.end ) @staticmethod @@ -209,11 +205,9 @@ def _continuous_discrete_disjoint(cont, disc): # beginning of isdisjoint() d_lb = disc.start if disc.step > 0 else disc.end d_ub = disc.end if disc.step > 0 else disc.start - if cont.start is None or ( - d_lb is not None and cont.start <= d_lb): + if cont.start <= d_lb: return False - if cont.end is None or ( - d_ub is not None and cont.end >= d_ub): + if cont.end >= d_ub: return False EPS = NumericRange._EPS @@ -234,28 +228,12 @@ def _continuous_discrete_disjoint(cont, disc): and (rStart - rEnd > 0 or not any(cont.closed)) ) - - @staticmethod - def _firstNonNull(minimize, *args): - ans = None - for x in args: - if ans is None: - ans = x - elif minimize: - if x is not None and x < ans: - ans = x - else: - if x is not None and x > ans: - ans = x - return ans - def isdiscrete(self): - return self.step != 0 or \ - (self.start == 
self.end and self.start is not None) + return self.step or self.start == self.end def isfinite(self): - return self.start is not None and self.end is not None \ - and self.isdiscrete() + return (self.step and self.end not in _infinite + ) or self.end == self.start def isdisjoint(self, other): if not isinstance(other, NumericRange): @@ -301,7 +279,7 @@ def isdisjoint(self, other): # Personally, anyone making a discrete set with a non-integer # step is asking for trouble. Maybe the better solution is to # require that the step be integer (which is what we do). - elif self.end is None and other.end is None \ + elif self.end in _infinite and other.end in _infinite \ and self.step*other.step > 0: gcd = NumericRange._gcd(self.step, other.step) return abs(remainder(other.start-self.start, gcd)) \ @@ -309,12 +287,10 @@ def isdisjoint(self, other): # OK - at this point, there are a finite number of set members # that can overlap. Just check all the members of one set # against the other - end = NumericRange._firstNonNull( - self.step > 0, - self.end, - NumericRange._firstNonNull( - self.step < 0, other.start, other.end) - ) + if self.step > 0: + end = min(self.end, max(other.start, other.end)) + else: + end = max(self.end, min(other.start, other.end)) i = 0 item = self.start while (self.step>0 and item <= end) or (self.step<0 and item >= end): @@ -334,22 +310,18 @@ def issubset(self, other): # AttributeError exceptions below # First, do a simple sanity check on the endpoints - s1, e1, c1 = self._normalize_bounds() - s2, e2, c2 = other._normalize_bounds() + s1, e1, c1 = self.normalize_bounds() + s2, e2, c2 = other.normalize_bounds() # Checks for unbounded ranges and to make sure self's endpoints are # within other's endpoints. 
- if s1 is None: - if s2 is not None: - return False - elif s2 is not None: - if s1 < s2 or ( s1 == s2 and c1[0] and not c2[0] ): - return False - if e1 is None: - if e2 is not None: - return False - elif e2 is not None: - if e1 > e2 or ( e1 == e2 and c1[1] and not c2[1] ): - return False + if s1 < s2: + return False + if e1 > e2: + return False + if s1 == s2 and c1[0] and not c2[0]: + return False + if e1 == e2 and c1[1] and not c2[1]: + return False # If other is continuous (even a single point), then by # definition, self is a subset (regardless of step) if other.step == 0: @@ -367,9 +339,9 @@ def issubset(self, other): if abs(remainder(self.step, other.step)) > EPS: return False # ...and they must shart a point in common - return abs(remainder(other.start-self.start, other.step)) <= EPS + return abs(remainder(other.start - self.start, other.step)) <= EPS - def _normalize_bounds(self): + def normalize_bounds(self): """Normalizes this NumericRange. This returns a normalized range by reversing lb and ub if the @@ -389,75 +361,15 @@ def _normalize_bounds(self): def _nooverlap(self, other): """Return True if the ranges for self and other are strictly separate - Note: a(None) == +inf and b(None) == -inf - """ - s1, e1, c1 = self._normalize_bounds() - s2, e2, c2 = other._normalize_bounds() - if e1 is not None and s2 is not None: - if e1 < s2 or ( e1 == s2 and not ( c1[1] and c2[0] )): - return True - if e2 is not None and s1 is not None: - if e2 < s1 or ( e2 == s1 and not ( c2[1] and c1[0] )): - return True - - @staticmethod - def _lt(a,b): - "Return True if a is strictly less than b, with None == -inf" - if a is None: - return b is not None - if b is None: - return False - return a < b - - @staticmethod - def _gt(a,b): - "Return True if a is strictly greater than b, with None == +inf" - if a is None: - return b is not None - if b is None: - return False - return a > b - - @staticmethod - def _min(*args): - """Modified implementation of min() with special None 
handling - - In NumericRange objects, None can represent {positive, - negative} infintiy. In the context that this method is used, - None will always be positive infinity, so None is greater than any - non-None value. - - """ - a = args[0] - for b in args[1:]: - if a is None: - a = b - elif b is None: - pass - else: - a = min(a, b) - return a - - @staticmethod - def _max(*args): - """Modified implementation of max() with special None handling - - In NumericRange objects, None can represent {positive, - negative} infintiy. In the context that this method is used, - None will always be negative infinity, so None is less than - any non-None value. - - """ - a = args[0] - for b in args[1:]: - if a is None: - a = b - elif b is None: - pass - else: - a = max(a, b) - return a + s1, e1, c1 = self.normalize_bounds() + s2, e2, c2 = other.normalize_bounds() + if ( e1 < s2 + or e2 < s1 + or ( e1 == s2 and not ( c1[1] and c2[0] )) + or ( e2 == s1 and not ( c2[1] and c1[0] )) ): + return True + return False @staticmethod def _split_ranges(cnr, new_step): @@ -484,8 +396,7 @@ def _split_ranges(cnr, new_step): _dir = math.copysign(1, cnr.step) _subranges = [] for i in range(int(abs(new_step // cnr.step))): - if ( cnr.end is not None - and _dir*(cnr.start + i*cnr.step) > _dir*cnr.end ): + if _dir*(cnr.start + i*cnr.step) > _dir*cnr.end: # Once we walk past the end of the range, we are done # (all remaining offsets will be farther past the end) break @@ -532,7 +443,7 @@ def _step_lcm(self,other_ranges): return abs(a) def _push_to_discrete_element(self, val, push_to_next_larger_value): - if val is None or not self.step: + if not self.step or val in _infinite: return val else: # self is discrete and val is a numeric value. 
Move val to @@ -602,27 +513,26 @@ def range_difference(self, other_ranges): _new_subranges.append(t) continue - t_min, t_max, t_c = t._normalize_bounds() - s_min, s_max, s_c = s._normalize_bounds() + t_min, t_max, t_c = t.normalize_bounds() + s_min, s_max, s_c = s.normalize_bounds() if s.isdiscrete() and not t.isdiscrete(): # # This handles the special case of continuous-discrete - if ((s_min is None and t.start is None) or - (s_max is None and t.end is None)): + if ((s_min == -_inf and t.start == -_inf) or + (s_max == _inf and t.end == _inf)): raise RangeDifferenceError( "We do not support subtracting an infinite " "discrete range %s from an infinite continuous " "range %s" % (s,t)) - # At least one of s_min amd t.start must be non-None - start = NumericRange._max( + # At least one of s_min and t.start must be non-inf + start = max( s_min, s._push_to_discrete_element(t.start, True)) - # At least one of s_max amd t.end must be non-None - end = NumericRange._min( - s_max, s._push_to_discrete_element(t.end, False)) + # At least one of s_max and t.end must be non-inf + end = min(s_max, s._push_to_discrete_element(t.end, False)) - if NumericRange._lt(t.start, start): + if t.start < start: _new_subranges.append(NumericRange( t.start, start, 0, (t.closed[0], False) )) @@ -631,7 +541,7 @@ def range_difference(self, other_ranges): _new_subranges.append(NumericRange( i*s.step, (i+1)*s.step, 0, '()' )) - if NumericRange._gt(t.end, end): + if t.end > end: _new_subranges.append(NumericRange( end, t.end, 0, (False,t.closed[1]) )) @@ -640,37 +550,40 @@ # This handles discrete-discrete, # continuous-continuous, and discrete-continuous # - if NumericRange._lt(t_min, s_min): - # Note that s_min will never be None due to the - # _lt test + if t_min < s_min: + # Note s_min will never be -inf due to the < test if t.step: s_min -= lcm closed1 = True - _min = NumericRange._min(t_max, s_min) + _min = min(t_max, s_min) if not t.step: closed1 = not 
s_c[0] if _min is s_min else t_c[1] _closed = ( t_c[0], closed1 ) _step = abs(t.step) _rng = t_min, _min - if t_min is None and t.step: + if t_min == -_inf and t.step: _step = -_step _rng = _rng[1], _rng[0] _closed = _closed[1], _closed[0] _new_subranges.append(NumericRange( _rng[0], _rng[1], _step, _closed)) + elif t_min == s_min and t_c[0] and not s_c[0]: + _new_subranges.append(NumericRange(t_min, t_min, 0)) - if NumericRange._gt(t_max, s_max): - # Note that s_max will never be None due to the _gt test + if t_max > s_max: + # Note s_max will never be inf due to the > test if t.step: s_max += lcm closed0 = True - _max = NumericRange._max(t_min, s_max) + _max = max(t_min, s_max) if not t.step: closed0 = not s_c[1] if _max is s_max else t_c[0] _new_subranges.append(NumericRange( _max, t_max, abs(t.step), (closed0, t_c[1]) )) + elif t_max == s_max and t_c[1] and not s_c[1]: + _new_subranges.append(NumericRange(t_max, t_max, 0)) _this = _new_subranges return _this @@ -725,16 +638,16 @@ def range_intersection(self, other_ranges): if t._nooverlap(s): continue - t_min, t_max, t_c = t._normalize_bounds() - s_min, s_max, s_c = s._normalize_bounds() + t_min, t_max, t_c = t.normalize_bounds() + s_min, s_max, s_c = s.normalize_bounds() step = abs(t.step if t.step else s.step) - intersect_start = NumericRange._max( + intersect_start = max( t._push_to_discrete_element(s_min, True), s._push_to_discrete_element(t_min, True), ) - intersect_end = NumericRange._min( + intersect_end = min( t._push_to_discrete_element(s_max, False), s._push_to_discrete_element(t_max, False), ) @@ -747,7 +660,7 @@ def range_intersection(self, other_ranges): c[1] &= t_c[1] if intersect_end == s_max: c[1] &= s_c[1] - if step and intersect_start is None: + if step and intersect_start == -_inf: ans.append(NumericRange( intersect_end, intersect_start, -step, (c[1], c[0]) )) diff --git a/pyomo/core/base/reference.py b/pyomo/core/base/reference.py index 43189333073..8918187b572 100644 --- 
a/pyomo/core/base/reference.py +++ b/pyomo/core/base/reference.py @@ -16,7 +16,7 @@ from pyomo.core.base.set import SetOf, OrderedSetOf, _SetDataBase from pyomo.core.base.component import Component, ComponentData from pyomo.core.base.indexed_component import ( - IndexedComponent, UnindexedComponent_set + IndexedComponent, UnindexedComponent_set, normalize_index ) from pyomo.core.base.indexed_component_slice import ( IndexedComponent_slice, _IndexedComponent_slice_iter @@ -199,8 +199,16 @@ def __contains__(self, key): if _iter.get_last_index_wildcards() == key: return True return False - except (StopIteration, LookupError): + except StopIteration: return False + except LookupError as e: + if normalize_index.flatten: + return False + try: + next(self._get_iter(self._slice, (key,))) + return True + except LookupError: + return False def __getitem__(self, key): try: @@ -339,10 +347,12 @@ def _get_iter(self, _slice, key, get_if_not_present=False): # This is how this object does lookups. if key.__class__ not in (tuple, list): key = (key,) + if normalize_index.flatten: + key = flatten_tuple(key) return _IndexedComponent_slice_iter( _slice, - _fill_in_known_wildcards(flatten_tuple(key), - get_if_not_present=get_if_not_present) + _fill_in_known_wildcards( + key, get_if_not_present=get_if_not_present) ) @@ -398,12 +408,17 @@ def __len__(self): def _get_iter(self, _slice, key): if key.__class__ not in (tuple, list): key = (key,) + if normalize_index.flatten: + key = flatten_tuple(key) return _IndexedComponent_slice_iter( _slice, - _fill_in_known_wildcards(flatten_tuple(key), look_in_index=True), + _fill_in_known_wildcards(key, look_in_index=True), iter_over_index=True ) + def __str__(self): + return "ReferenceSet(%s)" % (self._slice,) + def _identify_wildcard_sets(iter_stack, index): # Note that we can only _identify_wildcard_sets for a Reference if @@ -430,7 +445,8 @@ def _identify_wildcard_sets(iter_stack, index): wildcard_sets = {} # `wildcard_sets` maps position in the 
current level's # "subsets list" to its set if that set is a wildcard. - for j, s in enumerate(level.component.index_set().subsets()): + for j, s in enumerate(level.component.index_set().subsets( + expand_all_set_operators=False)): # Iterate over the sets that could possibly be wildcards if s is UnindexedComponent_set: wildcard_sets[j] = s @@ -556,7 +572,7 @@ def Reference(reference, ctype=_NotSpecified): ... >>> m.r1 = Reference(m.b[:,:].x) >>> m.r1.pprint() - r1 : Size=4, Index=r1_index + r1 : Size=4, Index=r1_index, ReferenceTo=b[:, :].x Key : Lower : Value : Upper : Fixed : Stale : Domain (1, 3) : 1 : None : 3 : False : True : Reals (1, 4) : 1 : None : 4 : False : True : Reals @@ -569,7 +585,7 @@ def Reference(reference, ctype=_NotSpecified): >>> m.r2 = Reference(m.b[:,3].x) >>> m.r2.pprint() - r2 : Size=2, Index=b_index_0 + r2 : Size=2, Index=b_index_0, ReferenceTo=b[:, 3].x Key : Lower : Value : Upper : Fixed : Stale : Domain 1 : 1 : None : 3 : False : True : Reals 2 : 2 : None : 3 : False : True : Reals @@ -579,7 +595,6 @@ def Reference(reference, ctype=_NotSpecified): .. doctest:: - >>> from pyomo.environ import * >>> m = ConcreteModel() >>> @m.Block([1,2]) ... def b(b,i): @@ -587,7 +602,7 @@ def Reference(reference, ctype=_NotSpecified): ... >>> m.r3 = Reference(m.b[:].x[:]) >>> m.r3.pprint() - r3 : Size=4, Index=r3_index + r3 : Size=4, Index=r3_index, ReferenceTo=b[:].x[:] Key : Lower : Value : Upper : Fixed : Stale : Domain (1, 3) : 1 : None : None : False : True : Reals (1, 4) : 1 : None : None : False : True : Reals @@ -620,6 +635,29 @@ def Reference(reference, ctype=_NotSpecified): _iter = iter(reference) slice_idx = [] index = None + elif isinstance(reference, ComponentData): + # Create a dummy IndexedComponent container with a "normal" + # Scalar interface. This relies on the assumption that the + # Component uses a standard storage model. 
+ _idx = next(iter(UnindexedComponent_set)) + _parent = reference.parent_component() + comp = _parent.__class__(SetOf(UnindexedComponent_set)) + comp.construct() + comp._data[_idx] = reference + # + # HACK: Set the _parent to match the ComponentData's container's + # parent so that block.clone() infers the correct block scope + # for this "hidden" component + # + # TODO: When Block supports proper "hidden" / "anonymous" + # components, switch this HACK over to that API + comp._parent = _parent._parent + # + reference = comp[...] + _data = _ReferenceDict(reference) + _iter = iter(reference) + slice_idx = [] + index = None elif isinstance(reference, Mapping): _data = _ReferenceDict_mapping(dict(reference)) _iter = _data.values() diff --git a/pyomo/core/base/set.py b/pyomo/core/base/set.py index 6443d89b803..c3dab922e7c 100644 --- a/pyomo/core/base/set.py +++ b/pyomo/core/base/set.py @@ -45,7 +45,7 @@ ) from collections.abc import Sequence - +from operator import itemgetter logger = logging.getLogger('pyomo.core') @@ -557,35 +557,27 @@ def ranges(self): def bounds(self): try: - _bnds = list((r.start, r.end) if r.step >= 0 else (r.end, r.start) - for r in self.ranges()) + _bnds = [(r.start, r.end) if r.step >= 0 else (r.end, r.start) + for r in self.ranges()] except AttributeError: return None, None - if not _bnds: + + if len(_bnds) == 1: + lb, ub = _bnds[0] + elif not _bnds: return None, None + else: + lb = min(_bnds, key=itemgetter(0))[0] + ub = max(_bnds, key=itemgetter(1))[1] - lb, ub = _bnds.pop() - for _lb, _ub in _bnds: - if lb is not None: - if _lb is None: - lb = None - if ub is None: - break - else: - lb = min(lb, _lb) - if ub is not None: - if _ub is None: - ub = None - if lb is None: - break - else: - ub = max(ub, _ub) - if lb is not None: - if int(lb) == lb: - lb = int(lb) - if ub is not None: - if int(ub) == ub: - ub = int(ub) + if lb == -_inf: + lb = None + elif int(lb) == lb: + lb = int(lb) + if ub == _inf: + ub = None + elif int(ub) == ub: + ub = 
int(ub) return lb, ub def get_interval(self): @@ -609,11 +601,19 @@ def get_interval(self): def _get_discrete_interval(self): # # Note: I'd like to use set() for ranges, since we will be - # randomly removing elelments from the list; however, since we + # randomly removing elements from the list; however, since we # do it by enumerating over ranges, using set() would make this # routine nondeterministic. Not a huge issue for the result, # but problemmatic for code coverage. ranges = list(self.ranges()) + if len(ranges) == 1: + start, end, c = ranges[0].normalize_bounds() + return ( + None if start == -_inf else start, + None if end == _inf else end, + abs(ranges[0].step), + ) + try: step = min(abs(r.step) for r in ranges if r.step != 0) except ValueError: @@ -661,39 +661,35 @@ def _get_discrete_interval(self): else: rend, rstart = r.start, r.end if not r.step or abs(r.step) == step: - if ( start is None or rend is None or - start <= rend+step ) and ( - end is None or rstart is None or - rstart <= end+step ): + if start <= rend+step and rstart <= end+step: ranges[i] = None - if rstart is None: - start = None - elif start is not None and start > rstart: + if start > rstart: start = rstart - if rend is None: - end = None - elif end is not None and end < rend: + if end < rend: end = rend else: # The range has a step bigger than the base # interval we are building. For us to absorb # it, it has to be contained within the current # interval +/- step. 
- if (start is None or ( rstart is not None and - start <= rstart + step ))\ - and (end is None or ( rend is not None and - end >= rend - step )): + if start <= rstart + step and end >= rend - step: ranges[i] = None - if start is not None and start > rstart: + if start > rstart: start = rstart - if end is not None and end < rend: + if end < rend: end = rend ranges = list(_ for _ in ranges if _ is not None) _rlen = len(ranges) if ranges: return self.bounds() + (None,) - return (start, end, step) + # Note: while unbounded NumericRanges are -inf..inf, Pyomo + # Sets are None..None + return ( + None if start == -_inf else start, + None if end == _inf else end, + step, + ) def _get_continuous_interval(self): @@ -722,6 +718,14 @@ def _get_continuous_interval(self): ranges.append( NumericRange(r.start, r.end, r.step, r.closed)) + if len(ranges) == 1 and not discrete: + r = ranges[0] + return ( + None if r.start == -_inf else r.start, + None if r.end == _inf else r.end, + abs(r.step), + ) + # There is a particular edge case where we could get 2 disjoint # continuous ranges that are joined by a discrete range... 
When # we encounter an open range, check to see if the endpoint is @@ -752,20 +756,17 @@ def _get_continuous_interval(self): continue # r and interval overlap: merge r into interval ranges[i] = None - if r.start is None: - interval.start = None - interval.closed = (True, interval.closed[1]) - elif interval.start is not None \ - and r.start < interval.start: + if r.start < interval.start: interval.start = r.start interval.closed = (r.closed[0], interval.closed[1]) + elif not interval.closed[0] and r.start == interval.start: + interval.closed = (r.closed[0], interval.closed[1]) - if r.end is None: - interval.end = None - interval.closed = (interval.closed[0], True) - elif interval.end is not None and r.end > interval.end: + if r.end > interval.end: interval.end = r.end interval.closed = (interval.closed[0], r.closed[1]) + elif not interval.closed[1] and r.end == interval.end: + interval.closed = (interval.closed[0], r.closed[1]) ranges = list(_ for _ in ranges if _ is not None) _rlen = len(ranges) @@ -777,7 +778,13 @@ def _get_continuous_interval(self): # The discrete range extends outside the continuous # interval return self.bounds() + (None,) - return (interval.start, interval.end, interval.step) + start = interval.start + if start == -_inf: + start = None + end = interval.end + if end == _inf: + end = None + return (start, end, interval.step) @property @deprecated("The 'virtual' attribute is no longer supported", version='5.7') @@ -1139,13 +1146,10 @@ def ordered_data(self): def bounds(self): try: - lb = min(self) + lb = min(self, default=None) + ub = max(self, default=None) except: - lb = None - try: - ub = max(self) - except: - ub = None + lb = ub = None # Python2/3 consistency: We will follow the Python3 convention # and not assume numeric/nonnumeric types are comparable. 
If a # set is mixed non-numeric type, then we will report the bounds @@ -2274,16 +2278,24 @@ class AbstractSortedSimpleSet(metaclass=RenamedClass): ############################################################################ -class SetOf(_FiniteSetMixin, _SetData, Component): +class SetOf(_SetData, Component): """""" def __new__(cls, *args, **kwds): if cls is not SetOf: return super(SetOf, cls).__new__(cls) reference, = args - if isinstance(reference, (tuple, list)): + if isinstance(reference, _SetData): + if reference.isfinite(): + if reference.isordered(): + return super(SetOf, cls).__new__(OrderedSetOf) + else: + return super(SetOf, cls).__new__(FiniteSetOf) + else: + return super(SetOf, cls).__new__(InfiniteSetOf) + if isinstance(reference, Sequence): return super(SetOf, cls).__new__(OrderedSetOf) else: - return super(SetOf, cls).__new__(UnorderedSetOf) + return super(SetOf, cls).__new__(FiniteSetOf) def __init__(self, reference, **kwds): _SetData.__init__(self, component=self) @@ -2291,24 +2303,6 @@ def __init__(self, reference, **kwds): Component.__init__(self, **kwds) self._ref = reference - def get(self, value, default=None): - # Note that the efficiency of this depends on the reference object - # - # The bulk of single-value set members were stored as scalars. - # Check that first. 
- if value.__class__ is tuple and len(value) == 1: - if value[0] in self._ref: - return value[0] - if value in self._ref: - return value - return default - - def __len__(self): - return len(self._ref) - - def _iter_impl(self): - return iter(self._ref) - def __str__(self): if self.parent_block() is not None: return self.name @@ -2326,6 +2320,8 @@ def construct(self, data=None): @property def dimen(self): + if isinstance(self._ref, _SetData): + return self._ref.dimen _iter = iter(self) try: x = next(_iter) @@ -2360,10 +2356,39 @@ def _pprint(self): str(v._ref), ]) -class UnorderedSetOf(SetOf): - pass -class OrderedSetOf(_ScalarOrderedSetMixin, _OrderedSetMixin, SetOf): +class InfiniteSetOf(SetOf): + def ranges(self): + # InfiniteSetOf references are assumed to implement the Set API + return self._ref.ranges() + + +class FiniteSetOf(_FiniteSetMixin, SetOf): + def get(self, value, default=None): + # Note that the efficiency of this depends on the reference object + # + # The bulk of single-value set members were stored as scalars. + # Check that first. 
+ if value.__class__ is tuple and len(value) == 1: + if value[0] in self._ref: + return value[0] + if value in self._ref: + return value + return default + + def __len__(self): + return len(self._ref) + + def _iter_impl(self): + return iter(self._ref) + + +class UnorderedSetOf(metaclass=RenamedClass): + __renamed__new_class__ = FiniteSetOf + __renamed__version__ = 'TBD' + + +class OrderedSetOf(_ScalarOrderedSetMixin, _OrderedSetMixin, FiniteSetOf): def at(self, index): i = self._to_0_based_index(index) try: diff --git a/pyomo/core/base/units_container.py b/pyomo/core/base/units_container.py index 0a7f52e4d4a..7afbae3d943 100644 --- a/pyomo/core/base/units_container.py +++ b/pyomo/core/base/units_container.py @@ -111,7 +111,11 @@ import sys from pyomo.common.dependencies import attempt_import -from pyomo.core.expr.numvalue import NumericValue, nonpyomo_leaf_types, value, native_types, native_numeric_types, pyomo_constant_types +from pyomo.common.modeling import NOTSET +from pyomo.core.expr.numvalue import ( + NumericValue, nonpyomo_leaf_types, value, native_types, + native_numeric_types, pyomo_constant_types, +) from pyomo.core.expr.template_expr import IndexTemplate from pyomo.core.expr import current as EXPR @@ -366,7 +370,7 @@ def __str__(self): # delta temperatures). 
So that things work cleanly in Python 2 # and 3, we will generate the string as unicode, then explicitly # encode it to UTF-8 in Python 2 - retstr = u'{:!~C}'.format(self._pint_unit) + retstr = u'{:~C}'.format(self._pint_unit) if retstr == '': retstr = 'dimensionless' return retstr @@ -426,9 +430,9 @@ def pprint(self, ostream=None, verbose=False): ostream.write(str(self)) # There is also a long form, but the verbose flag is not really the correct indicator # if verbose: - # ostream.write('{:!s}'.format(self._pint_unit)) + # ostream.write('{:s}'.format(self._pint_unit)) # else: - # ostream.write('{:!~s}'.format(self._pint_unit)) + # ostream.write('{:~s}'.format(self._pint_unit)) class PintUnitExtractionVisitor(EXPR.StreamBasedExpressionVisitor): @@ -500,51 +504,6 @@ def _get_unit_for_equivalent_children(self, node, child_units): # checks were OK, return the first one in the list return pint_unit_0 - def _get_unit_for_linear_expression(self, node, child_units): - """ - Return (and test) the units corresponding to a :py:class:`LinearExpression` node - in the expression tree. - - This is a special node since it does not use "args" the way - other expression types do. Because of this, the StreamBasedExpressionVisitor - does not pick up on the "children", and child_units is empty. - Therefore, we implement the recursion into coeffs and vars ourselves. - - Parameters - ---------- - node : Pyomo expression node - The parent node of the children - - child_units : list - This is a list of pint units (one for each of the children) - - Returns - ------- - : pint unit - """ - # StreamBasedExpressionVisitor does not handle the children of this node - assert not child_units - - # TODO: This may be expensive for long summations and, in the - # case of reporting only, we may want to skip the checks - term_unit_list = [] - if node.constant not in { 0. 
}: - # we have a non-zero constant term, get its units - term_unit_list.append( - self._pyomo_units_container._get_pint_units(node.constant)) - - # go through the coefficients and variables - assert len(node.linear_coefs) == len(node.linear_vars) - for k,v in enumerate(node.linear_vars): - c = node.linear_coefs[k] - v_units = self._pyomo_units_container._get_pint_units(v) - c_units = self._pyomo_units_container._get_pint_units(c) - term_unit_list.append(c_units*v_units) - - assert term_unit_list - - return self._get_unit_for_equivalent_children(node, term_unit_list) - def _get_unit_for_product(self, node, child_units): """ Return (and test) the units corresponding to a product expression node @@ -928,7 +887,7 @@ def _get_unit_sqrt(self, node, child_units): EXPR.GetItemExpression: _get_dimensionless_with_dimensionless_children, EXPR.ExternalFunctionExpression: _get_units_ExternalFunction, EXPR.NPV_ExternalFunctionExpression: _get_units_ExternalFunction, - EXPR.LinearExpression: _get_unit_for_linear_expression + EXPR.LinearExpression: _get_unit_for_equivalent_children, } unary_function_method_map = { @@ -1010,11 +969,15 @@ class PyomoUnitsContainer(object): on the class until they are requested. 
""" - def __init__(self): + def __init__(self, pint_registry=NOTSET): """Create a PyomoUnitsContainer instance.""" - self._pint_registry = pint_module.UnitRegistry() - self._pint_dimensionless = self._pint_registry.dimensionless - + if pint_registry is NOTSET: + pint_registry = pint_module.UnitRegistry() + self._pint_registry = pint_registry + if pint_registry is None: + self._pint_dimensionless = None + else: + self._pint_dimensionless = self._pint_registry.dimensionless def load_definitions_from_file(self, definition_file): """Load new units definitions from a file @@ -1035,7 +998,7 @@ def load_definitions_from_file(self, definition_file): :skipif: not pint_available :hide: - # get a local units object (to avoid duplicate registration + # Get a local units object (to avoid duplicate registration # with the example in load_definitions_from_strings) >>> import pyomo.core.base.units_container as _units >>> u = _units.PyomoUnitsContainer() @@ -1049,6 +1012,14 @@ def load_definitions_from_file(self, definition_file): >>> print(u.USD) USD + .. 
doctest:: + :skipif: not pint_available + :hide: + + # Clean up the file we just created + >>> import os + >>> os.remove('my_additional_units.txt') + """ self._pint_registry.load_definitions(definition_file) self._pint_dimensionless = self._pint_registry.dimensionless @@ -1070,6 +1041,7 @@ def load_definitions_from_strings(self, definition_string_list): # get a local units object (to avoid duplicate registration # with the example in load_definitions_from_strings) + >>> import pint >>> import pyomo.core.base.units_container as _units >>> u = _units.PyomoUnitsContainer() @@ -1382,6 +1354,22 @@ def convert_value(self, num_value, from_units=None, to_units=None): to_quantity = from_quantity.to(to_pint_unit) return to_quantity.magnitude + def set_pint_registry(self, pint_registry): + if pint_registry is self._pint_registry: + return + if self._pint_registry is not None: + logger.warning( + "Changing the pint registry used by the Pyomo Units " + "system after the PyomoUnitsContainer was constructed. " + "Pint requires that all units and dimensioned quantities " + "are generated by a single pint registry.") + self._pint_registry = pint_registry + self._pint_dimensionless = self._pint_registry.dimensionless + + @property + def pint_registry(self): + return self._pint_registry + class _DeferredUnitsSingleton(PyomoUnitsContainer): """A class supporting deferred interrogation of pint_available. @@ -1401,9 +1389,22 @@ def __init__(self): pass def __getattribute__(self, attr): + # Note that this methos will only be called ONCE: either pint is + # present, at which point this instance __class__ will fall back + # to PyomoUnitsContainer (where this method is not declared, OR + # pint is not available and an ImportError will be raised. if pint_available: + # If the first thing that is being called is + # "units.set_pint_registry(...)", then we will call __init__ + # with None so that the subsequent call to set_pint_registry + # will work cleanly. 
In all other cases, we will initialize + # PyomoUnitsContainer with a new (default) pint registry. + if attr == 'set_pint_registry': + pint_registry = None + else: + pint_registry = pint_module.UnitRegistry() self.__class__ = PyomoUnitsContainer - self.__init__() + self.__init__(pint_registry) return getattr(self, attr) else: # Generate the ImportError diff --git a/pyomo/core/base/var.py b/pyomo/core/base/var.py index 383a25d9863..74d808babaf 100644 --- a/pyomo/core/base/var.py +++ b/pyomo/core/base/var.py @@ -290,17 +290,6 @@ def unfix(self): free=unfix - def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): - """Return the component name""" - if self.fixed and compute_values: - try: - return str(self()) - except: - pass - if smap: - return smap.getSymbol(self, labeler) - return self.name - class _GeneralVarData(_VarData): """ diff --git a/pyomo/core/expr/boolean_value.py b/pyomo/core/expr/boolean_value.py index 96c56d60229..c495788d1ce 100644 --- a/pyomo/core/expr/boolean_value.py +++ b/pyomo/core/expr/boolean_value.py @@ -181,7 +181,7 @@ def to_string(self, verbose=None, labeler=None, smap=None, Returns: A string representation for the expression tree. """ - if compute_values: + if compute_values and self.is_fixed(): try: return str(self()) except: @@ -191,7 +191,7 @@ def to_string(self, verbose=None, labeler=None, smap=None, return smap.getSymbol(self, labeler) elif labeler is not None: return labeler(self) - return self.__str__() + return str(self) class BooleanConstant(BooleanValue): diff --git a/pyomo/core/expr/calculus/diff_with_pyomo.py b/pyomo/core/expr/calculus/diff_with_pyomo.py index cb4d12182fe..00e43a92926 100644 --- a/pyomo/core/expr/calculus/diff_with_pyomo.py +++ b/pyomo/core/expr/calculus/diff_with_pyomo.py @@ -8,7 +8,7 @@ # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ -from pyomo.common.collections import ComponentMap +from pyomo.common.collections import ComponentMap, ComponentSet from pyomo.core.expr import current as _expr from pyomo.core.expr.visitor import ExpressionValueVisitor, nonpyomo_leaf_types from pyomo.core.expr.numvalue import value @@ -63,15 +63,6 @@ def _diff_SumExpression(node, val_dict, der_dict): der_dict[arg] += der -def _diff_LinearExpression(node, val_dict, der_dict): - der = der_dict[node] - for ndx, v in enumerate(node.linear_vars): - coef = node.linear_coefs[ndx] - der_dict[v] += der * val_dict[coef] - der_dict[coef] += der * val_dict[v] - - der_dict[node.constant] += der - def _diff_PowExpression(node, val_dict, der_dict): """ @@ -318,6 +309,19 @@ def _diff_UnaryFunctionExpression(node, val_dict, der_dict): raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(node))) +def _diff_GeneralExpression(node, val_dict, der_dict): + """ + Reverse automatic differentiation for named expressions. 
+ + Parameters + ---------- + node: The named expression + val_dict: ComponentMap + der_dict: ComponentMap + """ + der_dict[node.expr] += der_dict[node] + + def _diff_ExternalFunctionExpression(node, val_dict, der_dict): """ @@ -344,7 +348,7 @@ def _diff_ExternalFunctionExpression(node, val_dict, der_dict): _diff_map[_expr.NegationExpression] = _diff_NegationExpression _diff_map[_expr.UnaryFunctionExpression] = _diff_UnaryFunctionExpression _diff_map[_expr.ExternalFunctionExpression] = _diff_ExternalFunctionExpression -_diff_map[_expr.LinearExpression] = _diff_LinearExpression +_diff_map[_expr.LinearExpression] = _diff_SumExpression _diff_map[_expr.NPV_ProductExpression] = _diff_ProductExpression _diff_map[_expr.NPV_DivisionExpression] = _diff_DivisionExpression @@ -356,51 +360,20 @@ def _diff_ExternalFunctionExpression(node, val_dict, der_dict): _diff_map[_expr.NPV_ExternalFunctionExpression] = _diff_ExternalFunctionExpression -class _NamedExpressionCollector(ExpressionValueVisitor): - def __init__(self): - self.named_expressions = list() +def _symbolic_value(x): + return x - def visit(self, node, values): - return None - def visiting_potential_leaf(self, node): - if node.__class__ in nonpyomo_leaf_types: - return True, None - - if not node.is_expression_type(): - return True, None +def _numeric_apply_operation(node, values): + return node._apply_operation(values) - if node.is_named_expression_type(): - self.named_expressions.append(node) - return False, None - return False, None +def _symbolic_apply_operation(node, values): + return node -def _collect_ordered_named_expressions(expr): - """ - The purpose of this function is to collect named expressions in a - particular order. The order is very important. In the resulting - list each named expression can only appear once, and any named - expressions that are used in other named expressions have to come - after the named expression that use them. 
- """ - visitor = _NamedExpressionCollector() - visitor.dfs_postorder_stack(expr) - named_expressions = visitor.named_expressions - seen = set() - res = list() - for e in reversed(named_expressions): - if id(e) in seen: - continue - seen.add(id(e)) - res.append(e) - res = list(reversed(res)) - return res - - -class _ReverseADVisitorLeafToRoot(ExpressionValueVisitor): - def __init__(self, val_dict, der_dict): +class _LeafToRootVisitor(ExpressionValueVisitor): + def __init__(self, val_dict, der_dict, expr_list, numeric=True): """ Parameters ---------- @@ -409,75 +382,64 @@ def __init__(self, val_dict, der_dict): """ self.val_dict = val_dict self.der_dict = der_dict + self.expr_list = expr_list + assert len(self.expr_list) == 0 + assert len(self.val_dict) == 0 + assert len(self.der_dict) == 0 + if numeric: + self.value_func = value + self.operation_func = _numeric_apply_operation + else: + self.value_func = _symbolic_value + self.operation_func = _symbolic_apply_operation def visit(self, node, values): - self.val_dict[node] = node._apply_operation(values) + self.val_dict[node] = self.operation_func(node, values) self.der_dict[node] = 0 + self.expr_list.append(node) return self.val_dict[node] def visiting_potential_leaf(self, node): + if node in self.val_dict: + return True, self.val_dict[node] + if node.__class__ in nonpyomo_leaf_types: self.val_dict[node] = node - if node not in self.der_dict: - self.der_dict[node] = 0 + self.der_dict[node] = 0 return True, node - if node.__class__ is _expr.LinearExpression: - for v in node.linear_vars + node.linear_coefs + [node.constant]: - val = value(v) - self.val_dict[v] = val - if v not in self.der_dict: - self.der_dict[v] = 0 - val = value(node) - self.val_dict[node] = val - if node not in self.der_dict: - self.der_dict[node] = 0 - return True, val - if not node.is_expression_type(): - val = value(node) + val = self.value_func(node) self.val_dict[node] = val - if node not in self.der_dict: - self.der_dict[node] = 0 + 
self.der_dict[node] = 0 return True, val return False, None -class _ReverseADVisitorRootToLeaf(ExpressionValueVisitor): - def __init__(self, val_dict, der_dict): - """ - Parameters - ---------- - val_dict: ComponentMap - der_dict: ComponentMap - """ - self.val_dict = val_dict - self.der_dict = der_dict - - def visit(self, node, values): - pass - - def visiting_potential_leaf(self, node): - if node.__class__ in nonpyomo_leaf_types: - return True, None - - if not node.is_expression_type(): - return True, None +def _reverse_diff_helper(expr, numeric=True): + val_dict = ComponentMap() + der_dict = ComponentMap() + expr_list = list() - if node.is_named_expression_type(): - return True, None + visitorA = _LeafToRootVisitor(val_dict, der_dict, expr_list, numeric=numeric) + visitorA.dfs_postorder_stack(expr) - if node.__class__ in _diff_map: - _diff_map[node.__class__](node, self.val_dict, self.der_dict) - return False, None + der_dict[expr] = 1 + for e in reversed(expr_list): + if e.__class__ in _diff_map: + _diff_map[e.__class__](e, val_dict, der_dict) + elif e.is_named_expression_type(): + _diff_GeneralExpression(e, val_dict, der_dict) else: - raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(node))) + raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(e))) + + return der_dict def reverse_ad(expr): """ - First order reverse ad + First order reverse automatic differentiation Parameters ---------- @@ -490,101 +452,12 @@ def reverse_ad(expr): component_map mapping variables to derivatives with respect to the corresponding variable """ - val_dict = ComponentMap() - der_dict = ComponentMap() - - visitorA = _ReverseADVisitorLeafToRoot(val_dict, der_dict) - visitorA.dfs_postorder_stack(expr) - named_expressions = _collect_ordered_named_expressions(expr) - der_dict[expr] = 1 - visitorB = _ReverseADVisitorRootToLeaf(val_dict, der_dict) - visitorB.dfs_postorder_stack(expr) - for 
named_expr in named_expressions: - der_dict[named_expr.expr] = der_dict[named_expr] - visitorB.dfs_postorder_stack(named_expr.expr) - - return der_dict - - -class _ReverseSDVisitorLeafToRoot(ExpressionValueVisitor): - def __init__(self, val_dict, der_dict): - """ - Parameters - ---------- - val_dict: ComponentMap - der_dict: ComponentMap - """ - self.val_dict = val_dict - self.der_dict = der_dict - - def visit(self, node, values): - self.val_dict[node] = node.create_node_with_local_data(tuple(values)) - self.der_dict[node] = 0 - return self.val_dict[node] - - def visiting_potential_leaf(self, node): - if node.__class__ in nonpyomo_leaf_types: - self.val_dict[node] = node - if node not in self.der_dict: - self.der_dict[node] = 0 - return True, node - - if node.__class__ is _expr.LinearExpression: - for v in node.linear_vars + node.linear_coefs + [node.constant]: - val = v - self.val_dict[v] = val - if v not in self.der_dict: - self.der_dict[v] = 0 - val = node - self.val_dict[node] = val - if node not in self.der_dict: - self.der_dict[node] = 0 - return True, val - - if not node.is_expression_type(): - val = node - self.val_dict[node] = val - if node not in self.der_dict: - self.der_dict[node] = 0 - return True, val - - return False, None - - -class _ReverseSDVisitorRootToLeaf(ExpressionValueVisitor): - def __init__(self, val_dict, der_dict): - """ - Parameters - ---------- - val_dict: ComponentMap - der_dict: ComponentMap - """ - self.val_dict = val_dict - self.der_dict = der_dict - - def visit(self, node, values): - pass - - def visiting_potential_leaf(self, node): - if node.__class__ in nonpyomo_leaf_types: - return True, None - - if not node.is_expression_type(): - return True, None - - if node.is_named_expression_type(): - return True, None - - if node.__class__ in _diff_map: - _diff_map[node.__class__](node, self.val_dict, self.der_dict) - return False, None - else: - raise DifferentiationException('Unsupported expression type for differentiation: 
{0}'.format(type(node))) + return _reverse_diff_helper(expr, True) def reverse_sd(expr): """ - First order reverse ad + First order reverse symbolic differentiation Parameters ---------- @@ -597,17 +470,4 @@ def reverse_sd(expr): component_map mapping variables to derivatives with respect to the corresponding variable """ - val_dict = ComponentMap() - der_dict = ComponentMap() - - visitorA = _ReverseSDVisitorLeafToRoot(val_dict, der_dict) - visitorA.dfs_postorder_stack(expr) - named_expressions = _collect_ordered_named_expressions(expr) - der_dict[expr] = 1 - visitorB = _ReverseSDVisitorRootToLeaf(val_dict, der_dict) - visitorB.dfs_postorder_stack(expr) - for named_expr in named_expressions: - der_dict[named_expr.expr] = der_dict[named_expr] - visitorB.dfs_postorder_stack(named_expr.expr) - - return der_dict + return _reverse_diff_helper(expr, False) diff --git a/pyomo/core/expr/numeric_expr.py b/pyomo/core/expr/numeric_expr.py index 86b636c9d91..5dd645fec09 100644 --- a/pyomo/core/expr/numeric_expr.py +++ b/pyomo/core/expr/numeric_expr.py @@ -12,6 +12,7 @@ import math import logging +from operator import attrgetter from itertools import islice logger = logging.getLogger('pyomo.core') @@ -234,7 +235,8 @@ def __str__(self): """ return expression_to_string(self) - def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): + def to_string(self, verbose=None, labeler=None, smap=None, + compute_values=False): """ Return a string representation of the expression tree. @@ -245,8 +247,9 @@ def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False) Defaults to :const:`False`. labeler: An object that generates string labels for variables in the expression tree. Defaults to :const:`None`. - smap: If specified, this :class:`SymbolMap ` is - used to cache labels for variables. + smap: If specified, this + :class:`SymbolMap ` + is used to cache labels for variables. 
compute_values (bool): If :const:`True`, then parameters and fixed variables are evaluated before the expression string is generated. Default is :const:`False`. @@ -254,7 +257,8 @@ def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False) Returns: A string representation for the expression tree. """ - return expression_to_string(self, verbose=verbose, labeler=labeler, smap=smap, compute_values=compute_values) + return expression_to_string(self, verbose=verbose, labeler=labeler, + smap=smap, compute_values=compute_values) def _precedence(self): return ExpressionBase.PRECEDENCE @@ -270,7 +274,7 @@ def _associativity(self): # Most operators in Python are left-to-right associative return 1 - def _to_string(self, values, verbose, smap, compute_values): #pragma: no cover + def _to_string(self, values, verbose, smap, compute_values): #pragma: no cover """ Construct a string representation for this node, using the string representations of its children. @@ -295,7 +299,9 @@ def _to_string(self, values, verbose, smap, compute_values): #pragma: Returns: A string representation for this node. """ - pass + raise NotImplementedError( + "Derived expression (%s) failed to implement _to_string()" + % ( str(self.__class__), )) def getname(self, *args, **kwds): #pragma: no cover """ @@ -335,7 +341,7 @@ def clone(self, substitute=None): """ return clone_expression(self, substitute=substitute) - def create_node_with_local_data(self, args): + def create_node_with_local_data(self, args, classtype=None): """ Construct a node using given arguments. @@ -351,17 +357,14 @@ def create_node_with_local_data(self, args): Args: args (list): A list of child nodes for the new expression object - memo (dict): A dictionary that maps object ids to clone - objects generated earlier during a cloning process. - This argument is needed to clone objects that are - owned by a model, and it can be safely ignored for - most expression classes. 
Returns: A new expression object with the same type as the current class. """ - return self.__class__(args) + if classtype is None: + classtype = self.__class__ + return classtype(args) def create_potentially_variable_object(self): """ @@ -542,6 +545,33 @@ def _apply_operation(self, result): #pragma: no cover "implement _apply_operation()" % ( str(self.__class__), )) +class NPV_Mixin(object): + __slots__ = () + + def is_potentially_variable(self): + return False + + def create_node_with_local_data(self, args, classtype=None): + assert classtype is None + try: + npv_args = all( + type(arg) in native_types or not arg.is_potentially_variable() + for arg in args + ) + except AttributeError: + # We can hit this during expression replacement when the new + # type is not a PyomoObject type, but is not in the + # native_types set. We will play it safe and clear the NPV flag + npv_args = False + if npv_args: + return super().create_node_with_local_data(args, None) + else: + cls = list(self.__class__.__bases__) + cls.remove(NPV_Mixin) + assert len(cls) == 1 + return super().create_node_with_local_data(args, cls[0]) + + class NegationExpression(ExpressionBase): """ Negation expressions:: @@ -580,12 +610,9 @@ def _apply_operation(self, result): return -result[0] -class NPV_NegationExpression(NegationExpression): +class NPV_NegationExpression(NPV_Mixin, NegationExpression): __slots__ = () - def is_potentially_variable(self): - return False - class ExternalFunctionExpression(ExpressionBase): """ @@ -611,8 +638,10 @@ def __init__(self, args, fcn=None): def nargs(self): return len(self._args_) - def create_node_with_local_data(self, args): - return self.__class__(args, self._fcn) + def create_node_with_local_data(self, args, classtype=None): + if classtype is None: + classtype = self.__class__ + return classtype(args, self._fcn) def __getstate__(self): state = super(ExternalFunctionExpression, self).__getstate__() @@ -640,12 +669,9 @@ def get_units(self): """ Get the units of the 
return value for this external function """ return self._fcn.get_units() -class NPV_ExternalFunctionExpression(ExternalFunctionExpression): +class NPV_ExternalFunctionExpression(NPV_Mixin, ExternalFunctionExpression): __slots__ = () - def is_potentially_variable(self): - return False - class PowExpression(ExpressionBase): """ @@ -710,12 +736,9 @@ def _to_string(self, values, verbose, smap, compute_values): return "{0}**{1}".format(values[0], values[1]) -class NPV_PowExpression(PowExpression): +class NPV_PowExpression(NPV_Mixin, PowExpression): __slots__ = () - def is_potentially_variable(self): - return False - class ProductExpression(ExpressionBase): """ @@ -771,12 +794,9 @@ def _to_string(self, values, verbose, smap, compute_values): _to_string.minus_one = {"-1", "-1.0", "(-1)", "(-1.0)"} -class NPV_ProductExpression(ProductExpression): +class NPV_ProductExpression(NPV_Mixin, ProductExpression): __slots__ = () - def is_potentially_variable(self): - return False - class MonomialTermExpression(ProductExpression): __slots__ = () @@ -784,6 +804,24 @@ class MonomialTermExpression(ProductExpression): def getname(self, *args, **kwds): return 'mon' + def create_node_with_local_data(self, args, classtype=None): + if classtype is None: + # If this doesn't look like a MonomialTermExpression, then + # fall back on the expression generation system to sort out + # what the appropriate return type is. 
+ try: + if not (args[0].__class__ in native_types + or not args[0].is_potentially_variable()): + return args[0] * args[1] + elif (args[1].__class__ in native_types + or not args[1].is_variable_type()): + return args[0] * args[1] + except AttributeError: + # Fall back on general expression generation + return args[0] * args[1] + return self.__class__(args) + + class DivisionExpression(ExpressionBase): """ Division expressions:: @@ -816,12 +854,9 @@ def _apply_operation(self, result): return result[0] / result[1] -class NPV_DivisionExpression(DivisionExpression): +class NPV_DivisionExpression(NPV_Mixin, DivisionExpression): __slots__ = () - def is_potentially_variable(self): - return False - @deprecated("Use DivisionExpression", version='5.6.7') class ReciprocalExpression(ExpressionBase): @@ -862,12 +897,9 @@ def _apply_operation(self, result): return 1 / result[0] -class NPV_ReciprocalExpression(ReciprocalExpression): +class NPV_ReciprocalExpression(NPV_Mixin, ReciprocalExpression): __slots__ = () - def is_potentially_variable(self): - return False - class _LinearOperatorExpression(ExpressionBase): """ @@ -980,8 +1012,8 @@ def _precedence(self): def _apply_operation(self, result): return sum(result) - def create_node_with_local_data(self, args): - return self.__class__(list(args)) + def create_node_with_local_data(self, args, classtype=None): + return super().create_node_with_local_data(list(args), classtype) def __getstate__(self): state = super(SumExpression, self).__getstate__() @@ -1154,7 +1186,7 @@ class UnaryFunctionExpression(ExpressionBase): __slots__ = ('_fcn', '_name') def __init__(self, args, name=None, fcn=None): - if not type(args) is tuple: + if type(args) is not tuple: args = (args,) self._args_ = args self._name = name @@ -1163,8 +1195,10 @@ def __init__(self, args, name=None, fcn=None): def nargs(self): return 1 - def create_node_with_local_data(self, args): - return self.__class__(args, self._name, self._fcn) + def 
create_node_with_local_data(self, args, classtype=None): + if classtype is None: + classtype = self.__class__ + return classtype(args, self._name, self._fcn) def __getstate__(self): state = super(UnaryFunctionExpression, self).__getstate__() @@ -1215,16 +1249,10 @@ class AbsExpression(UnaryFunctionExpression): def __init__(self, arg): super(AbsExpression, self).__init__(arg, 'abs', abs) - def create_node_with_local_data(self, args): - return self.__class__(args) - -class NPV_AbsExpression(AbsExpression): +class NPV_AbsExpression(NPV_Mixin, AbsExpression): __slots__ = () - def is_potentially_variable(self): - return False - class LinearExpression(ExpressionBase): """ @@ -1233,54 +1261,106 @@ class LinearExpression(ExpressionBase): Args: args (tuple): Children nodes """ - __slots__ = ('constant', # The constant term - 'linear_coefs', # Linear coefficients - 'linear_vars') # Linear variables + __slots__ = ( + 'constant', # The constant term + 'linear_coefs', # Linear coefficients + 'linear_vars', # Linear variables + '_args_cache_', + ) PRECEDENCE = 6 def __init__(self, args=None, constant=None, linear_coefs=None, linear_vars=None): - """ - Build a linear expression object that stores the constant, as well as - coefficients and variables to represent const + sum_i(c_i*x_i) - - You can specify args OR (constant, linear_coefs, and linear_vars) - If args is provided, it should be a list that contains the constant, - followed by the coefficients, followed by the variables. - - Alternatively, you can specify the constant, the list of linear_coeffs - and the list of linear_vars separately. Note that these lists are NOT - copied. + """A linear expression of the form `const + sum_i(c_i*x_i). + + You can specify args OR (constant, linear_coefs, and + linear_vars). If args is provided, it should be a list that + contains the constant, followed by a series of + :py:class:`MonomialTermExpression` objects. 
Alternatively, you + can specify the constant, the list of linear_coeffs and the list + of linear_vars separately. Note that these lists are NOT copied. + """ # I am not sure why LinearExpression allows omitting args, but - # it does. If they are provided, they should be the constant - # followed by the coefficients followed by the variables. + # it does. If they are provided, they should be the (non-zero) + # constant followed by MonomialTermExpressions. if args: - self.constant = args[0] - n = (len(args)-1) // 2 - self.linear_coefs = args[1:n+1] - self.linear_vars = args[n+1:] + if any(arg is not None for arg in + (constant, linear_coefs, linear_vars)): + raise ValueError("Cannot specify both args and any of " + "{constant, linear_coeffs, or linear_vars}") + if len(args) > 1 and (args[1].__class__ in native_types + or not args[1].is_potentially_variable()): + deprecation_warning( + "LinearExpression has been updated to expect args= to " + "be a constant followed by MonomialTermExpressions. 
" + "The older format (`[const, coefficient_1, ..., " + "variable_1, ...]`) is deprecated.", version='TBD') + args = args[:1] + list(map( + MonomialTermExpression, + zip(args[1:1+len(args)//2], args[1+len(args)//2:]))) + self._args_ = args else: self.constant = constant if constant is not None else 0 self.linear_coefs = linear_coefs if linear_coefs else [] self.linear_vars = linear_vars if linear_vars else [] - - self._args_ = tuple() + self._args_cache_ = [] def nargs(self): - return 0 + return len(self.linear_vars) + ( + 0 if (self.constant is None + or (self.constant.__class__ in native_numeric_types + and not self.constant)) else 1 + ) + + @property + def _args_(self): + nargs = self.nargs() + if len(self._args_cache_) != nargs: + if len(self.linear_vars) == nargs: + self._args_cache_ = [] + else: + self._args_cache_ = [self.constant] + self._args_cache_.extend( + map(MonomialTermExpression, + zip(self.linear_coefs, self.linear_vars))) + elif len(self.linear_vars) != nargs: + self._args_cache_[0] = self.constant + return self._args_cache_ + + @_args_.setter + def _args_(self, value): + self._args_cache_ = list(value) + if self._args_cache_[0].__class__ is not MonomialTermExpression: + self.constant = value[0] + first_var = 1 + else: + self.constant = 0 + first_var = 0 + self.linear_coefs, self.linear_vars = zip( + *map(attrgetter('args'), value[first_var:])) + self.linear_coefs = list(self.linear_coefs) + self.linear_vars = list(self.linear_vars) def _precedence(self): return LinearExpression.PRECEDENCE - def __getstate__(self): - state = super(LinearExpression, self).__getstate__() - for i in LinearExpression.__slots__: - state[i] = getattr(self,i) - return state - - def create_node_with_local_data(self, args): - return self.__class__(args) + # __getstate__ is not needed, as while we are defining local slots, + # all the data in the slot is redundant to the information already + # being pickled through the base class _args_ attribute. 
+ + def create_node_with_local_data(self, args, classtype=None): + if classtype is None: + if not args: + classtype = self.__class__ + elif ( args[0].__class__ is MonomialTermExpression or + (args[0].__class__ in native_types or args[0].is_constant() + ) and all(arg.__class__ is MonomialTermExpression + for arg in args[1:])): + classtype = self.__class__ + else: + classtype = SumExpression + return classtype(args) def getname(self, *args, **kwds): return 'sum' @@ -1298,69 +1378,24 @@ def is_fixed(self): return self._is_fixed() def _to_string(self, values, verbose, smap, compute_values): - tmp = [] - if compute_values: - const_ = value(self.constant) - if not isclose(const_,0): - tmp = [str(const_)] - elif self.constant.__class__ in native_numeric_types: - if not isclose(self.constant, 0): - tmp = [str(self.constant)] - else: - tmp = [self.constant.to_string(compute_values=False)] + if not values: + values = ['0'] if verbose: - for c,v in zip(self.linear_coefs, self.linear_vars): - if smap: # TODO: coverage - v_ = smap.getSymbol(v) - else: - v_ = str(v) - if c.__class__ in native_numeric_types or compute_values: - c_ = value(c) - if isclose(c_,1): - tmp.append(str(v_)) - elif isclose(c_,0): - continue - else: - tmp.append("prod(%s, %s)" % (str(c_),str(v_))) - else: - tmp.append("prod(%s, %s)" % (str(c), v_)) - return "{0}({1})".format(self.getname(), ', '.join(tmp)) - for c,v in zip(self.linear_coefs, self.linear_vars): - if smap: - v_ = smap.getSymbol(v) - else: - v_ = str(v) - if c.__class__ in native_numeric_types or compute_values: - c_ = value(c) - if isclose(c_,1): - tmp.append(" + %s" % v_) - elif isclose(c_,0): - continue - elif isclose(c_,-1): - tmp.append(" - %s" % v_) - elif c_ < 0: - tmp.append(" - %s*%s" % (str(math.fabs(c_)), v_)) - else: - tmp.append(" + %s*%s" % (str(c_), v_)) - else: - c_str = str(c) - if any(_ in c_str for _ in '+-*/'): - c_str = '('+c_str+')' - tmp.append(" + %s*%s" % (c_str, v_)) - s = "".join(tmp) - if len(s) == 0: #pragma: no 
cover - return s - if s[0] == " ": - if s[1] == "+": - return s[3:] - return s[1:] - return s + return "%s(%s)" % (self.getname(), ', '.join(values)) + + for i in range(1, len(values)): + term = values[i] + if term[0] not in '+-': + values[i] = '+ ' + term + elif term[1] != ' ': + values[i] = term[0] + ' ' + term[1:] + return ' '.join(values) def is_potentially_variable(self): return len(self.linear_vars) > 0 def _apply_operation(self, result): - return value(self.constant) + sum(value(c)*v.value for c,v in zip(self.linear_coefs, self.linear_vars)) + return sum(result) #@profile def _combine_expr(self, etype, _other): diff --git a/pyomo/core/expr/numvalue.py b/pyomo/core/expr/numvalue.py index 009b3f961fe..d3f581c8352 100644 --- a/pyomo/core/expr/numvalue.py +++ b/pyomo/core/expr/numvalue.py @@ -890,17 +890,17 @@ def to_string(self, verbose=None, labeler=None, smap=None, Returns: A string representation for the expression tree. """ - if compute_values: + if compute_values and self.is_fixed(): try: return str(self()) except: pass if not self.is_constant(): - if smap: + if smap is not None: return smap.getSymbol(self, labeler) elif labeler is not None: return labeler(self) - return self.__str__() + return str(self) class NumericConstant(NumericValue): diff --git a/pyomo/core/expr/template_expr.py b/pyomo/core/expr/template_expr.py index 1d2aac22cf1..86b5db6e94e 100644 --- a/pyomo/core/expr/template_expr.py +++ b/pyomo/core/expr/template_expr.py @@ -337,7 +337,9 @@ def _to_string(self, values, verbose, smap, compute_values): val = val[1:-1] iterStrGenerator = ( ( ', '.join(str(i) for i in iterGroup), - iterGroup[0]._set.to_string(verbose=verbose) ) + ( iterGroup[0]._set.to_string(verbose=verbose) + if hasattr(iterGroup[0]._set, 'to_string') + else str(iterGroup[0]._set) ) ) for iterGroup in self._iters ) if verbose: @@ -456,9 +458,6 @@ def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): _set_name += "(%s)" % (self._index,) return 
"{"+_set_name+"}" - def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): - return self.name - def set_value(self, values=_NotSpecified, lock=None): # It might be nice to check if the value is valid for the base # set, but things are tricky when the base set is not dimention @@ -527,21 +526,21 @@ def exitNode(node, args): class ReplaceTemplateExpression(ExpressionReplacementVisitor): + template_types = {GetItemExpression, IndexTemplate} - def __init__(self, substituter, *args): - super(ReplaceTemplateExpression, self).__init__() + def __init__(self, substituter, *args, **kwargs): + kwargs.setdefault('remove_named_expressions', True) + super().__init__(**kwargs) self.substituter = substituter self.substituter_args = args - def visiting_potential_leaf(self, node): - if type(node) is GetItemExpression or type(node) is IndexTemplate: - return True, self.substituter(node, *self.substituter_args) - - return super( - ReplaceTemplateExpression, self).visiting_potential_leaf(node) + def beforeChild(self, node, child, child_idx): + if type(child) in ReplaceTemplateExpression.template_types: + return False, self.substituter(child, *self.substituter_args) + return super().beforeChild(node, child, child_idx) -def substitute_template_expression(expr, substituter, *args): +def substitute_template_expression(expr, substituter, *args, **kwargs): """Substitute IndexTemplates in an expression tree. 
This is a general utility function for walking the expression tree @@ -556,8 +555,8 @@ def substitute_template_expression(expr, substituter, *args): Returns: a new expression tree with all substitutions done """ - visitor = ReplaceTemplateExpression(substituter, *args) - return visitor.dfs_postorder_stack(expr) + visitor = ReplaceTemplateExpression(substituter, *args, **kwargs) + return visitor.walk_expression(expr) class _GetItemIndexer(object): diff --git a/pyomo/core/expr/visitor.py b/pyomo/core/expr/visitor.py index d54b8d9a2d5..037f39c8c4b 100644 --- a/pyomo/core/expr/visitor.py +++ b/pyomo/core/expr/visitor.py @@ -20,9 +20,11 @@ from .symbol_map import SymbolMap from . import expr_common as common from .expr_errors import TemplateExpressionError -from pyomo.common.deprecation import deprecation_warning +from pyomo.common.deprecation import deprecated, deprecation_warning +from pyomo.common.errors import DeveloperError from pyomo.core.expr.numvalue import ( nonpyomo_leaf_types, + native_types, native_numeric_types, value,) @@ -595,10 +597,11 @@ def dfs_postorder_stack(self, node): else: return self.finalize(ans) + def replace_expressions(expr, substitution_map, descend_into_named_expressions=True, - remove_named_expressions=False): + remove_named_expressions=True): """ Parameters @@ -618,250 +621,96 @@ def replace_expressions(expr, ------- Pyomo expression : returns the new expression object """ - new_expr = ExpressionReplacementVisitor( - substitute=substitution_map, - descend_into_named_expressions=descend_into_named_expressions, - remove_named_expressions=remove_named_expressions - ).dfs_postorder_stack(expr) - return new_expr - + return ExpressionReplacementVisitor( + substitute=substitution_map, + descend_into_named_expressions=descend_into_named_expressions, + remove_named_expressions=remove_named_expressions, + ).walk_expression(expr) -class ExpressionReplacementVisitor(object): - """ - Note: - This class is a customization of the PyUtilib 
:class:`ValueVisitor - ` class that is tailored - to support replacement of sub-trees in a Pyomo expression - tree. However, this class is not a subclass of the PyUtilib - :class:`ValueVisitor ` - class because all key methods are reimplemented. - """ +class ExpressionReplacementVisitor(StreamBasedExpressionVisitor): def __init__(self, substitute=None, descend_into_named_expressions=True, - remove_named_expressions=False): - """ - Contruct a visitor that is tailored to support the - replacement of sub-trees in a pyomo expression tree. - - Args: - memo (dict): A dictionary mapping object ids to - objects. This dictionary has the same semantics as - the memo object used with ``copy.deepcopy``. Defaults - to None, which indicates that no user-defined - dictionary is used. - """ + remove_named_expressions=True): + if substitute is None: + substitute = {} + # Note: preserving the attribute names from the previous + # implementation of the expression walker. + self.substitute = substitute self.enter_named_expr = descend_into_named_expressions self.rm_named_expr = remove_named_expressions - if substitute is None: - self.substitute = {} - else: - self.substitute = substitute - def visit(self, node, values): - """ - Visit and clone nodes that have been expanded. - - Note: - This method normally does not need to be re-defined - by a user. - - Args: - node: The node that will be cloned. - values (list): The list of child nodes that have been - cloned. These values are used to define the - cloned node. - - Returns: - The cloned node. Default is to simply return the node. - """ + kwds = {} + if hasattr(self, 'visiting_potential_leaf'): + deprecation_warning( + "ExpressionReplacementVisitor: this walker has been ported " + "to derive from StreamBasedExpressionVisitor. 
" + "visiting_potential_leaf() has been replaced by beforeChild()" + "(note to implementers: the sense of the bool return value " + "has been inverted).", version='TBD') + def beforeChild(node, child, child_idx): + is_leaf, ans = self.visiting_potential_leaf(child) + return not is_leaf, ans + kwds['beforeChild'] = beforeChild + + if hasattr(self, 'visit'): + raise DeveloperError( + "ExpressionReplacementVisitor: this walker has been ported " + "to derive from StreamBasedExpressionVisitor. " + "overriding visit() has no effect (and is likely to generate " + "invalid expression trees)") + super().__init__(**kwds) + + def initializeWalker(self, expr): + walk, result = self.beforeChild(None, expr, 0) + if not walk: + return False, result + return True, expr + + def beforeChild(self, node, child, child_idx): + if id(child) in self.substitute: + return False, self.substitute[id(child)] + elif type(child) in native_types: + return False, child + elif not child.is_expression_type(): + return False, child + elif child.is_named_expression_type(): + if not self.enter_named_expr: + return False, child + return True, None + + def enterNode(self, node): + args = list(node.args) + return args, [False, args] + + def acceptChildResult(self, node, data, child_result, child_idx): + if data[1][child_idx] is not child_result: + data[1][child_idx] = child_result + data[0] = True + return data + + def exitNode(self, node, data): + if node.is_named_expression_type(): + assert len(data[1]) == 1 + if self.rm_named_expr: + return data[1][0] + elif data[0]: + node.set_value(data[1][0]) + return node + elif data[0]: + return node.create_node_with_local_data(tuple(data[1])) return node - def visiting_potential_leaf(self, node): #pragma: no cover - """ - Visit a node and return a cloned node if it is a leaf. - - Note: - This method needs to be over-written for a specific - visitor application. - - Args: - node: a node in a tree - - Returns: - A tuple: ``(flag, value)``. 
If ``flag`` is False, - then the node is not a leaf and ``value`` is :const:`None`. - Otherwise, ``value`` is a cloned node. - """ - _id = id(node) - if _id in self.substitute: - return True, self.substitute[_id] - elif type(node) in nonpyomo_leaf_types or not node.is_expression_type(): - return True, node - elif not self.enter_named_expr and node.is_named_expression_type(): - return True, node - else: - return False, None - - def finalize(self, ans): - """ - This method defines the return value for the search methods - in this class. - - The default implementation returns the value of the - initial node (aka the root node), because - this visitor pattern computes and returns value for each - node to enable the computation of this value. - - Args: - ans: The final value computed by the search method. - - Returns: - The final value after the search. Defaults to simply - returning :attr:`ans`. - """ - return ans - - def construct_node(self, node, values): - """ - Call the expression create_node_with_local_data() method. - """ - return node.create_node_with_local_data( tuple(values) ) - - def dfs_postorder_stack(self, node): - """ - Perform a depth-first search in postorder using a stack - implementation. - - This method replaces subtrees. This method detects if the - :func:`visit` method returns a different object. If so, then - the node has been replaced and search process is adapted - to replace all subsequent parent nodes in the tree. - - Note: - This method has the same functionality as the - PyUtilib :class:`ValueVisitor.dfs_postorder_stack ` - method that is tailored to support the - replacement of sub-trees in a Pyomo expression tree. - - Args: - node: The root node of the expression tree - that is searched. + @deprecated( + "ExpressionReplacementVisitor: this walker has been ported " + "to derive from StreamBasedExpressionVisitor. 
" + "dfs_postorder_stack() has been replaced with walk_expression()", + version='TBD') + def dfs_postorder_stack(self, expr): + return self.walk_expression(expr) - Returns: - The return value is determined by the :func:`finalize` function, - which may be defined by the user. - """ - if node.__class__ is LinearExpression: - _argList = [node.constant] + node.linear_coefs + node.linear_vars - _len = len(_argList) - _stack = [ (node, _argList, 0, _len, [False])] - else: - flag, value = self.visiting_potential_leaf(node) - if flag: - return value - _stack = [ (node, node._args_, 0, node.nargs(), [False])] - # - # Iterate until the stack is empty - # - # Note: 1 is faster than True for Python 2.x - # - while 1: - # - # Get the top of the stack - # _obj Current expression object - # _argList The arguments for this expression objet - # _idx The current argument being considered - # _len The number of arguments - # _result The 'dirty' flag followed by return values - # - _obj, _argList, _idx, _len, _result = _stack.pop() - # - # Iterate through the arguments, entering each one - # - while _idx < _len: - _sub = _argList[_idx] - _idx += 1 - flag, value = self.visiting_potential_leaf(_sub) - if flag: - if id(value) != id(_sub): - _result[0] = True - _result.append( value ) - else: - # - # Push an expression onto the stack - # - _stack.append( (_obj, _argList, _idx, _len, _result) ) - _obj = _sub - _idx = 0 - _result = [False] - if _sub.__class__ is LinearExpression: - _argList = [_sub.constant] + _sub.linear_coefs \ - + _sub.linear_vars - _len = len(_argList) - else: - _argList = _sub._args_ - _len = _sub.nargs() - # - # Finalize (exit) the current node - # - # If the user has defined a visit() function in a - # subclass, then call that function. But if the user - # hasn't created a new class and we need to, then - # call the ExpressionReplacementVisitor.visit() function. 
- # - ans = self.visit(_obj, _result[1:]) - if ans.is_named_expression_type(): - if self.rm_named_expr: - ans = _result[1] - _result[0] = True - else: - _result[0] = False - assert(len(_result) == 2) - ans.expr = _result[1] - elif _result[0]: - if ans.__class__ is LinearExpression: - ans = _result[1] - nterms = (len(_result)-2)//2 - for i in range(nterms): - ans += _result[2+i]*_result[2+i+nterms] - if id(ans) == id(_obj): - ans = self.construct_node(_obj, _result[1:]) - if ans.__class__ is MonomialTermExpression: - # CDL This code wass trying to determine if we needed to change the MonomialTermExpression - # to a ProductExpression, but it fails for the case of a MonomialExpression - # that has its rhs Var replaced with another MonomialExpression (and might - # fail for other cases as well. - # Rather than trying to update the logic to catch all cases, I am choosing - # to execute the actual product operator code instead to ensure things are - # consistent - # See WalkerTests.test_replace_expressions_with_monomial_term in test_expr_pyomo5.py - # to see the behavior - # if ( ( ans._args_[0].__class__ not in native_numeric_types - # and ans._args_[0].is_potentially_variable ) - # or - # ( ans._args_[1].__class__ in native_numeric_types - # or not ans._args_[1].is_potentially_variable() ) ): - # ans.__class__ = ProductExpression - ans = ans._args_[0] * ans._args_[1] - elif ans.__class__ in NPV_expression_types: - # For simplicity, not-potentially-variable expressions are - # replaced with their potentially variable counterparts. 
- ans = ans.create_potentially_variable_object() - elif id(ans) != id(_obj): - _result[0] = True - if _stack: - if _result[0]: - _stack[-1][-1][0] = True - # - # "return" the recursion by putting the return value on - # the end of the results stack - # - _stack[-1][-1].append( ans ) - else: - return self.finalize(ans) #------------------------------------------------------- @@ -896,7 +745,7 @@ def clone_expression(expr, substitute=None): clone_counter._count += 1 memo = {'__block_scope__': {id(None): False}} if substitute: - memo.update(substitute) + expr = replace_expressions(expr, substitute) return deepcopy(expr, memo) @@ -957,8 +806,6 @@ def visiting_potential_leaf(self, node): return True, node - - class FixedExpressionError(Exception): def __init__(self, *args, **kwds): @@ -1355,13 +1202,12 @@ def visiting_potential_leaf(self, node): if node.is_expression_type(): return False, None - if node.is_variable_type(): - if not node.fixed: - return True, node.to_string(verbose=self.verbose, smap=self.smap, compute_values=False) - return True, node.to_string(verbose=self.verbose, smap=self.smap, compute_values=self.compute_values) - if hasattr(node, 'to_string'): - return True, node.to_string(verbose=self.verbose, smap=self.smap, compute_values=self.compute_values) + return True, node.to_string( + verbose=self.verbose, + smap=self.smap, + compute_values=self.compute_values + ) else: return True, str(node) diff --git a/pyomo/core/plugins/transform/logical_to_linear.py b/pyomo/core/plugins/transform/logical_to_linear.py index 2b427522433..8aac3c0988e 100644 --- a/pyomo/core/plugins/transform/logical_to_linear.py +++ b/pyomo/core/plugins/transform/logical_to_linear.py @@ -1,53 +1,217 @@ -"""Transformation from BooleanVar and LogicalConstraint to Binary and Constraints.""" +"""Transformation from BooleanVar and LogicalConstraint to Binary and +Constraints.""" from pyomo.common.collections import ComponentMap from pyomo.common.modeling import unique_component_name +from 
pyomo.common.config import ConfigBlock, ConfigValue from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr -from pyomo.core import TransformationFactory, BooleanVar, VarList, Binary, LogicalConstraint, Block, ConstraintList, \ - native_types, BooleanVarList +from pyomo.core import (TransformationFactory, BooleanVar, VarList, Binary, + LogicalConstraint, Block, ConstraintList, native_types, + BooleanVarList) +from pyomo.core.base.block import _BlockData +from pyomo.core.base.boolean_var import ( + _DeprecatedImplicitAssociatedBinaryVariable) from pyomo.core.expr.cnf_walker import to_cnf -from pyomo.core.expr.logical_expr import AndExpression, OrExpression, NotExpression, AtLeastExpression, \ - AtMostExpression, ExactlyExpression, special_boolean_atom_types, EqualityExpression, InequalityExpression, \ - RangedExpression +from pyomo.core.expr.logical_expr import (AndExpression, OrExpression, + NotExpression, AtLeastExpression, + AtMostExpression, ExactlyExpression, + special_boolean_atom_types, + EqualityExpression, + InequalityExpression, + RangedExpression) from pyomo.core.expr.numvalue import native_logical_types, value from pyomo.core.expr.visitor import StreamBasedExpressionVisitor +from pyomo.core.expr.current import identify_variables from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation -from pyomo.gdp import Disjunct +from pyomo.core.util import target_list - -@TransformationFactory.register("core.logical_to_linear", doc="Convert logic to linear constraints") +@TransformationFactory.register("core.logical_to_linear", + doc="Convert logic to linear constraints") class LogicalToLinear(IsomorphicTransformation): """ Re-encode logical constraints as linear constraints, converting Boolean variables to binary. 
""" + CONFIG = ConfigBlock('core.logical_to_linear') + CONFIG.declare('targets', ConfigValue( + default=None, + domain=target_list, + description="target or list of targets that will be relaxed", + doc=""" + This specifies the list of LogicalConstraints to transform, or the + list of Blocks or Disjuncts on which to transform all of the + LogicalConstraints. Note that if the transformation is done out + of place, the list of targets should be attached to the model before it + is cloned, and the list will specify the targets on the cloned + instance. + """ + )) def _apply_to(self, model, **kwds): - for boolean_var in model.component_objects(ctype=BooleanVar, descend_into=(Block, Disjunct)): - new_varlist = None - for bool_vardata in boolean_var.values(): - if new_varlist is None and bool_vardata.get_associated_binary() is None: - new_var_list_name = unique_component_name(model, boolean_var.local_name + '_asbinary') - new_varlist = VarList(domain=Binary) - setattr(model, new_var_list_name, new_varlist) - - if bool_vardata.get_associated_binary() is None: - new_binary_vardata = new_varlist.add() - bool_vardata.associate_binary_var(new_binary_vardata) - if bool_vardata.value is not None: - new_binary_vardata.value = int(bool_vardata.value) - if bool_vardata.fixed: - new_binary_vardata.fix() - - # Process statements in global (entire model) context - _process_logical_constraints_in_logical_context(model) - # Process statements that appear in disjuncts - for disjunct in model.component_data_objects(Disjunct, descend_into=(Block, Disjunct), active=True): - _process_logical_constraints_in_logical_context(disjunct) + config = self.CONFIG(kwds.pop('options', {})) + config.set_value(kwds) + targets = config.targets + if targets is None: + targets = (model, ) + + new_var_lists = ComponentMap() + transBlocks = {} + for t in targets: + # If the user promises that the target is Block-like, we will go + # with it. 
Note, however, that they can only use targets for + # this--when we go searching for stuff to transform we will only + # look on Blocks. And yes, this means we are ignoring Disjuncts. We + # are in fact ignoring all GDP components because this + # transformation is a promise only to transform LogicalConstraints + # and the relevant BooleanVars, not to create an algebraic + # model. (We are making this decision largely because having this + # transformation do anything to GDP stuff is an assumption on how + # the GDP will be solved, and it would be wrong to assume that a GDP + # will *necessarily* be solved as an algebraic model. The star + # example of not doing so being GDPopt.) + if t.ctype is Block or isinstance(t, _BlockData): + self._transform_block(t, model, new_var_lists, transBlocks) + elif t.ctype is LogicalConstraint: + if t.is_indexed(): + self._transform_constraint(t, new_var_lists, transBlocks) + else: + self._transform_constraintData(t, new_var_lists, + transBlocks) + else: + raise RuntimeError("Target '%s' was not a Block, Disjunct, or" + " LogicalConstraint. It was of type %s " + "and can't be transformed." % (t.name, + type(t))) + + def _transform_boolean_varData(self, bool_vardata, new_varlists): + # This transformation tries to group the binaries it creates for indexed + # BooleanVars onto the same VarList. This won't work across separate + # calls to the transformation, but within one call it's fine. 
So we have + # two cases: 1) either we have created a VarList for this + # BooleanVarData's parent_component, but have yet to add its binary to + # said list, or 2) we have neither the binary nor the VarList + + parent_component = bool_vardata.parent_component() + new_varlist = new_varlists.get(parent_component) + if new_varlist is None and \ + bool_vardata.get_associated_binary() is None: + # Case 2) we have neither the VarList nor an associated binary + parent_block = bool_vardata.parent_block() + new_var_list_name = unique_component_name( + parent_block, + parent_component.local_name + '_asbinary') + new_varlist = VarList(domain=Binary) + setattr(parent_block, new_var_list_name, new_varlist) + new_varlists[parent_component] = new_varlist + + if bool_vardata.get_associated_binary() is None: + # Case 1) we already have a VarList, but need to create the + # associated binary + new_binary_vardata = new_varlist.add() + bool_vardata.associate_binary_var(new_binary_vardata) + if bool_vardata.value is not None: + new_binary_vardata.value = int(bool_vardata.value) + if bool_vardata.fixed: + new_binary_vardata.fix() + + def _transform_constraint(self, constraint, new_varlists, transBlocks): + for i in constraint.keys(ordered=True): + self._transform_constraintData(constraint[i], new_varlists, + transBlocks) + + def _transform_block(self, target_block, model, new_varlists, transBlocks): + for logical_constraint in target_block.component_data_objects( + ctype=LogicalConstraint, active=True, + descend_into=Block): + self._transform_constraintData(logical_constraint, new_varlists, + transBlocks) + + # This can go away when we deprecate this transformation transforming + # BooleanVars. This just marks the BooleanVars as "seen" so that if + # someone asks for their binary var later, we can create it on the fly + # and complain. 
+ for bool_vardata in target_block.component_data_objects( + BooleanVar, descend_into=Block): + if bool_vardata._associated_binary is None: + bool_vardata._associated_binary = \ + _DeprecatedImplicitAssociatedBinaryVariable( + bool_vardata) + + def _transform_constraintData(self, logical_constraint, new_varlists, + transBlocks): + # first find all the relevant BooleanVars and associate a binary (if + # they don't have one already) + for bool_vardata in identify_variables(logical_constraint.expr): + if bool_vardata.ctype is BooleanVar: + self._transform_boolean_varData(bool_vardata, new_varlists) + + # now create a transformation block on the constraint's parent block (if + # we don't have one already) + parent_block = logical_constraint.parent_block() + xfrm_block = transBlocks.get(parent_block) + if xfrm_block is None: + xfrm_block = self._create_transformation_block(parent_block) + transBlocks[parent_block] = xfrm_block + new_constrlist = xfrm_block.transformed_constraints + new_boolvarlist = xfrm_block.augmented_vars + new_varlist = xfrm_block.augmented_vars_asbinary + + old_boolvarlist_length = len(new_boolvarlist) + + indicator_map = ComponentMap() + cnf_statements = to_cnf(logical_constraint.body, new_boolvarlist, + indicator_map) + logical_constraint.deactivate() + # Associate new Boolean vars to new binary variables + num_new = len(new_boolvarlist) - old_boolvarlist_length + list_o_vars = list(new_boolvarlist.values()) + if num_new: + for bool_vardata in list_o_vars[-num_new:]: + new_binary_vardata = new_varlist.add() + bool_vardata.associate_binary_var(new_binary_vardata) + + # Add constraints associated with each CNF statement + for cnf_statement in cnf_statements: + for linear_constraint in _cnf_to_linear_constraint_list( + cnf_statement): + new_constrlist.add(expr=linear_constraint) + + # Add bigM associated with special atoms + # Note: this ad-hoc reformulation may be revisited for tightness in the + # future. 
+ old_varlist_length = len(new_varlist) + for indicator_var, special_atom in indicator_map.items(): + for linear_constraint in _cnf_to_linear_constraint_list( + special_atom, + indicator_var, + new_varlist): + new_constrlist.add(expr=linear_constraint) + + # Previous step may have added auxiliary binaries. Associate augmented + # Booleans to them. + num_new = len(new_varlist) - old_varlist_length + list_o_vars = list(new_varlist.values()) + if num_new: + for binary_vardata in list_o_vars[-num_new:]: + new_bool_vardata = new_boolvarlist.add() + new_bool_vardata.associate_binary_var(binary_vardata) + + def _create_transformation_block(self, context): + new_xfrm_block_name = unique_component_name(context, 'logic_to_linear') + new_xfrm_block = Block(doc="Transformation objects for logic_to_linear") + setattr(context, new_xfrm_block_name, new_xfrm_block) + + new_xfrm_block.transformed_constraints = ConstraintList() + new_xfrm_block.augmented_vars = BooleanVarList() + new_xfrm_block.augmented_vars_asbinary = VarList( domain=Binary) + + return new_xfrm_block def update_boolean_vars_from_binary(model, integer_tolerance=1e-5): - """Updates all Boolean variables based on the value of their linked binary variables.""" - for boolean_var in model.component_data_objects(BooleanVar, descend_into=(Block, Disjunct)): + """Updates all Boolean variables based on the value of their linked binary + variables.""" + for boolean_var in model.component_data_objects(BooleanVar, + descend_into=Block): binary_var = boolean_var.get_associated_binary() if binary_var is not None and binary_var.value is not None: if abs(binary_var.value - 1) <= integer_tolerance: @@ -55,86 +219,31 @@ def update_boolean_vars_from_binary(model, integer_tolerance=1e-5): elif abs(binary_var.value) <= integer_tolerance: boolean_var.value = False else: - raise ValueError("Binary variable has non-{0,1} value: %s = %s" % (binary_var.name, binary_var.value)) + raise ValueError("Binary variable has non-{0,1} value: " + "%s 
= %s" % (binary_var.name, + binary_var.value)) boolean_var.stale = binary_var.stale - -def _process_logical_constraints_in_logical_context(context): - new_xfrm_block_name = unique_component_name(context, 'logic_to_linear') - new_xfrm_block = Block(doc="Transformation objects for logic_to_linear") - setattr(context, new_xfrm_block_name, new_xfrm_block) - - new_constrlist = new_xfrm_block.transformed_constraints = ConstraintList() - new_boolvarlist = new_xfrm_block.augmented_vars = BooleanVarList() - new_varlist = new_xfrm_block.augmented_vars_asbinary = VarList(domain=Binary) - - indicator_map = ComponentMap() - cnf_statements = [] - # Convert all logical constraints to CNF - for logical_constraint in context.component_data_objects(ctype=LogicalConstraint, active=True): - cnf_statements.extend(to_cnf(logical_constraint.body, new_boolvarlist, indicator_map)) - logical_constraint.deactivate() - - # Associate new Boolean vars to new binary variables - for bool_vardata in new_boolvarlist.values(): - new_binary_vardata = new_varlist.add() - bool_vardata.associate_binary_var(new_binary_vardata) - - # Add constraints associated with each CNF statement - for cnf_statement in cnf_statements: - for linear_constraint in _cnf_to_linear_constraint_list(cnf_statement): - new_constrlist.add(expr=linear_constraint) - - # Add bigM associated with special atoms - # Note: this ad-hoc reformulation may be revisited for tightness in the future. - old_varlist_length = len(new_varlist) - for indicator_var, special_atom in indicator_map.items(): - for linear_constraint in _cnf_to_linear_constraint_list(special_atom, indicator_var, new_varlist): - new_constrlist.add(expr=linear_constraint) - - # Previous step may have added auxiliary binaries. Associate augmented Booleans to them. 
- num_new = len(new_varlist) - old_varlist_length - list_o_vars = list(new_varlist.values()) - if num_new: - for binary_vardata in list_o_vars[-num_new:]: - new_bool_vardata = new_boolvarlist.add() - new_bool_vardata.associate_binary_var(binary_vardata) - - # If added components were not used, remove them. - # Note: it is ok to simply delete the index_set for these components, because by - # default, a new set object is generated for each [Thing]List. - if len(new_constrlist) == 0: - new_xfrm_block.del_component(new_constrlist.index_set()) - new_xfrm_block.del_component(new_constrlist) - if len(new_boolvarlist) == 0: - new_xfrm_block.del_component(new_boolvarlist.index_set()) - new_xfrm_block.del_component(new_boolvarlist) - if len(new_varlist) == 0: - new_xfrm_block.del_component(new_varlist.index_set()) - new_xfrm_block.del_component(new_varlist) - - # If block was entirely unused, remove it - if all(len(l) == 0 for l in (new_constrlist, new_boolvarlist, new_varlist)): - context.del_component(new_xfrm_block) - - -def _cnf_to_linear_constraint_list(cnf_expr, indicator_var=None, binary_varlist=None): +def _cnf_to_linear_constraint_list(cnf_expr, indicator_var=None, + binary_varlist=None): # Screen for constants if type(cnf_expr) in native_types or cnf_expr.is_constant(): if value(cnf_expr) is True: return [] else: raise ValueError( - "Cannot build linear constraint for logical expression with constant value False: %s" + "Cannot build linear constraint for logical expression with " + "constant value False: %s" % cnf_expr) if cnf_expr.is_expression_type(): - return CnfToLinearVisitor(indicator_var, binary_varlist).walk_expression(cnf_expr) + return CnfToLinearVisitor(indicator_var, binary_varlist).\ + walk_expression(cnf_expr) else: - return [cnf_expr.get_associated_binary() == 1] # Assume that cnf_expr is a BooleanVar - - -_numeric_relational_types = {InequalityExpression, EqualityExpression, RangedExpression} + return [cnf_expr.get_associated_binary() == 1] # 
Assume that cnf_expr + # is a BooleanVar +_numeric_relational_types = {InequalityExpression, EqualityExpression, + RangedExpression} class CnfToLinearVisitor(StreamBasedExpressionVisitor): """Convert CNF logical constraint to linear constraints. @@ -150,13 +259,14 @@ def __init__(self, indicator_var, binary_varlist): def exitNode(self, node, values): if type(node) == AndExpression: - return list((v if type(v) in _numeric_relational_types else v == 1) for v in values) + return list((v if type(v) in _numeric_relational_types else v == 1) + for v in values) elif type(node) == OrExpression: return sum(values) >= 1 elif type(node) == NotExpression: return 1 - values[0] - # Note: the following special atoms should only be encountered as root nodes. - # If they are encountered otherwise, something went wrong. + # Note: the following special atoms should only be encountered as root + # nodes. If they are encountered otherwise, something went wrong. sum_values = sum(values[1:]) num_args = node.nargs() - 1 # number of logical arguments if self._indicator is None: @@ -169,30 +279,36 @@ def exitNode(self, node, values): else: rhs_lb, rhs_ub = compute_bounds_on_expr(values[0]) if rhs_lb == float('-inf') or rhs_ub == float('inf'): - raise ValueError( - "Cannnot generate linear constraints for %s([N, *logical_args]) with unbounded N. " - "Detected %s <= N <= %s." % (type(node).__name__, rhs_lb, rhs_ub) - ) + raise ValueError( "Cannnot generate linear constraints for %s" + "([N, *logical_args]) with unbounded N. " + "Detected %s <= N <= %s." 
% + (type(node).__name__, rhs_lb, rhs_ub) ) indicator_binary = self._indicator.get_associated_binary() if type(node) == AtLeastExpression: return [ sum_values >= values[0] - rhs_ub * (1 - indicator_binary), - sum_values <= values[0] - 1 + (-(rhs_lb - 1) + num_args) * indicator_binary + sum_values <= values[0] - 1 + (-(rhs_lb - 1) + num_args) * \ + indicator_binary ] elif type(node) == AtMostExpression: return [ - sum_values <= values[0] + (-rhs_lb + num_args) * (1 - indicator_binary), - sum_values >= (values[0] + 1) - (rhs_ub + 1) * indicator_binary + sum_values <= values[0] + (-rhs_lb + num_args) * \ + (1 - indicator_binary), + sum_values >= (values[0] + 1) - (rhs_ub + 1) * \ + indicator_binary ] elif type(node) == ExactlyExpression: less_than_binary = self._binary_varlist.add() more_than_binary = self._binary_varlist.add() return [ - sum_values <= values[0] + (-rhs_lb + num_args) * (1 - indicator_binary), + sum_values <= values[0] + (-rhs_lb + num_args) * \ + (1 - indicator_binary), sum_values >= values[0] - rhs_ub * (1 - indicator_binary), indicator_binary + less_than_binary + more_than_binary >= 1, - sum_values <= values[0] - 1 + (-(rhs_lb - 1) + num_args) * (1 - less_than_binary), - sum_values >= values[0] + 1 - (rhs_ub + 1) * (1 - more_than_binary), + sum_values <= values[0] - 1 + (-(rhs_lb - 1) + num_args) * \ + (1 - less_than_binary), + sum_values >= values[0] + 1 - (rhs_ub + 1) * \ + (1 - more_than_binary), ] pass diff --git a/pyomo/core/tests/unit/test_block.py b/pyomo/core/tests/unit/test_block.py index d4175cd08b7..866d23eb55b 100644 --- a/pyomo/core/tests/unit/test_block.py +++ b/pyomo/core/tests/unit/test_block.py @@ -29,7 +29,7 @@ Objective, Expression, SOSConstraint, SortComponents, NonNegativeIntegers, TraversalStrategy, RangeSet, SolverFactory, - value, sum_product) + value, sum_product, ComponentUID) from pyomo.common.log import LoggingIntercept from pyomo.common.tempfiles import TempfileManager from pyomo.core.base.block import ScalarBlock, 
SubclassOf, _BlockData, declare_custom_block @@ -2615,5 +2615,40 @@ def b_rule(b, i, **kwds): self.assertEqual(value(m.b[2].p), 10) self.assertEqual(value(m.b[3].p), 0) + def test_find_component_name(self): + b = Block(concrete=True) + b.v1 = Var() + b.v2 = Var([1, 2]) + self.assertIs(b.find_component("v1"), b.v1) + self.assertIs(b.find_component("v2[2]"), b.v2[2]) + + def test_find_component_cuid(self): + b = Block(concrete=True) + b.v1 = Var() + b.v2 = Var([1, 2]) + cuid1 = ComponentUID("v1") + cuid2 = ComponentUID("v2[2]") + self.assertIs(b.find_component(cuid1), b.v1) + self.assertIs(b.find_component(cuid2), b.v2[2]) + + def test_find_component_hierarchical(self): + b1 = Block(concrete=True) + b1.b2 = Block() + b1.b2.v1 = Var() + b1.b2.v2 = Var([1, 2]) + self.assertIs(b1.find_component("b2.v1"), b1.b2.v1) + self.assertIs(b1.find_component("b2.v2[2]"), b1.b2.v2[2]) + + def test_find_component_hierarchical_cuid(self): + b1 = Block(concrete=True) + b1.b2 = Block() + b1.b2.v1 = Var() + b1.b2.v2 = Var([1, 2]) + cuid1 = ComponentUID("b2.v1") + cuid2 = ComponentUID("b2.v2[2]") + self.assertIs(b1.find_component(cuid1), b1.b2.v1) + self.assertIs(b1.find_component(cuid2), b1.b2.v2[2]) + + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_derivs.py b/pyomo/core/tests/unit/test_derivs.py index f8234a63b28..d18c1566807 100644 --- a/pyomo/core/tests/unit/test_derivs.py +++ b/pyomo/core/tests/unit/test_derivs.py @@ -253,3 +253,31 @@ def test_NPV(self): symbolic = reverse_sd(e) self.assertAlmostEqual(derivs[m.p], pyo.value(symbolic[m.p]), tol) self.assertAlmostEqual(derivs[m.p], approx_deriv(e, m.p), tol) + + def test_duplicate_expressions(self): + m = pyo.ConcreteModel() + m.x = pyo.Var(initialize=0.23) + m.y = pyo.Var(initialize=0.88) + a = (m.x + 1)**2 + b = 3*(a + m.y) + e = 2*a + 2*b + 2*b + 2*a + derivs = reverse_ad(e) + symbolic = reverse_sd(e) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + 
self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) + self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol+3) + self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) + + def test_nested_named_expressions(self): + m = pyo.ConcreteModel() + m.x = pyo.Var(initialize=0.23) + m.y = pyo.Var(initialize=0.88) + m.a = pyo.Expression(expr=(m.x + 1)**2) + m.b = pyo.Expression(expr=3*(m.a + m.y)) + e = 2*m.a + 2*m.b + 2*m.b + 2*m.a + derivs = reverse_ad(e) + symbolic = reverse_sd(e) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) + self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol+3) + self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) diff --git a/pyomo/core/tests/unit/test_expression.py b/pyomo/core/tests/unit/test_expression.py index feb15986528..cd379c554ab 100644 --- a/pyomo/core/tests/unit/test_expression.py +++ b/pyomo/core/tests/unit/test_expression.py @@ -485,12 +485,31 @@ def _some_rule(model, i): self.assertEqual(len(model.E), 2) def test_implicit_definition(self): + model = ConcreteModel() + model.idx = Set(initialize=[1,2,3]) + model.E = Expression(model.idx) + self.assertEqual(len(model.E), 3) + expr = model.E[1] + self.assertIs(type(expr), _GeneralExpressionData) + model.E[1] = None + self.assertIs(expr, model.E[1]) + self.assertIs(type(expr), _GeneralExpressionData) + self.assertIs(expr.value, None) + model.E[1] = 5 + self.assertIs(expr, model.E[1]) + self.assertEqual(model.E.extract_values(), {1:5, 2:None, 3:None}) + model.E[2] = 6 + self.assertIsNot(expr, model.E[2]) + self.assertEqual(model.E.extract_values(), {1:5, 2:6, 3:None}) + + def test_explicit_skip_definition(self): model = ConcreteModel() model.idx = Set(initialize=[1,2,3]) model.E = Expression(model.idx, rule=lambda m,i: Expression.Skip) self.assertEqual(len(model.E), 0) - expr = model.E[1] - self.assertIsNone(expr) + with self.assertRaises(KeyError): 
+ expr = model.E[1] + model.E[1] = None expr = model.E[1] self.assertIs(type(expr), _GeneralExpressionData) diff --git a/pyomo/core/tests/unit/test_indexed.py b/pyomo/core/tests/unit/test_indexed.py index 01ed0f204b4..09277a4ef8b 100644 --- a/pyomo/core/tests/unit/test_indexed.py +++ b/pyomo/core/tests/unit/test_indexed.py @@ -172,6 +172,34 @@ def test_index_by_unhashable_type(self): TypeError, '.*', m.x.__getitem__, {}) + def test_ordered_keys(self): + m = ConcreteModel() + # Pick a set whose unordered iteration order should never match + # the "ordered" iteration order. + init_keys = [2, 1, (1, 2), (1, 'a'), (1, 1)] + m.I = Set(ordered=False, dimen=None, initialize=init_keys) + ordered_keys = [1, 2, (1, 1), (1, 2), (1, 'a')] + m.x = Var(m.I) + self.assertNotEqual(list(m.x.keys()), list(m.x.keys(True))) + self.assertEqual(set(m.x.keys()), set(m.x.keys(True))) + self.assertEqual(ordered_keys, list(m.x.keys(True))) + + m.P = Param(m.I, initialize={k:v for v,k in enumerate(init_keys)}) + self.assertNotEqual(list(m.P.keys()), list(m.P.keys(True))) + self.assertEqual(set(m.P.keys()), set(m.P.keys(True))) + self.assertEqual(ordered_keys, list(m.P.keys(True))) + self.assertEqual([1, 0, 4, 2, 3], list(m.P.values(True))) + self.assertEqual(list(zip(ordered_keys, [1, 0, 4, 2, 3])), + list(m.P.items(True))) + + m.P = Param(m.I, initialize={(1,2): 30, 1:10, 2:20}, default=1) + self.assertNotEqual(list(m.P.keys()), list(m.P.keys(True))) + self.assertEqual(set(m.P.keys()), set(m.P.keys(True))) + self.assertEqual(ordered_keys, list(m.P.keys(True))) + self.assertEqual([10, 20, 1, 30, 1], list(m.P.values(True))) + self.assertEqual(list(zip(ordered_keys, [10, 20, 1, 30, 1])), + list(m.P.items(True))) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_indexed_slice.py b/pyomo/core/tests/unit/test_indexed_slice.py index 0610eadbe50..9d2fa190d5c 100644 --- a/pyomo/core/tests/unit/test_indexed_slice.py +++ b/pyomo/core/tests/unit/test_indexed_slice.py 
@@ -18,6 +18,7 @@ from pyomo.environ import Var, Block, ConcreteModel, RangeSet, Set from pyomo.core.base.block import _BlockData from pyomo.core.base.indexed_component_slice import IndexedComponent_slice +from pyomo.core.base.set import normalize_index def _x_init(m, k): return k @@ -601,7 +602,6 @@ def test_nondim_set(self): self.assertEqual(len(ref), 1) self.assertIs(ref[0], m.x[2,3]) - from pyomo.core.base.set import normalize_index _old_flatten = normalize_index.flatten try: normalize_index.flatten = False @@ -624,8 +624,19 @@ def test_nondim_set(self): finally: normalize_index.flatten = _old_flatten + def test_UnknownSetDimen(self): + m = ConcreteModel() + m.I = Set(initialize=[1,2,3]) + m.J = Set() + m.x = Var(m.I, m.J) + + with self.assertRaisesRegex( + IndexError, + 'Slicing components relies on knowing the underlying ' + 'set dimensionality'): + ref = list(m.x[:,:]) + def test_flatten_false(self): - from pyomo.core.base.set import normalize_index _old_flatten = normalize_index.flatten try: normalize_index.flatten = False @@ -692,6 +703,27 @@ def b(b, i, j): self.assertEqual(m.b[0,:].v[:], m.b[0,:].v[:]) self.assertNotEqual(m.b[0,:].v[:], m.b[0,:].v['a']) + def test_str(self): + m = ConcreteModel() + m.b = Block() + # Note that we are testing the string representation of a slice, + # not if the slice is valid + s = m.b[...].x[:, 1:2, 1:5:2, ::1, 5, 'a'].component('foo', kwarg=1) + self.assertEqual( + str(s), + "b[...].x[:, 1:2, 1:5:2, ::1, 5, 'a'].component('foo', kwarg=1)") + + # To test set / del, we want to form the IndexedComponent_slice + # without evaluating it + s = m.b[...] 
+ self.assertEqual( + str(IndexedComponent_slice( + s, (IndexedComponent_slice.del_attribute, 'bogus'))), + 'del b[...].bogus') + self.assertEqual( + str(IndexedComponent_slice( + s, (IndexedComponent_slice.set_attribute, 'bogus', 10))), + 'b[...].bogus = 10') if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_initializer.py b/pyomo/core/tests/unit/test_initializer.py index 45b902257cc..e0421669aed 100644 --- a/pyomo/core/tests/unit/test_initializer.py +++ b/pyomo/core/tests/unit/test_initializer.py @@ -11,11 +11,13 @@ import pickle import pyomo.common.unittest as unittest +from pyomo.common.dependencies import pandas as pd, pandas_available from pyomo.core.base.util import flatten_tuple from pyomo.core.base.initializer import ( Initializer, ConstantInitializer, ItemInitializer, ScalarCallInitializer, IndexedCallInitializer, CountedCallInitializer, CountedCallGenerator, + DataFrameInitializer, ) from pyomo.environ import ( ConcreteModel, Var, @@ -52,7 +54,6 @@ def test_constant(self): a.indices() self.assertEqual(a(None, 1), 5) - def test_dict(self): m = ConcreteModel() a = Initializer({1:5}) @@ -63,7 +64,6 @@ def test_dict(self): self.assertEqual(list(a.indices()), [1]) self.assertEqual(a(None, 1), 5) - def test_sequence(self): m = ConcreteModel() a = Initializer([0,5]) @@ -81,7 +81,6 @@ def test_sequence(self): self.assertFalse(a.contains_indices()) self.assertEqual(a(None, 1), [0,5]) - def test_function(self): m = ConcreteModel() def a_init(m): @@ -135,7 +134,6 @@ def y_init(m, i, j): self.assertEqual(next(c), 3) self.assertEqual(next(c), 4) - def test_method(self): class Init(object): def a_init(self, m): @@ -196,7 +194,6 @@ def y_init(self, m, i, j): self.assertEqual(next(c), 3) self.assertEqual(next(c), 4) - def test_classmethod(self): class Init(object): @classmethod @@ -259,7 +256,6 @@ def y_init(cls, m, i, j): self.assertEqual(next(c), 3) self.assertEqual(next(c), 4) - def test_staticmethod(self): class Init(object): 
@staticmethod @@ -322,7 +318,6 @@ def y_init(m, i, j): self.assertEqual(next(c), 3) self.assertEqual(next(c), 4) - def test_generator_fcn(self): m = ConcreteModel() def a_init(m): @@ -358,7 +353,6 @@ def y_init(m, i, j): self.assertFalse(a.verified) self.assertEqual(list(a(None, (1, 4))), [4,2]) - def test_generator_method(self): class Init(object): def a_init(self, m): @@ -399,7 +393,6 @@ def y_init(self, m, i, j): self.assertFalse(a.verified) self.assertEqual(list(a(None, (1, 4))), [4,2]) - def test_generators(self): m = ConcreteModel() with self.assertRaisesRegex( @@ -425,6 +418,61 @@ def x_init(): self.assertFalse(a.verified) self.assertEqual(list(a(None, 1)), [0,3]) + @unittest.skipUnless(pandas_available, "Pandas is not installed") + def test_dataframe(self): + d = {'col1': [1, 2, 4]} + df = pd.DataFrame(data=d) + a = Initializer(df) + self.assertIs(type(a), DataFrameInitializer) + self.assertFalse(a.constant()) + self.assertFalse(a.verified) + self.assertTrue(a.contains_indices()) + self.assertEqual(list(a.indices()), [0,1,2]) + self.assertEqual(a(None, 0), 1) + self.assertEqual(a(None, 1), 2) + self.assertEqual(a(None, 2), 4) + + d = {'col1': [1, 2, 4], 'col2': [10, 20, 40]} + df = pd.DataFrame(data=d) + with self.assertRaisesRegex( + ValueError, + 'DataFrameInitializer for DataFrame with multiple columns'): + a = Initializer(df) + a = DataFrameInitializer(df, 'col2') + self.assertIs(type(a), DataFrameInitializer) + self.assertFalse(a.constant()) + self.assertFalse(a.verified) + self.assertTrue(a.contains_indices()) + self.assertEqual(list(a.indices()), [0,1,2]) + self.assertEqual(a(None, 0), 10) + self.assertEqual(a(None, 1), 20) + self.assertEqual(a(None, 2), 40) + + df = pd.DataFrame([10, 20, 30, 40], index=[[0,0,1,1],[0,1,0,1]]) + a = Initializer(df) + self.assertIs(type(a), DataFrameInitializer) + self.assertFalse(a.constant()) + self.assertFalse(a.verified) + self.assertTrue(a.contains_indices()) + self.assertEqual(list(a.indices()), [(0, 0), (0, 1), 
(1, 0), (1, 1)]) + self.assertEqual(a(None, (0, 0)), 10) + self.assertEqual(a(None, (0, 1)), 20) + self.assertEqual(a(None, (1, 0)), 30) + self.assertEqual(a(None, (1, 1)), 40) + + @unittest.skipUnless(pandas_available, "Pandas is not installed") + def test_initializer_initializer(self): + d = {'col1': [1, 2, 4], 'col2': [10, 20, 40]} + df = pd.DataFrame(data=d) + a = Initializer(DataFrameInitializer(df, 'col2')) + self.assertIs(type(a), DataFrameInitializer) + self.assertFalse(a.constant()) + self.assertFalse(a.verified) + self.assertTrue(a.contains_indices()) + self.assertEqual(list(a.indices()), [0,1,2]) + self.assertEqual(a(None, 0), 10) + self.assertEqual(a(None, 1), 20) + self.assertEqual(a(None, 2), 40) def test_pickle(self): m = ConcreteModel() diff --git a/pyomo/core/tests/unit/test_logical_to_linear.py b/pyomo/core/tests/unit/test_logical_to_linear.py index 31b45dc3093..a82b6ae64ed 100644 --- a/pyomo/core/tests/unit/test_logical_to_linear.py +++ b/pyomo/core/tests/unit/test_logical_to_linear.py @@ -9,31 +9,38 @@ # ___________________________________________________________________________ import pyomo.common.unittest as unittest +from pyomo.common.log import LoggingIntercept +import logging from pyomo.core.expr.sympy_tools import sympy_available -from pyomo.core.plugins.transform.logical_to_linear import update_boolean_vars_from_binary -from pyomo.environ import ( - ConcreteModel, BooleanVar, LogicalConstraint, lor, TransformationFactory, RangeSet, - Var, Constraint, ComponentMap, value, BooleanSet, atleast, atmost, exactly) +from pyomo.core.plugins.transform.logical_to_linear import \ + update_boolean_vars_from_binary +from pyomo.environ import ( ConcreteModel, BooleanVar, LogicalConstraint, lor, + TransformationFactory, RangeSet, Var, Constraint, + ComponentMap, value, BooleanSet, atleast, atmost, + exactly, Block, Binary) from pyomo.gdp import Disjunct, Disjunction from pyomo.repn import generate_standard_repn - +from io import StringIO def 
_generate_boolean_model(nvars): m = ConcreteModel() m.s = RangeSet(nvars) m.Y = BooleanVar(m.s) + # make sure all the variables are used in at least one logical constraint + m.constraint = LogicalConstraint(expr=exactly(2, m.Y)) return m - def _constrs_contained_within(test_case, test_constr_tuples, constraint_list): - """Checks to see if constraints defined by test_constr_tuples are in the constraint list. + """Checks to see if constraints defined by test_constr_tuples are in the + constraint list. Parameters ---------- constraint_list : Constraint test_constr_tuples : list of tuple test_case : unittest.TestCase + """ # Move const term from body def _move_const_from_body(lower, repn, upper): @@ -47,26 +54,34 @@ def _move_const_from_body(lower, repn, upper): def _repns_match(repn, test_repn): if not len(repn.linear_vars) == len(test_repn.linear_vars): return False - coef_map = ComponentMap((var, coef) for var, coef in zip(repn.linear_vars, repn.linear_coefs)) + coef_map = ComponentMap((var, coef) for var, coef in + zip(repn.linear_vars, repn.linear_coefs)) for var, coef in zip(test_repn.linear_vars, test_repn.linear_coefs): if not coef_map.get(var, 0) == coef: return False return True - constr_list_tuples = [ - _move_const_from_body(constr.lower, generate_standard_repn(constr.body), constr.upper) - for constr in constraint_list.values()] + constr_list_tuples = [ _move_const_from_body( + constr.lower, + generate_standard_repn(constr.body), + constr.upper) for constr in + constraint_list.values()] for test_lower, test_body, test_upper in test_constr_tuples: test_repn = generate_standard_repn(test_body) - test_lower, test_repn, test_upper = _move_const_from_body(test_lower, test_repn, test_upper) + test_lower, test_repn, test_upper = _move_const_from_body(test_lower, + test_repn, + test_upper) found_match = False # Make sure one of the list tuples matches for lower, repn, upper in constr_list_tuples: - if lower == test_lower and upper == test_upper and 
_repns_match(repn, test_repn): + if lower == test_lower and upper == test_upper and \ + _repns_match(repn, test_repn): found_match = True break - test_case.assertTrue(found_match, "{} <= {} <= {} was not found in constraint list.".format( - test_lower, test_body, test_upper)) + test_case.assertTrue( + found_match, + "{} <= {} <= {} was not found in constraint list.".format( + test_lower, test_body, test_upper)) @unittest.skipUnless(sympy_available, "Sympy not available") @@ -78,21 +93,23 @@ def test_implies(self): m.y = BooleanVar() m.p = LogicalConstraint(expr=m.x.implies(m.y)) TransformationFactory('core.logical_to_linear').apply_to(m) - _constrs_contained_within( - self, [(1, (1 - m.x.get_associated_binary()) + m.y.get_associated_binary(), None)], - m.logic_to_linear.transformed_constraints) + _constrs_contained_within( self, [(1, (1 - m.x.get_associated_binary()) + + m.y.get_associated_binary(), + None)], + m.logic_to_linear.transformed_constraints) def test_literal(self): m = ConcreteModel() m.Y = BooleanVar() m.p = LogicalConstraint(expr=m.Y) TransformationFactory('core.logical_to_linear').apply_to(m) - _constrs_contained_within( - self, [(1, m.Y.get_associated_binary(), 1)], m.logic_to_linear.transformed_constraints) + _constrs_contained_within( self, [(1, m.Y.get_associated_binary(), 1)], + m.logic_to_linear.transformed_constraints) def test_constant_True(self): m = ConcreteModel() - with self.assertRaisesRegex(ValueError, "LogicalConstraint 'p' is always True."): + with self.assertRaisesRegex(ValueError, + "LogicalConstraint 'p' is always True."): m.p = LogicalConstraint(expr=True) TransformationFactory('core.logical_to_linear').apply_to(m) self.assertIsNone(m.component('logic_to_linear')) @@ -129,7 +146,9 @@ def test_xfrm_atleast_statement(self): _constrs_contained_within( self, [ (2, - m.Y[1].get_associated_binary() + m.Y[2].get_associated_binary() + m.Y[3].get_associated_binary(), + m.Y[1].get_associated_binary() + \ + m.Y[2].get_associated_binary() + \ 
+ m.Y[3].get_associated_binary(), None) ], m.logic_to_linear.transformed_constraints) @@ -142,7 +161,9 @@ def test_xfrm_atmost_statement(self): _constrs_contained_within( self, [ (None, - m.Y[1].get_associated_binary() + m.Y[2].get_associated_binary() + m.Y[3].get_associated_binary(), + m.Y[1].get_associated_binary() + \ + m.Y[2].get_associated_binary() + \ + m.Y[3].get_associated_binary(), 2) ], m.logic_to_linear.transformed_constraints) @@ -154,56 +175,72 @@ def test_xfrm_exactly_statement(self): TransformationFactory('core.logical_to_linear').apply_to(m) _constrs_contained_within( self, [ - (2, m.Y[1].get_associated_binary() + m.Y[2].get_associated_binary() + m.Y[3].get_associated_binary(), 2) + (2, m.Y[1].get_associated_binary() + \ + m.Y[2].get_associated_binary() + \ + m.Y[3].get_associated_binary(), 2) ], m.logic_to_linear.transformed_constraints) def test_xfrm_special_atoms_nonroot(self): m = ConcreteModel() m.s = RangeSet(3) m.Y = BooleanVar(m.s) - m.p = LogicalConstraint(expr=m.Y[1].implies(atleast(2, m.Y[1], m.Y[2], m.Y[3]))) + m.p = LogicalConstraint(expr=m.Y[1].implies(atleast(2, m.Y[1], m.Y[2], + m.Y[3]))) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 1) self.assertEqual(Y_aug[1].domain, BooleanSet) _constrs_contained_within( self, [ - (None, sum(m.Y[:].get_associated_binary()) - (1 + 2 * Y_aug[1].get_associated_binary()), 0), - (1, (1 - m.Y[1].get_associated_binary()) + Y_aug[1].get_associated_binary(), None), - (None, 2 - 2 * (1 - Y_aug[1].get_associated_binary()) - sum(m.Y[:].get_associated_binary()), 0) + (None, sum(m.Y[:].get_associated_binary()) - \ + (1 + 2 * Y_aug[1].get_associated_binary()), 0), + (1, (1 - m.Y[1].get_associated_binary()) + \ + Y_aug[1].get_associated_binary(), None), + (None, 2 - 2 * (1 - Y_aug[1].get_associated_binary()) - \ + sum(m.Y[:].get_associated_binary()), 0) ], m.logic_to_linear.transformed_constraints) m = ConcreteModel() m.s = 
RangeSet(3) m.Y = BooleanVar(m.s) - m.p = LogicalConstraint(expr=m.Y[1].implies(atmost(2, m.Y[1], m.Y[2], m.Y[3]))) + m.p = LogicalConstraint(expr=m.Y[1].implies(atmost(2, m.Y[1], m.Y[2], + m.Y[3]))) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 1) self.assertEqual(Y_aug[1].domain, BooleanSet) _constrs_contained_within( self, [ - (None, sum(m.Y[:].get_associated_binary()) - (1 - Y_aug[1].get_associated_binary() + 2), 0), - (1, (1 - m.Y[1].get_associated_binary()) + Y_aug[1].get_associated_binary(), None), - (None, 3 - 3 * Y_aug[1].get_associated_binary() - sum(m.Y[:].get_associated_binary()), 0) + (None, sum(m.Y[:].get_associated_binary()) - \ + (1 - Y_aug[1].get_associated_binary() + 2), 0), + (1, (1 - m.Y[1].get_associated_binary()) + \ + Y_aug[1].get_associated_binary(), None), + (None, 3 - 3 * Y_aug[1].get_associated_binary() - \ + sum(m.Y[:].get_associated_binary()), 0) ], m.logic_to_linear.transformed_constraints) m = ConcreteModel() m.s = RangeSet(3) m.Y = BooleanVar(m.s) - m.p = LogicalConstraint(expr=m.Y[1].implies(exactly(2, m.Y[1], m.Y[2], m.Y[3]))) + m.p = LogicalConstraint(expr=m.Y[1].implies(exactly(2, m.Y[1], m.Y[2], + m.Y[3]))) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 3) self.assertEqual(Y_aug[1].domain, BooleanSet) _constrs_contained_within( self, [ - (1, (1 - m.Y[1].get_associated_binary()) + Y_aug[1].get_associated_binary(), None), - (None, sum(m.Y[:].get_associated_binary()) - (1 - Y_aug[1].get_associated_binary() + 2), 0), - (None, 2 - 2 * (1 - Y_aug[1].get_associated_binary()) - sum(m.Y[:].get_associated_binary()), 0), + (1, (1 - m.Y[1].get_associated_binary()) + \ + Y_aug[1].get_associated_binary(), None), + (None, sum(m.Y[:].get_associated_binary()) - \ + (1 - Y_aug[1].get_associated_binary() + 2), 0), + (None, 2 - 2 * (1 - Y_aug[1].get_associated_binary()) - \ + 
sum(m.Y[:].get_associated_binary()), 0), (1, sum(Y_aug[:].get_associated_binary()), None), - (None, sum(m.Y[:].get_associated_binary()) - (1 + 2 * (1 - Y_aug[2].get_associated_binary())), 0), - (None, 3 - 3 * (1 - Y_aug[3].get_associated_binary()) - sum(m.Y[:].get_associated_binary()), 0), + (None, sum(m.Y[:].get_associated_binary()) - \ + (1 + 2 * (1 - Y_aug[2].get_associated_binary())), 0), + (None, 3 - 3 * (1 - Y_aug[3].get_associated_binary()) - \ + sum(m.Y[:].get_associated_binary()), 0), ], m.logic_to_linear.transformed_constraints) # Note: x is now a variable @@ -211,47 +248,68 @@ def test_xfrm_special_atoms_nonroot(self): m.s = RangeSet(3) m.Y = BooleanVar(m.s) m.x = Var(bounds=(1, 3)) - m.p = LogicalConstraint(expr=m.Y[1].implies(exactly(m.x, m.Y[1], m.Y[2], m.Y[3]))) + m.p = LogicalConstraint(expr=m.Y[1].implies(exactly(m.x, m.Y[1], m.Y[2], + m.Y[3]))) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 3) self.assertEqual(Y_aug[1].domain, BooleanSet) _constrs_contained_within( self, [ - (1, (1 - m.Y[1].get_associated_binary()) + Y_aug[1].get_associated_binary(), None), - (None, sum(m.Y[:].get_associated_binary()) - (m.x + 2 * (1 - Y_aug[1].get_associated_binary())), 0), - (None, m.x - 3 * (1 - Y_aug[1].get_associated_binary()) - sum(m.Y[:].get_associated_binary()), 0), + (1, (1 - m.Y[1].get_associated_binary()) + \ + Y_aug[1].get_associated_binary(), None), + (None, sum(m.Y[:].get_associated_binary()) - \ + (m.x + 2 * (1 - Y_aug[1].get_associated_binary())), 0), + (None, m.x - 3 * (1 - Y_aug[1].get_associated_binary()) - \ + sum(m.Y[:].get_associated_binary()), 0), (1, sum(Y_aug[:].get_associated_binary()), None), - (None, sum(m.Y[:].get_associated_binary()) - (m.x - 1 + 3 * (1 - Y_aug[2].get_associated_binary())), 0), - (None, m.x + 1 - 4 * (1 - Y_aug[3].get_associated_binary()) - sum(m.Y[:].get_associated_binary()), 0), + (None, sum(m.Y[:].get_associated_binary()) - \ + (m.x 
- 1 + 3 * (1 - Y_aug[2].get_associated_binary())), 0), + (None, m.x + 1 - 4 * (1 - Y_aug[3].get_associated_binary()) - \ + sum(m.Y[:].get_associated_binary()), 0), ], m.logic_to_linear.transformed_constraints) def test_xfrm_atleast_nested(self): m = _generate_boolean_model(4) - m.p = LogicalConstraint(expr=atleast(1, atleast(2, m.Y[1], m.Y[1].lor(m.Y[2]), m.Y[2]).lor(m.Y[3]), m.Y[4])) + m.p = LogicalConstraint(expr=atleast(1, atleast(2, m.Y[1], + m.Y[1].lor(m.Y[2]), + m.Y[2]).lor(m.Y[3]), + m.Y[4])) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 3) _constrs_contained_within( self, [ - (1, Y_aug[1].get_associated_binary() + m.Y[4].get_associated_binary(), None), - (1, 1 - Y_aug[2].get_associated_binary() + Y_aug[1].get_associated_binary(), None), - (1, 1 - m.Y[3].get_associated_binary() + Y_aug[1].get_associated_binary(), None), + (1, Y_aug[1].get_associated_binary() + \ + m.Y[4].get_associated_binary(), None), + (1, 1 - Y_aug[2].get_associated_binary() + \ + Y_aug[1].get_associated_binary(), None), + (1, 1 - m.Y[3].get_associated_binary() + \ + Y_aug[1].get_associated_binary(), None), (1, - Y_aug[2].get_associated_binary() + m.Y[3].get_associated_binary() + Y_aug[2].get_associated_binary() + \ + m.Y[3].get_associated_binary() + 1 - Y_aug[1].get_associated_binary(), None), - (1, 1 - m.Y[1].get_associated_binary() + Y_aug[3].get_associated_binary(), None), - (1, 1 - m.Y[2].get_associated_binary() + Y_aug[3].get_associated_binary(), None), + (1, 1 - m.Y[1].get_associated_binary() + \ + Y_aug[3].get_associated_binary(), None), + (1, 1 - m.Y[2].get_associated_binary() + \ + Y_aug[3].get_associated_binary(), None), (1, - m.Y[1].get_associated_binary() + m.Y[2].get_associated_binary() + 1 - Y_aug[3].get_associated_binary(), + m.Y[1].get_associated_binary() + \ + m.Y[2].get_associated_binary() + 1 - \ + Y_aug[3].get_associated_binary(), None), (None, 2 - 2 * (1 - 
Y_aug[2].get_associated_binary()) - - (m.Y[1].get_associated_binary() + Y_aug[3].get_associated_binary() + m.Y[2].get_associated_binary()), + - (m.Y[1].get_associated_binary() + \ + Y_aug[3].get_associated_binary() + \ + m.Y[2].get_associated_binary()), 0), (None, - m.Y[1].get_associated_binary() + Y_aug[3].get_associated_binary() + m.Y[2].get_associated_binary() + m.Y[1].get_associated_binary() + \ + Y_aug[3].get_associated_binary() + \ + m.Y[2].get_associated_binary() - (1 + 2 * Y_aug[2].get_associated_binary()), 0) ], m.logic_to_linear.transformed_constraints) @@ -266,18 +324,21 @@ def test_link_with_gdp_indicators(self): m.d2.c = Constraint(expr=m.x <= 10) m.dd[1].c = Constraint(expr=m.x >= 5) m.dd[2].c = Constraint(expr=m.x <= 6) - m.Y[1].associate_binary_var(m.d1.indicator_var) - m.Y[2].associate_binary_var(m.d2.indicator_var) - m.Y[3].associate_binary_var(m.dd[1].indicator_var) - m.Y[4].associate_binary_var(m.dd[2].indicator_var) + m.Y[1].associate_binary_var(m.d1.binary_indicator_var) + m.Y[2].associate_binary_var(m.d2.binary_indicator_var) + m.Y[3].associate_binary_var(m.dd[1].binary_indicator_var) + m.Y[4].associate_binary_var(m.dd[2].binary_indicator_var) m.p = LogicalConstraint(expr=m.Y[1].implies(lor(m.Y[3], m.Y[4]))) m.p2 = LogicalConstraint(expr=atmost(2, *m.Y[:])) TransformationFactory('core.logical_to_linear').apply_to(m) - _constrs_contained_within( - self, [ - (1, m.dd[1].indicator_var + m.dd[2].indicator_var + 1 - m.d1.indicator_var, None), - (None, m.d1.indicator_var + m.d2.indicator_var + m.dd[1].indicator_var + m.dd[2].indicator_var, 2) - ], m.logic_to_linear.transformed_constraints) + _constrs_contained_within( self, [ (1, m.dd[1].binary_indicator_var + + m.dd[2].binary_indicator_var + 1 - + m.d1.binary_indicator_var, None), + (None, m.d1.binary_indicator_var + + m.d2.binary_indicator_var + + m.dd[1].binary_indicator_var + + m.dd[2].binary_indicator_var, 2) ], + m.logic_to_linear.transformed_constraints) def test_gdp_nesting(self): m = 
_generate_boolean_model(2) @@ -285,19 +346,264 @@ def test_gdp_nesting(self): [m.Y[1].implies(m.Y[2])], [m.Y[2].equivalent_to(False)] ]) - TransformationFactory('core.logical_to_linear').apply_to(m) + TransformationFactory('core.logical_to_linear').apply_to( + m, + targets=[m.disj.disjuncts[0], m.disj.disjuncts[1]]) _constrs_contained_within( self, [ - (1, 1 - m.Y[1].get_associated_binary() + m.Y[2].get_associated_binary(), None), + (1, 1 - m.Y[1].get_associated_binary() + \ + m.Y[2].get_associated_binary(), None), ], m.disj_disjuncts[0].logic_to_linear.transformed_constraints) _constrs_contained_within( self, [ (1, 1 - m.Y[2].get_associated_binary(), 1), ], m.disj_disjuncts[1].logic_to_linear.transformed_constraints) + def test_transformed_components_on_parent_block(self): + m = ConcreteModel() + m.b = Block() + m.b.s = RangeSet(3) + m.b.Y = BooleanVar(m.b.s) + m.b.p = LogicalConstraint(expr=m.b.Y[1].implies(lor(m.b.Y[2], + m.b.Y[3]))) + TransformationFactory('core.logical_to_linear').apply_to(m) + + boolean_var = m.b.component("Y_asbinary") + self.assertIsInstance(boolean_var, Var) + notAVar = m.component("Y_asbinary") + self.assertIsNone(notAVar) + + transBlock = m.b.component("logic_to_linear") + self.assertIsInstance(transBlock, Block) + notAThing = m.component("logic_to_linear") + self.assertIsNone(notAThing) + + # check the constraints on the transBlock + _constrs_contained_within( + self, [ + (1, + m.b.Y[2].get_associated_binary() + \ + m.b.Y[3].get_associated_binary() + + (1 - m.b.Y[1].get_associated_binary()), + None) + ], m.b.logic_to_linear.transformed_constraints) + + def make_nested_block_model(self): + """For the next two tests: Has BooleanVar on model, but + LogicalConstraints on a Block and a Block nested on that Block.""" + m = ConcreteModel() + m.b = Block() + m.Y = BooleanVar([1,2]) + m.b.logical = LogicalConstraint(expr=~m.Y[1]) + m.b.b = Block() + m.b.b.logical = LogicalConstraint(expr=m.Y[1].xor(m.Y[2])) + return m + + def 
test_transform_block(self): + m = self.make_nested_block_model() + TransformationFactory('core.logical_to_linear').apply_to(m.b) + + _constrs_contained_within( self, [(1, 1 - + m.Y[1].get_associated_binary(), 1)], + m.b.logic_to_linear.transformed_constraints) + # ESJ: This is kinda whacky looking... Why not Y[1] + Y[2] == 1? (It's + # special case of an exactly(1, ...) constraint. + _constrs_contained_within(self, [(1, m.Y[1].get_associated_binary() + + m.Y[2].get_associated_binary(), None), + (1, 1 - m.Y[1].get_associated_binary() + + 1 - m.Y[2].get_associated_binary(), + None)], + m.b.b.logic_to_linear.transformed_constraints) + self.assertEqual(len(m.b.logic_to_linear.transformed_constraints), 1) + self.assertEqual(len(m.b.b.logic_to_linear.transformed_constraints), 2) + + def test_transform_targets_on_block(self): + m = self.make_nested_block_model() + TransformationFactory('core.logical_to_linear').apply_to(m.b, + targets=m.b.b) + # didn't transform anything on m.b + self.assertIsNone(m.b.component("logic_to_linear")) + # got what we expected on m.b.b + _constrs_contained_within(self, [(1, m.Y[1].get_associated_binary() + + m.Y[2].get_associated_binary(), None), + (1, 1 - m.Y[1].get_associated_binary() + + 1 - m.Y[2].get_associated_binary(), + None)], + m.b.b.logic_to_linear.transformed_constraints) + self.assertEqual(len(m.b.b.logic_to_linear.transformed_constraints), 2) + + def test_logical_constraint_target(self): + m = _generate_boolean_model(3) + TransformationFactory('core.logical_to_linear').apply_to( + m, targets=m.constraint) + _constrs_contained_within( + self, [ + (2, m.Y[1].get_associated_binary() + \ + m.Y[2].get_associated_binary() + \ + m.Y[3].get_associated_binary(), 2) + ], m.logic_to_linear.transformed_constraints) + + def make_indexed_logical_constraint_model(self): + m = _generate_boolean_model(3) + m.cons = LogicalConstraint([1,2]) + m.cons[1] = exactly(2, m.Y) + m.cons[2] = m.Y[1].implies(lor(m.Y[2], m.Y[3])) + return m + + def 
test_indexed_logical_constraint_target(self): + m = self.make_indexed_logical_constraint_model() + TransformationFactory('core.logical_to_linear').apply_to( + m, targets=m.cons) + _constrs_contained_within( + self, [ + (2, m.Y[1].get_associated_binary() + \ + m.Y[2].get_associated_binary() + \ + m.Y[3].get_associated_binary(), 2) + ], m.logic_to_linear.transformed_constraints) + _constrs_contained_within( + self, [ + (1, + m.Y[2].get_associated_binary() + \ + m.Y[3].get_associated_binary() + + (1 - m.Y[1].get_associated_binary()), + None) + ], m.logic_to_linear.transformed_constraints) + + # and verify only the targets were transformed + self.assertEqual(len(m.logic_to_linear.transformed_constraints), 2) + self.assertTrue(m.constraint.active) + + def test_logical_constraintData_target(self): + m = self.make_indexed_logical_constraint_model() + TransformationFactory('core.logical_to_linear').apply_to( + m, targets=m.cons[2]) + _constrs_contained_within( + self, [ + (1, + m.Y[2].get_associated_binary() + \ + m.Y[3].get_associated_binary() + + (1 - m.Y[1].get_associated_binary()), + None) + ], m.logic_to_linear.transformed_constraints) + # only transformed the second one. 
+ self.assertEqual(len(m.logic_to_linear.transformed_constraints), 1) + + def test_blockData_target(self): + m = ConcreteModel() + m.b = Block([1,2]) + m.b[1].transfer_attributes_from( + self.make_indexed_logical_constraint_model()) + TransformationFactory('core.logical_to_linear').apply_to(m, + targets=m.b[1]) + _constrs_contained_within( + self, [ + (2, m.b[1].Y[1].get_associated_binary() + \ + m.b[1].Y[2].get_associated_binary() + \ + m.b[1].Y[3].get_associated_binary(), 2) + ], m.b[1].logic_to_linear.transformed_constraints) + _constrs_contained_within( + self, [ + (1, + m.b[1].Y[2].get_associated_binary() + \ + m.b[1].Y[3].get_associated_binary() + + (1 - m.b[1].Y[1].get_associated_binary()), + None) + ], m.b[1].logic_to_linear.transformed_constraints) + + def test_disjunctData_target(self): + m = ConcreteModel() + m.d = Disjunct([1,2]) + m.d[1].transfer_attributes_from( + self.make_indexed_logical_constraint_model()) + TransformationFactory('core.logical_to_linear').apply_to(m, + targets=m.d[1]) + _constrs_contained_within( + self, [ + (2, m.d[1].Y[1].get_associated_binary() + \ + m.d[1].Y[2].get_associated_binary() + \ + m.d[1].Y[3].get_associated_binary(), 2) + ], m.d[1].logic_to_linear.transformed_constraints) + _constrs_contained_within( + self, [ + (1, + m.d[1].Y[2].get_associated_binary() + \ + m.d[1].Y[3].get_associated_binary() + + (1 - m.d[1].Y[1].get_associated_binary()), + None) + ], m.d[1].logic_to_linear.transformed_constraints) + + def test_target_with_unrecognized_type(self): + m = _generate_boolean_model(2) + with self.assertRaisesRegex(ValueError, + r"invalid value for configuration " + r"'targets':\n\tFailed casting 1\n\tto " + r"target_list\n\tError: " + r"Expected Component or list of Components." 
+ r"\n\tReceived "): + TransformationFactory('core.logical_to_linear').apply_to( + m, targets=1) @unittest.skipUnless(sympy_available, "Sympy not available") class TestLogicalToLinearBackmap(unittest.TestCase): + def test_backmap_deprecated(self): + m = ConcreteModel() + m.s = RangeSet(3) + m.Y = BooleanVar(m.s) + TransformationFactory('core.logical_to_linear').apply_to(m) + output = StringIO() + with LoggingIntercept(output, 'pyomo.core.base', + logging.WARNING): + y1 = m.Y[1].get_associated_binary() + self.assertIn("DEPRECATED: Relying on core.logical_to_linear to " + "transform BooleanVars that do not appear in " + "LogicalConstraints is deprecated. Please " + "associate your own binaries if you have BooleanVars " + "not used in logical expressions.", + output.getvalue().replace('\n', ' ')) + output = StringIO() + with LoggingIntercept(output, 'pyomo.core.base', + logging.WARNING): + y2 = m.Y[2].get_associated_binary() + self.assertIn("DEPRECATED: Relying on core.logical_to_linear to " + "transform BooleanVars that do not appear in " + "LogicalConstraints is deprecated. 
Please " + "associate your own binaries if you have BooleanVars " + "not used in logical expressions.", + output.getvalue().replace('\n', ' ')) + y1.value = 1 + y2.value = 0 + update_boolean_vars_from_binary(m) + self.assertTrue(m.Y[1].value) + self.assertFalse(m.Y[2].value) + self.assertIsNone(m.Y[3].value) + + def test_can_associate_unused_boolean_after_transformation(self): + m = ConcreteModel() + m.Y = BooleanVar() + TransformationFactory('core.logical_to_linear').apply_to(m) + m.y = Var(domain=Binary) + output = StringIO() + with LoggingIntercept(output, 'pyomo.core.base', + logging.WARNING): + m.Y.associate_binary_var(m.y) + y = m.Y.get_associated_binary() + self.assertIs(y, m.y) + # we didn't whine about this + self.assertEqual(output.getvalue(), '') + + def test_cannot_reassociate_boolean_error(self): + m = _generate_boolean_model(2) + TransformationFactory('core.logical_to_linear').apply_to(m) + # both of the variable have been associated with binaries, we're not + # allowed to change now. 
+ m.y = Var(domain=Binary) + with self.assertRaisesRegex( + RuntimeError, + r"Reassociating BooleanVar 'Y\[1\]' " + r"\(currently associated with 'Y_asbinary\[1\]'\)" + r" with 'y' is not allowed"): + m.Y[1].associate_binary_var(m.y) + def test_backmap(self): m = _generate_boolean_model(3) TransformationFactory('core.logical_to_linear').apply_to(m) @@ -308,6 +614,21 @@ def test_backmap(self): self.assertFalse(m.Y[2].value) self.assertIsNone(m.Y[3].value) + def test_backmap_hierarchical_model(self): + m = _generate_boolean_model(3) + m.b = Block() + m.b.Y = BooleanVar() + m.b.lc = LogicalConstraint(expr=m.Y[1].lor(m.b.Y)) + TransformationFactory('core.logical_to_linear').apply_to(m) + m.Y_asbinary[1].value = 1 + m.Y_asbinary[2].value = 0 + m.b.Y.get_associated_binary().value = 1 + update_boolean_vars_from_binary(m) + self.assertTrue(m.Y[1].value) + self.assertFalse(m.Y[2].value) + self.assertIsNone(m.Y[3].value) + self.assertTrue(m.b.Y.value) + def test_backmap_noninteger(self): m = _generate_boolean_model(2) TransformationFactory('core.logical_to_linear').apply_to(m) @@ -315,7 +636,8 @@ def test_backmap_noninteger(self): update_boolean_vars_from_binary(m, integer_tolerance=0.1) self.assertTrue(m.Y[1].value) # Now try it without the tolerance set - with self.assertRaisesRegex(ValueError, r"Binary variable has non-\{0,1\} value"): + with self.assertRaisesRegex(ValueError, + r"Binary variable has non-\{0,1\} value"): update_boolean_vars_from_binary(m) diff --git a/pyomo/core/tests/unit/test_numeric_expr.py b/pyomo/core/tests/unit/test_numeric_expr.py index 83c89fda34b..961ade5bc32 100644 --- a/pyomo/core/tests/unit/test_numeric_expr.py +++ b/pyomo/core/tests/unit/test_numeric_expr.py @@ -1928,19 +1928,19 @@ def test_linearsum(self): model.p = Param(A, initialize=2, mutable=True) expr = quicksum(i*model.a[i] for i in A) - self.assertEqual("sum(a[1], prod(2, a[2]), prod(3, a[3]), prod(4, a[4]))", str(expr)) + self.assertEqual("sum(mon(1, a[1]), mon(2, a[2]), mon(3, 
a[3]), mon(4, a[4]))", str(expr)) expr = quicksum((i-2)*model.a[i] for i in A) - self.assertEqual("sum(prod(-2, a[0]), prod(-1, a[1]), a[3], prod(2, a[4]))", str(expr)) + self.assertEqual("sum(mon(-2, a[0]), mon(-1, a[1]), mon(1, a[3]), mon(2, a[4]))", str(expr)) expr = quicksum(model.a[i] for i in A) - self.assertEqual("sum(a[0], a[1], a[2], a[3], a[4])", str(expr)) + self.assertEqual("sum(mon(1, a[0]), mon(1, a[1]), mon(1, a[2]), mon(1, a[3]), mon(1, a[4]))", str(expr)) model.p[1].value = 0 model.p[3].value = 3 expr = quicksum(model.p[i]*model.a[i] if i != 3 else model.p[i] for i in A) - self.assertEqual("sum(3, prod(2, a[0]), prod(2, a[2]), prod(2, a[4]))", expression_to_string(expr, compute_values=True)) - self.assertEqual("sum(p[3], prod(p[0], a[0]), prod(p[1], a[1]), prod(p[2], a[2]), prod(p[4], a[4]))", expression_to_string(expr, compute_values=False)) + self.assertEqual("sum(3, mon(2, a[0]), mon(0, a[1]), mon(2, a[2]), mon(2, a[4]))", expression_to_string(expr, compute_values=True)) + self.assertEqual("sum(p[3], mon(p[0], a[0]), mon(p[1], a[1]), mon(p[2], a[2]), mon(p[4], a[4]))", expression_to_string(expr, compute_values=False)) def test_expr(self): # @@ -2115,8 +2115,8 @@ def test_linearsum(self): self.assertEqual("a[1] + 2*a[2] + 3*a[3] + 4*a[4] + 3", expression_to_string(expr, compute_values=True)) expr = quicksum((i-2)*model.a[i] for i in A) + 3 - self.assertEqual("- 2.0*a[0] - a[1] + a[3] + 2*a[4] + 3", str(expr)) - self.assertEqual("- 2.0*a[0] - a[1] + a[3] + 2*a[4] + 3", expression_to_string(expr, compute_values=True)) + self.assertEqual("-2*a[0] - a[1] + a[3] + 2*a[4] + 3", str(expr)) + self.assertEqual("-2*a[0] - a[1] + a[3] + 2*a[4] + 3", expression_to_string(expr, compute_values=True)) expr = quicksum(model.a[i] for i in A) + 3 self.assertEqual("a[0] + a[1] + a[2] + a[3] + a[4] + 3", str(expr)) @@ -2129,7 +2129,7 @@ def test_linearsum(self): model.p[1].value = 0 model.p[3].value = 3 expr = quicksum(model.p[i]*model.a[i] if i != 3 else model.p[i] 
for i in A) - self.assertEqual("3 + 2*a[0] + 2*a[2] + 2*a[4]", expression_to_string(expr, compute_values=True)) + self.assertEqual("3 + 2*a[0] + 0*a[1] + 2*a[2] + 2*a[4]", expression_to_string(expr, compute_values=True)) expr = quicksum(model.p[i]*model.a[i] if i != 3 else -3 for i in A) self.assertEqual("-3 + p[0]*a[0] + p[1]*a[1] + p[2]*a[2] + p[4]*a[4]", expression_to_string(expr, compute_values=False)) @@ -2449,14 +2449,14 @@ def test_labeler(self): labeler = NumericLabeler('x') self.assertEqual( expression_to_string(e, labeler=labeler), - "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (q[0]*x3 + q[1]*x4 + q[2]*x5)/x1") + "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (x6*x3 + x7*x4 + x8*x5)/x1") from pyomo.core.expr.symbol_map import SymbolMap labeler = NumericLabeler('x') smap = SymbolMap(labeler) self.assertEqual( expression_to_string(e, smap=smap), - "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (q[0]*x3 + q[1]*x4 + q[2]*x5)/x1") + "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (x6*x3 + x7*x4 + x8*x5)/x1") self.assertEqual( expression_to_string(e, smap=smap, compute_values=True), "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (3*x3 + 3*x4 + 3*x5)/x1") @@ -3526,7 +3526,7 @@ def test_summation1(self): self.assertIs(type(e), LinearExpression) self.assertEqual( id(self.m.a[1]), id(e.linear_vars[0]) ) self.assertEqual( id(self.m.a[2]), id(e.linear_vars[1]) ) - self.assertEqual(e.size(), 1) + self.assertEqual(e.size(), 16) def test_summation2(self): e = sum_product(self.m.p, self.m.a) @@ -3534,7 +3534,7 @@ def test_summation2(self): self.assertIs(type(e), LinearExpression) self.assertEqual( id(self.m.a[1]), id(e.linear_vars[0]) ) self.assertEqual( id(self.m.a[2]), id(e.linear_vars[1]) ) - self.assertEqual(e.size(), 1) + self.assertEqual(e.size(), 16) def test_summation3(self): e = sum_product(self.m.q, self.m.a) @@ -3542,7 +3542,7 @@ def test_summation3(self): self.assertIs(type(e), LinearExpression) self.assertEqual( id(self.m.a[1]), id(e.linear_vars[0]) ) self.assertEqual( id(self.m.a[2]), id(e.linear_vars[1]) ) - 
self.assertEqual(e.size(), 1) + self.assertEqual(e.size(), 16) def test_summation4(self): e = sum_product(self.m.a, self.m.b) @@ -3562,9 +3562,9 @@ def test_summation6(self): e = sum_product(self.m.a, denom=self.m.p) self.assertEqual( e(), 25 ) self.assertIs(type(e), LinearExpression) - #self.assertEqual( id(self.m.a[1]), id(e.arg(0).arg(0)) ) - #self.assertEqual( id(self.m.a[2]), id(e.arg(1).arg(0)) ) - #self.assertEqual(e.size(), 21) + self.assertEqual( id(self.m.a[1]), id(e.linear_vars[0]) ) + self.assertEqual( id(self.m.a[2]), id(e.linear_vars[1]) ) + self.assertEqual(e.size(), 26) def test_summation7(self): e = sum_product(self.m.p, self.m.q, index=self.m.I) @@ -3580,7 +3580,7 @@ def test_summation_compression(self): self.assertEqual( e(), 75 ) self.assertIs(type(e), SumExpression) self.assertEqual( e.nargs(), 2) - self.assertEqual(e.size(), 3) + self.assertEqual(e.size(), 33) class TestSumExpression(unittest.TestCase): @@ -3802,7 +3802,7 @@ def test_SumExpressionY(self): self.assertEqual( expr1(), 25 ) self.assertEqual( expr2(), 25 ) self.assertNotEqual( id(expr1), id(expr2) ) - self.assertEqual( id(expr1._args_), id(expr2._args_) ) + self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) self.assertNotEqual( id(expr1.linear_vars[0]), id(expr2.linear_vars[0]) ) self.assertNotEqual( id(expr1.linear_vars[1]), id(expr2.linear_vars[1]) ) expr1 += self.m.b @@ -4547,6 +4547,74 @@ def test_external_func(self): # It's probably worth confirming the final linear expression that is generated. 
class TestLinearExpression(unittest.TestCase): + def test_init(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + e = LinearExpression( + constant=5, linear_vars=[m.x, m.y], linear_coefs=[2,3]) + self.assertEqual(e._args_cache_, []) + self.assertEqual(e.constant, 5) + self.assertEqual(e.linear_vars, [m.x, m.y]) + self.assertEqual(e.linear_coefs, [2, 3]) + + args = [10, + MonomialTermExpression((4, m.y)), + MonomialTermExpression((5, m.x))] + with LoggingIntercept() as OUT: + e = LinearExpression(args) + self.assertEqual(OUT.getvalue(), "") + self.assertEqual(e._args_cache_, args) + self.assertEqual(e.constant, 10) + self.assertEqual(e.linear_vars, [m.y, m.x]) + self.assertEqual(e.linear_coefs, [4, 5]) + + with LoggingIntercept() as OUT: + e = LinearExpression([20, 6, 7, m.x, m.y]) + self.assertIn("LinearExpression has been updated to expect args= " + "to be a constant followed by MonomialTermExpressions", + OUT.getvalue().replace("\n", " ")) + self.assertIsNotNone(e._args_cache_) + self.assertEqual(len(e._args_cache_), 3) + self.assertEqual(e._args_cache_[0], 20) + self.assertIs(e._args_cache_[1].__class__, MonomialTermExpression) + self.assertEqual(e._args_cache_[1].args, (6, m.x)) + self.assertEqual(e._args_cache_[2].args, (7, m.y)) + self.assertEqual(e.constant, 20) + self.assertEqual(e.linear_vars, [m.x, m.y]) + self.assertEqual(e.linear_coefs, [6, 7]) + + with LoggingIntercept() as OUT: + e = LinearExpression([20, 6, 7, 8, m.x, m.y, m.x]) + self.assertIn("LinearExpression has been updated to expect args= " + "to be a constant followed by MonomialTermExpressions", + OUT.getvalue().replace("\n", " ")) + self.assertIsNotNone(e._args_cache_) + self.assertEqual(len(e._args_cache_), 4) + self.assertEqual(e._args_cache_[0], 20) + self.assertIs(e._args_cache_[1].__class__, MonomialTermExpression) + self.assertEqual(e._args_cache_[1].args, (6, m.x)) + self.assertEqual(e._args_cache_[2].args, (7, m.y)) + self.assertEqual(e._args_cache_[3].args, (8, m.x)) + 
self.assertEqual(e.constant, 20) + self.assertEqual(e.linear_vars, [m.x, m.y, m.x]) + self.assertEqual(e.linear_coefs, [6, 7, 8]) + + def test_to_string(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + e = LinearExpression() + self.assertEqual(e.to_string(), "0") + e = LinearExpression(constant=0, + linear_coefs=[-1, 1, -2, 2], + linear_vars=[m.x, m.y, m.x, m.y]) + self.assertEqual(e.to_string(), "- x + y - 2*x + 2*y") + e = LinearExpression(constant=10, + linear_coefs=[-1, 1, -2, 2], + linear_vars=[m.x, m.y, m.x, m.y]) + self.assertEqual(e.to_string(), "10 - x + y - 2*x + 2*y") + def test_sum_other(self): m = ConcreteModel() m.v = Var(range(5)) @@ -4659,7 +4727,7 @@ def test_div(self): with linear_expression() as e: e += m.v[0] e /= m.p - self.assertEqual("(1/p)*v[0]", str(e)) + self.assertEqual("1/p*v[0]", str(e)) self.assertIs(e.__class__, _MutableLinearExpression) with linear_expression() as e: diff --git a/pyomo/core/tests/unit/test_numpy_expr.py b/pyomo/core/tests/unit/test_numpy_expr.py index 2cd637f31a6..51250d8290a 100644 --- a/pyomo/core/tests/unit/test_numpy_expr.py +++ b/pyomo/core/tests/unit/test_numpy_expr.py @@ -155,6 +155,25 @@ def rule(m, l): #model.Constraint1a = Constraint(model.I, rule=model.P1 <= model.V) #model.Constraint2a = Constraint(model.I, rule=model.P2 <= model.V) + @unittest.skipUnless(pandas_available, "pandas is not available") + def test_param_from_pandas_series_index(self): + m = ConcreteModel() + s = pd.Series([1, 3, 5], index=['T1', 'T2', 'T3']) + + # Params treat Series as maps (so the Series index matters) + m.I = Set(initialize=s.index) + m.p1 = Param(m.I, initialize=s) + self.assertEqual(m.p1.extract_values(), {'T1':1, 'T2':3, 'T3':5}) + m.p2 = Param(s.index, initialize=s) + self.assertEqual(m.p2.extract_values(), {'T1':1, 'T2':3, 'T3':5}) + with self.assertRaisesRegex( + KeyError, "Index 'T1' is not valid for indexed component 'p3'"): + m.p3 = Param([0,1,2], initialize=s) + + # Sets treat Series as lists + m.J = 
Set(initialize=s) + self.assertEqual(set(m.J), {1, 3, 5}) + def test_numpy_float(self): # Test issue #31 m = ConcreteModel() diff --git a/pyomo/core/tests/unit/test_param.py b/pyomo/core/tests/unit/test_param.py index 1ab82f483fe..070bfb458b7 100644 --- a/pyomo/core/tests/unit/test_param.py +++ b/pyomo/core/tests/unit/test_param.py @@ -775,7 +775,7 @@ def test_dimen1(self): model.C = Set(dimen=1, initialize=[9,8,7,6,5]) model.x = Param(model.A, model.B, model.C, initialize=-1) #model.y = Param(model.B, initialize=(1,1)) - model.y = Param(model.B, initialize=((1,1,7),2)) + model.y = Param(model.B, initialize=1) instance=model.create_instance() self.assertEqual( instance.x.dim(), 6) self.assertEqual( instance.y.dim(), 3) diff --git a/pyomo/core/tests/unit/test_range.py b/pyomo/core/tests/unit/test_range.py index d39dd9ae2cd..05039675a2f 100644 --- a/pyomo/core/tests/unit/test_range.py +++ b/pyomo/core/tests/unit/test_range.py @@ -20,21 +20,23 @@ Any ) +_inf = float('inf') + class TestNumericRange(unittest.TestCase): def test_init(self): a = NR(None, None, 0) - self.assertIsNone(a.start) - self.assertIsNone(a.end) + self.assertEqual(a.start, -_inf) + self.assertEqual(a.end, _inf) self.assertEqual(a.step, 0) - a = NR(-float('inf'), float('inf'), 0) - self.assertIsNone(a.start) - self.assertIsNone(a.end) + a = NR(-_inf, _inf, 0) + self.assertEqual(a.start, -_inf) + self.assertEqual(a.end, _inf) self.assertEqual(a.step, 0) a = NR(0, None, 0) self.assertEqual(a.start, 0) - self.assertIsNone(a.end) + self.assertEqual(a.end, _inf) self.assertEqual(a.step, 0) a = NR(0, 0, 0) @@ -70,9 +72,14 @@ def test_init(self): a = NR(0, None, 1) self.assertEqual(a.start, 0) - self.assertEqual(a.end, None) + self.assertEqual(a.end, _inf) self.assertEqual(a.step, 1) + a = NR(0, None, -1) + self.assertEqual(a.start, 0) + self.assertEqual(a.end, -_inf) + self.assertEqual(a.step, -1) + a = NR(0, 5, 1) self.assertEqual(a.start, 0) self.assertEqual(a.end, 5) @@ -569,9 +576,13 @@ def 
test_range_difference(self): [NR(None,-5,0,'[)')], ) self.assertEqual( - NR(None,0,0).range_difference([NR(-5,0,0,'[)')]), + NR(None,0,0).range_difference([NR(-5,0,0,'[]')]), [NR(None,-5,0,'[)')], ) + self.assertEqual( + NR(None,0,0).range_difference([NR(-5,0,0,'[)')]), + [NR(None,-5,0,'[)'), NR(0,0,0)], + ) self.assertEqual( NR(0,10,0).range_difference([NR(None,5,0,'[)')]), [NR(5,10,0,'[]')], @@ -615,11 +626,21 @@ def test_range_difference(self): a = NR(0.25, None, 1) self.assertEqual(a.range_difference([NR(0.5, None, 1)]), [a]) - # And the onee thing we don't support: + # open/closed infinite ranges + a = NR(None, None, 0) + self.assertEqual( + a.range_difference([NR(None, None, 0, "()")]), + [NR(-_inf, -_inf, 0), NR(_inf, _inf, 0)]) + self.assertEqual( + a.range_difference([NR(None, None, 0, "()"), + NR(None, None, 0, "[)")]), + [NR(_inf, _inf, 0)]) + + # And the one thing we don't support: with self.assertRaisesRegex( RangeDifferenceError, 'We do not support subtracting an ' - r'infinite discrete range \[0:None\] from an infinite ' - r'continuous range \[None..None\]'): + r'infinite discrete range \[0:inf\] from an infinite ' + r'continuous range \[-inf..inf\]'): NR(None,None,0).range_difference([NR(0,None,1)]) def test_range_intersection(self): @@ -896,13 +917,14 @@ def test_isdisjoint(self): def test_range_difference(self): a = NNR('a') b = NR(0,5,0) + b1 = NR(0,5,0,'[)') # Note: b & c overlap, so [b,c]-c != b c = NR(5,10,1) x = RP([[a],[b,c]]) y = RP([[a],[c]]) z = RP([[a],[b],[c]]) w = RP([list(Any.ranges()), [b]]) self.assertEqual(x.range_difference([x]), []) - self.assertEqual(x.range_difference([y]), [RP([[a],[b]])]) + self.assertEqual(x.range_difference([y]), [RP([[a],[b1]])]) self.assertEqual(x.range_difference([z]), [x]) self.assertEqual(x.range_difference(Any.ranges()), []) self.assertEqual(x.range_difference([w]), [RP([[a],[NR(6,10,1)]])]) diff --git a/pyomo/core/tests/unit/test_reference.py b/pyomo/core/tests/unit/test_reference.py index 
f228abd6426..5fb912a5c84 100644 --- a/pyomo/core/tests/unit/test_reference.py +++ b/pyomo/core/tests/unit/test_reference.py @@ -20,10 +20,13 @@ from pyomo.environ import ( ConcreteModel, Block, Var, Set, RangeSet, Param, value, + NonNegativeIntegers, ) from pyomo.common.collections import ComponentSet from pyomo.core.base.var import IndexedVar -from pyomo.core.base.set import SetProduct, UnorderedSetOf +from pyomo.core.base.set import ( + SetProduct, FiniteSetOf, UnknownSetDimen, normalize_index, +) from pyomo.core.base.indexed_component import ( UnindexedComponent_set, IndexedComponent ) @@ -282,6 +285,17 @@ def test_attribute_deletion(self): self.assertEqual(len(list(x.value for x in rd.values())), 2-1) class TestReferenceSet(unittest.TestCase): + def test_str(self): + m = ConcreteModel() + @m.Block([1,2], [4,5]) + def b(b,i,j): + b.x = Var([7,8],[10,11], initialize=0) + b.y = Var([7,8], initialize=0) + b.z = Var() + + rs = _ReferenceSet(m.b[:,5].z) + self.assertEqual(str(rs), 'ReferenceSet(b[:, 5].z)') + def test_lookup_and_iter_dense_data(self): m = ConcreteModel() @m.Block([1,2], [4,5]) @@ -398,7 +412,7 @@ def test_component_reference(self): self.assertIs(m.r.ctype, Var) self.assertIsNot(m.r.index_set(), m.x.index_set()) self.assertIs(m.x.index_set(), UnindexedComponent_set) - self.assertIs(type(m.r.index_set()), UnorderedSetOf) + self.assertIs(type(m.r.index_set()), FiniteSetOf) self.assertEqual(len(m.r), 1) self.assertTrue(m.r.is_indexed()) self.assertIn(None, m.r) @@ -412,7 +426,7 @@ def test_component_reference(self): self.assertIs(m.s.ctype, Var) self.assertIsNot(m.s.index_set(), m.x.index_set()) self.assertIs(m.x.index_set(), UnindexedComponent_set) - self.assertIs(type(m.s.index_set()), UnorderedSetOf) + self.assertIs(type(m.s.index_set()), FiniteSetOf) self.assertEqual(len(m.s), 1) self.assertTrue(m.s.is_indexed()) self.assertIn(None, m.s) @@ -435,6 +449,62 @@ def test_component_reference(self): with self.assertRaises(KeyError): m.t[3] + def 
test_component_data_reference(self): + m = ConcreteModel() + m.y = Var([1,2]) + m.r = Reference(m.y[2]) + + self.assertIs(m.r.ctype, Var) + self.assertIsNot(m.r.index_set(), m.y.index_set()) + self.assertIs(m.y.index_set(), m.y_index) + self.assertIs(type(m.r.index_set()), FiniteSetOf) + self.assertEqual(len(m.r), 1) + self.assertTrue(m.r.is_indexed()) + self.assertIn(None, m.r) + self.assertNotIn(1, m.r) + self.assertIs(m.r[None], m.y[2]) + with self.assertRaises(KeyError): + m.r[2] + + def test_component_data_reference_clone(self): + m = ConcreteModel() + m.b = Block() + m.b.x = Var([1,2]) + m.c = Block() + m.c.r = Reference(m.b.x[2]) + + self.assertIs(m.c.r[None], m.b.x[2]) + m.d = m.c.clone() + self.assertIs(m.d.r[None], m.b.x[2]) + + i = m.clone() + self.assertIs(i.c.r[None], i.b.x[2]) + self.assertIsNot(i.c.r[None], m.b.x[2]) + self.assertIs(i.d.r[None], i.b.x[2]) + + + def test_reference_var_pprint(self): + m = ConcreteModel() + m.x = Var([1,2], initialize={1:4,2:8}) + m.r = Reference(m.x) + buf = StringIO() + m.r.pprint(ostream=buf) + self.assertEqual(buf.getvalue(), +"""r : Size=2, Index=x_index, ReferenceTo=x + Key : Lower : Value : Upper : Fixed : Stale : Domain + 1 : None : 4 : None : False : False : Reals + 2 : None : 8 : None : False : False : Reals +""") + m.s = Reference(m.x[:,...]) + buf = StringIO() + m.s.pprint(ostream=buf) + self.assertEqual(buf.getvalue(), +"""s : Size=2, Index=x_index, ReferenceTo=x[:, ...] 
+ Key : Lower : Value : Upper : Fixed : Stale : Domain + 1 : None : 4 : None : False : False : Reals + 2 : None : 8 : None : False : False : Reals +""") + def test_reference_indexedcomponent_pprint(self): m = ConcreteModel() m.x = Var([1,2], initialize={1:4,2:8}) @@ -442,7 +512,16 @@ def test_reference_indexedcomponent_pprint(self): buf = StringIO() m.r.pprint(ostream=buf) self.assertEqual(buf.getvalue(), -"""r : Size=2, Index=x_index +"""r : Size=2, Index=x_index, ReferenceTo=x + Key : Object + 1 : + 2 : +""") + m.s = Reference(m.x[:,...], ctype=IndexedComponent) + buf = StringIO() + m.s.pprint(ostream=buf) + self.assertEqual(buf.getvalue(), +"""s : Size=2, Index=x_index, ReferenceTo=x[:, ...] Key : Object 1 : 2 : @@ -528,7 +607,7 @@ def b(b,i): m.r = Reference(m.b[:].x[3,:]) self.assertIs(m.r.ctype, Var) - self.assertIs(type(m.r.index_set()), UnorderedSetOf) + self.assertIs(type(m.r.index_set()), FiniteSetOf) self.assertEqual(len(m.r), 2*1) self.assertEqual(m.r[1,3].lb, 1) self.assertEqual(m.r[2,3].lb, 2) @@ -551,7 +630,7 @@ def b(b,i): m.r = Reference(m.b[:].x[:]) self.assertIs(m.r.ctype, Var) - self.assertIs(type(m.r.index_set()), UnorderedSetOf) + self.assertIs(type(m.r.index_set()), FiniteSetOf) self.assertEqual(len(m.r), 2*2) self.assertEqual(m.r[1,3].lb, 1) self.assertEqual(m.r[2,4].lb, 2) @@ -574,7 +653,7 @@ def b(b,i): m.r = Reference(m.b[:].x[:]) self.assertIs(m.r.ctype, Var) - self.assertIs(type(m.r.index_set()), UnorderedSetOf) + self.assertIs(type(m.r.index_set()), FiniteSetOf) self.assertEqual(len(m.r), 2*2) self.assertEqual(m.r[1,3].lb, 1) self.assertEqual(m.r[2,4].lb, 2) @@ -597,7 +676,7 @@ def test_nested_reference_nonuniform_index_size(self): m.r = Reference(m.b[:].x[:,:]) self.assertIs(m.r.ctype, Var) - self.assertIs(type(m.r.index_set()), UnorderedSetOf) + self.assertIs(type(m.r.index_set()), FiniteSetOf) self.assertEqual(len(m.r), 2*2*2) self.assertEqual(m.r[1,3,3].lb, 1) self.assertEqual(m.r[2,4,3].lb, 2) @@ -619,8 +698,8 @@ def 
test_nested_scalars(self): self.assertEqual(m.r.index_set().dimen, 2) base_sets = list(m.r.index_set().subsets()) self.assertEqual(len(base_sets), 2) - self.assertIs(type(base_sets[0]), UnorderedSetOf) - self.assertIs(type(base_sets[1]), UnorderedSetOf) + self.assertIs(type(base_sets[0]), FiniteSetOf) + self.assertIs(type(base_sets[1]), FiniteSetOf) def test_ctype_detection(self): m = ConcreteModel() @@ -830,7 +909,7 @@ def test_referent(self): varlist = [m.v2[1, 'a'], m.v2[1, 'b']] vardict = { - 0: m.v0, + 0: m.v0, 1: m.v2[1, 'a'], 2: m.v2[2, 'a'], 3: m.v2[3, 'a'], @@ -856,5 +935,100 @@ def test_referent(self): dict_ref = Reference(vardict) self.assertIs(dict_ref.referent, vardict) + def test_UnknownSetDimen(self): + # Replicate the bug reported in #1928 + m = ConcreteModel() + m.thinga = Set(initialize=['e1', 'e2', 'e3']) + m.thingb = Set(initialize=[]) + m.v = Var(m.thinga | m.thingb) + self.assertIs(m.v.dim(), UnknownSetDimen) + with self.assertRaisesRegex( + IndexError, + 'Slicing components relies on knowing the underlying ' + 'set dimensionality'): + Reference(m.v) + + def test_contains_with_nonflattened(self): + # test issue #1800 + _old_flatten = normalize_index.flatten + try: + normalize_index.flatten = False + m = ConcreteModel() + m.d1 = Set(initialize=[1,2]) + m.d2 = Set(initialize=[('a', 1), ('b', 2)]) + m.v = Var(m.d2, m.d1) + m.ref = Reference(m.v[:,1]) + self.assertIn(('a', 1), m.ref) + self.assertNotIn(('a', 10), m.ref) + finally: + normalize_index.flatten = _old_flatten + + def test_pprint_nonfinite_sets(self): + # test issue #2039 + self.maxDiff = None + m = ConcreteModel() + m.v = Var(NonNegativeIntegers, dense=False) + m.ref = Reference(m.v) + buf = StringIO() + m.pprint(ostream=buf) + self.assertEqual(buf.getvalue().strip(), """ +1 Var Declarations + v : Size=0, Index=NonNegativeIntegers + Key : Lower : Value : Upper : Fixed : Stale : Domain + +1 IndexedComponent Declarations + ref : Size=0, Index=ref_index, ReferenceTo=v + Key : Object + +1 
SetOf Declarations + ref_index : Dimen=0, Size=0, Bounds=(None, None) + Key : Ordered : Members + None : False : ReferenceSet(v[...]) + +3 Declarations: v ref_index ref +""".strip()) + + m.v[3] + m.ref[5] + buf = StringIO() + m.pprint(ostream=buf) + self.assertEqual(buf.getvalue().strip(), """ +1 Var Declarations + v : Size=2, Index=NonNegativeIntegers + Key : Lower : Value : Upper : Fixed : Stale : Domain + 3 : None : None : None : False : True : Reals + 5 : None : None : None : False : True : Reals + +1 IndexedComponent Declarations + ref : Size=2, Index=ref_index, ReferenceTo=v + Key : Object + 3 : + 5 : + +1 SetOf Declarations + ref_index : Dimen=1, Size=2, Bounds=(3, 5) + Key : Ordered : Members + None : False : ReferenceSet(v[...]) + +3 Declarations: v ref_index ref +""".strip()) + + def test_pprint_nested(self): + m = ConcreteModel() + @m.Block([1,2]) + def b(b,i): + b.x = Var([3,4], bounds=(i,None)) + m.r = Reference(m.b[:].x[:]) + buf = StringIO() + m.r.pprint(ostream=buf) + self.assertEqual(buf.getvalue().strip(), """ +r : Size=4, Index=r_index, ReferenceTo=b[:].x[:] + Key : Lower : Value : Upper : Fixed : Stale : Domain + (1, 3) : 1 : None : None : False : True : Reals + (1, 4) : 1 : None : None : False : True : Reals + (2, 3) : 2 : None : None : False : True : Reals + (2, 4) : 2 : None : None : False : True : Reals +""".strip()) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_set.py b/pyomo/core/tests/unit/test_set.py index ac85fe15115..6a9052e540e 100644 --- a/pyomo/core/tests/unit/test_set.py +++ b/pyomo/core/tests/unit/test_set.py @@ -39,7 +39,7 @@ Integers, PositiveIntegers, NegativeIntegers, NonNegativeIntegers, Set, - SetOf, OrderedSetOf, UnorderedSetOf, + SetOf, OrderedSetOf, FiniteSetOf, InfiniteSetOf, RangeSet, _FiniteRangeSetData, _InfiniteRangeSetData, FiniteScalarRangeSet, InfiniteScalarRangeSet, AbstractFiniteScalarRangeSet, @@ -829,9 +829,29 @@ def test_constructor(self): self.assertEqual(i, j) i = 
SetOf({1,2,3}) - self.assertIs(type(i), UnorderedSetOf) - j = UnorderedSetOf([1,2,3]) - self.assertIs(type(i), UnorderedSetOf) + self.assertIs(type(i), FiniteSetOf) + j = FiniteSetOf([1,2,3]) + self.assertIs(type(i), FiniteSetOf) + self.assertEqual(i, j) + + i = SetOf(NonNegativeReals) + self.assertIs(type(i), InfiniteSetOf) + j = InfiniteSetOf(NonNegativeReals) + self.assertIs(type(i), InfiniteSetOf) + self.assertEqual(i, j) + + i = SetOf(Binary) + self.assertIs(type(i), OrderedSetOf) + j = OrderedSetOf(Binary) + self.assertIs(type(i), OrderedSetOf) + self.assertEqual(i, j) + + I = Set(initialize={1,3,2}, ordered=False) + I.construct() + i = SetOf(I) + self.assertIs(type(i), FiniteSetOf) + j = FiniteSetOf(I) + self.assertIs(type(i), FiniteSetOf) self.assertEqual(i, j) i = RangeSet(3) @@ -1616,6 +1636,13 @@ def test_dimen(self): self.assertEqual(SetOf([(1,2),(2,3),(4,5)]).dimen, 2) self.assertEqual(SetOf([1,(2,3)]).dimen, None) + self.assertEqual(SetOf(Integers).dimen, 1) + self.assertEqual(SetOf(Binary).dimen, 1) + + m = ConcreteModel() + m.I = Set(initialize=[(1,2), (3,4)]) + self.assertEqual(SetOf(m.I).dimen, 2) + a = [1,2,3,'abc'] SetOf_a = SetOf(a) self.assertEqual(SetOf_a.dimen, 1) @@ -2857,7 +2884,7 @@ def test_domain_and_pprint(self): ref=""" J : Size=1, Index=None, Ordered=False Key : Dimen : Domain : Size : Members - None : 2 : Reals*I : Inf : <[None..None], ([1], [2], [3])> + None : 2 : Reals*I : Inf : <[-inf..inf], ([1], [2], [3])> """.strip() self.assertEqual(output.getvalue().strip(), ref) @@ -3530,11 +3557,22 @@ def test_scalar_set_initialize_and_iterate(self): m = ConcreteModel() m.I = Set(initialize=(1,3,2,4)) + self.assertTrue(m.I._init_values.constant()) self.assertEqual(list(m.I), [1,3,2,4]) self.assertEqual(list(reversed(m.I)), [4,2,3,1]) self.assertEqual(m.I.data(), (1,3,2,4)) self.assertEqual(m.I.dimen, 1) + m = ConcreteModel() + with self.assertRaisesRegexp( + ValueError, 'Set rule or initializer returned None'): + m.I = 
Set(initialize=lambda m: None, dimen=2) + self.assertTrue(m.I._init_values.constant()) + self.assertEqual(list(m.I), []) + self.assertEqual(list(reversed(m.I)), []) + self.assertEqual(m.I.data(), ()) + self.assertIs(m.I.dimen, 2) + def I_init(m): yield 1 yield 3 @@ -3613,6 +3651,13 @@ def I_init(m): "of type int.") self.assertIn(ref, output.getvalue()) + def test_scalar_indexed_api(self): + m = ConcreteModel() + m.I = Set(initialize=range(3)) + self.assertEqual(list(m.I.keys()), [None]) + self.assertEqual(list(m.I.values()), [m.I]) + self.assertEqual(list(m.I.items()), [(None, m.I)]) + def test_insertion_deletion(self): def _verify(_s, _l): self.assertTrue(_s.isordered()) @@ -4230,7 +4275,7 @@ def myFcn(x): None : 2 : Any : 2 : {(3, 4), (1, 2)} M : Size=1, Index=None, Ordered=False Key : Dimen : Domain : Size : Members - None : 1 : Reals - M_index_1 : Inf : ([None..0) | (0..None]) + None : 1 : Reals - M_index_1 : Inf : ([-inf..0) | (0..inf]) N : Size=1, Index=None, Ordered=False Key : Dimen : Domain : Size : Members None : 1 : Integers - Reals : Inf : [] diff --git a/pyomo/core/tests/unit/test_sets.py b/pyomo/core/tests/unit/test_sets.py index 831a1f9b0f2..4a228e9ba3d 100644 --- a/pyomo/core/tests/unit/test_sets.py +++ b/pyomo/core/tests/unit/test_sets.py @@ -3292,8 +3292,8 @@ def test_xor(self): self.assertNotIn(1, X) with self.assertRaisesRegex( RangeDifferenceError, r"We do not support subtracting an " - r"infinite discrete range \[0:None\] from an infinite " - r"continuous range \[None..None\]"): + r"infinite discrete range \[0:inf\] from an infinite " + r"continuous range \[-inf..inf\]"): X < Reals # In the set rewrite, the following now works! 
# try: @@ -3325,8 +3325,8 @@ def test_sub(self): self.assertNotIn(1, X) with self.assertRaisesRegex( RangeDifferenceError, r"We do not support subtracting an " - r"infinite discrete range \[0:None\] from an infinite " - r"continuous range \[None..None\]"): + r"infinite discrete range \[0:inf\] from an infinite " + r"continuous range \[-inf..inf\]"): X < Reals # In the set rewrite, the following now works! # try: diff --git a/pyomo/core/tests/unit/test_units.py b/pyomo/core/tests/unit/test_units.py index 2b096882c5b..c132142cd01 100644 --- a/pyomo/core/tests/unit/test_units.py +++ b/pyomo/core/tests/unit/test_units.py @@ -20,11 +20,14 @@ tanh, asinh, acosh, atanh, ceil, floor, ) from pyomo.common.log import LoggingIntercept -from pyomo.util.check_units import assert_units_consistent, check_units_equivalent +from pyomo.util.check_units import ( + assert_units_consistent, check_units_equivalent, +) from pyomo.core.expr import inequality import pyomo.core.expr.current as EXPR from pyomo.core.base.units_container import ( - pint_available, InconsistentUnitsError, UnitsError, PyomoUnitsContainer, + pint_available, pint_module, _DeferredUnitsSingleton, + InconsistentUnitsError, UnitsError, PyomoUnitsContainer, ) from io import StringIO @@ -34,6 +37,33 @@ def python_callback_function(arg1, arg2): @unittest.skipIf(not pint_available, 'Testing units requires pint') class TestPyomoUnit(unittest.TestCase): + def test_container_constructor(self): + # Custom pint registry: + um0 = PyomoUnitsContainer(None) + self.assertIsNone(um0.pint_registry) + self.assertIsNone(um0._pint_dimensionless) + um1 = PyomoUnitsContainer() + self.assertIsNotNone(um1.pint_registry) + self.assertIsNotNone(um1._pint_dimensionless) + with self.assertRaisesRegex( + ValueError, + 'Cannot operate with Unit and Unit of different registries'): + self.assertEqual(um1._pint_dimensionless, units._pint_dimensionless) + self.assertIsNot(um1.pint_registry, units.pint_registry) + um2 = 
PyomoUnitsContainer(pint_module.UnitRegistry()) + self.assertIsNotNone(um2.pint_registry) + self.assertIsNotNone(um2._pint_dimensionless) + with self.assertRaisesRegex( + ValueError, + 'Cannot operate with Unit and Unit of different registries'): + self.assertEqual(um2._pint_dimensionless, units._pint_dimensionless) + self.assertIsNot(um2.pint_registry, units.pint_registry) + self.assertIsNot(um2.pint_registry, um1.pint_registry) + + um3 = PyomoUnitsContainer(units.pint_registry) + self.assertIs(um3.pint_registry, units.pint_registry) + self.assertEqual(um3._pint_dimensionless, units._pint_dimensionless) + def test_PyomoUnit_NumericValueMethods(self): m = ConcreteModel() uc = units @@ -584,7 +614,7 @@ def test_pickle(self): self.assertEqual(base.getvalue(), test.getvalue()) # Test pickling a custom units manager - um = PyomoUnitsContainer() + um = PyomoUnitsContainer(pint_module.UnitRegistry()) m = ConcreteModel() m.x = Var(units=um.kg) m.c = Constraint(expr=m.x**2 <= 10*um.kg**2) @@ -610,5 +640,28 @@ def test_pickle(self): i.pprint(test) self.assertEqual(base.getvalue(), test.getvalue()) + def test_set_pint_registry(self): + um = _DeferredUnitsSingleton() + pint_reg = pint_module.UnitRegistry() + # Test we can (silently) set the registry if it is the first + # thing we do + with LoggingIntercept() as LOG: + um.set_pint_registry(pint_reg) + self.assertEqual(LOG.getvalue(), "") + self.assertIs(um.pint_registry, pint_reg) + # Test that a no-op set is silent + with LoggingIntercept() as LOG: + um.set_pint_registry(pint_reg) + self.assertEqual(LOG.getvalue(), "") + # Test that changing the registry generates a warning + with LoggingIntercept() as LOG: + um.set_pint_registry(pint_module.UnitRegistry()) + self.assertIn( + "Changing the pint registry used by the Pyomo Units " + "system after the PyomoUnitsContainer was constructed", + LOG.getvalue() + ) + + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_visitor.py 
b/pyomo/core/tests/unit/test_visitor.py index 180139345db..16d5d25ea95 100644 --- a/pyomo/core/tests/unit/test_visitor.py +++ b/pyomo/core/tests/unit/test_visitor.py @@ -520,7 +520,7 @@ def test_replacement_walker0(self): class ReplacementWalkerTest3(ExpressionReplacementVisitor): def __init__(self, model): - ExpressionReplacementVisitor.__init__(self) + super().__init__(remove_named_expressions=False) self.model = model def visiting_potential_leaf(self, node): @@ -596,8 +596,8 @@ def test_replacement_walker5(self): e = M.z*M.x walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(e.__class__ is MonomialTermExpression) - self.assertTrue(f.__class__ is ProductExpression) + self.assertIs(e.__class__, MonomialTermExpression) + self.assertIs(f.__class__, ProductExpression) self.assertTrue(f.arg(0).is_potentially_variable()) self.assertEqual("z*x", str(e)) self.assertEqual("2*w[1]*x", str(f)) @@ -661,14 +661,11 @@ def test_replacement_walker0(self): # class ReplacementWalker_ReplaceInternal(ExpressionReplacementVisitor): - def __init__(self): - ExpressionReplacementVisitor.__init__(self) - - def visit(self, node, values): + def exitNode(self, node, data): if type(node) == ProductExpression: - return sum(values) + return sum(data[1]) else: - return node + return super().exitNode(node, data) class WalkerTests_ReplaceInternal(unittest.TestCase): diff --git a/pyomo/core/util.py b/pyomo/core/util.py index d53cb13d014..3fe40e4b796 100644 --- a/pyomo/core/util.py +++ b/pyomo/core/util.py @@ -12,14 +12,14 @@ # Utility functions # -__all__ = ['sum_product', 'summation', 'dot_product', 'sequence', 'prod', 'quicksum'] +__all__ = ['sum_product', 'summation', 'dot_product', 'sequence', 'prod', 'quicksum', 'target_list'] from pyomo.core.expr.numvalue import native_numeric_types from pyomo.core.expr.numeric_expr import decompose_term from pyomo.core.expr import current as EXPR from pyomo.core.base.var import Var from pyomo.core.base.expression import 
Expression - +from pyomo.core.base.component import _ComponentBase def prod(terms): """ @@ -282,3 +282,21 @@ def xsequence(*args): remove_in='6.0') return sequence(*args) + +def target_list(x): + if isinstance(x, _ComponentBase): + return [ x ] + elif hasattr(x, '__iter__'): + ans = [] + for i in x: + if isinstance(i, _ComponentBase): + ans.append(i) + else: + raise ValueError( + "Expected Component or list of Components." + "\n\tReceived %s" % (type(i),)) + return ans + else: + raise ValueError( + "Expected Component or list of Components." + "\n\tReceived %s" % (type(x),)) diff --git a/pyomo/dae/flatten.py b/pyomo/dae/flatten.py index 9aa518a6b15..d754dd615c8 100644 --- a/pyomo/dae/flatten.py +++ b/pyomo/dae/flatten.py @@ -35,8 +35,10 @@ def get_slice_for_set(s): return (Ellipsis,) else: # Case for e.g. UnindexedComponent_set + # Should this be None or tuple()? RBP 202110 return None + class _NotAnIndex(object): """ `None` is a valid index, so we use a dummy class to @@ -45,6 +47,7 @@ class _NotAnIndex(object): """ pass + def _fill_indices(filled_index, index): """ `filled_index` is a list with some entries `_NotAnIndex`. @@ -66,6 +69,7 @@ def _fill_indices(filled_index, index): else: return filled_index + def _fill_indices_from_product(partial_index_list, product): """ `partial_index_list` is a list of indices, each corresponding to a @@ -108,7 +112,7 @@ def _fill_indices_from_product(partial_index_list, product): # because `comp` was created with two set arguments, the first # of which was already a product. 
- yield _fill_indices(filled_index, index) + yield (index, _fill_indices(filled_index, index)) normalize_index.flatten = False # Want to get the unflattened factors when we advance the @@ -117,6 +121,118 @@ def _fill_indices_from_product(partial_index_list, product): # Reset `normalize_index.flatten` normalize_index.flatten = _normalize_index_flatten + +def slice_component_along_sets( + component, sets, context_slice=None, normalize=None, + ): + """ + This function generates all possible slices of the provided component + along the provided sets. That is, it will iterate over the component's + other indexing sets and, for each index, yield a slice along the + sets specified in the call signature. + + Arguments + --------- + component: Component + The component whose slices will be yielded + sets: ComponentSet + ComponentSet of Pyomo sets that will be sliced along + context_slice: IndexedComponent_slice + If provided, instead of creating a new slice, we will extend this + one with appropriate getattr and getitem calls. + normalize: Bool + If False, the returned index (from the product of "other sets") + is not normalized, regardless of the value of normalize_index.flatten. + This is necessary to use this index with _fill_indices. + + Yields + ------ + tuple + The first entry is the index in the product of "other sets" + corresponding to the slice, and the second entry is the slice + at that index. + + """ + set_set = ComponentSet(sets) + subsets = list(component.index_set().subsets()) + temp_idx = [get_slice_for_set(s) if s in set_set else _NotAnIndex + for s in subsets] + other_sets = [s for s in subsets if s not in set_set] + + if context_slice is None: + base_component = component + else: + base_component = getattr(context_slice, component.local_name) + + if component.is_indexed(): + # We need to iterate over sets that aren't sliced + # `c.is_indexed()` covers the case when UnindexedComponent_set + # is in `other_sets`. 
+ if other_sets: + cross_prod = other_sets[0].cross(*other_sets[1:]) + else: + # If we are only indexed by sets we need to slice, we + # should just use tuple(temp_idx) as our index. We spoof + # a cross_prod here so we don't have to repeat the try/except + # logic below in a separate branch. An empty tuple is the right + # singleton to work in the embedded call to _fill_indices. + cross_prod = [tuple()] + + for prod_index, new_index in _fill_indices_from_product( + temp_idx, + cross_prod, + ): + try: + if normalize_index.flatten: + # This index is always normalized if normalize_index.flatten + # is True. I have not encountered a situation where + # "denormalization" makes sense here. + # As normalization is also done in the IndexedComponent, + # normalizing here primarily just affects what the resulting + # slice "looks like." E.g. slice(None) vs (slice(None),). + # This has implications for generating CUIDs from these + # slices, where we would like consistency in the string + # representation. + # TODO: Should CUID normalize (slice(None),)? + new_index = normalize_index(new_index) + c_slice = base_component[new_index] + if type(c_slice) is IndexedComponent_slice: + # This is just to make sure we do not have an + # empty slice. + # + # Note that c_slice is not necessarily a slice. + # We enter this loop even if no sets need slicing. + temp_slice = c_slice.duplicate() + next(iter(temp_slice)) + if ((normalize is None and normalize_index.flatten) + or normalize): + # Most users probably want this index to be normalized, + # so they can more conveniently use it as a key in a + # mapping. (E.g. they will get "a" as opposed to ("a",).) + # However, to use it in the calling routine + # generate_sliced_components, we need this index to not + # have been normalized, so that indices are tuples, + # partitioned according to their "factor sets." + # This is why we allow the argument normalize=False to + # override normalize_index.flatten. 
+ prod_index = normalize_index(prod_index) + yield prod_index, c_slice + except StopIteration: + # We have an empty slice for some reason, e.g. + # a coordinate of `new_index` from the cross + # product was skipped in the original component. + pass + except KeyError: + # We are creating scalar components from a product of + # sets. Components may be undefined for certain indices. + # We want to simply skip that index and move on. + pass + else: + # Component is a data object + c_slice = base_component + yield (), c_slice + + def generate_sliced_components(b, index_stack, slice_, sets, ctype, index_map): """ Recursively generate sliced components of a block and its subblocks, along @@ -138,155 +254,77 @@ def generate_sliced_components(b, index_stack, slice_, sets, ctype, index_map): `index_map` is potentially a map from each set in `sets` to a "representative index" to use when descending into subblocks. """ + if type(slice_) is IndexedComponent_slice: + context_slice = slice_.duplicate() + else: + context_slice = None + + # Looks for components indexed by these sets immediately in our block for c in b.component_objects(ctype, descend_into=False): subsets = list(c.index_set().subsets()) - temp_idx = [get_slice_for_set(s) if s in sets else _NotAnIndex - for s in subsets] new_sets = [s for s in subsets if s in sets] - other_sets = [s for s in subsets if s not in sets] + # Extend our "index stack" sliced_sets = index_stack + new_sets - # We have extended our "index stack;" now we must extend - # our slice. - - if other_sets and c.is_indexed(): - # We need to iterate over sets that aren't sliced - # `c.is_indexed()` covers the case when UnindexedComponent_set - # is in `other_sets`. - cross_prod = other_sets[0].cross(*other_sets[1:]) - # The original implementation was to pick an arbitrary index - # from the "flattened sets" and slice all the other indices. 
- # - # This implementation avoids issues about selecting an arbitrary - # index, but requires duplicating some of the slice-iter logic below - - for new_index in _fill_indices_from_product(temp_idx, cross_prod): - try: - c_slice = getattr(slice_, c.local_name)[new_index] - if type(c_slice) is IndexedComponent_slice: - # This is just to make sure we do not have an - # empty slice. - temp_slice = c_slice.duplicate() - next(iter(temp_slice)) - yield sliced_sets, c_slice - except StopIteration: - # We have an empty slice for some reason, e.g. - # a coordinate of `new_index` from the cross - # product was skipped in the original component. - pass - except KeyError: - # We are creating scalar components from a product of - # sets. Components may be undefined for certain indices. - # We want to simply skip that index and move on. - pass - else: - # `c` is indexed only by sets we would like to slice. - # Slice the component if it is indexed so a future getattr - # will be valid. - try: - if c.is_indexed(): - c_slice = getattr(slice_, c.local_name)[...] - # Make sure this slice is not empty... - next(iter(c_slice.duplicate())) - else: - c_slice = getattr(slice_, c.local_name) - yield sliced_sets, c_slice - except StopIteration: - pass + # Extend our slice with this component + for idx, new_slice in slice_component_along_sets( + c, sets, context_slice=context_slice, normalize=False + ): + yield sliced_sets, new_slice # We now descend into subblocks for sub in b.component_objects(Block, descend_into=False): subsets = list(sub.index_set().subsets()) - temp_idx = [get_slice_for_set(s) if s in sets else _NotAnIndex - for s in subsets] new_sets = [s for s in subsets if s in sets] - other_sets = [s for s in subsets if s not in sets] - - # For each set we are slicing, if the user specified an index, put it - # here. Otherwise, slice the set and we will call next(iter(slice_)) - # once the full index is constructed. 
- descend_index_sliced_sets = tuple(index_map[s] if s in index_map else - get_slice_for_set(s) for s in new_sets) # Extend stack with new matched indices. index_stack.extend(new_sets) - if other_sets and sub.is_indexed(): - cross_prod = other_sets[0].cross(*other_sets[1:]) - - for new_index in _fill_indices_from_product(temp_idx, cross_prod): - try: - sub_slice = getattr(slice_, sub.local_name)[new_index] - - # Now we need to pick a block data to descend into - if new_sets: - # We sliced some sets, and need to fill in any - # indices provided by the user - - # `new_index` could be a scalar, for compatibility with - # `normalize_index.flatten==False`. - tupl_new_index = (new_index,) if type(new_index) \ - is not tuple else new_index - # Extract the indices of "other sets": - incomplete_descend_index = list( - idx if subset not in sets else _NotAnIndex - for idx, subset in zip(tupl_new_index, subsets) - ) - # Fill rest of the entries with specified indices for - # sliced sets: - descend_index = _fill_indices(incomplete_descend_index, - descend_index_sliced_sets) - if len(descend_index) == 1: - descend_index = descend_index[0] - - descend_slice = sub[descend_index] - data = descend_slice if type(descend_slice) is not \ - IndexedComponent_slice else next(iter(descend_slice)) - # If the user has supplied enough indices that we can - # descend into a concrete component, we do so. Otherwise - # we use the user's indices, slice the rest, and advance - # the iterator. - else: - # All indices are specified - data = sub[new_index] - for st, v in generate_sliced_components(data, index_stack, - sub_slice, sets, ctype, index_map): - yield tuple(st), v - except StopIteration: - # Empty slice due to "skipped" index in subblock. - pass - #except KeyError: - # # Trying to access a concrete data object for a "skipped" index. - # # I have been unable to produce this behavior for blocks, - # # but it may be possible somehow. 
- # pass - else: - # Either `sub` is a simple component, or we are slicing - # all of its sets. What is common here is that we don't need - # to iterate over "other sets." - try: - if sub.is_indexed(): - sub_slice = getattr(slice_, sub.local_name)[...] - # We have to get the block data object. - descend_slice = sub[descend_index_sliced_sets] - data = descend_slice if type(descend_slice) is not \ - IndexedComponent_slice else next(iter(descend_slice)) - else: - # `sub` is a simple component - sub_slice = getattr(slice_, sub.local_name) - data = sub - for st, v in generate_sliced_components(data, index_stack, - sub_slice, sets, ctype, index_map): - yield tuple(st), v - except StopIteration: - # We encountered an empty slice. This should be very rare. - pass + # Need to construct an index to descend into for each slice-of-block + # we are about generate. + given_descend_idx = [_NotAnIndex for _ in subsets] + for i, s in enumerate(subsets): + if s in index_map: + # Use a user-given index if available + given_descend_idx[i] = index_map[s] + elif s in sets: + # Otherwise use a slice. We will advanced the slice iter + # to try to get a concrete component from this slice. + given_descend_idx[i] = get_slice_for_set(s) + + # Generate slices from this sub-block + for idx, new_slice in slice_component_along_sets( + sub, sets, context_slice=context_slice, normalize=False + ): + if sub.is_indexed(): + # fill any remaining placeholders with the "index" of our slice + descend_idx = _fill_indices(list(given_descend_idx), idx) + # create a slice-or-data object + descend_data = sub[descend_idx] + if type(descend_data) is IndexedComponent_slice: + try: + # Attempt to find a data object matching this slice + descend_data = next(iter(descend_data)) + except StopIteration: + # For this particular idx (and given indices), no + # block data object exists to descend into. + # Not sure if we should raise an error here... 
-RBP + continue + else: + descend_data = sub + + # Recursively generate sliced components from this data object + for st, v in generate_sliced_components( + descend_data, index_stack, new_slice, sets, ctype, index_map + ): + yield tuple(st), v # pop the index sets of the block whose sub-components # we just finished iterating over. for _ in new_sets: index_stack.pop() + def flatten_components_along_sets(m, sets, ctype, indices=None): """ This function iterates over components (recursively) contained @@ -312,7 +350,12 @@ def flatten_components_along_sets(m, sets, ctype, indices=None): index_map = indices else: index_map = ComponentMap(zip(sets, indices)) - + for s, idx in index_map.items(): + if not idx in s: + raise ValueError( + "%s is a bad index for set %s. \nPlease provide an index " + "that is in the set." % (idx, s.name) + ) index_stack = [] set_of_sets = ComponentSet(sets) @@ -356,6 +399,7 @@ def flatten_components_along_sets(m, sets, ctype, indices=None): # ^ These components are indexed by time and space return sets_list, comps_list + def flatten_dae_components(model, time, ctype, indices=None): target = ComponentSet((time,)) sets_list, comps_list = flatten_components_along_sets(model, target, ctype, diff --git a/pyomo/dae/simulator.py b/pyomo/dae/simulator.py index 994e58afbe9..1dc3c1de6bf 100644 --- a/pyomo/dae/simulator.py +++ b/pyomo/dae/simulator.py @@ -208,24 +208,27 @@ class Pyomo2Scipy_Visitor(EXPR.ExpressionReplacementVisitor): """ def __init__(self, templatemap): - super(Pyomo2Scipy_Visitor, self).__init__() + # Note because we are creating a "nonPyomo" expression tree, we + # want to remove all Expression nodes (as opposed to replacing + # them in place) + super().__init__(descend_into_named_expressions=True, + remove_named_expressions=True) self.templatemap = templatemap - def visiting_potential_leaf(self, node): - if type(node) is IndexTemplate: - return True, node + def beforeChild(self, node, child, child_idx): + if type(child) is 
IndexTemplate: + return False, child - if type(node) is EXPR.GetItemExpression: - _id = _GetItemIndexer(node) + if type(child) is EXPR.GetItemExpression: + _id = _GetItemIndexer(child) if _id not in self.templatemap: self.templatemap[_id] = Param(mutable=True) self.templatemap[_id].construct() self.templatemap[_id]._name = "%s[%s]" % ( _id.base.name, ','.join(str(x) for x in _id.args)) - return True, self.templatemap[_id] + return False, self.templatemap[_id] - return super( - Pyomo2Scipy_Visitor, self).visiting_potential_leaf(node) + return super().beforeChild(node, child, child_idx) def convert_pyomo2scipy(expr, templatemap): @@ -245,7 +248,7 @@ def convert_pyomo2scipy(expr, templatemap): raise DAE_Error("SciPy is not installed. Cannot substitute SciPy " "intrinsic functions.") visitor = Pyomo2Scipy_Visitor(templatemap) - return visitor.dfs_postorder_stack(expr) + return visitor.walk_expression(expr) class Substitute_Pyomo2Casadi_Visitor(EXPR.ExpressionReplacementVisitor): @@ -260,34 +263,37 @@ class Substitute_Pyomo2Casadi_Visitor(EXPR.ExpressionReplacementVisitor): """ def __init__(self, templatemap): - super(Substitute_Pyomo2Casadi_Visitor, self).__init__() + # Note because we are creating a "nonPyomo" expression tree, we + # want to remove all Expression nodes (as opposed to replacing + # them in place) + super().__init__(descend_into_named_expressions=True, + remove_named_expressions=True) self.templatemap = templatemap - def visit(self, node, values): + def exitNode(self, node, data): """Replace a node if it's a unary function.""" - if type(node) is EXPR.UnaryFunctionExpression: + ans = super().exitNode(node, data) + if type(ans) is EXPR.UnaryFunctionExpression: return EXPR.UnaryFunctionExpression( - values[0], - node._name, - casadi_intrinsic[node._name]) - return node + ans.args, + ans.getname(), + casadi_intrinsic[ans.getname()]) + return ans - def visiting_potential_leaf(self, node): + def beforeChild(self, node, child, child_idx): """Replace a node if 
it's a _GetItemExpression.""" - if type(node) is EXPR.GetItemExpression: - _id = _GetItemIndexer(node) + if type(child) is EXPR.GetItemExpression: + _id = _GetItemIndexer(child) if _id not in self.templatemap: name = "%s[%s]" % ( _id.base.name, ','.join(str(x) for x in _id.args)) self.templatemap[_id] = casadi.SX.sym(name) - return True, self.templatemap[_id] + return False, self.templatemap[_id] - if type(node) in native_numeric_types or \ - not node.is_expression_type() or \ - type(node) is IndexTemplate: - return True, node + elif type(child) is IndexTemplate: + return False, child - return False, None + return super().beforeChild(node, child, child_idx) class Convert_Pyomo2Casadi_Visitor(EXPR.ExpressionValueVisitor): @@ -345,7 +351,7 @@ def substitute_pyomo2casadi(expr, templatemap): raise DAE_Error("CASADI is not installed. Cannot substitute CasADi " "variables and intrinsic functions.") visitor = Substitute_Pyomo2Casadi_Visitor(templatemap) - return visitor.dfs_postorder_stack(expr) + return visitor.walk_expression(expr) def convert_pyomo2casadi(expr): diff --git a/pyomo/dae/tests/test_flatten.py b/pyomo/dae/tests/test_flatten.py index 531f3540474..05344213d8c 100644 --- a/pyomo/dae/tests/test_flatten.py +++ b/pyomo/dae/tests/test_flatten.py @@ -9,7 +9,15 @@ # ___________________________________________________________________________ import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, Block, Var, Reference, Set, Constraint +from pyomo.environ import ( + ConcreteModel, + Block, + Var, + Reference, + Set, + Constraint, + ComponentUID, + ) from pyomo.dae import ContinuousSet from pyomo.common.collections import ComponentSet, ComponentMap from pyomo.core.base.indexed_component import ( @@ -19,6 +27,7 @@ from pyomo.dae.flatten import ( flatten_dae_components, flatten_components_along_sets, + slice_component_along_sets, ) class TestAssumedBehavior(unittest.TestCase): @@ -1166,6 +1175,307 @@ def test_model4_xyz(self): raise 
RuntimeError() +class TestCUID(unittest.TestCase): + """ + When returning indexed components, the flattener returns references. + Unless these are subsequently attached to a model (and maybe even if they + are), these references will not have useful names. Creating a CUID + from the referent attribute of these references is the preferred way + to generate these names, because these names will be unique if these + references are generated multiple times. + + However, when referring to a slice, a CUID is not truly unique, as + "m.b[*].v" is often equivalent to "m.b[**].v". + Our convention is to always use constant-dimension slices ("*") + unless we are slicing a component with a None-dimensioned set. + + These tests assert that we use the correct convention. + + """ + + # 3 cases to cover: + # Components indexed by no sets we're interested in + # Components indexed by some sets we're interested in + # Components indexed by all sets we're interested in + + def test_cuids_no_sets_no_subblocks(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=["a", "b"]) + m.s3 = Set(initialize=[4, 5, 6]) + m.s4 = Set(initialize=["c", "d"]) + m.v1 = Var(m.s3, m.s4) + + pred_cuid_set = { + "v1[4,c]", + "v1[4,d]", + "v1[5,c]", + "v1[5,d]", + "v1[6,c]", + "v1[6,d]", + } + + sets = (m.s1, m.s2) + ctype = Var + sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) + for sets, comps in zip(sets_list, comps_list): + if len(sets) == 1 and sets[0] is UnindexedComponent_set: + self.assertEqual(len(comps), len(m.s1)*len(m.s2)) + cuid_set = set(str(ComponentUID(comp)) for comp in comps) + self.assertEqual(cuid_set, pred_cuid_set) + else: + raise RuntimeError() + + def test_cuids_some_sets_no_subblocks(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=["a", "b"]) + m.s3 = Set(initialize=[4, 5, 6]) + m.s4 = Set(initialize=["c", "d"]) + m.v1 = Var(m.s1, m.s4) + + pred_cuid_set = { + "v1[1,*]", + "v1[2,*]", + 
"v1[3,*]", + } + + sets = (m.s3, m.s4) + ctype = Var + sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) + for sets, comps in zip(sets_list, comps_list): + if len(sets) == 1 and sets[0] is m.s4: + self.assertEqual(len(comps), len(m.s1)) + cuid_set = set(str(ComponentUID(comp.referent)) + for comp in comps) + self.assertEqual(cuid_set, pred_cuid_set) + else: + raise RuntimeError() + + def test_cuids_all_sets_no_subblocks(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=["a", "b"]) + m.s3 = Set(initialize=[4, 5, 6]) + m.s4 = Set(initialize=["c", "d"]) + m.v1 = Var(m.s3, m.s4) + + pred_cuid_set = { + "v1[*,*]", + } + + sets = (m.s3, m.s4) + ctype = Var + sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) + for sets, comps in zip(sets_list, comps_list): + if len(sets) == 2 and sets[0] is m.s3 and sets[1] is m.s4: + self.assertEqual(len(comps), 1) + cuid_set = set(str(ComponentUID(comp.referent)) + for comp in comps) + self.assertEqual(cuid_set, pred_cuid_set) + else: + raise RuntimeError() + + def test_cuid_one_set_no_subblocks(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.v = Var(m.s1) + + pred_cuid_set = { + "v[*]", + } + + sets = (m.s1,) + ctype = Var + sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(comps_list), 1) + for sets, comps in zip(sets_list, comps_list): + if len(sets) == 1 and sets[0] is m.s1: + self.assertEqual(len(comps), 1) + cuid_set = set(str(ComponentUID(comp.referent)) + for comp in comps) + self.assertEqual(cuid_set, pred_cuid_set) + else: + raise RuntimeError() + + def test_cuids_no_sets_with_subblocks(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=["a", "b"]) + m.s3 = Set(initialize=[4, 5, 6]) + m.s4 = Set(initialize=["c", "d"]) + def block_rule(b, i, j): + b.v = Var() + m.b = Block(m.s1, m.s2, rule=block_rule) + + 
pred_cuid_set = { + "b[1,a].v", + "b[1,b].v", + "b[2,a].v", + "b[2,b].v", + "b[3,a].v", + "b[3,b].v", + } + + sets = (m.s3, m.s4) + ctype = Var + sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(comps_list), 1) + for sets, comps in zip(sets_list, comps_list): + if len(sets) == 1 and sets[0] is UnindexedComponent_set: + self.assertEqual(len(comps), len(m.s1)*len(m.s2)) + cuid_set = set(str(ComponentUID(comp)) for comp in comps) + self.assertEqual(cuid_set, pred_cuid_set) + else: + raise RuntimeError() + + def test_cuids_some_sets_with_subblocks(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=["a", "b"]) + m.s3 = Set(initialize=[4, 5, 6]) + m.s4 = Set(initialize=["c", "d"]) + def block_rule(b, i, j): + b.v = Var() + m.b = Block(m.s1, m.s2, rule=block_rule) + + pred_cuid_set = { + "b[*,a].v", + "b[*,b].v", + } + + sets = (m.s1, m.s4) + ctype = Var + sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(comps_list), 1) + for sets, comps in zip(sets_list, comps_list): + if len(sets) == 1 and sets[0] is m.s1: + self.assertEqual(len(comps), len(m.s2)) + cuid_set = set(str(ComponentUID(comp.referent)) + for comp in comps) + self.assertEqual(cuid_set, pred_cuid_set) + else: + raise RuntimeError() + + def test_cuids_all_sets_with_subblocks(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=["a", "b"]) + m.s3 = Set(initialize=[4, 5, 6]) + m.s4 = Set(initialize=["c", "d"]) + def block_rule(b, i, j): + b.v = Var() + m.b = Block(m.s1, m.s2, rule=block_rule) + + pred_cuid_set = { + "b[*,*].v", + } + + sets = (m.s1, m.s2) + ctype = Var + sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(comps_list), 1) + for sets, comps in zip(sets_list, comps_list): + if len(sets) == 2 
and sets[0] is m.s1 and sets[1] is m.s2: + self.assertEqual(len(comps), 1) + cuid_set = set(str(ComponentUID(comp.referent)) + for comp in comps) + self.assertEqual(cuid_set, pred_cuid_set) + else: + raise RuntimeError() + + def test_cuids_multiple_slices(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + def block_rule(b, i): + b.v = Var(m.s1) + m.b = Block(m.s1, rule=block_rule) + + pred_cuid_set = {"b[*].v[*]"} + sets = (m.s1,) + ctype = Var + sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(comps_list), 1) + for sets, comps in zip(sets_list, comps_list): + if len(sets) == 2 and sets[0] is m.s1 and sets[1] is m.s1: + self.assertEqual(len(comps), 1) + cuid_set = set(str(ComponentUID(comp.referent)) + for comp in comps) + self.assertEqual(cuid_set, pred_cuid_set) + else: + raise RuntimeError() + + +class TestSliceComponent(TestFlatten): + + def make_model(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=["a", "b"]) + m.s3 = Set(initialize=[4, 5, 6]) + m.s4 = Set(initialize=["c", "d"]) + m.v12 = Var(m.s1, m.s2) + m.v124 = Var(m.s1, m.s2, m.s4) + return m + + def test_no_sets(self): + m = self.make_model() + var = m.v12 + sets = (m.s3, m.s4) + ref_data = {self._hashRef(v) for v in m.v12.values()} + + slices = [slice_ for _, slice_ in slice_component_along_sets(var, sets)] + self.assertEqual(len(slices), len(ref_data)) + self.assertEqual(len(slices), len(m.s1)*len(m.s2)) + for slice_ in slices: + self.assertIn(self._hashRef(slice_), ref_data) + + def test_one_set(self): + m = self.make_model() + var = m.v124 + sets = (m.s1, m.s3) + ref_data = { + self._hashRef(Reference(m.v124[:, i, j])) for i, j in m.s2*m.s4 + } + + slices = [s for _, s in slice_component_along_sets(var, sets)] + self.assertEqual(len(slices), len(ref_data)) + self.assertEqual(len(slices), len(m.s2)*len(m.s4)) + for slice_ in slices: + 
self.assertIn(self._hashRef(Reference(slice_)), ref_data) + + def test_some_sets(self): + m = self.make_model() + var = m.v124 + sets = (m.s1, m.s3) + ref_data = { + self._hashRef(Reference(m.v124[:, i, j])) for i, j in m.s2*m.s4 + } + + slices = [s for _, s in slice_component_along_sets(var, sets)] + self.assertEqual(len(slices), len(ref_data)) + self.assertEqual(len(slices), len(m.s2)*len(m.s4)) + for slice_ in slices: + self.assertIn(self._hashRef(Reference(slice_)), ref_data) + + def test_all_sets(self): + m = self.make_model() + var = m.v12 + sets = (m.s1, m.s2) + ref_data = {self._hashRef(Reference(m.v12[:, :]))} + + slices = [s for _, s in slice_component_along_sets(var, sets)] + self.assertEqual(len(slices), len(ref_data)) + self.assertEqual(len(slices), 1) + for slice_ in slices: + self.assertIn(self._hashRef(Reference(slice_)), ref_data) + + class TestExceptional(unittest.TestCase): """ These are the cases that motivate the try/excepts in the slice-checking @@ -1254,10 +1564,68 @@ def block_rule(b, i, j): subset_set = ComponentSet(m.b.index_set().subsets()) for s in sets: self.assertIn(s, subset_set) - + + def test_descend_stop_iteration(self): + """ + Even if we construct a non-empty slice, if we provide a bad + index to descend into, we can end up with no valid blocks + to descend into. Unclear whether we should raise an error here. 
+ """ + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=['a', 'b']) + m.v = Var(m.s1, m.s2) + + def b_rule(b, i, j): + b.v = Var() + m.b = Block(m.s1, m.s2, rule=b_rule) + + # 'b' will be a bad index to descend into + for i in m.s1: + del m.b[i, 'b'] + + with self.assertRaises(StopIteration): + next(iter(m.b[:, 'b'])) + + sets = (m.s1, m.s2) + ctype = Var + indices = ComponentMap([(m.s2, 'b')]) + sets_list, comps_list = flatten_components_along_sets( + m, sets, ctype, indices=indices, + ) + for sets, comps in zip(sets_list, comps_list): + # Here we just check that m.b[:,:].v was not encountered, + # because of our poor choice of "descend index" + if len(sets) == 2 and sets[0] is m.s1 and sets[1] is m.s2: + self.assertEqual(len(comps), 1) + self.assertEqual(str(ComponentUID(comps[0].referent)), "v[*,*]") + else: + raise RuntimeError() + + def test_bad_descend_index(self): + m = ConcreteModel() + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=['a', 'b']) + m.v = Var(m.s1, m.s2) + + def b_rule(b, i, j): + b.v = Var() + m.b = Block(m.s1, m.s2, rule=b_rule) + + sets = (m.s1, m.s2) + ctype = Var + # Here we accidentally provide an index for the wrong set. + indices = ComponentMap([(m.s1, 'b')]) + # Check that we fail gracefully instead of hitting the StopIteration + # checked by the above test. + with self.assertRaisesRegex(ValueError, "bad index"): + sets_list, comps_list = flatten_components_along_sets( + m, sets, ctype, indices=indices, + ) + def test_keyerror(self): """ - KeyErrors occur when we a component that we don't slice + KeyErrors occur when a component that we don't slice doesn't have data for some members of its indexing set. 
""" m = ConcreteModel() diff --git a/pyomo/dataportal/tests/test_dataportal.py b/pyomo/dataportal/tests/test_dataportal.py index fc6d7301093..97ad62aeaa1 100644 --- a/pyomo/dataportal/tests/test_dataportal.py +++ b/pyomo/dataportal/tests/test_dataportal.py @@ -699,7 +699,7 @@ def compare_data(self, name, file_suffix): def test_store_set1(self): # Write 1-D set model = ConcreteModel() - model.A = Set(initialize=set([1,3,5])) + model.A = Set(initialize=[1,3,5]) data = DataPortal() data.store(data=model.A, **self.create_write_options('set1')) self.compare_data('set1', self.suffix) @@ -707,7 +707,7 @@ def test_store_set1(self): def test_store_set1a(self): # Write 1-D set model = ConcreteModel() - model.A = Set(initialize=set([1,3,5])) + model.A = Set(initialize=[1,3,5]) data = DataPortal() data.store(data="A", model=model, **self.create_write_options('set1')) self.compare_data('set1', self.suffix) @@ -715,7 +715,7 @@ def test_store_set1a(self): def test_store_set2(self): # Write 2-D set model = ConcreteModel() - model.A = Set(initialize=set([(1,2),(3,4),(5,6)]), dimen=2) + model.A = Set(initialize=[(1,2),(3,4),(5,6)], dimen=2) data = DataPortal() data.store(data=model.A, **self.create_write_options('set2')) self.compare_data('set2', self.suffix) @@ -731,7 +731,7 @@ def test_store_param1(self): def test_store_param2(self): # Write 1-D param model = ConcreteModel() - model.A = Set(initialize=set([1,2,3])) + model.A = Set(initialize=[1,2,3]) model.p = Param(model.A, initialize={1:10, 2:20, 3:30}) data = DataPortal() data.store(data=model.p, **self.create_write_options('param2')) @@ -740,7 +740,7 @@ def test_store_param2(self): def test_store_param3(self): # Write 2-D params model = ConcreteModel() - model.A = Set(initialize=set([(1,2),(2,3),(3,4)]), dimen=2) + model.A = Set(initialize=[(1,2),(2,3),(3,4)], dimen=2) model.p = Param(model.A, initialize={(1,2):10, (2,3):20, (3,4):30}) model.q = Param(model.A, initialize={(1,2):11, (2,3):21, (3,4):31}) data = DataPortal() @@ 
-750,7 +750,7 @@ def test_store_param3(self): def test_store_param4(self): # Write 2-D params model = ConcreteModel() - model.A = Set(initialize=set([(1,2),(2,3),(3,4)]), dimen=2) + model.A = Set(initialize=[(1,2),(2,3),(3,4)], dimen=2) model.p = Param(model.A, initialize={(1,2):10, (2,3):20, (3,4):30}) model.q = Param(model.A, initialize={(1,2):11, (2,3):21, (3,4):31}) data = DataPortal() @@ -988,7 +988,7 @@ def test_store_set1(self): # Write 1-D set self.check_skiplist('store_set1') model = ConcreteModel() - model.A = Set(initialize=set([1,3,5])) + model.A = Set(initialize=[1,3,5]) data = DataPortal() data.store(set=model.A, **self.create_write_options('set1')) self.compare_data('set1', self.suffix) @@ -997,7 +997,7 @@ def test_store_set2(self): # Write 2-D set self.check_skiplist('store_set2') model = ConcreteModel() - model.A = Set(initialize=set([(1,2),(3,4),(5,6)]), dimen=2) + model.A = Set(initialize=[(1,2),(3,4),(5,6)], dimen=2) data = DataPortal() data.store(set=model.A, **self.create_write_options('set2')) self.compare_data('set2', self.suffix) @@ -1015,7 +1015,7 @@ def test_store_param2(self): # Write 1-D param self.check_skiplist('store_param2') model = ConcreteModel() - model.A = Set(initialize=set([1,2,3])) + model.A = Set(initialize=[1,2,3]) model.p = Param(model.A, initialize={1:10, 2:20, 3:30}) data = DataPortal() data.store(param=model.p, **self.create_write_options('param2')) @@ -1025,7 +1025,7 @@ def test_store_param3(self): # Write 2-D params self.check_skiplist('store_param3') model = ConcreteModel() - model.A = Set(initialize=set([(1,2),(2,3),(3,4)]), dimen=2) + model.A = Set(initialize=[(1,2),(2,3),(3,4)], dimen=2) model.p = Param(model.A, initialize={(1,2):10, (2,3):20, (3,4):30}) model.q = Param(model.A, initialize={(1,2):11, (2,3):21, (3,4):31}) data = DataPortal() @@ -1036,7 +1036,7 @@ def test_store_param4(self): # Write 2-D params self.check_skiplist('store_param4') model = ConcreteModel() - model.A = 
Set(initialize=set([(1,2),(2,3),(3,4)]), dimen=2) + model.A = Set(initialize=[(1,2),(2,3),(3,4)], dimen=2) model.p = Param(model.A, initialize={(1,2):10, (2,3):20, (3,4):30}) model.q = Param(model.A, initialize={(1,2):11, (2,3):21, (3,4):31}) data = DataPortal() diff --git a/pyomo/environ/tests/standalone_minimal_pyomo_driver.py b/pyomo/environ/tests/standalone_minimal_pyomo_driver.py index 00a90481084..42f3a1c7bf4 100644 --- a/pyomo/environ/tests/standalone_minimal_pyomo_driver.py +++ b/pyomo/environ/tests/standalone_minimal_pyomo_driver.py @@ -98,6 +98,9 @@ def run_writer_test(): def run_solverfactory_test(): skip_solvers = { 'py', + 'xpress', + '_xpress_shell', + '_mock_xpress', } with LoggingIntercept() as LOG, capture_output(capture_fd=True) as OUT: diff --git a/pyomo/gdp/plugins/bigm.py b/pyomo/gdp/plugins/bigm.py index 4df2c946f9d..ca5116abc39 100644 --- a/pyomo/gdp/plugins/bigm.py +++ b/pyomo/gdp/plugins/bigm.py @@ -26,12 +26,13 @@ from pyomo.core.base import Transformation, TransformationFactory, Reference import pyomo.core.expr.current as EXPR from pyomo.gdp import Disjunct, Disjunction, GDP_Error -from pyomo.gdp.util import ( _warn_for_active_logical_constraint, target_list, - is_child_of, get_src_disjunction, - get_src_constraint, get_transformed_constraints, +from pyomo.gdp.util import ( _warn_for_active_logical_constraint, is_child_of, + get_src_disjunction, get_src_constraint, + get_transformed_constraints, _get_constraint_transBlock, get_src_disjunct, _warn_for_active_disjunction, _warn_for_active_disjunct, preprocess_targets) +from pyomo.core.util import target_list from pyomo.network import Port from pyomo.repn import generate_standard_repn from functools import wraps diff --git a/pyomo/gdp/plugins/hull.py b/pyomo/gdp/plugins/hull.py index bc9d03d90b2..b89e4131df0 100644 --- a/pyomo/gdp/plugins/hull.py +++ b/pyomo/gdp/plugins/hull.py @@ -25,11 +25,12 @@ ) from pyomo.gdp import Disjunct, Disjunction, GDP_Error from pyomo.gdp.util import ( 
_warn_for_active_logical_constraint, - clone_without_expression_components, target_list, - is_child_of, get_src_disjunction, - get_src_constraint, get_transformed_constraints, - get_src_disjunct, _warn_for_active_disjunction, + clone_without_expression_components, is_child_of, + get_src_disjunction, get_src_constraint, + get_transformed_constraints, get_src_disjunct, + _warn_for_active_disjunction, _warn_for_active_disjunct, preprocess_targets) +from pyomo.core.util import target_list from pyomo.network import Port from functools import wraps from weakref import ref as weakref_ref diff --git a/pyomo/gdp/util.py b/pyomo/gdp/util.py index ec53498e0f5..6b5d459cda0 100644 --- a/pyomo/gdp/util.py +++ b/pyomo/gdp/util.py @@ -12,8 +12,6 @@ from pyomo.gdp import GDP_Error, Disjunction from pyomo.gdp.disjunct import _DisjunctData, Disjunct -from pyomo.core.base.component import _ComponentBase - from pyomo.core import Block, TraversalStrategy from pyomo.opt import TerminationCondition, SolverStatus from weakref import ref as weakref_ref @@ -78,7 +76,7 @@ def clone_without_expression_components(expr, substitute=None): # visitor = EXPR.ExpressionReplacementVisitor(substitute=substitute, remove_named_expressions=True) - return visitor.dfs_postorder_stack(expr) + return visitor.walk_expression(expr) def preprocess_targets(targets): preprocessed_targets = [] @@ -97,24 +95,6 @@ def preprocess_targets(targets): preprocessed_targets.append(t) return preprocessed_targets -def target_list(x): - if isinstance(x, _ComponentBase): - return [ x ] - elif hasattr(x, '__iter__'): - ans = [] - for i in x: - if isinstance(i, _ComponentBase): - ans.append(i) - else: - raise ValueError( - "Expected Component or list of Components." - "\n\tReceived %s" % (type(i),)) - return ans - else: - raise ValueError( - "Expected Component or list of Components." - "\n\tReceived %s" % (type(x),)) - # [ESJ 07/09/2019 Should this be a more general utility function elsewhere? 
I'm # putting it here for now so that all the gdp transformations can use it. # Returns True if child is a node or leaf in the tree rooted at parent, False diff --git a/pyomo/neos/kestrel.py b/pyomo/neos/kestrel.py index bbce56abff1..8e021c51c87 100644 --- a/pyomo/neos/kestrel.py +++ b/pyomo/neos/kestrel.py @@ -126,8 +126,8 @@ def setup_connection(self): http.client.BadStatusLine): e = sys.exc_info()[1] self.neos = None - logger.info("Fail.") - logger.warning("NEOS is temporarily unavailable.\n") + logger.info("Fail: %s" % (e,)) + logger.warning("NEOS is temporarily unavailable:\n\t(%s)" % (e,)) def tempfile(self): return os.path.join(tempfile.gettempdir(),'at%s.jobs' % os.getenv('ampl_id')) diff --git a/pyomo/neos/tests/test_neos.py b/pyomo/neos/tests/test_neos.py index d198e2644e5..87e08e8b3dc 100644 --- a/pyomo/neos/tests/test_neos.py +++ b/pyomo/neos/tests/test_neos.py @@ -21,6 +21,7 @@ import os.path import pyomo.common.unittest as unittest +from pyomo.common.log import LoggingIntercept from pyomo.scripting.pyomo_main import main from pyomo.scripting.util import cleanup @@ -82,6 +83,18 @@ def test_doc(self): #missing = gamssolvers - amplsolvers #self.assertEqual(len(missing) == 0) + def test_connection_failed(self): + try: + orig_host = pyomo.neos.kestrel.NEOS.host + pyomo.neos.kestrel.NEOS.host = 'neos-bogus-server.org' + with LoggingIntercept() as LOG: + kestrel = kestrelAMPL() + self.assertIsNone(kestrel.neos) + self.assertRegex(LOG.getvalue(), + "NEOS is temporarily unavailable:\n\t\(.+\)") + finally: + pyomo.neos.kestrel.NEOS.host = orig_host + class RunAllNEOSSolvers(object): def test_bonmin(self): diff --git a/pyomo/repn/plugins/ampl/ampl_.py b/pyomo/repn/plugins/ampl/ampl_.py index 4629342a624..468e0e0a132 100644 --- a/pyomo/repn/plugins/ampl/ampl_.py +++ b/pyomo/repn/plugins/ampl/ampl_.py @@ -351,6 +351,11 @@ def __call__(self, include_all_variable_bounds = \ io_options.pop("include_all_variable_bounds", False) + # List of variables that don't appear 
in constraints to force into the + # nl-file + export_nonlinear_variables = \ + io_options.pop("export_nonlinear_variables", False) + if len(io_options): raise ValueError( "ProblemWriter_nl passed unrecognized io_options:\n\t" + @@ -399,7 +404,8 @@ def __call__(self, show_section_timing=show_section_timing, skip_trivial_constraints=skip_trivial_constraints, file_determinism=file_determinism, - include_all_variable_bounds=include_all_variable_bounds) + include_all_variable_bounds=include_all_variable_bounds, + export_nonlinear_variables=export_nonlinear_variables) self._symbolic_solver_labels = False self._output_fixed_variable_bounds = False @@ -700,7 +706,8 @@ def _print_model_NL(self, model, show_section_timing=False, skip_trivial_constraints=False, file_determinism=1, - include_all_variable_bounds=False): + include_all_variable_bounds=False, + export_nonlinear_variables=False): output_fixed_variable_bounds = self._output_fixed_variable_bounds symbolic_solver_labels = self._symbolic_solver_labels @@ -817,13 +824,13 @@ def _print_model_NL(self, model, ObjNonlinearVarsInt = set() for block in all_blocks_list: - gen_obj_repn = \ - getattr(block, "_gen_obj_repn", True) - - # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): - block._repn = ComponentMap() - block_repn = block._repn + gen_obj_repn = getattr(block, "_gen_obj_repn", None) + if gen_obj_repn is not None: + gen_obj_repn = bool(gen_obj_repn) + # Get/Create the ComponentMap for the repn + if not hasattr(block,'_repn'): + block._repn = ComponentMap() + block_repn = block._repn for active_objective in block.component_data_objects(Objective, active=True, @@ -834,13 +841,7 @@ def _print_model_NL(self, model, if len(objname) > max_rowname_len: max_rowname_len = len(objname) - if gen_obj_repn: - repn = generate_standard_repn(active_objective.expr, - quadratic=False) - block_repn[active_objective] = repn - linear_vars = repn.linear_vars - nonlinear_vars = repn.nonlinear_vars - else: + if 
gen_obj_repn == False: repn = block_repn[active_objective] linear_vars = repn.linear_vars # By default, the NL writer generates @@ -850,6 +851,15 @@ def _print_model_NL(self, model, # are using a cached repn object, so we # must check for the quadratic form. + # Note that this is fragile: + # generate_standard_repn can leave nonlinear + # terms in both quadratic and nonlinear fields. + # However, when this was written the assumption + # is that generate_standard_repn is only called + # with quadratic=True for QCQPs (by the LP + # writer). So, quadratic and nonlinear_expr + # will both never be non-empty. This assertion + # will fail if that assumption is ever violated: assert repn.is_quadratic() assert len(repn.quadratic_vars) > 0 nonlinear_vars = {} @@ -859,7 +869,13 @@ def _print_model_NL(self, model, nonlinear_vars = nonlinear_vars.values() else: nonlinear_vars = repn.nonlinear_vars - + else: + repn = generate_standard_repn(active_objective.expr, + quadratic=False) + linear_vars = repn.linear_vars + nonlinear_vars = repn.nonlinear_vars + if gen_obj_repn: + block_repn[active_objective] = repn try: wrapped_repn = RepnWrapper( repn, @@ -923,13 +939,13 @@ def _print_model_NL(self, model, for block in all_blocks_list: all_repns = list() - gen_con_repn = \ - getattr(block, "_gen_con_repn", True) - - # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): - block._repn = ComponentMap() - block_repn = block._repn + gen_con_repn = getattr(block, "_gen_con_repn", None) + if gen_con_repn is not None: + gen_con_repn = bool(gen_con_repn) + # Get/Create the ComponentMap for the repn + if not hasattr(block,'_repn'): + block._repn = ComponentMap() + block_repn = block._repn # Initializing the constraint dictionary for constraint_data in block.component_data_objects(Constraint, @@ -947,36 +963,46 @@ def _print_model_NL(self, model, if len(conname) > max_rowname_len: max_rowname_len = len(conname) - if 
constraint_data._linear_canonical_form: - repn = constraint_data.canonical_form() + if gen_con_repn == False: + repn = block_repn[constraint_data] linear_vars = repn.linear_vars - nonlinear_vars = repn.nonlinear_vars + # By default, the NL writer generates + # StandardRepn objects without the more + # expensive quadratic processing, but + # there is no guarantee of this if we + # are using a cached repn object, so we + # must check for the quadratic form. + if repn.is_nonlinear() and (repn.nonlinear_expr is None): + # Note that this is fragile: + # generate_standard_repn can leave nonlinear + # terms in both quadratic and nonlinear fields. + # However, when this was written the assumption + # is that generate_standard_repn is only called + # with quadratic=True for QCQPs (by the LP + # writer). So, quadratic and nonlinear_expr + # will both never be non-empty. This assertion + # will fail if that assumption is ever violated: + assert repn.is_quadratic() + assert len(repn.quadratic_vars) > 0 + nonlinear_vars = {} + for v1, v2 in repn.quadratic_vars: + nonlinear_vars[id(v1)] = v1 + nonlinear_vars[id(v2)] = v2 + nonlinear_vars = nonlinear_vars.values() + else: + nonlinear_vars = repn.nonlinear_vars else: - if gen_con_repn: - repn = generate_standard_repn(constraint_data.body, - quadratic=False) - block_repn[constraint_data] = repn + if constraint_data._linear_canonical_form: + repn = constraint_data.canonical_form() linear_vars = repn.linear_vars nonlinear_vars = repn.nonlinear_vars else: - repn = block_repn[constraint_data] + repn = generate_standard_repn(constraint_data.body, + quadratic=False) linear_vars = repn.linear_vars - # By default, the NL writer generates - # StandardRepn objects without the more - # expense quadratic processing, but - # there is no guarantee of this if we - # are using a cached repn object, so we - # must check for the quadratic form. 
- if repn.is_nonlinear() and (repn.nonlinear_expr is None): - assert repn.is_quadratic() - assert len(repn.quadratic_vars) > 0 - nonlinear_vars = {} - for v1, v2 in repn.quadratic_vars: - nonlinear_vars[id(v1)] = v1 - nonlinear_vars[id(v2)] = v2 - nonlinear_vars = nonlinear_vars.values() - else: - nonlinear_vars = repn.nonlinear_vars + nonlinear_vars = repn.nonlinear_vars + if gen_con_repn: + block_repn[constraint_data] = repn ### GAH: Even if this is fixed, it is still useful to ### write out these types of constraints @@ -1117,6 +1143,15 @@ def _print_model_NL(self, model, UnusedVars = AllVars.difference(UsedVars) LinearVars.update(UnusedVars) + if export_nonlinear_variables: + for v in export_nonlinear_variables: + v_iter = v.values() if v.is_indexed() else iter((v,)) + for vi in v_iter: + if self_varID_map[id(vi)] not in UsedVars: + Vars_dict[id(vi)] = vi + ConNonlinearVars.update([self_varID_map[id(vi)]]) + + ### There used to be an if statement here for the following code block ### checking model.statistics.num_binary_vars was greater than zero. ### To this day, I don't know how it worked. 
diff --git a/pyomo/repn/plugins/baron_writer.py b/pyomo/repn/plugins/baron_writer.py index b80b344a3e7..5acfa1fbd61 100644 --- a/pyomo/repn/plugins/baron_writer.py +++ b/pyomo/repn/plugins/baron_writer.py @@ -85,21 +85,7 @@ def visit(self, node, values): else: tmp.append(val) - if node.__class__ in EXPR.NPV_expression_types: - return ftoa(value(node)) - - if node.__class__ is EXPR.LinearExpression: - for v in node.linear_vars: - self.variables.add(id(v)) - - if node.__class__ in { - EXPR.ProductExpression, EXPR.MonomialTermExpression}: - if tmp[0] in node._to_string.minus_one: - return "- {0}".format(tmp[1]) - if tmp[0] in node._to_string.one: - return tmp[1] - return "{0} * {1}".format(tmp[0], tmp[1]) - elif node.__class__ is EXPR.PowExpression: + if node.__class__ is EXPR.PowExpression: x,y = node.args if type(x) not in native_types and not x.is_fixed() and \ type(y) not in native_types and not y.is_fixed(): @@ -110,19 +96,24 @@ def visit(self, node, values): return "{0} ^ {1}".format(tmp[0], tmp[1]) elif node.__class__ is EXPR.UnaryFunctionExpression: if node.name == "sqrt": - return "{0} ^ 0.5".format(tmp[0]) + # Parens are necessary because sqrt() and "^" have + # different precedence levels. Instead of parsing the + # arg, be safe and explicitly add parens + return "(({0}) ^ 0.5)".format(tmp[0]) elif node.name == 'log10': - return "{0} * log({1})".format(math.log10(math.e), tmp[0]) + return "({0} * log({1}))".format(math.log10(math.e), tmp[0]) elif node.name in {'exp','log'}: - return node._to_string(tmp, None, self.smap, True) + pass else: raise RuntimeError( 'The BARON .BAR format does not support the unary ' 'function "%s".' % (node.name,)) elif node.__class__ is EXPR.AbsExpression: - return "({0} ^ 2) ^ 0.5".format(tmp[0]) - else: - return node._to_string(tmp, None, self.smap, True) + # Parens are necessary because abs() and "^" have different + # precedence levels. 
Instead of parsing the arg, be safe + # and explicitly add parens + return "((({0}) ^ 2) ^ 0.5)".format(tmp[0]) + return node._to_string(tmp, None, self.smap, True) def visiting_potential_leaf(self, node): """ @@ -132,37 +123,62 @@ def visiting_potential_leaf(self, node): """ #print("ISLEAF") #print(node.__class__) - if node is None: - return True, None if node.__class__ in native_types: return True, ftoa(node) if node.is_expression_type(): + # Special handling if NPV and semi-NPV types: + if not node.is_potentially_variable(): + return True, ftoa(value(node)) + if node.__class__ is EXPR.MonomialTermExpression: + return True, self._monomial_to_string(node) + if node.__class__ is EXPR.LinearExpression: + return True, self._linear_to_string(node) # we will descend into this, so type checking will happen later return False, None if node.is_component_type(): - _ctype = node.ctype - if _ctype not in valid_expr_ctypes_minlp: + if node.ctype not in valid_expr_ctypes_minlp: # Make sure all components in active constraints # are basic ctypes we know how to deal with. raise RuntimeError( "Unallowable component '%s' of type %s found in an active " "constraint or objective.\nThe GAMS writer cannot export " "expressions with this component type." 
- % (node.name, _ctype.__name__)) + % (node.name, node.ctype.__name__)) - if node.is_variable_type(): - if node.fixed: - return True, ftoa(value(node)) + if node.is_fixed(): + return True, ftoa(value(node)) + else: + assert node.is_variable_type() + self.variables.add(id(node)) + return True, self.smap.getSymbol(node) + + def _monomial_to_string(self, node): + const, var = node.args + const = value(const) + if var.is_fixed(): + return ftoa(const * var.value) + self.variables.add(id(var)) + # Special handling: ftoa is slow, so bypass _to_string when this + # is a trivial term + if const in {-1, 1}: + if const < 0: + return '-' + self.smap.getSymbol(var) else: - self.variables.add(id(node)) - label = self.smap.getSymbol(node) - return True, label - - return True, ftoa(value(node)) - + return self.smap.getSymbol(var) + return node._to_string((ftoa(const), self.smap.getSymbol(var)), + False, self.smap, True) + + def _linear_to_string(self, node): + iter_ = iter(node.args) + values = [] + if node.constant: + next(iter_) + values.append(ftoa(node.constant)) + values.extend(map(self._monomial_to_string, iter_)) + return node._to_string(values, False, self.smap, True) def expression_to_string(expr, variables, labeler=None, smap=None): if labeler is not None: diff --git a/pyomo/repn/plugins/cpxlp.py b/pyomo/repn/plugins/cpxlp.py index 01b580a619b..1b265600e76 100644 --- a/pyomo/repn/plugins/cpxlp.py +++ b/pyomo/repn/plugins/cpxlp.py @@ -475,12 +475,13 @@ def _print_model_LP(self, onames = [] for block in all_blocks: - gen_obj_repn = getattr(block, "_gen_obj_repn", True) - - # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): - block._repn = ComponentMap() - block_repn = block._repn + gen_obj_repn = getattr(block, "_gen_obj_repn", None) + if gen_obj_repn is not None: + gen_obj_repn = bool(gen_obj_repn) + # Get/Create the ComponentMap for the repn + if not hasattr(block,'_repn'): + block._repn = ComponentMap() + block_repn = block._repn for 
objective_data in block.component_data_objects( Objective, @@ -506,11 +507,12 @@ def _print_model_LP(self, else: output.append("max \n") - if gen_obj_repn: - repn = generate_standard_repn(objective_data.expr) - block_repn[objective_data] = repn - else: + if gen_obj_repn == False: repn = block_repn[objective_data] + else: + repn = generate_standard_repn(objective_data.expr) + if gen_obj_repn: + block_repn[objective_data] = repn degree = repn.polynomial_degree() @@ -567,12 +569,13 @@ def _print_model_LP(self, def constraint_generator(): for block in all_blocks: - gen_con_repn = getattr(block, "_gen_con_repn", True) - - # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): - block._repn = ComponentMap() - block_repn = block._repn + gen_con_repn = getattr(block, "_gen_con_repn", None) + if gen_con_repn is not None: + gen_con_repn = bool(gen_con_repn) + # Get/Create the ComponentMap for the repn + if not hasattr(block,'_repn'): + block._repn = ComponentMap() + block_repn = block._repn for constraint_data in block.component_data_objects( Constraint, @@ -585,13 +588,15 @@ def constraint_generator(): assert not constraint_data.equality continue # non-binding, so skip - if constraint_data._linear_canonical_form: - repn = constraint_data.canonical_form() - elif gen_con_repn: - repn = generate_standard_repn(constraint_data.body) - block_repn[constraint_data] = repn - else: + if gen_con_repn == False: repn = block_repn[constraint_data] + else: + if constraint_data._linear_canonical_form: + repn = constraint_data.canonical_form() + else: + repn = generate_standard_repn(constraint_data.body) + if gen_con_repn: + block_repn[constraint_data] = repn yield constraint_data, repn diff --git a/pyomo/repn/plugins/gams_writer.py b/pyomo/repn/plugins/gams_writer.py index cc9a623d130..0e76293162e 100644 --- a/pyomo/repn/plugins/gams_writer.py +++ b/pyomo/repn/plugins/gams_writer.py @@ -47,11 +47,12 @@ # class ToGamsVisitor(EXPR.ExpressionValueVisitor): - def 
__init__(self, smap, treechecker): + def __init__(self, smap, treechecker, output_fixed_variables=False): super(ToGamsVisitor, self).__init__() self.smap = smap self.treechecker = treechecker self.is_discontinuous = False + self.output_fixed_variables = output_fixed_variables def visit(self, node, values): """ Visit nodes that have been expanded """ @@ -59,33 +60,26 @@ def visit(self, node, values): for i,val in enumerate(values): arg = node._args_[i] - if arg is None: - tmp.append('Undefined') # TODO: coverage - else: - parens = False - if val and val[0] in '-+': + parens = False + if val[0] in '-+': + # Note: This is technically only necessary for i > 0 + parens = True + elif arg.__class__ in native_types: + pass + elif arg.is_expression_type(): + if node._precedence() < arg._precedence(): parens = True - elif arg.__class__ in native_numeric_types: - pass - elif arg.__class__ in nonpyomo_leaf_types: - val = "'{0}'".format(val) - elif arg.is_expression_type(): - if node._precedence() < arg._precedence(): + elif node._precedence() == arg._precedence(): + if i == 0: + parens = node._associativity() != 1 + elif i == len(node._args_)-1: + parens = node._associativity() != -1 + else: parens = True - elif node._precedence() == arg._precedence(): - if i == 0: - parens = node._associativity() != 1 - elif i == len(node._args_)-1: - parens = node._associativity() != -1 - else: - parens = True - if parens: - tmp.append("({0})".format(val)) - else: - tmp.append(val) - - if node.__class__ in EXPR.NPV_expression_types: - return ftoa(value(node)) + if parens: + tmp.append("(" + val + ")") + else: + tmp.append(val) if node.__class__ is EXPR.PowExpression: # If the exponent is a positive integer, use the power() function. @@ -119,13 +113,20 @@ def visiting_potential_leaf(self, node): Return True if the node is not expanded. 
""" - if node is None: - return True, None - if node.__class__ in native_types: - return True, ftoa(node) + try: + return True, ftoa(node) + except TypeError: + return True, repr(node) if node.is_expression_type(): + # Special handling if NPV and semi-NPV types: + if not node.is_potentially_variable(): + return True, ftoa(value(node)) + if node.__class__ is EXPR.MonomialTermExpression: + return True, self._monomial_to_string(node) + if node.__class__ is EXPR.LinearExpression: + return True, self._linear_to_string(node) # we will descend into this, so type checking will happen later if node.is_component_type(): self.treechecker(node) @@ -145,22 +146,45 @@ def visiting_potential_leaf(self, node): # Vars later since they don't disappear from the expressions self.treechecker(node) - if node.is_variable_type(): - if node.fixed: - return True, ftoa(value(node)) + if node.is_fixed() and not ( + self.output_fixed_variables and node.is_potentially_variable()): + return True, ftoa(value(node)) + else: + assert node.is_variable_type() + return True, self.smap.getSymbol(node) + + def _monomial_to_string(self, node): + const, var = node.args + const = value(const) + if var.is_fixed() and not self.output_fixed_variables: + return ftoa(const * var.value) + # Special handling: ftoa is slow, so bypass _to_string when this + # is a trivial term + if const in {-1, 1}: + if const < 0: + return '-' + self.smap.getSymbol(var) else: - label = self.smap.getSymbol(node) - return True, label - - return True, ftoa(value(node)) - - -def expression_to_string(expr, treechecker, labeler=None, smap=None): + return self.smap.getSymbol(var) + return node._to_string((ftoa(const), self.smap.getSymbol(var)), + False, self.smap, True) + + def _linear_to_string(self, node): + iter_ = iter(node.args) + values = [] + if node.constant: + next(iter_) + values.append(ftoa(node.constant)) + values.extend(map(self._monomial_to_string, iter_)) + return node._to_string(values, False, self.smap, True) + + +def 
expression_to_string(expr, treechecker, labeler=None, smap=None, + output_fixed_variables=False): if labeler is not None: if smap is None: smap = SymbolMap() smap.default_labeler = labeler - visitor = ToGamsVisitor(smap, treechecker) + visitor = ToGamsVisitor(smap, treechecker, output_fixed_variables) expr_str = visitor.dfs_postorder_stack(expr) return expr_str, visitor.is_discontinuous @@ -315,6 +339,9 @@ def __call__(self, For model attributes, is GAMS_MODEL. - skip_trivial_constraints=False Skip writing constraints whose body section is fixed. + - output_fixed_variables=False + If True, output fixed variables as variables; otherwise, + output numeric value. - file_determinism=1 | How much effort do we want to put into ensuring the | GAMS file is written deterministically for a Pyomo model: @@ -366,6 +393,10 @@ def __call__(self, skip_trivial_constraints = \ io_options.pop("skip_trivial_constraints", False) + # Output fixed variables as variables + output_fixed_variables = \ + io_options.pop("output_fixed_variables", False) + # How much effort do we want to put into ensuring the # GAMS file is written deterministically for a Pyomo model: # 0 : None @@ -475,6 +506,7 @@ def var_label(obj): con_labeler=con_labeler, sort=sort, skip_trivial_constraints=skip_trivial_constraints, + output_fixed_variables=output_fixed_variables, warmstart=warmstart, solver=solver, mtype=mtype, @@ -502,6 +534,7 @@ def _write_model(self, con_labeler, sort, skip_trivial_constraints, + output_fixed_variables, warmstart, solver, mtype, @@ -556,7 +589,9 @@ def _write_model(self, cName = symbolMap.getSymbol(con, con_labeler) con_body_str, con_discontinuous = expression_to_string( - con_body, tc, smap=symbolMap) + con_body, tc, smap=symbolMap, + output_fixed_variables=output_fixed_variables + ) dnlp |= con_discontinuous if con.equality: constraint_names.append('%s' % cName) @@ -593,7 +628,9 @@ def _write_model(self, if obj.expr.polynomial_degree() not in linear_degree: linear = False 
obj_expr_str, obj_discontinuous = expression_to_string( - obj.expr, tc, smap=symbolMap) + obj.expr, tc, smap=symbolMap, + output_fixed_variables=output_fixed_variables, + ) dnlp |= obj_discontinuous oName = symbolMap.getSymbol(obj, con_labeler) constraint_names.append(oName) diff --git a/pyomo/repn/tests/ampl/test_ampl_nl.py b/pyomo/repn/tests/ampl/test_ampl_nl.py index 6215231b61f..45bdb4b9c88 100644 --- a/pyomo/repn/tests/ampl/test_ampl_nl.py +++ b/pyomo/repn/tests/ampl/test_ampl_nl.py @@ -17,7 +17,14 @@ import pyomo.common.unittest as unittest from pyomo.common.getGSL import find_GSL -from pyomo.environ import ConcreteModel, Var, Constraint, Objective, Param, Block, ExternalFunction, value +from pyomo.common.tempfiles import TempfileManager +from pyomo.environ import ( + ConcreteModel, Var, Constraint, Objective, Param, Block, + ExternalFunction, value, +) + +import pyomo.repn.plugins.ampl.ampl_ as ampl_ +gsr = ampl_.generate_standard_repn thisdir = os.path.dirname(os.path.abspath(__file__)) @@ -36,6 +43,81 @@ def _get_fnames(self): prefix = os.path.join(thisdir, test_name.replace("test_", "", 1)) return prefix+".nl.baseline", prefix+".nl.out" + def test_export_nonlinear_variables(self): + model = ConcreteModel() + model.x = Var() + model.y = Var() + model.z = Var() + model.w = Var([1,2,3]) + model.c = Constraint(expr=model.x == model.y**2) + + model.y.fix(3) + test_fname = "export_nonlinear_variables" + model.write( + test_fname, + format='nl', + io_options={'symbolic_solver_labels':True} + ) + with open(test_fname + '.col') as f: + names = list(map(str.strip, f.readlines())) + assert "z" not in names # z is not in a constraint + assert "y" not in names # y is fixed + assert "x" in names + self._cleanup(test_fname) + model.write( + test_fname, + format='nl', + io_options={ + 'symbolic_solver_labels':True, + 'export_nonlinear_variables':[model.z] + } + ) + with open(test_fname + '.col') as f: + names = list(map(str.strip, f.readlines())) + assert "z" in names + 
assert "y" not in names + assert "x" in names + assert "w[1]" not in names + assert "w[2]" not in names + assert "w[3]" not in names + self._cleanup(test_fname) + model.write( + test_fname, + format='nl', + io_options={ + 'symbolic_solver_labels':True, + 'export_nonlinear_variables':[model.z, model.w] + } + ) + with open(test_fname + '.col') as f: + names = list(map(str.strip, f.readlines())) + assert "z" in names + assert "y" not in names + assert "x" in names + assert "w[1]" in names + assert "w[2]" in names + assert "w[3]" in names + + self._cleanup(test_fname) + + model.write( + test_fname, + format='nl', + io_options={ + 'symbolic_solver_labels':True, + 'export_nonlinear_variables':[model.z, model.w[2]] + } + ) + with open(test_fname + '.col') as f: + names = list(map(str.strip, f.readlines())) + assert "z" in names + assert "y" not in names + assert "x" in names + assert "w[1]" not in names + assert "w[2]" in names + assert "w[3]" not in names + + self._cleanup(test_fname) def test_var_on_other_model(self): other = ConcreteModel() @@ -185,8 +267,6 @@ def test_external_expression_rewrite_fixed(self): variable_baseline), msg="Files %s and %s differ" % (test_fname, baseline_fname)) - self.assertIsNot(m._repn, None) - m.x.fix() self._cleanup(test_fname) m.write(test_fname, format='nl', @@ -209,6 +289,124 @@ def test_external_expression_rewrite_fixed(self): msg="Files %s and %s differ" % (test_fname, baseline_fname)) self._cleanup(test_fname) + def test_obj_con_cache(self): + model = ConcreteModel() + model.x = Var() + model.c = Constraint(expr=model.x**2 >= 1) + model.obj = Objective(expr=model.x**2) + + with TempfileManager.new_context() as TMP: + nl_file = TMP.create_tempfile(suffix='.nl') + model.write(nl_file, format='nl') + self.assertFalse(hasattr(model, '_repn')) + with open(nl_file) as FILE: + nl_ref = FILE.read() + + nl_file = TMP.create_tempfile(suffix='.nl') + model._gen_obj_repn = True + model.write(nl_file) + self.assertEqual(len(model._repn), 1) + 
self.assertIn(model.obj, model._repn) + obj_repn = model._repn[model.obj] + with open(nl_file) as FILE: + nl_test = FILE.read() + self.assertEqual(nl_ref, nl_test) + + nl_file = TMP.create_tempfile(suffix='.nl') + del model._repn + model._gen_obj_repn = None + model._gen_con_repn = True + model.write(nl_file) + self.assertEqual(len(model._repn), 1) + self.assertIn(model.c, model._repn) + c_repn = model._repn[model.c] + with open(nl_file) as FILE: + nl_test = FILE.read() + self.assertEqual(nl_ref, nl_test) + + nl_file = TMP.create_tempfile(suffix='.nl') + del model._repn + model._gen_obj_repn = True + model._gen_con_repn = True + model.write(nl_file) + self.assertEqual(len(model._repn), 2) + self.assertIn(model.obj, model._repn) + self.assertIn(model.c, model._repn) + obj_repn = model._repn[model.obj] + c_repn = model._repn[model.c] + with open(nl_file) as FILE: + nl_test = FILE.read() + self.assertEqual(nl_ref, nl_test) + + nl_file = TMP.create_tempfile(suffix='.nl') + model._gen_obj_repn = None + model._gen_con_repn = None + model.write(nl_file) + self.assertEqual(len(model._repn), 2) + self.assertIn(model.obj, model._repn) + self.assertIn(model.c, model._repn) + self.assertIs(obj_repn, model._repn[model.obj]) + self.assertIs(c_repn, model._repn[model.c]) + with open(nl_file) as FILE: + nl_test = FILE.read() + self.assertEqual(nl_ref, nl_test) + + nl_file = TMP.create_tempfile(suffix='.nl') + model._gen_obj_repn = True + model._gen_con_repn = True + model.write(nl_file) + self.assertEqual(len(model._repn), 2) + self.assertIn(model.obj, model._repn) + self.assertIn(model.c, model._repn) + self.assertIsNot(obj_repn, model._repn[model.obj]) + self.assertIsNot(c_repn, model._repn[model.c]) + obj_repn = model._repn[model.obj] + c_repn = model._repn[model.c] + with open(nl_file) as FILE: + nl_test = FILE.read() + self.assertEqual(nl_ref, nl_test) + + nl_file = TMP.create_tempfile(suffix='.nl') + model._gen_obj_repn = False + model._gen_con_repn = False + try: + def 
dont_call_gsr(*args, **kwargs): + self.fail("generate_standard_repn should not be called") + ampl_.generate_standard_repn = dont_call_gsr + model.write(nl_file) + finally: + ampl_.generate_standard_repn = gsr + self.assertEqual(len(model._repn), 2) + self.assertIn(model.obj, model._repn) + self.assertIn(model.c, model._repn) + self.assertIs(obj_repn, model._repn[model.obj]) + self.assertIs(c_repn, model._repn[model.c]) + with open(nl_file) as FILE: + nl_test = FILE.read() + self.assertEqual(nl_ref, nl_test) + + # Check that repns generated by the LP writer will be + # processed correctly + model._repn[model.c] = c_repn = gsr(model.c.body, quadratic=True) + model._repn[model.obj] = obj_repn = gsr( + model.obj.expr, quadratic=True) + nl_file = TMP.create_tempfile(suffix='.nl') + try: + def dont_call_gsr(*args, **kwargs): + self.fail("generate_standard_repn should not be called") + ampl_.generate_standard_repn = dont_call_gsr + model.write(nl_file) + finally: + ampl_.generate_standard_repn = gsr + self.assertEqual(len(model._repn), 2) + self.assertIn(model.obj, model._repn) + self.assertIn(model.c, model._repn) + self.assertIs(obj_repn, model._repn[model.obj]) + self.assertIs(c_repn, model._repn[model.c]) + with open(nl_file) as FILE: + nl_test = FILE.read() + self.assertEqual(nl_ref, nl_test) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/repn/tests/baron/branching_priorities.bar.baseline b/pyomo/repn/tests/baron/branching_priorities.bar.baseline index 7a2d820c3b1..a0f19dfc620 100644 --- a/pyomo/repn/tests/baron/branching_priorities.bar.baseline +++ b/pyomo/repn/tests/baron/branching_priorities.bar.baseline @@ -27,7 +27,7 @@ y_2_: 2; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c: y_1_ * y_2_ + (-2) * x >= 0; +c: y_1_*y_2_ - 2*x >= 0; OBJ: maximize y_1_ + y_2_; diff --git a/pyomo/repn/tests/baron/no_column_ordering_quadratic.bar.baseline b/pyomo/repn/tests/baron/no_column_ordering_quadratic.bar.baseline index 
bd01176d620..3a0652b73f8 100644 --- a/pyomo/repn/tests/baron/no_column_ordering_quadratic.bar.baseline +++ b/pyomo/repn/tests/baron/no_column_ordering_quadratic.bar.baseline @@ -9,9 +9,9 @@ VARIABLES a, b, c; EQUATIONS c_e_FIX_ONE_VAR_CONST__, con; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -con: a + b + c + a * a + b * b + c * c + a * b + a * c + b * c <= 1; +con: a + b + c + a*a + b*b + c*c + a*b + a*c + b*c <= 1; -OBJ: minimize a + b + c + a * a + b * b + c * c + a * b + a * c + b * c ; +OBJ: minimize a + b + c + a*a + b*b + c*c + a*b + a*c + b*c; STARTING_POINT{ ONE_VAR_CONST__: 1; diff --git a/pyomo/repn/tests/baron/small10.pyomo.bar b/pyomo/repn/tests/baron/small10.pyomo.bar index b41a9b4a7c8..5500c6faa21 100644 --- a/pyomo/repn/tests/baron/small10.pyomo.bar +++ b/pyomo/repn/tests/baron/small10.pyomo.bar @@ -10,21 +10,21 @@ EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; c1: x1 == 0; -c2: 0 * x1 + x1 == 0; +c2: 0*x1 + x1 == 0; c3: x1 == 0; c4: x1 == 0; -c5: 0 * x1 ^ 2 + x1 == 0; -c6: 0 * x1 * 1 + x1 == 0; -c7: 0 * x1 ^ 2 + x1 == 0; -c8: 0 * x1 * 1 + x1 == 0; -c9: 0 * x1 == 0; -c10: 0 * x1 ^ 2 == 0; -c11: 0 * x1 * 1 == 0; -c12: 0 * x1 ^ 2 == 0; -c13: 0 * x1 * 1 == 0; -c14: 0 * x1 == 0; +c5: 0*x1 ^ 2 + x1 == 0; +c6: 0*x1*1 + x1 == 0; +c7: 0*x1 ^ 2 + x1 == 0; +c8: 0*x1*1 + x1 == 0; +c9: 0*x1 == 0; +c10: 0*x1 ^ 2 == 0; +c11: 0*x1*1 == 0; +c12: 0*x1 ^ 2 == 0; +c13: 0*x1*1 == 0; +c14: 0*x1 == 0; -OBJ: minimize x1 + 0 * x1 + 0 * x1 + x1 * x1 * 0 + x1 * x1 * 0 + 0 * x1 ^ 2; +OBJ: minimize x1 + 0*x1 + 0*x1 + x1*x1*0 + x1*x1*0 + 0*x1 ^ 2; STARTING_POINT{ ONE_VAR_CONST__: 1; diff --git a/pyomo/repn/tests/baron/small12.pyomo.bar b/pyomo/repn/tests/baron/small12.pyomo.bar index 4b9be1b9f3f..57b41a688d1 100644 --- a/pyomo/repn/tests/baron/small12.pyomo.bar +++ b/pyomo/repn/tests/baron/small12.pyomo.bar @@ -11,18 +11,18 @@ EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2, c3, c4, c5, c6, c7, c8, 
c9, c10, c11, c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; c1: Expr_if( ( 0.0 ), then=( vTrue ), else=( vFalse ) ) == -1; c2: Expr_if( ( 1.0 ), then=( vTrue ), else=( vFalse ) ) == 1; -c3: Expr_if( ( vN1 <= 0.0 ), then=( vTrue ), else=( vFalse ) ) == 1; -c4: Expr_if( ( v0 <= 0.0 ), then=( vTrue ), else=( vFalse ) ) == 1; -c5: Expr_if( ( vP1 <= 0.0 ), then=( vTrue ), else=( vFalse ) ) == -1; -c6: Expr_if( ( vN1 < 0.0 ), then=( vTrue ), else=( vFalse ) ) == 1; -c7: Expr_if( ( v0 < 0.0 ), then=( vTrue ), else=( vFalse ) ) == -1; -c8: Expr_if( ( vP1 < 0.0 ), then=( vTrue ), else=( vFalse ) ) == -1; -c9: Expr_if( ( 0.0 <= 10.0*vN1 ), then=( vTrue ), else=( vFalse ) ) == -1; -c10: Expr_if( ( 0.0 <= 10.0*v0 ), then=( vTrue ), else=( vFalse ) ) == 1; -c11: Expr_if( ( 0.0 <= 10.0*vP1 ), then=( vTrue ), else=( vFalse ) ) == 1; -c12: Expr_if( ( 0.0 < 10.0*vN1 ), then=( vTrue ), else=( vFalse ) ) == -1; -c13: Expr_if( ( 0.0 < 10.0*v0 ), then=( vTrue ), else=( vFalse ) ) == -1; -c14: Expr_if( ( 0.0 < 10.0*vP1 ), then=( vTrue ), else=( vFalse ) ) == 1; +c3: Expr_if( ( vN1 <= 0 ), then=( vTrue ), else=( vFalse ) ) == 1; +c4: Expr_if( ( v0 <= 0 ), then=( vTrue ), else=( vFalse ) ) == 1; +c5: Expr_if( ( vP1 <= 0 ), then=( vTrue ), else=( vFalse ) ) == -1; +c6: Expr_if( ( vN1 < 0 ), then=( vTrue ), else=( vFalse ) ) == 1; +c7: Expr_if( ( v0 < 0 ), then=( vTrue ), else=( vFalse ) ) == -1; +c8: Expr_if( ( vP1 < 0 ), then=( vTrue ), else=( vFalse ) ) == -1; +c9: Expr_if( ( 0 <= 10.0*vN1 ), then=( vTrue ), else=( vFalse ) ) == -1; +c10: Expr_if( ( 0 <= 10.0*v0 ), then=( vTrue ), else=( vFalse ) ) == 1; +c11: Expr_if( ( 0 <= 10.0*vP1 ), then=( vTrue ), else=( vFalse ) ) == 1; +c12: Expr_if( ( 0 < 10.0*vN1 ), then=( vTrue ), else=( vFalse ) ) == -1; +c13: Expr_if( ( 0 < 10.0*v0 ), then=( vTrue ), else=( vFalse ) ) == -1; +c14: Expr_if( ( 0 < 10.0*vP1 ), then=( vTrue ), else=( vFalse ) ) == 1; c15: Expr_if( ( -1 <= vN2 <= 1 ), then=( vTrue ), else=( vFalse ) ) == -1; c16: Expr_if( ( - 
vP1 <= vN1 <= 1 ), then=( vTrue ), else=( vFalse ) ) == 1; c17: Expr_if( ( - vP1**2 <= v0 <= 1 ), then=( vTrue ), else=( vFalse ) ) == 1; @@ -34,7 +34,7 @@ c22: Expr_if( ( -1 < v0 < vP1**2 ), then=( vTrue ), else=( vFalse ) ) == 1; c23: Expr_if( ( -1 < vP1 < vP1 ), then=( vTrue ), else=( vFalse ) ) == -1; c24: Expr_if( ( -1 < vP2 < 1 ), then=( vTrue ), else=( vFalse ) ) == -1; -OBJ: minimize 10 * Expr_if( ( v0 ), then=( vTrue ), else=( vFalse ) ); +OBJ: minimize 10*Expr_if( ( v0 ), then=( vTrue ), else=( vFalse ) ); STARTING_POINT{ ONE_VAR_CONST__: 1; diff --git a/pyomo/repn/tests/baron/small13.pyomo.bar b/pyomo/repn/tests/baron/small13.pyomo.bar index 6ca10a6cdb4..6bc336c92e2 100644 --- a/pyomo/repn/tests/baron/small13.pyomo.bar +++ b/pyomo/repn/tests/baron/small13.pyomo.bar @@ -10,7 +10,7 @@ EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2, c3; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; c1: x1 ^ 3 - x1 == 0; -c2: 10 * (x1 ^ 3 - x1) == 0; +c2: 10*(x1 ^ 3 - x1) == 0; c3: (x1 ^ 3 - x1)/10 == 0; OBJ: maximize x1; diff --git a/pyomo/repn/tests/baron/small14a.pyomo.bar b/pyomo/repn/tests/baron/small14a.pyomo.bar index a95cc819967..2cb87408130 100644 --- a/pyomo/repn/tests/baron/small14a.pyomo.bar +++ b/pyomo/repn/tests/baron/small14a.pyomo.bar @@ -10,10 +10,10 @@ EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2, c3, c4, c5; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; c1: log(x1) == 0; -c2: 0.434294481903 * log(x1) == 0; +c2: (0.4342944819032518 * log(x1)) == 0; c3: exp(x2) == 1; -c4: x1 ^ 0.5 == 1; -c5: (x1 ^ 2) ^ 0.5 == 1; +c4: ((x1) ^ 0.5) == 1; +c5: (((x1) ^ 2) ^ 0.5) == 1; OBJ: minimize x1 + x2; diff --git a/pyomo/repn/tests/baron/small3.pyomo.bar b/pyomo/repn/tests/baron/small3.pyomo.bar index c3bc07a9aaf..1c47c33d3fe 100644 --- a/pyomo/repn/tests/baron/small3.pyomo.bar +++ b/pyomo/repn/tests/baron/small3.pyomo.bar @@ -11,7 +11,7 @@ EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; c1: x1 ^ 2 == 4; -OBJ: minimize x2 * x1; +OBJ: 
minimize x2*x1; STARTING_POINT{ ONE_VAR_CONST__: 1; diff --git a/pyomo/repn/tests/baron/small4.pyomo.bar b/pyomo/repn/tests/baron/small4.pyomo.bar index 535e8949669..ba9cf3176ac 100644 --- a/pyomo/repn/tests/baron/small4.pyomo.bar +++ b/pyomo/repn/tests/baron/small4.pyomo.bar @@ -9,7 +9,7 @@ VARIABLES x1, x2; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c1: x1 * x2 == 4; +c1: x1*x2 == 4; OBJ: minimize x1 ^ 2; diff --git a/pyomo/repn/tests/baron/small5.pyomo.bar b/pyomo/repn/tests/baron/small5.pyomo.bar index 318fe51db05..e25b747e5e8 100644 --- a/pyomo/repn/tests/baron/small5.pyomo.bar +++ b/pyomo/repn/tests/baron/small5.pyomo.bar @@ -21,18 +21,18 @@ x3: 1; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c1: 0.5 * x1 * (x2 - x3) == 2; -c2: 0.5 * x1 * (x2 - x3) == 2; -c3: x1 * (x2 - x3)/2 == 2; -c4: x1 * (0.5 * x2 + (-0.5) * x3) == 2; -c5: x1 * (x2 - x3) * 0.5 == 2; -c6: x1 * (x2 - x3) == 4; -c7: 0.5 * x1 * (x2 - x3) == 2; -c8: 0.5 * x1 * (x2 - x3) == 2; -c9: x1 * (x2 - x3)/2 == 2; -c10: x1 * (0.5 * x2 + (-0.5) * x3) == 2; -c11: x1 * (x2 - x3) * (0.5) == 2; -c12: x1 * (x2 - x3) == 4; +c1: 0.5*x1*(x2 - x3) == 2; +c2: 0.5*x1*(x2 - x3) == 2; +c3: x1*(x2 - x3)/2 == 2; +c4: x1*(0.5*x2 - 0.5*x3) == 2; +c5: x1*(x2 - x3)*0.5 == 2; +c6: x1*(x2 - x3) == 4; +c7: 0.5*x1*(x2 - x3) == 2; +c8: 0.5*x1*(x2 - x3) == 2; +c9: x1*(x2 - x3)/2 == 2; +c10: x1*(0.5*x2 - 0.5*x3) == 2; +c11: x1*(x2 - x3)*(0.5) == 2; +c12: x1*(x2 - x3) == 4; OBJ: minimize x2 ^ 2/2 + x2 ^ 2/2; diff --git a/pyomo/repn/tests/baron/small6.pyomo.bar b/pyomo/repn/tests/baron/small6.pyomo.bar index c5a1e7dca70..c3cb3616fdc 100644 --- a/pyomo/repn/tests/baron/small6.pyomo.bar +++ b/pyomo/repn/tests/baron/small6.pyomo.bar @@ -21,12 +21,12 @@ x3: 1; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2, c3, c4, c5, c6; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c1: 1/2 * x1 * (x2 - x3) == 2; -c2: x1/2 
* (x2 - x3) == 2; -c3: x1 * (x2 - x3)/2 == 2; -c4: x1 * (x2/2 - x3/2) == 2; -c5: x1 * (x2 - x3) * (1/2) == 2; -c6: x1 * (x2 - x3) + (-2) * 2 == 0; +c1: 1/2*x1*(x2 - x3) == 2; +c2: x1/2*(x2 - x3) == 2; +c3: x1*(x2 - x3)/2 == 2; +c4: x1*(x2/2 - x3/2) == 2; +c5: x1*(x2 - x3)*(1/2) == 2; +c6: x1*(x2 - x3) - 4 == 0; OBJ: minimize x2; diff --git a/pyomo/repn/tests/baron/small7.pyomo.bar b/pyomo/repn/tests/baron/small7.pyomo.bar index 48913f190f3..93c05ea7ba0 100644 --- a/pyomo/repn/tests/baron/small7.pyomo.bar +++ b/pyomo/repn/tests/baron/small7.pyomo.bar @@ -21,30 +21,30 @@ x3: 1; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c1: 1/2/2 * x1 * (x2 - x3) == 2; -c2: x1/2/2 * (x2 - x3) == 2; -c3: x1 * (x2 - x3)/2/2 == 2; -c4: x1 * (x2/2/2 - x3/2/2) == 2; -c5: x1 * (x2 - x3) * (1/2/2) == 2; -c6: x1 * (x2 - x3) + (-4) * 2 == 0; -c7: 1/(2 * 2) * x1 * (x2 - x3) == 2; -c8: x1/(2 * 2) * (x2 - x3) == 2; -c9: x1 * (x2 - x3)/(2 * 2) == 2; -c10: x1 * (x2/(2 * 2) - x3/(2 * 2)) == 2; -c11: x1 * (x2 - x3) * (1/(2 * 2)) == 2; -c12: x1 * (x2 - x3) + (-4) * 2 == 0; -c13: 1/(2 + 2) * x1 * (x2 - x3) == 2; -c14: x1/(2 + 2) * (x2 - x3) == 2; -c15: x1 * (x2 - x3)/(2 + 2) == 2; -c16: x1 * (x2/(2 + 2) - x3/(2 + 2)) == 2; -c17: x1 * (x2 - x3) * (1/(2 + 2)) == 2; -c18: x1 * (x2 - x3) - 2 * (2 + 2) == 0; -c19: 1/(2 + 2) ^ 2 * x1 * (x2 - x3) == 2; -c20: x1/(2 + 2) ^ 2 * (x2 - x3) == 2; -c21: x1 * (x2 - x3)/(2 + 2) ^ 2 == 2; -c22: x1 * (x2/(2 + 2) ^ 2 - x3/(2 + 2) ^ 2) == 2; -c23: x1 * (x2 - x3) * (1/(2 + 2) ^ 2) == 2; -c24: x1 * (x2 - x3) - 2 * (2 + 2) ^ 2 == 0; +c1: 1/2/2*x1*(x2 - x3) == 2; +c2: x1/2/2*(x2 - x3) == 2; +c3: x1*(x2 - x3)/2/2 == 2; +c4: x1*(x2/2/2 - x3/2/2) == 2; +c5: x1*(x2 - x3)*(1/2/2) == 2; +c6: x1*(x2 - x3) - 8 == 0; +c7: 1/(4)*x1*(x2 - x3) == 2; +c8: x1/(2*2)*(x2 - x3) == 2; +c9: x1*(x2 - x3)/(4) == 2; +c10: x1*(x2/(4) - x3/(4)) == 2; 
+c11: x1*(x2 - x3)*(1/(4)) == 2; +c12: x1*(x2 - x3) - 8 == 0; +c13: 1/(2 + 2)*x1*(x2 - x3) == 2; +c14: x1/(2 + 2)*(x2 - x3) == 2; +c15: x1*(x2 - x3)/(2 + 2) == 2; +c16: x1*(x2/(2 + 2) - x3/(2 + 2)) == 2; +c17: x1*(x2 - x3)*(1/(2 + 2)) == 2; +c18: x1*(x2 - x3) - 2*(2 + 2) == 0; +c19: 1/(2 + 2) ^ 2*x1*(x2 - x3) == 2; +c20: x1/(2 + 2) ^ 2*(x2 - x3) == 2; +c21: x1*(x2 - x3)/(2 + 2) ^ 2 == 2; +c22: x1*(x2/(2 + 2) ^ 2 - x3/(2 + 2) ^ 2) == 2; +c23: x1*(x2 - x3)*(1/(2 + 2) ^ 2) == 2; +c24: x1*(x2 - x3) - 2*(2 + 2) ^ 2 == 0; OBJ: minimize x2; diff --git a/pyomo/repn/tests/baron/small8.pyomo.bar b/pyomo/repn/tests/baron/small8.pyomo.bar index bed648c3063..b0a02c147bc 100644 --- a/pyomo/repn/tests/baron/small8.pyomo.bar +++ b/pyomo/repn/tests/baron/small8.pyomo.bar @@ -13,11 +13,11 @@ x3: 7; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2, c3; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c1: x1 * x1 >= 2; -c2: x1 + (-0.5) * x2 <= 0; +c1: x1*x1 >= 2; +c2: x1 - 0.5*x2 <= 0; c3: x3 - (x1 + 2) <= 0; -OBJ: minimize x3 + x2 * x2 + x1; +OBJ: minimize x3 + x2*x2 + x1; STARTING_POINT{ ONE_VAR_CONST__: 1; diff --git a/pyomo/repn/tests/baron/small9.pyomo.bar b/pyomo/repn/tests/baron/small9.pyomo.bar index 37a9000b23a..a6f51103a36 100644 --- a/pyomo/repn/tests/baron/small9.pyomo.bar +++ b/pyomo/repn/tests/baron/small9.pyomo.bar @@ -9,11 +9,11 @@ VARIABLES x1, x2; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c1, c2, c3, c4, c5; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c1: x1 * 0 * x2 + x1 == 1; -c2: 0 * x1 * x2 + x1 == 1; +c1: x1*0*x2 + x1 == 1; +c2: 0*x1*x2 + x1 == 1; c3: x1 == 1; -c4: x1 * 0 * x2 == 1; -c5: 0 * x1 * x2 == 1; +c4: x1*0*x2 == 1; +c5: 0*x1*x2 == 1; OBJ: minimize x1; diff --git a/pyomo/repn/tests/baron/test_baron.py b/pyomo/repn/tests/baron/test_baron.py index 0343e1690c8..1fae5a97b7d 100644 --- a/pyomo/repn/tests/baron/test_baron.py +++ b/pyomo/repn/tests/baron/test_baron.py @@ -51,10 +51,14 @@ def _check_baseline(self, model, **kwds): try: self.assertTrue(cmp(test_fname, 
baseline_fname)) except: - with open(test_fname, 'r') as f1, open(baseline_fname, 'r') as f2: + with open(baseline_fname, 'r') as f1, open(test_fname, 'r') as f2: f1_contents = f1.read().replace(' ;', ';').split() f2_contents = f2.read().replace(' ;', ';').split() - self.assertEqual(f1_contents, f2_contents) + self.assertEqual( + f1_contents, f2_contents, + "\n\nbaseline: %s\ntestFile: %s\n" % ( + baseline_fname, test_fname) + ) self._cleanup(test_fname) def _gen_expression(self, terms): diff --git a/pyomo/repn/tests/baron/test_baron_comparison.py b/pyomo/repn/tests/baron/test_baron_comparison.py index 4c786f6c9a6..efb53d433e2 100644 --- a/pyomo/repn/tests/baron/test_baron_comparison.py +++ b/pyomo/repn/tests/baron/test_baron_comparison.py @@ -72,14 +72,17 @@ def barwriter_baseline_test(self, name): testCase]) # Check that the pyomo BAR file matches its own baseline - with open(output, 'r') as f1, open(baseline, 'r') as f2: + with open(baseline, 'r') as f1, open(output, 'r') as f2: f1_contents = list(filter(None, f1.read().split())) f2_contents = list(filter(None, f2.read().split())) for item1, item2 in itertools.zip_longest(f1_contents, f2_contents): try: self.assertAlmostEqual(float(item1), float(item2)) except: - self.assertEqual(item1, item2) + self.assertEqual( + item1, item2, + "\n\nbaseline: %s\ntestFile: %s\n" % (baseline, output) + ) os.remove(join(currdir, name+'.test.bar')) diff --git a/pyomo/repn/tests/baron/var_on_deactivated_block.bar.baseline b/pyomo/repn/tests/baron/var_on_deactivated_block.bar.baseline index c247cb09c9b..e04becff9fc 100644 --- a/pyomo/repn/tests/baron/var_on_deactivated_block.bar.baseline +++ b/pyomo/repn/tests/baron/var_on_deactivated_block.bar.baseline @@ -9,7 +9,7 @@ VARIABLES other_a, x; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c: other_a + 2 * x <= 0; +c: other_a + 2*x <= 0; OBJ: minimize x; diff --git a/pyomo/repn/tests/baron/var_on_nonblock.bar.baseline 
b/pyomo/repn/tests/baron/var_on_nonblock.bar.baseline index c247cb09c9b..e04becff9fc 100644 --- a/pyomo/repn/tests/baron/var_on_nonblock.bar.baseline +++ b/pyomo/repn/tests/baron/var_on_nonblock.bar.baseline @@ -9,7 +9,7 @@ VARIABLES other_a, x; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c: other_a + 2 * x <= 0; +c: other_a + 2*x <= 0; OBJ: minimize x; diff --git a/pyomo/repn/tests/baron/var_on_other_model.bar.baseline b/pyomo/repn/tests/baron/var_on_other_model.bar.baseline index 35fa95b7b38..ad833532a4d 100644 --- a/pyomo/repn/tests/baron/var_on_other_model.bar.baseline +++ b/pyomo/repn/tests/baron/var_on_other_model.bar.baseline @@ -9,7 +9,7 @@ VARIABLES a, x; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c: a + 2 * x <= 0; +c: a + 2*x <= 0; OBJ: minimize x; diff --git a/pyomo/repn/tests/cpxlp/test_cpxlp.py b/pyomo/repn/tests/cpxlp/test_cpxlp.py index 57a7c2b8369..05b9e3dd68d 100644 --- a/pyomo/repn/tests/cpxlp/test_cpxlp.py +++ b/pyomo/repn/tests/cpxlp/test_cpxlp.py @@ -17,7 +17,10 @@ from filecmp import cmp import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, Var, Constraint, Objective, Block, ComponentMap +from pyomo.common.tempfiles import TempfileManager +from pyomo.environ import ( + ConcreteModel, Var, Constraint, Objective, Block, ComponentMap, +) thisdir = os.path.dirname(os.path.abspath(__file__)) @@ -235,6 +238,91 @@ def __init__(self, *args, **kwds): model.write, test_fname, format='lp') self._cleanup(test_fname) + def test_obj_con_cache(self): + model = ConcreteModel() + model.x = Var() + model.c = Constraint(expr=model.x >= 1) + model.obj = Objective(expr=model.x*2) + + with TempfileManager.new_context() as TMP: + lp_file = TMP.create_tempfile(suffix='.lp') + model.write(lp_file, format='lp') + self.assertFalse(hasattr(model, '_repn')) + with open(lp_file) as FILE: + lp_ref = FILE.read() + + lp_file = TMP.create_tempfile(suffix='.lp') + 
model._gen_obj_repn = True + model.write(lp_file) + self.assertEqual(len(model._repn), 1) + self.assertIn(model.obj, model._repn) + obj_repn = model._repn[model.obj] + with open(lp_file) as FILE: + lp_test = FILE.read() + self.assertEqual(lp_ref, lp_test) + + lp_file = TMP.create_tempfile(suffix='.lp') + model._gen_obj_repn = None + model._gen_con_repn = True + model.write(lp_file) + self.assertEqual(len(model._repn), 2) + self.assertIn(model.obj, model._repn) + self.assertIn(model.c, model._repn) + self.assertIs(obj_repn, model._repn[model.obj]) + obj_repn = model._repn[model.obj] + c_repn = model._repn[model.c] + with open(lp_file) as FILE: + lp_test = FILE.read() + self.assertEqual(lp_ref, lp_test) + + lp_file = TMP.create_tempfile(suffix='.lp') + model._gen_obj_repn = None + model._gen_con_repn = None + model.write(lp_file) + self.assertEqual(len(model._repn), 2) + self.assertIn(model.obj, model._repn) + self.assertIn(model.c, model._repn) + self.assertIs(obj_repn, model._repn[model.obj]) + self.assertIs(c_repn, model._repn[model.c]) + with open(lp_file) as FILE: + lp_test = FILE.read() + self.assertEqual(lp_ref, lp_test) + + lp_file = TMP.create_tempfile(suffix='.lp') + model._gen_obj_repn = True + model._gen_con_repn = True + model.write(lp_file) + self.assertEqual(len(model._repn), 2) + self.assertIn(model.obj, model._repn) + self.assertIn(model.c, model._repn) + self.assertIsNot(obj_repn, model._repn[model.obj]) + self.assertIsNot(c_repn, model._repn[model.c]) + obj_repn = model._repn[model.obj] + c_repn = model._repn[model.c] + with open(lp_file) as FILE: + lp_test = FILE.read() + self.assertEqual(lp_ref, lp_test) + + lp_file = TMP.create_tempfile(suffix='.lp') + model._gen_obj_repn = False + model._gen_con_repn = False + import pyomo.repn.plugins.ampl.ampl_ as ampl_ + gsr = ampl_.generate_standard_repn + try: + def dont_call_gsr(*args, **kwargs): + self.fail("generate_standard_repn should not be called") + ampl_.generate_standard_repn = dont_call_gsr + 
model.write(lp_file) + finally: + ampl_.generate_standard_repn = gsr + self.assertEqual(len(model._repn), 2) + self.assertIn(model.obj, model._repn) + self.assertIn(model.c, model._repn) + self.assertIs(obj_repn, model._repn[model.obj]) + self.assertIs(c_repn, model._repn[model.c]) + with open(lp_file) as FILE: + lp_test = FILE.read() + self.assertEqual(lp_ref, lp_test) if __name__ == "__main__": diff --git a/pyomo/repn/tests/gams/small5.pyomo.gms b/pyomo/repn/tests/gams/small5.pyomo.gms index a9308a8dc99..1f545d74927 100644 --- a/pyomo/repn/tests/gams/small5.pyomo.gms +++ b/pyomo/repn/tests/gams/small5.pyomo.gms @@ -22,16 +22,17 @@ VARIABLES x2 x3; + c1.. 0.5*x1*(x2 - x3) =e= 2 ; c2.. 0.5*x1*(x2 - x3) =e= 2 ; c3.. x1*(x2 - x3)/2 =e= 2 ; -c4.. x1*(0.5*x2 + (-0.5)*x3) =e= 2 ; +c4.. x1*(0.5*x2 - 0.5*x3) =e= 2 ; c5.. x1*(x2 - x3)*0.5 =e= 2 ; c6.. x1*(x2 - x3) =e= 4 ; c7.. 0.5*x1*(x2 - x3) =e= 2 ; c8.. 0.5*x1*(x2 - x3) =e= 2 ; c9.. x1*(x2 - x3)/2 =e= 2 ; -c10.. x1*(0.5*x2 + (-0.5)*x3) =e= 2 ; +c10.. x1*(0.5*x2 - 0.5*x3) =e= 2 ; c11.. x1*(x2 - x3)*(0.5) =e= 2 ; c12.. x1*(x2 - x3) =e= 4 ; c13.. GAMS_OBJECTIVE =e= power(x2, 2)/2 + power(x2, 2)/2 ; diff --git a/pyomo/repn/tests/gams/small6.pyomo.gms b/pyomo/repn/tests/gams/small6.pyomo.gms index ae31fa35a76..3b340b3636b 100644 --- a/pyomo/repn/tests/gams/small6.pyomo.gms +++ b/pyomo/repn/tests/gams/small6.pyomo.gms @@ -16,12 +16,13 @@ VARIABLES x2 x3; + c1.. 1/2*x1*(x2 - x3) =e= 2 ; c2.. x1/2*(x2 - x3) =e= 2 ; c3.. x1*(x2 - x3)/2 =e= 2 ; c4.. x1*(x2/2 - x3/2) =e= 2 ; c5.. x1*(x2 - x3)*(1/2) =e= 2 ; -c6.. x1*(x2 - x3) + (-2)*2 =e= 0 ; +c6.. x1*(x2 - x3) - 4 =e= 0 ; c7.. GAMS_OBJECTIVE =e= x2 ; x1.lo = -1; diff --git a/pyomo/repn/tests/gams/small7.pyomo.gms b/pyomo/repn/tests/gams/small7.pyomo.gms index cdc19f43297..db7f3e7b9ac 100644 --- a/pyomo/repn/tests/gams/small7.pyomo.gms +++ b/pyomo/repn/tests/gams/small7.pyomo.gms @@ -34,18 +34,19 @@ VARIABLES x2 x3; + c1.. 1/2/2*x1*(x2 - x3) =e= 2 ; c2.. 
x1/2/2*(x2 - x3) =e= 2 ; c3.. x1*(x2 - x3)/2/2 =e= 2 ; c4.. x1*(x2/2/2 - x3/2/2) =e= 2 ; c5.. x1*(x2 - x3)*(1/2/2) =e= 2 ; -c6.. x1*(x2 - x3) + (-4)*2 =e= 0 ; -c7.. 1/(2*2)*x1*(x2 - x3) =e= 2 ; +c6.. x1*(x2 - x3) - 8 =e= 0 ; +c7.. 1/(4)*x1*(x2 - x3) =e= 2 ; c8.. x1/(2*2)*(x2 - x3) =e= 2 ; -c9.. x1*(x2 - x3)/(2*2) =e= 2 ; -c10.. x1*(x2/(2*2) - x3/(2*2)) =e= 2 ; -c11.. x1*(x2 - x3)*(1/(2*2)) =e= 2 ; -c12.. x1*(x2 - x3) + (-4)*2 =e= 0 ; +c9.. x1*(x2 - x3)/(4) =e= 2 ; +c10.. x1*(x2/(4) - x3/(4)) =e= 2 ; +c11.. x1*(x2 - x3)*(1/(4)) =e= 2 ; +c12.. x1*(x2 - x3) - 8 =e= 0 ; c13.. 1/(2 + 2)*x1*(x2 - x3) =e= 2 ; c14.. x1/(2 + 2)*(x2 - x3) =e= 2 ; c15.. x1*(x2 - x3)/(2 + 2) =e= 2 ; diff --git a/pyomo/repn/tests/gams/small8.pyomo.gms b/pyomo/repn/tests/gams/small8.pyomo.gms index c62492c33fc..ca5c813edfc 100644 --- a/pyomo/repn/tests/gams/small8.pyomo.gms +++ b/pyomo/repn/tests/gams/small8.pyomo.gms @@ -15,8 +15,9 @@ VARIABLES GAMS_OBJECTIVE x3; + c1_lo.. 2 =l= x1*x1 ; -c2_hi.. x1 + (-0.5)*x2 =l= 0 ; +c2_hi.. x1 - 0.5*x2 =l= 0 ; c3_hi.. x3 - (x1 + 2) =l= 0 ; c4.. 
GAMS_OBJECTIVE =e= x3 + x2*x2 + x1 ; diff --git a/pyomo/repn/tests/gams/test_gams.py b/pyomo/repn/tests/gams/test_gams.py index 059d92eaa11..7ef8edd98d2 100644 --- a/pyomo/repn/tests/gams/test_gams.py +++ b/pyomo/repn/tests/gams/test_gams.py @@ -47,7 +47,10 @@ def _get_fnames(self): def _check_baseline(self, model, **kwds): baseline_fname, test_fname = self._get_fnames() self._cleanup(test_fname) - io_options = {"symbolic_solver_labels": True} + io_options = { + "symbolic_solver_labels": True, + "output_fixed_variables": True, + } io_options.update(kwds) model.write(test_fname, format="gams", @@ -55,10 +58,14 @@ def _check_baseline(self, model, **kwds): try: self.assertTrue(cmp(test_fname, baseline_fname)) except: - with open(test_fname, 'r') as f1, open(baseline_fname, 'r') as f2: + with open(baseline_fname, 'r') as f1, open(test_fname, 'r') as f2: f1_contents = list(filter(None, f1.read().split())) f2_contents = list(filter(None, f2.read().split())) - self.assertEqual(f1_contents, f2_contents) + self.assertEqual( + f1_contents, f2_contents, + "\n\nbaseline: %s\ntestFile: %s\n" % ( + baseline_fname, test_fname) + ) self._cleanup(test_fname) def _gen_expression(self, terms): @@ -171,7 +178,7 @@ def test_quicksum(self): m = ConcreteModel() m.y = Var(domain=Binary) m.c = Constraint(expr=quicksum([m.y, m.y], linear=True) == 1) - m.y.fix(1) + lbl = NumericLabeler('x') smap = SymbolMap(lbl) tc = StorageTreeChecker(m) @@ -180,6 +187,15 @@ def test_quicksum(self): m.c2 = Constraint(expr=quicksum([m.x, m.y], linear=True) == 1) self.assertEqual(("x2 + x1", False), expression_to_string(m.c2.body, tc, smap=smap)) + m.y.fix(1) + lbl = NumericLabeler('x') + smap = SymbolMap(lbl) + tc = StorageTreeChecker(m) + self.assertEqual(("1 + 1", False), expression_to_string(m.c.body, tc, smap=smap)) + m.x = Var() + m.c2 = Constraint(expr=quicksum([m.x, m.y], linear=True) == 1) + self.assertEqual(("x1 + 1", False), expression_to_string(m.c2.body, tc, smap=smap)) + def 
test_quicksum_integer_var_fixed(self): m = ConcreteModel() m.x = Var() @@ -254,10 +270,10 @@ def test_fixed_var_to_string(self): smap = SymbolMap(lbl) tc = StorageTreeChecker(m) self.assertEqual(expression_to_string( - m.x + m.y - m.z, tc, lbl, smap=smap), ("x1 + x2 - (-3)", False)) + m.x + m.y - m.z, tc, lbl, smap=smap), ("x1 + x2 + 3", False)) m.z.fix(-400) self.assertEqual(expression_to_string( - m.z + m.y - m.z, tc, smap=smap), ("(-400) + x2 - (-400)", False)) + m.z + m.y - m.z, tc, smap=smap), ("(-400) + x2 + 400", False)) m.z.fix(8.8) self.assertEqual(expression_to_string( m.x + m.z - m.y, tc, smap=smap), ("x1 + 8.8 - x2", False)) diff --git a/pyomo/repn/tests/gams/test_gams_comparison.py b/pyomo/repn/tests/gams/test_gams_comparison.py index be2f6021923..cce8be82921 100644 --- a/pyomo/repn/tests/gams/test_gams_comparison.py +++ b/pyomo/repn/tests/gams/test_gams_comparison.py @@ -85,10 +85,13 @@ def gams_writer_baseline_test(self, name, targetdir): try: self.assertTrue(cmp(testFile, baseline)) except: - with open(testFile, 'r') as f1, open(baseline, 'r') as f2: + with open(baseline, 'r') as f1, open(testFile, 'r') as f2: f1_contents = list(filter(None, f1.read().split())) f2_contents = list(filter(None, f2.read().split())) - self.assertEqual(f1_contents, f2_contents) + self.assertEqual( + f1_contents, f2_contents, + "\n\nbaseline: %s\ntestFile: %s\n" % (baseline, testFile) + ) @parameterized.parameterized.expand(input=invalidlist) diff --git a/pyomo/solvers/plugins/solvers/ASL.py b/pyomo/solvers/plugins/solvers/ASL.py index 51b6a5a5a5b..e2776233982 100644 --- a/pyomo/solvers/plugins/solvers/ASL.py +++ b/pyomo/solvers/plugins/solvers/ASL.py @@ -98,7 +98,12 @@ def _get_version(self): stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) - return _extract_version(results.stdout) + ver = _extract_version(results.stdout) + if ver is None: + # Some ASL solvers do not export a version number + if 
results.stdout.strip().split()[-1].startswith('ASL('): + return '0.0.0' + return ver except OSError: pass except subprocess.TimeoutExpired: diff --git a/pyomo/solvers/plugins/solvers/XPRESS.py b/pyomo/solvers/plugins/solvers/XPRESS.py index e05402022e4..440ee2415fb 100644 --- a/pyomo/solvers/plugins/solvers/XPRESS.py +++ b/pyomo/solvers/plugins/solvers/XPRESS.py @@ -75,6 +75,12 @@ class XPRESS_shell(ILMLicensedSystemCallSolver): """ def __init__(self, **kwds): + logger.warning( + "The shell interface for Xpress is broken for recent versions "\ + "of Xpress. Please use xpress_direct or xpress_persistent, "\ + "which require the Xpress Python API. Python bindings "\ + "for recent versions of Xpress can be installed via `pip`: "\ + ".") # # Call base class constructor # diff --git a/pyomo/solvers/plugins/solvers/xpress_direct.py b/pyomo/solvers/plugins/solvers/xpress_direct.py index 1a507aa1bd1..acbae2eca47 100644 --- a/pyomo/solvers/plugins/solvers/xpress_direct.py +++ b/pyomo/solvers/plugins/solvers/xpress_direct.py @@ -63,6 +63,10 @@ def _finalize_xpress_import(xpress, avail): XpressDirect.XpressException = RuntimeError else: XpressDirect.XpressException = xpress.ModelError + # In (pypi) versions prior to 8.13.0, the 'xpress.rng' keyword was + # 'xpress.range' + if not hasattr(xpress, 'rng'): + xpress.rng = xpress.range class _xpress_importer_class(object): # We want to be able to *update* the message that the deferred @@ -357,7 +361,7 @@ def _add_constraint(self, con): name=conname) elif con.has_lb() and con.has_ub(): xpress_con = xpress.constraint(body=xpress_expr, - sense=xpress.range, + sense=xpress.rng, lb=value(con.lower), ub=value(con.upper), name=conname) diff --git a/pyomo/util/calc_var_value.py b/pyomo/util/calc_var_value.py index 23f875a3762..2012e16d05b 100644 --- a/pyomo/util/calc_var_value.py +++ b/pyomo/util/calc_var_value.py @@ -8,8 +8,9 @@ # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ -from pyomo.core.expr.numvalue import native_numeric_types, value +from pyomo.core.expr.numvalue import native_numeric_types, value, is_fixed from pyomo.core.expr.calculus.derivatives import differentiate +from pyomo.core.base.constraint import Constraint, _ConstraintData import logging logger = logging.getLogger(__name__) @@ -29,10 +30,13 @@ def calculate_variable_from_constraint(variable, constraint, Parameters: ----------- - variable: `pyomo.core.base.var._VarData` + variable: :py:class:`_VarData` The variable to solve for - constraint: `pyomo.core.base.constraint._ConstraintData` - The equality constraint to use to solve for the variable value + constraint: :py:class:`_ConstraintData` or relational expression or `tuple` + The equality constraint to use to solve for the variable value. + May be a `ConstraintData` object or any valid argument for + ``Constraint(expr=<>)`` (i.e., a relational expression or 2- or + 3-tuple) eps: `float` The tolerance to use to determine equality [default=1e-8]. iterlim: `int` @@ -53,8 +57,16 @@ def calculate_variable_from_constraint(variable, constraint, respect the variable bounds. 
""" - upper = value(constraint.upper) - if value(constraint.lower) != upper: + # Leverage all the Constraint logic to process the incoming tuple/expression + if not isinstance(constraint, _ConstraintData): + constraint = Constraint(expr=constraint, name=type(constraint).__name__) + constraint.construct() + + body = constraint.body + lower = constraint.lb + upper = constraint.ub + + if lower != upper: raise ValueError("Constraint must be an equality constraint") if variable.value is None: @@ -86,12 +98,12 @@ def calculate_variable_from_constraint(variable, constraint, # solve the common case where variable is linear with coefficient of 1.0 x1 = value(variable) # Note: both the direct (linear) calculation and Newton's method - # below rely on a numerically feasible initial starting point. + # below rely on a numerically valid initial starting point. # While we have strategies for dealing with hitting numerically # invalid (e.g., sqrt(-1)) conditions below, if the initial point is # not valid, we will allow that exception to propagate up try: - residual_1 = value(constraint.body) + residual_1 = value(body) except: logger.error( "Encountered an error evaluating the expression at the " @@ -99,7 +111,7 @@ def calculate_variable_from_constraint(variable, constraint, raise variable.set_value(x1 - (residual_1-upper)) - residual_2 = value(constraint.body, exception=False) + residual_2 = value(body, exception=False) # If we encounter an error while evaluating the expression at the # linear intercept calculated assuming the derivative was 1. 
This @@ -120,14 +132,15 @@ def calculate_variable_from_constraint(variable, constraint, intercept = (residual_1-upper) - slope*x1 if slope: variable.set_value(-intercept/slope) - body_val = value(constraint.body, exception=False) + body_val = value(body, exception=False) if body_val is not None and abs(body_val-upper) < eps: return # Variable appears nonlinearly; solve using Newton's method variable.set_value(orig_initial_value) # restore initial value - expr = constraint.body - constraint.upper - expr_deriv = differentiate(expr, wrt=variable, mode=differentiate.Modes.sympy) + expr = body - upper + expr_deriv = differentiate(expr, wrt=variable, + mode=differentiate.Modes.sympy) if type(expr_deriv) in native_numeric_types and expr_deriv == 0: raise ValueError("Variable derivative == 0, cannot solve for variable") diff --git a/pyomo/util/subsystems.py b/pyomo/util/subsystems.py index e76c509e96c..09fbac8ecb6 100644 --- a/pyomo/util/subsystems.py +++ b/pyomo/util/subsystems.py @@ -12,7 +12,69 @@ from pyomo.core.base.reference import Reference from pyomo.core.expr.visitor import identify_variables from pyomo.common.collections import ComponentSet, ComponentMap -from pyomo.common.backports import nullcontext +from pyomo.common.modeling import unique_component_name + +from pyomo.core.base.constraint import Constraint +from pyomo.core.base.expression import Expression +from pyomo.core.base.external import ExternalFunction +from pyomo.core.expr.visitor import StreamBasedExpressionVisitor +from pyomo.core.expr.numeric_expr import ExternalFunctionExpression +from pyomo.core.expr.numvalue import native_types + + +class _ExternalFunctionVisitor(StreamBasedExpressionVisitor): + + def initializeWalker(self, expr): + self._functions = [] + self._seen = set() + return True, None + + def exitNode(self, node, data): + if type(node) is ExternalFunctionExpression: + if id(node) not in self._seen: + self._seen.add(id(node)) + self._functions.append(node) + + def finalizeResult(self, 
result): + return self._functions + + def enterNode(self, node): + pass + + def acceptChildResult(self, node, data, child_result, child_idx): + pass + + def acceptChildResult(self, node, data, child_result, child_idx): + if child_result.__class__ in native_types: + return False, None + return child_result.is_expression_type(), None + + +def identify_external_functions(expr): + yield from _ExternalFunctionVisitor().walk_expression(expr) + + +def add_local_external_functions(block): + ef_exprs = [] + for comp in block.component_data_objects( + (Constraint, Expression), active=True + ): + ef_exprs.extend(identify_external_functions(comp.expr)) + unique_functions = [] + fcn_set = set() + for expr in ef_exprs: + fcn = expr._fcn + data = (fcn._library, fcn._function) + if data not in fcn_set: + fcn_set.add(data) + unique_functions.append(data) + fcn_comp_map = {} + for lib, name in unique_functions: + comp_name = unique_component_name(block, "_" + name) + comp = ExternalFunction(library=lib, function=name) + block.add_component(comp_name, comp) + fcn_comp_map[lib, name] = comp + return fcn_comp_map def create_subsystem_block(constraints, variables=None, include_fixed=False): @@ -47,11 +109,12 @@ def create_subsystem_block(constraints, variables=None, include_fixed=False): var_set = ComponentSet(variables) input_vars = [] for con in constraints: - for var in identify_variables(con.body, include_fixed=include_fixed): + for var in identify_variables(con.expr, include_fixed=include_fixed): if var not in var_set: input_vars.append(var) var_set.add(var) block.input_vars = Reference(input_vars) + add_local_external_functions(block) return block diff --git a/pyomo/util/tests/test_calc_var_value.py b/pyomo/util/tests/test_calc_var_value.py index fea97075199..45c546658a8 100644 --- a/pyomo/util/tests/test_calc_var_value.py +++ b/pyomo/util/tests/test_calc_var_value.py @@ -14,7 +14,7 @@ import pyomo.common.unittest as unittest from pyomo.common.log import LoggingIntercept -from 
pyomo.environ import ConcreteModel, Var, Constraint, value, exp +from pyomo.environ import ConcreteModel, Var, Constraint, Param, value, exp from pyomo.util.calc_var_value import calculate_variable_from_constraint from pyomo.core.expr.calculus.diff_with_sympy import differentiate_available @@ -64,7 +64,6 @@ def test_initialize_value(self): ValueError, "Constraint must be an equality constraint"): calculate_variable_from_constraint(m.x, m.lt) - def test_linear(self): m = ConcreteModel() m.x = Var() @@ -73,6 +72,22 @@ def test_linear(self): calculate_variable_from_constraint(m.x, m.c) self.assertEqual(value(m.x), 2) + def test_constraint_as_tuple(self): + m = ConcreteModel() + m.x = Var() + m.p = Param(initialize=15, mutable=True) + + calculate_variable_from_constraint(m.x, 5*m.x == 5) + self.assertEqual(value(m.x), 1) + calculate_variable_from_constraint(m.x, (5*m.x, 10)) + self.assertEqual(value(m.x), 2) + calculate_variable_from_constraint(m.x, (15, 5*m.x, m.p)) + self.assertEqual(value(m.x), 3) + with self.assertRaisesRegex( + ValueError, "Constraint 'tuple' is a Ranged Inequality " + "with a variable upper bound."): + calculate_variable_from_constraint(m.x, (15, 5*m.x, m.x)) + @unittest.skipIf(not differentiate_available, "this test requires sympy") def test_nonlinear(self): diff --git a/pyomo/util/tests/test_subsystems.py b/pyomo/util/tests/test_subsystems.py index 42ddb21a204..7ec49cd80a4 100644 --- a/pyomo/util/tests/test_subsystems.py +++ b/pyomo/util/tests/test_subsystems.py @@ -16,7 +16,10 @@ generate_subsystem_blocks, TemporarySubsystemManager, ParamSweeper, + identify_external_functions, + add_local_external_functions, ) +from pyomo.common.getGSL import find_GSL def _make_simple_model(): @@ -324,6 +327,101 @@ def test_generate_dont_fix_inputs_with_fixed_var(self): self.assertFalse(m.v3.fixed) self.assertTrue(m.v4.fixed) + def _make_model_with_external_functions(self): + m = pyo.ConcreteModel() + gsl = find_GSL() + m.bessel = pyo.ExternalFunction( + 
library=gsl, function="gsl_sf_bessel_J0" + ) + m.fermi = pyo.ExternalFunction( + library=gsl, function="gsl_sf_fermi_dirac_m1" + ) + m.v1 = pyo.Var(initialize=1.0) + m.v2 = pyo.Var(initialize=2.0) + m.v3 = pyo.Var(initialize=3.0) + m.con1 = pyo.Constraint(expr=m.v1 == 0.5) + m.con2 = pyo.Constraint(expr=2*m.fermi(m.v1) + m.v2**2 - m.v3 == 1.0) + m.con3 = pyo.Constraint( + expr=m.bessel(m.v1) - m.bessel(m.v2) + m.v3**2 == 2.0 + ) + return m + + @unittest.skipUnless(find_GSL(), "Could not find the AMPL GSL library") + def test_identify_external_functions(self): + m = self._make_model_with_external_functions() + m._con = pyo.Constraint(expr=2*m.fermi(m.bessel(m.v1**2) + 0.1) == 1.0) + + gsl = find_GSL() + + fcns = list(identify_external_functions(m.con2.expr)) + self.assertEqual(len(fcns), 1) + self.assertEqual(fcns[0]._fcn._library, gsl) + self.assertEqual(fcns[0]._fcn._function, "gsl_sf_fermi_dirac_m1") + + fcns = list(identify_external_functions(m.con3.expr)) + fcn_data = set((fcn._fcn._library, fcn._fcn._function) for fcn in fcns) + self.assertEqual(len(fcns), 2) + pred_fcn_data = {(gsl, "gsl_sf_bessel_J0")} + self.assertEqual(fcn_data, pred_fcn_data) + + fcns = list(identify_external_functions(m._con.expr)) + fcn_data = set((fcn._fcn._library, fcn._fcn._function) for fcn in fcns) + self.assertEqual(len(fcns), 2) + pred_fcn_data = { + (gsl, "gsl_sf_bessel_J0"), + (gsl, "gsl_sf_fermi_dirac_m1"), + } + self.assertEqual(fcn_data, pred_fcn_data) + + def _solve_ef_model_with_ipopt(self): + m = self._make_model_with_external_functions() + ipopt = pyo.SolverFactory("ipopt") + ipopt.solve(m) + return m + + @unittest.skipUnless(find_GSL(), "Could not find the AMPL GSL library") + @unittest.skipUnless( + pyo.SolverFactory("ipopt").available(), + "ipopt is not available" + ) + def test_with_external_function(self): + m = self._make_model_with_external_functions() + subsystem = ([m.con2, m.con3], [m.v2, m.v3]) + + m.v1.set_value(0.5) + block = 
create_subsystem_block(*subsystem) + ipopt = pyo.SolverFactory("ipopt") + with TemporarySubsystemManager(to_fix=list(block.input_vars.values())): + ipopt.solve(block) + + # Correct values obtained by solving with Ipopt directly + # in another script. + self.assertEqual(m.v1.value, 0.5) + self.assertFalse(m.v1.fixed) + self.assertAlmostEqual(m.v2.value, 1.04816, delta=1e-5) + self.assertAlmostEqual(m.v3.value, 1.34356, delta=1e-5) + + # Result obtained by solving the full system + m_full = self._solve_ef_model_with_ipopt() + self.assertAlmostEqual(m.v1.value, m_full.v1.value) + self.assertAlmostEqual(m.v2.value, m_full.v2.value) + self.assertAlmostEqual(m.v3.value, m_full.v3.value) + + @unittest.skipUnless(find_GSL(), "Could not find the AMPL GSL library") + def test_external_function_with_potential_name_collision(self): + m = self._make_model_with_external_functions() + m.b = pyo.Block() + m.b._gsl_sf_bessel_J0 = pyo.Var() + m.b.con = pyo.Constraint( + expr=m.b._gsl_sf_bessel_J0 == m.bessel(m.v1) + ) + add_local_external_functions(m.b) + self.assertTrue(isinstance(m.b._gsl_sf_bessel_J0, pyo.Var)) + ex_fcns = list(m.b.component_objects(pyo.ExternalFunction)) + self.assertEqual(len(ex_fcns), 1) + fcn = ex_fcns[0] + self.assertEqual(fcn._function, "gsl_sf_bessel_J0") + class TestTemporarySubsystemManager(unittest.TestCase):