diff --git a/.travis.yml b/.travis.yml index d652f6c4d..71f84e11a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,34 +1,22 @@ # Modified from # https://github.com/calliope-project/calliope/blob/master/.travis.yml -language: python +language: bash sudo: false # Use container-based infrastructure -matrix: - include: - - env: - - PYTHON_VERSION="2.7" - - env: - - PYTHON_VERSION="3.6" - - env: - - PYTHON_VERSION="3.7" - - env: - - PYTHON_VERSION="3.8" +os: + - windows + - linux + - osx +env: + - PYTHON_VERSION="3.6" + - PYTHON_VERSION="3.7" + - PYTHON_VERSION="3.8" before_install: - - if [[ "$PYTHON_VERSION" == "2.7" ]]; then - wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh; - else - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; - fi - - bash miniconda.sh -b -p $HOME/miniconda - - export PATH="$HOME/miniconda/bin:$PATH" - - hash -r - - conda config --set always_yes yes --set changeps1 no - - conda update -q conda - # Useful for debugging any issues with conda - - conda info -a - - source $HOME/miniconda/etc/profile.d/conda.sh + - wget https://raw.githubusercontent.com/trichter/conda4travis/latest/conda4travis.sh -O conda4travis.sh + - source conda4travis.sh + - if [ "$TRAVIS_OS_NAME" != "windows" ]; then echo " - coincbc" >> environment.yaml; fi; install: - conda config --add pinned_packages python=$PYTHON_VERSION @@ -44,7 +32,9 @@ install: # - "sh -e /etc/init.d/xvfb start" # - sleep 3 # give xvfb some time to start -script: "make test" +script: + - py.test # after_success: # - coveralls + diff --git a/MANIFEST.in b/MANIFEST.in index 1ee75eeeb..8b187018f 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,6 @@ include pypsa/component_attrs/*.csv include pypsa/standard_types/*.csv include pypsa/components.csv +include pypsa/variables.csv include README.rst LICENSE.txt include requirements.yml diff --git a/doc/optimal_power_flow.rst b/doc/optimal_power_flow.rst index 51b9343ac..caa444a67 100644 --- a/doc/optimal_power_flow.rst +++ b/doc/optimal_power_flow.rst @@ -3,28 +3,34 @@ ###################### -See the module ``pypsa.opf``. +See the module ``pypsa.opf`` and ``pypsa.linopf``. Optimisation with the linearised power flow equations for (mixed) AC +and DC networks is fully supported. Note that optimisation with the full non-linear power flow equations is not yet supported. -Non-Linear Optimal Power Flow -============================== -Optimisation with the full non-linear power flow equations is not yet -supported. +All constraints and variables are listed below. +Overview +-------- +* The linear OPF module can optimise the dispatch of generation and storage and the capacities of generation, storage and transmission infrastructure. -Linear Optimal Power Flow -========================= +* It is assumed that the load is inelastic and must be met in every snapshot (this will be relaxed in future versions). -Optimisation with the linearised power flow equations for (mixed) AC -and DC networks is fully supported. +* The optimisation currently uses continuous variables for most functionality; unit commitment with binary variables is also implemented for generators. -All constraints and variables are listed below. + +* The objective function is the total system cost for the snapshots optimised. + +* Each snapshot can be given a weighting :math:`w_t` to represent e.g. multiple hours. + +* This set-up can also be used for stochastic optimisation, if you interpret the weighting as a probability. 
+
+* Each transmission asset has a capital cost.
+
+* Each generation and storage asset has a capital cost and a marginal cost.

-Overview
---------

Execute:

@@ -48,35 +54,16 @@ for more details).

.. automethod:: pypsa.Network.lopf

-The linear OPF module can optimise the dispatch of generation and storage
-and the capacities of generation, storage and transmission infrastructure.
-
-It is assumed that the load is inelastic and must be met in every
-snapshot (this will be relaxed in future versions).
-
-The optimisation currently uses continuous variables for most
-functionality; unit commitment with binary variables is also
-implemented for generators.
-
-The objective function is the total system cost for the snapshots
-optimised.
-
-Each snapshot can be given a weighting :math:`w_t` to represent
-e.g. multiple hours.
-
-This set-up can also be used for stochastic optimisation, if you
-interpret the weighting as a probability.
-
-Each transmission asset has a capital cost.
-Each generation and storage asset has a capital cost and a marginal cost.
+.. important:: Since version v0.15.1, PyPSA enables the optimisation without the use of `pyomo `_. This makes the ``lopf`` function much more efficient in terms of memory usage and time. For this purpose two new modules were introduced, ``pypsa.linopf`` and ``pypsa.linopt``, which mainly reflect the functionality of ``pypsa.opf`` and ``pypsa.opt`` but without using pyomo.
+   Note that when setting pyomo to False, the ``extra_functionality`` has to be adapted to the appropriate syntax.

-.. warning:: If the transmission capacity is changed in passive networks, then the impedance will also change (i.e. if parallel lines are installed). This is NOT reflected in the LOPF, so the network equations may no longer be valid. Note also that all the expansion is continuous.
+.. warning:: If the transmission capacity is changed in passive networks, then the impedance will also change (i.e. if parallel lines are installed). This is NOT reflected in the ordinary LOPF, however ``pypsa.linopf.ilopf`` covers this through an iterative process as done `here `_.

-Optimising dispatch only: a market model
-----------------------------------------
+Optimising dispatch only - a market model
+-----------------------------------------

Capacity optimisation can be turned off so that only the dispatch is
optimised, like a short-run electricity market model.

@@ -89,7 +76,7 @@ point-to-point HVDC link).

Optimising total annual system costs
-------------------------------------
+----------------------------------------

To minimise long-run annual system costs for meeting an inelastic
electrical load, capital costs for transmission and generation should be set to

@@ -117,41 +104,28 @@ functionality is planned.
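+For orientation, a minimal dispatch-only run might look as follows (the
+network file name is a hypothetical placeholder and the choice of solver
+depends on your installation)::
+
+    import pypsa
+
+    n = pypsa.Network("my-network.nc")
+
+    # keep all capacities fixed so that only the dispatch is optimised
+    n.generators["p_nom_extendable"] = False
+    n.lines["s_nom_extendable"] = False
+
+    n.lopf(n.snapshots, solver_name="glpk")
+
+    # dispatch per generator and locational marginal prices per bus
+    print(n.generators_t.p)
+    print(n.buses_t.marginal_price)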
Variables and notation summary
------------------------------

-:math:`n \in N = \{0,\dots |N|-1\}` label the buses
-
-:math:`t \in T = \{0,\dots |T|-1\}` label the snapshots
-
-:math:`l \in L = \{0,\dots |L|-1\}` label the branches
-
-:math:`s \in S = \{0,\dots |S|-1\}` label the different generator/storage types at each bus
-
-:math:`w_t` weighting of time :math:`t` in the objective function
-
-:math:`g_{n,s,t}` dispatch of generator :math:`s` at bus :math:`n` at time :math:`t`
-
-:math:`\bar{g}_{n,s}` nominal power of generator :math:`s` at bus :math:`n`
-
-:math:`\bar{g}_{n,s,t}` availability of generator :math:`s` at bus :math:`n` at time :math:`t` per unit of nominal power
-
-:math:`u_{n,s,t}` binary status variable for generator with unit commitment
-
-:math:`suc_{n,s,t}` start-up cost if generator with unit commitment is started at time :math:`t`
-
-:math:`sdc_{n,s,t}` shut-down cost if generator with unit commitment is shut down at time :math:`t`
-
-:math:`c_{n,s}` capital cost of extending generator nominal power by one MW
-
-:math:`o_{n,s}` marginal cost of dispatch generator for one MWh
-
-:math:`f_{l,t}` flow of power in branch :math:`l` at time :math:`t`
-
-:math:`F_{l}` capacity of branch :math:`l`
-
-:math:`\eta_{n,s}` efficiency of generator :math:`s` at bus :math:`n`
-
-:math:`\eta_{l}` efficiency of controllable link :math:`l`
-
-:math:`e_s` CO2-equivalent-tonne-per-MWh of the fuel carrier :math:`s`
+.. csv-table::
+   :widths: 20 50
+   :delim: ;
+
+   :math:`n \in N = \{0,\dots |N|-1\}`; label the buses
+   :math:`t \in T = \{0,\dots |T|-1\}`; label the snapshots
+   :math:`l \in L = \{0,\dots |L|-1\}`; label the branches
+   :math:`s \in S = \{0,\dots |S|-1\}`; label the different generator/storage types at each bus
+   :math:`w_t`; weighting of time :math:`t` in the objective function
+   :math:`g_{n,s,t}`; dispatch of generator :math:`s` at bus :math:`n` at time :math:`t`
+   :math:`\bar{g}_{n,s}`; nominal power of generator :math:`s` at bus :math:`n`
+   :math:`\bar{g}_{n,s,t}`; availability of generator :math:`s` at bus :math:`n` at time :math:`t` per unit of nominal power
+   :math:`u_{n,s,t}`; binary status variable for generator with unit commitment
+   :math:`suc_{n,s,t}`; start-up cost if generator with unit commitment is started at time :math:`t`
+   :math:`sdc_{n,s,t}`; shut-down cost if generator with unit commitment is shut down at time :math:`t`
+   :math:`c_{n,s}`; capital cost of extending generator nominal power by one MW
+   :math:`o_{n,s}`; marginal cost of dispatching generator for one MWh
+   :math:`f_{l,t}`; flow of power in branch :math:`l` at time :math:`t`
+   :math:`F_{l}`; capacity of branch :math:`l`
+   :math:`\eta_{n,s}`; efficiency of generator :math:`s` at bus :math:`n`
+   :math:`\eta_{l}`; efficiency of controllable link :math:`l`
+   :math:`e_s`; CO2-equivalent-tonne-per-MWh of the fuel carrier :math:`s`

Further definitions are given below.

@@ -233,8 +207,9 @@ Generator unit commitment constraints

These are defined in ``pypsa.opf.define_generator_variables_constraints(network,snapshots)``.

-The implementation is a complete implementation of the unit commitment constraints defined in Chapter 4.3 of `Convex Optimization of Power Systems `_ by
-Joshua Adam Taylor (CUP, 2015).
+.. important:: Unit commitment constraints will only be built fully if pyomo is set to True.
If pyomo is set to False, a simplified version of the unit commitment is calculated by ignoring the parameters `min_up_time`, `min_down_time`, `start_up_cost`, `shut_down_cost`, `up_time_before` and `down_time_before`.
+
+The implementation is a complete implementation of the unit commitment constraints defined in Chapter 4.3 of `Convex Optimization of Power Systems `_ by Joshua Adam Taylor (CUP, 2015).

Unit commitment can be turned on for any generator by setting ``committable`` to be ``True``. This introduces a

@@ -316,7 +291,7 @@ at start-up :math:`rusu_{n,s}` and shut-down :math:`rdsd_{n,s}`

 \end{gather*}

Storage Unit constraints
-------------------------
+-------------------------

These are defined in ``pypsa.opf.define_storage_variables_constraints(network,snapshots)``.

@@ -375,7 +350,7 @@ storage unit where the state of charge must empty every day.)

Store constraints
-------------------------
+------------------

These are defined in ``pypsa.opf.define_store_variables_constraints(network,snapshots)``.

@@ -415,7 +390,8 @@ optimisation assumes :math:`e_{n,s,t=-1} = e_{n,s,t=|T|-1}`.

Passive branch flows: lines and transformers
---------------------------------------------
+---------------------------------------------
+

See ``pypsa.opf.define_passive_branch_flows(network,snapshots)`` and
``pypsa.opf.define_passive_branch_constraints(network,snapshots)`` and
``pypsa.opf.define_branch_extension_variables(network,snapshots)``.

@@ -442,20 +418,17 @@ This flow is the limited by the capacity :math:``F_l`` of the line

.. math:: |f_{l,t}| \leq F_l

-Note that if :math:`F_l` is also subject to optimisation
-(``branch.s_nom_extendable == True``), then the impedance :math:`x` of
-the line is NOT automatically changed with the capacity (to represent
-e.g. parallel lines being added).
+.. note::
+   If :math:`F_l` is also subject to optimisation
+   (``branch.s_nom_extendable == True``), then the impedance :math:`x` of
+   the line is NOT automatically changed with the capacity (to represent
+   e.g. parallel lines being added).

-There are two choices here:
+   There are two choices here:

-Iterate the LOPF again with the updated impedances (see e.g. ``_).
+   1. Iterate the LOPF again with the updated impedances, see e.g. ``_, as done by ``pypsa.linopf.ilopf``

-João Gorenstein Dedecca has also implemented a MILP version of the
-transmission expansion, see
-``_, which properly takes
-account of the impedance with a disjunctive relaxation. This will be
-pulled into the main PyPSA code base soon.
+   2. João Gorenstein Dedecca has also implemented a MILP version of the transmission expansion, see ``_, which properly takes account of the impedance with a disjunctive relaxation. This will be pulled into the main PyPSA code base soon.

.. _formulations:

Passive branch flow formulations
--------------------------------
+
+
PyPSA implements four formulations of the linear power flow equations
that are mathematically equivalent, but may have different solving
times. These different formulations are described and

@@ -490,7 +465,9 @@ generators at most nodes.

.. _opf-links:

Controllable branch flows: links
----------------------------------
+--------------------------------
+
+
See ``pypsa.opf.define_controllable_branch_flows(network,snapshots)``
and ``pypsa.opf.define_branch_extension_variables(network,snapshots)``.

@@ -518,6 +495,7 @@ efficiencies ``efficiencyi``, i.e.
:math:`\eta_{i,l}`, then at

Nodal power balances
--------------------
+

See ``pypsa.opf.define_nodal_balances(network,snapshots)``.

This is the most important equation, which guarantees that the power

@@ -541,6 +519,7 @@ feeding in and out of it (i.e. like Kirchhoff's Current Law).

Global constraints
------------------
+

See ``pypsa.opf.define_global_constraints(network,snapshots)``.

Global constraints apply to more than one component.

@@ -575,16 +554,24 @@ optimisation stored in ``network.global_constraints.mu``.

Custom constraints and other functionality
------------------------------------------

-PyPSA uses the Python optimisation language `pyomo
-`_ to construct the OPF problem. You can easily
-extend the optimisation problem constructed by PyPSA using the usual
-pyomo syntax. To do this, pass the function ``network.lopf`` a
+
+Since PyPSA v0.15, the lopf function is provided by two different modules. The ordinary implementation, based on the ``pypsa.opf`` module, uses
+`pyomo `_ to set up the linear optimisation problem and pass it to the solver. The implementation without pyomo, based on the module ``pypsa.linopf``, uses a straightforward approach to write out the ``.lp`` file directly and run it through the solver's interface. Therefore the application of custom constraints depends on whether pyomo is activated or not.
+
+In general for a custom constraint, pass the function ``network.lopf`` a
 function ``extra_functionality`` as an argument.  This function must
 take two arguments ``extra_functionality(network,snapshots)`` and is
 called after the model building is complete, but before it is sent to
 the solver. It allows the user to add, change or remove constraints and
 alter the objective function.

+1. pyomo is set to True
+=================================
+
+You can easily
+extend the optimisation problem constructed by PyPSA using the usual
+pyomo syntax.
+
 The `CHP example `_ and the `example that replaces generators and storage units with fundamental links

@@ -599,55 +586,90 @@ arguments `extra_postprocessing(network,snapshots,duals)`.

It allows the user to extract further information about the solution, such as additional shadow prices for constraints.

+2. pyomo is set to False
+========================
+
+In general when pyomo is disabled, all variable and constraint references are stored in the network object itself. Thus every variable and constraint is attached to a component, e.g. the dispatch variable ``p`` of the generators is attached to the component 'Generator' and can easily be accessed by
+
+    >>> get_var(n, 'Generator', 'p')
+
+An additional constraint can easily be implemented by using the following functions (a short sketch is given below):
+
+* ``pypsa.linopt.get_var`` for getting the variables which should be included in the constraint
+* ``pypsa.linopt.linexpr`` for creating linear expressions for the left hand side (lhs) of the constraint. Note that only the lhs includes all terms with variables, the rhs is a constant.
+* ``pypsa.linopt.define_constraints`` for defining a network constraint.
+
+These functions are defined as follows:
+
+.. automethod:: pypsa.linopt.get_var
+.. automethod:: pypsa.linopt.linexpr
+.. automethod:: pypsa.linopt.define_constraints
+
+The function ``extra_postprocessing`` is not necessary when pyomo is deactivated. For retrieving additional shadow prices, just pass the name of the component to which the constraint is attached to the ``keep_shadowprices`` parameter of the ``lopf`` function.
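+As a minimal sketch (the network ``n``, the 5 MW bound and the constraint
+name ``min_dispatch`` are purely illustrative assumptions), a custom
+constraint forcing every generator to dispatch at least 5 MW in every
+snapshot could look like this::
+
+    from pypsa.linopt import get_var, linexpr, define_constraints
+
+    def extra_functionality(n, snapshots):
+        # left hand side: 1.0 * p for every generator and snapshot
+        p = get_var(n, 'Generator', 'p')
+        lhs = linexpr((1, p))
+        # require lhs >= 5 and attach the constraint to 'Generator'
+        define_constraints(n, lhs, '>=', 5, 'Generator', 'min_dispatch')
+
+    n.lopf(pyomo=False, extra_functionality=extra_functionality,
+           keep_shadowprices=['Generator'])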
+.. Fixing variables
+.. ----------------
+
+.. This feature is only valid if pyomo is disabled in the lopf function (i.e. ``pyomo=False``). It is possible to fix all variables to specific values. Create a pandas DataFrame or a column with the same name as the variable but with the suffix '_set'. For all non-``NaN`` values additional constraints will be built to fix the variables.
+
+.. For example, let's say we want to fix the output of a single generator 'gas1' to 200 MW for all snapshots. Then we can add a dataframe ``p_set`` to network.generators_t with the corresponding value and index.
+
+.. >>> network.generators_t['p_set'] = pd.DataFrame(200, index=network.snapshots, columns=['gas1'])
+
+.. The lopf will now build extra constraints to fix the ``p`` variables of generator 'gas1' to 200. In the same manner, we can fix the variables only for some specific snapshots. This is applicable to all variables, also ``state_of_charge`` for storage units or ``p`` for links. Static investment variables can be fixed via adding additional columns, e.g. a ``s_nom_set`` column to ``network.lines``.


Inputs
------
+

For the linear optimal power flow, the following data for each component
are used. For almost all values, defaults are assumed if not explicitly
set. For the defaults and units, see :doc:`components`.

-network{snapshot_weightings}
+* network.{snapshot_weightings}

-bus.{v_nom, carrier}
+* bus.{v_nom, carrier}

-load.{p_set}
+* load.{p_set}

-generator.{p_nom, p_nom_extendable, p_nom_min, p_nom_max, p_min_pu, p_max_pu, marginal_cost, capital_cost, efficiency, carrier}
+* generator.{p_nom, p_nom_extendable, p_nom_min, p_nom_max, p_min_pu, p_max_pu, marginal_cost, capital_cost, efficiency, carrier}

-storage_unit.{p_nom, p_nom_extendable, p_nom_min, p_nom_max, p_min_pu, p_max_pu, marginal_cost, capital_cost, efficiency*, standing_loss, inflow, state_of_charge_set, max_hours, state_of_charge_initial, cyclic_state_of_charge}
+* storage_unit.{p_nom, p_nom_extendable, p_nom_min, p_nom_max, p_min_pu, p_max_pu, marginal_cost, capital_cost, efficiency*, standing_loss, inflow, state_of_charge_set, max_hours, state_of_charge_initial, cyclic_state_of_charge}

-store.{e_nom, e_nom_extendable, e_nom_min, e_nom_max, e_min_pu, e_max_pu, e_cyclic, e_initial, capital_cost, marginal_cost, standing_loss}
+* store.{e_nom, e_nom_extendable, e_nom_min, e_nom_max, e_min_pu, e_max_pu, e_cyclic, e_initial, capital_cost, marginal_cost, standing_loss}

-line.{x, s_nom, s_nom_extendable, s_nom_min, s_nom_max, capital_cost}
+* line.{x, s_nom, s_nom_extendable, s_nom_min, s_nom_max, capital_cost}

-transformer.{x, s_nom, s_nom_extendable, s_nom_min, s_nom_max, capital_cost}
+* transformer.{x, s_nom, s_nom_extendable, s_nom_min, s_nom_max, capital_cost}

-link.{p_min_pu, p_max_pu, p_nom, p_nom_extendable, p_nom_min, p_nom_max, capital_cost}
+* link.{p_min_pu, p_max_pu, p_nom, p_nom_extendable, p_nom_min, p_nom_max, capital_cost}

-carrier.{carrier_attribute}
+* carrier.{carrier_attribute}

-global_constraint.{type, carrier_attribute, sense, constant}
+* global_constraint.{type, carrier_attribute, sense, constant}

.. note:: Note that for lines and transformers you MUST make sure that :math:`x` is non-zero, otherwise the bus admittance matrix will be singular.
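+To make these inputs concrete, here is a hypothetical two-bus toy example
+(all names and numbers are illustrative, not defaults)::
+
+    import pypsa
+
+    n = pypsa.Network()
+    n.set_snapshots(range(4))
+
+    n.add("Bus", "bus0", v_nom=380.)
+    n.add("Bus", "bus1", v_nom=380.)
+    # x must be non-zero, see the note above
+    n.add("Line", "line01", bus0="bus0", bus1="bus1", x=0.1, s_nom=100.)
+    n.add("Load", "load1", bus="bus1", p_set=80.)
+    n.add("Generator", "gen0", bus="bus0", p_nom=100., marginal_cost=25.)
+
+    n.lopf(n.snapshots, solver_name="glpk")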
Outputs ------- -bus.{v_mag_pu, v_ang, p, marginal_price} -load.{p} +* bus.{v_mag_pu, v_ang, p, marginal_price} + +* load.{p} -generator.{p, p_nom_opt} +* generator.{p, p_nom_opt} -storage_unit.{p, p_nom_opt, state_of_charge, spill} +* storage_unit.{p, p_nom_opt, state_of_charge, spill} -store.{p, e_nom_opt, e} +* store.{p, e_nom_opt, e} -line.{p0, p1, s_nom_opt, mu_lower, mu_upper} +* line.{p0, p1, s_nom_opt, mu_lower, mu_upper} -transformer.{p0, p1, s_nom_opt, mu_lower, mu_upper} +* transformer.{p0, p1, s_nom_opt, mu_lower, mu_upper} -link.{p0, p1, p_nom_opt, mu_lower, mu_upper} +* link.{p0, p1, p_nom_opt, mu_lower, mu_upper} -global_constraint.{mu} +* global_constraint.{mu} diff --git a/environment.yaml b/environment.yaml index 696818747..50694da70 100644 --- a/environment.yaml +++ b/environment.yaml @@ -13,5 +13,6 @@ dependencies: - networkx>=1.10 - pyomo - cartopy>=0.16 - - coincbc +# - coincbc - glpk + - gurobi::gurobi diff --git a/environment_dev.yaml b/environment_dev.yaml index 2f5c0050b..32f1ab41b 100644 --- a/environment_dev.yaml +++ b/environment_dev.yaml @@ -4,6 +4,7 @@ channels: - conda-forge dependencies: + - python - pip - pytest - pytest-cov diff --git a/examples/lopf_with_pyomo_False.ipynb b/examples/lopf_with_pyomo_False.ipynb new file mode 100644 index 000000000..ab4214a59 --- /dev/null +++ b/examples/lopf_with_pyomo_False.ipynb @@ -0,0 +1,392 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pypsa\n", + "import pandas as pd\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "path = os.path.join(pypsa.__path__[0], '..', 'examples', 'ac-dc-meshed', 'ac-dc-data')\n", + "n = pypsa.Network(path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Modify the network a bit" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set gas generators to non-exdendable" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "n.generators.loc[n.generators.carrier == 'gas', 'p_nom_extendable'] = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Add ramp limit" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "n.generators.loc[n.generators.carrier == 'gas', 'ramp_limit_down'] = 0.2\n", + "n.generators.loc[n.generators.carrier == 'gas', 'ramp_limit_up'] = 0.2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Add additional storage units (cyclic and non-cyclic) and fix one state_of_charge" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "n.add('StorageUnit', 'su', bus='Manchester', marginal_cost=10, inflow=50,\n", + " p_nom_extendable=True, capital_cost=10, p_nom=2000,\n", + " efficiency_dispatch=0.5,\n", + " cyclic_state_of_charge=True, state_of_charge_initial=1000)\n", + "\n", + "n.add('StorageUnit', 'su2', bus='Manchester', marginal_cost=10,\n", + " p_nom_extendable=True, capital_cost=50, p_nom=2000,\n", + " efficiency_dispatch=0.5, carrier='gas',\n", + " cyclic_state_of_charge=False, state_of_charge_initial=1000)\n", + "\n", + "n.storage_units_t.state_of_charge_set.loc[n.snapshots[7], 'su'] = 100" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Add additional store" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "n.add('Bus', 'storebus', carrier='hydro', x=-5, y=55)\n", + "n.madd('Link', ['battery_power', 'battery_discharge'], '',\n", + " bus0=['Manchester', 'storebus'], bus1=['storebus', 'Manchester'],\n", + " p_nom=100, efficiency=.9, p_nom_extendable=True, p_nom_max=1000)\n", + "n.madd('Store', ['store'], bus='storebus', e_nom=2000, e_nom_extendable=True,\n", + " marginal_cost=10, capital_cost=10, e_nom_max=5000, e_initial=100,\n", + " e_cyclic=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Extra funcionalities:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pypsa.linopt import get_var, linexpr, join_exprs, define_constraints " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "One of the most important functions is linexpr which take one or more tuples of coefficient and variable pairs which should go into the left hand side (lhs) of the constraint. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. Add mimimum for state_of_charge" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def minimal_state_of_charge(n, snapshots):\n", + " vars_soc = get_var(n, 'StorageUnit', 'state_of_charge')\n", + " lhs = linexpr((1, vars_soc))\n", + " define_constraints(n, lhs, '>', 50, 'StorageUnit', 'soc_lower_bound')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Fix the ratio between ingoing and outgoing capacity of the Store" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def fix_link_cap_ratio(n, snapshots):\n", + " vars_link = get_var(n, 'Link', 'p_nom')\n", + " eff = n.links.at['battery_power', 'efficiency']\n", + " lhs = linexpr((1, vars_link['battery_power']), (-eff, vars_link['battery_discharge']))\n", + " define_constraints(n, lhs, '=', 0, 'battery_discharge', attr='fixratio')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Every bus must in total produce the 20% of the total demand\n", + "\n", + "This requires the function `pypsa.linopt.join_exprs` which sums up arrays of linear expressions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def fix_bus_production(n, snapshots):\n", + " total_demand = n.loads_t.p_set.sum().sum() \n", + " prod_per_bus = linexpr((1, get_var(n, 'Generator', 'p'))).groupby(n.generators.bus, axis=1).apply(join_exprs)\n", + " define_constraints(n, prod_per_bus, '>=', total_demand/5, 'Bus', 'production_share')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Combine them ..." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def extra_functionalities(n, snapshots):\n",
+    "    minimal_state_of_charge(n, snapshots)\n",
+    "    fix_link_cap_ratio(n, snapshots)\n",
+    "    fix_bus_production(n, snapshots)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### ...and run the lopf with `pyomo=False`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "n.lopf(pyomo=False, extra_functionality=extra_functionalities, \n",
+    "       keep_shadowprices=['Bus', 'battery_discharge', 'StorageUnit'])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `keep_shadowprices` argument in the lopf now decides which shadow prices (SP) should be retrieved. It can either be set to `True`, in which case all SP are kept. It can also be a list of names of the constraints. For this the `name` argument in `define_constraints` is necessary; in our case 'battery_discharge', 'StorageUnit' and 'Bus'. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Analysing the constraints"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's see if the problem took our own constraints into account. We look at `n.constraints`, which summarises all constraints going into the linear problem"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "n.constraints"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The last three entries show our constraints. As 'soc_lower_bound' is time-dependent, the `pnl` value is set to `True`. \n",
+    "\n",
+    "Let's check whether our custom constraints are fulfilled:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "n.links.loc[['battery_power', 'battery_discharge'], ['p_nom_opt']]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "n.storage_units_t.state_of_charge"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "n.generators_t.p.groupby(n.generators.bus, axis=1).sum().sum()/n.loads_t.p.sum().sum()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Looks good! Now, let's see which dual values were parsed. For this we have a look into `n.dualvalues` \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "n.dualvalues"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Again we see the last two entries reflect our constraints (the values in the columns play only a minor role). Let's have a look at what the values are:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from pypsa.linopt import get_dual"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "get_dual(n, 'StorageUnit', 'soc_lower_bound')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "get_dual(n, 'battery_discharge', 'fixratio')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "get_dual(n, 'Bus', 'production_share')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Side note\n",
+    "Some of the predefined constraints are stored in the components themselves, like `n.lines_t.mu_upper` or `n.buses_t.marginal_price`; this is the case if there are designated columns for those. All other duals are stored under the hood in `n.duals`"
+   ]
+  }
+ ],
+ "metadata": {
+  "@webio": {
+   "lastCommId": null,
+   "lastKernelId": null
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/pypsa/__init__.py b/pypsa/__init__.py
index 8eb408fb0..4b39db6a3 100644
--- a/pypsa/__init__.py
+++ b/pypsa/__init__.py
@@ -26,7 +26,15 @@ from __future__ import absolute_import

 from . import components, descriptors
-from . import pf, opf, plot, networkclustering, io, contingency, geo
+from . import pf, opf, opt, plot, networkclustering, io, contingency, geo, stats
+
+import sys
+
+#do this as long as python 2.7 should be supported
+if sys.version_info.major >= 3:
+    from . import linopf, linopt
+
+
 from .components import Network, SubNetwork
diff --git a/pypsa/component_attrs/generators.csv b/pypsa/component_attrs/generators.csv
index b640cc3c7..6f0922528 100644
--- a/pypsa/component_attrs/generators.csv
+++ b/pypsa/component_attrs/generators.csv
@@ -31,3 +31,8 @@ p,series,MW,0.,active power at bus (positive if net generation),Output
 q,series,MVar,0.,reactive power (positive if net generation),Output
 p_nom_opt,float,MW,0.,Optimised nominal power.,Output
 status,series,n/a,1,"Status (1 is on, 0 is off). Only outputted if committable is True.",Output
+mu_upper,series,currency/MWh,0.,Shadow price of upper p_nom limit,Output
+mu_lower,series,currency/MWh,0.,Shadow price of lower p_nom limit,Output
+mu_p_set,series,currency/MWh,0.,Shadow price of fixed power generation p_set,Output
+mu_ramp_limit_up,series,currency/MWh,0.,Shadow price of upper ramp up limit,Output
+mu_ramp_limit_down,series,currency/MWh,0.,Shadow price of lower ramp down limit,Output
diff --git a/pypsa/component_attrs/links.csv b/pypsa/component_attrs/links.csv
index a8cd131a8..719d234e9 100644
--- a/pypsa/component_attrs/links.csv
+++ b/pypsa/component_attrs/links.csv
@@ -19,4 +19,5 @@ p0,series,MW,0.,Active power at bus0 (positive if branch is withdrawing power fr
 p1,series,MW,0.,Active power at bus1 (positive if branch is withdrawing power from bus1).,Output
 p_nom_opt,float,MVA,0.,Optimised capacity for active power.,Output
 mu_lower,series,currency/MVA,0.,Shadow price of lower p_nom limit -F \leq f.
Always non-negative.,Output -mu_upper,series,currency/MVA,0.,Shadow price of upper p_nom limit f \leq F. Always non-negative.,Output \ No newline at end of file +mu_upper,series,currency/MVA,0.,Shadow price of upper p_nom limit f \leq F. Always non-negative.,Output +mu_p_set,series,currency/MWh,0.,Shadow price of fixed power transmission p_set,Output diff --git a/pypsa/component_attrs/storage_units.csv b/pypsa/component_attrs/storage_units.csv index 64962f0ff..de7bdd177 100644 --- a/pypsa/component_attrs/storage_units.csv +++ b/pypsa/component_attrs/storage_units.csv @@ -24,7 +24,12 @@ efficiency_dispatch,float,per unit,1.,Efficiency of storage on the way out of th standing_loss,float,per unit,0.,Losses per hour to state of charge.,Input (optional) inflow,static or series,MW,0.,"Inflow to the state of charge, e.g. due to river inflow in hydro reservoir.",Input (optional) p,series,MW,0.,active power at bus (positive if net generation),Output +p_dispatch,series,MW,0.,active power dispatch at bus,Output +p_store,series,MW,0.,active power charging at bus,Output q,series,MVar,0.,reactive power (positive if net generation),Output state_of_charge,series,MWh,NaN,State of charge as calculated by the OPF.,Output spill,series,MW,0.,Spillage for each snapshot.,Output p_nom_opt,float,MW,0.,Optimised nominal power.,Output +mu_upper,series,currency/MWh,0.,Shadow price of upper p_nom limit,Output +mu_lower,series,currency/MWh,0.,Shadow price of lower p_nom limit,Output +mu_state_of_charge_set,series,currency/MWh,0.,Shadow price of fixed state of charge state_of_charge_set,Output diff --git a/pypsa/component_attrs/stores.csv b/pypsa/component_attrs/stores.csv index 5d5e032a8..48223176d 100644 --- a/pypsa/component_attrs/stores.csv +++ b/pypsa/component_attrs/stores.csv @@ -19,4 +19,7 @@ standing_loss,float,per unit,0.,Losses per hour to energy.,Input (optional) p,series,MW,0.,active power at bus (positive if net generation),Output q,series,MVar,0.,reactive power (positive if net generation),Output e,series,MWh,0.,Energy as calculated by the OPF.,Output -e_nom_opt,float,MW,0.,Optimised nominal energy capacity outputed by OPF.,Output \ No newline at end of file +e_nom_opt,float,MW,0.,Optimised nominal energy capacity outputed by OPF.,Output +mu_upper,series,currency/MWh,0.,Shadow price of upper e_nom limit,Output +mu_lower,series,currency/MWh,0.,Shadow price of lower e_nom limit,Output +mu_e_set,series,currency/MWh,0.,Shadow price of fixed energy level e_set,Output diff --git a/pypsa/components.py b/pypsa/components.py index 734a32551..b5f9a412d 100644 --- a/pypsa/components.py +++ b/pypsa/components.py @@ -21,24 +21,18 @@ # make the code as Python 3 compatible as possible from __future__ import division, absolute_import -import six from six import iteritems, itervalues, iterkeys -from six.moves import map from weakref import ref __author__ = "Tom Brown (FIAS), Jonas Hoersch (FIAS), David Schlachtberger (FIAS)" __copyright__ = "Copyright 2015-2017 Tom Brown (FIAS), Jonas Hoersch (FIAS), David Schlachtberger (FIAS), GNU GPL 3" -import networkx as nx import numpy as np import pandas as pd -import scipy as sp, scipy.sparse from scipy.sparse import csgraph -from itertools import chain from collections import namedtuple -from operator import itemgetter import os @@ -71,10 +65,11 @@ from .graph import graph, incidence_matrix, adjacency_matrix -import inspect - import sys +if sys.version_info.major >= 3: + from .linopf import network_lopf as network_lopf_lowmem + import logging logger = logging.getLogger(__name__) 
@@ -198,7 +193,7 @@ class Network(Basic): pf = network_pf - lopf = network_lopf +# lopf = network_lopf opf = network_opf @@ -407,6 +402,105 @@ def set_snapshots(self,snapshots): #NB: No need to rebind pnl to self, since haven't changed it + def lopf(self, snapshots=None, pyomo=True, solver_name="glpk", + solver_options={}, solver_logfile=None, formulation="kirchhoff", + keep_files=False, extra_functionality=None, **kwargs): + """ + Linear optimal power flow for a group of snapshots. + + Parameters + ---------- + snapshots : list or index slice + A list of snapshots to optimise, must be a subset of + network.snapshots, defaults to network.snapshots + pyomo : bool, default True + Whether to use pyomo for building and solving the model, setting + this to False saves a lot of memory and time. + solver_name : string + Must be a solver name that pyomo recognises and that is + installed, e.g. "glpk", "gurobi" + solver_options : dictionary + A dictionary with additional options that get passed to the solver. + (e.g. {'threads':2} tells gurobi to use only 2 cpus) + solver_logfile : None|string + If not None, sets the logfile option of the solver. + keep_files : bool, default False + Keep the files that pyomo constructs from OPF problem + construction, e.g. .lp file - useful for debugging + formulation : string + Formulation of the linear power flow equations to use; must be + one of ["angles","cycles","kirchhoff","ptdf"] + extra_functionality : callable function + This function must take two arguments + `extra_functionality(network,snapshots)` and is called after + the model building is complete, but before it is sent to the + solver. It allows the user to + add/change constraints and add/change the objective function. + + Other Parameters + ---------------- + + ptdf_tolerance : float + Only taking effect when pyomo is True. + Value below which PTDF entries are ignored + free_memory : set, default {'pyomo'} + Only taking effect when pyomo is True. + Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series + data away while the solver runs (as a pickle to disk) and/or free + `pyomo` data after the solution has been extracted. + solver_io : string, default None + Only taking effect when pyomo is True. + Solver Input-Output option, e.g. "python" to use "gurobipy" for + solver_name="gurobi" + skip_pre : bool, default False + Only taking effect when pyomo is True. + Skip the preliminary steps of computing topology, calculating + dependent values and finding bus controls. + extra_postprocessing : callable function + Only taking effect when pyomo is True. + This function must take three arguments + `extra_postprocessing(network,snapshots,duals)` and is called after + the model has solved and the results are extracted. It allows the user + to extract further information about the solution, such as additional + shadow prices. + warmstart : bool or string, default False + Only taking effect when pyomo is False. + Use this to warmstart the optimization. Pass a string which gives + the path to the basis file. If set to True, a path to + a basis file must be given in network.basis_fn. + store_basis : bool, default True + Only taking effect when pyomo is False. + Whether to store the basis of the optimization results. If True, + the path to the basis file is saved in network.basis_fn. Note that + a basis can only be stored if simplex, dual-simplex, or barrier + *with* crossover is used for solving. + keep_references : bool, default False + Only taking effect when pyomo is False. 
+ Keep the references of variable and constraint names withing the + network. These can be looked up in `n.vars` and `n.cons` after solving. + keep_shadowprices : bool or list of component names + Only taking effect when pyomo is False. + Keep shadow prices for all constraints, if set to True. If a list + is passed the shadow prices will only be parsed for those constraint + names. Defaults to ['Bus', 'Line', 'GlobalConstraint']. + After solving, the shadow prices can be retrieved using + :func:`pypsa.linopt.get_dual` with corresponding name + solver_dir : str, default None + Only taking effect when pyomo is False. + Path to directory where necessary files are written, default None leads + to the default temporary directory used by tempfile.mkstemp(). + + """ + args = {'snapshots': snapshots, 'keep_files': keep_files, + 'solver_options': solver_options, 'formulation': formulation, + 'extra_functionality': extra_functionality, + 'solver_name': solver_name, 'solver_logfile': solver_logfile} + args.update(kwargs) + if pyomo: + return network_lopf(self, **args) + else: + return network_lopf_lowmem(self, **args) + def add(self, class_name, name, **kwargs): @@ -834,6 +928,8 @@ def determine_network_topology(self): for sub in self.sub_networks.obj: find_cycles(sub) + sub.find_bus_controls() + def iterate_components(self, components=None, skip_empty=True): if components is None: @@ -893,7 +989,7 @@ def bad_by_type(branch, attr): c.list_name, attr, bad) bad = c.df.index[(c.df["x"] == 0.) & (c.df["r"] == 0.) & - c.df.apply(bad_by_type, args=('x',), axis=1) & + c.df.apply(bad_by_type, args=('x',), axis=1) & c.df.apply(bad_by_type, args=('r',), axis=1)] if len(bad) > 0: logger.warning("The following %s have zero series impedance, which will break the load flow:\n%s", diff --git a/pypsa/descriptors.py b/pypsa/descriptors.py index fe7dc9cc4..96f8d0df1 100644 --- a/pypsa/descriptors.py +++ b/pypsa/descriptors.py @@ -22,19 +22,15 @@ # make the code as Python 3 compatible as possible from __future__ import division from __future__ import absolute_import -from six import iteritems, string_types +from six import iteritems __author__ = "Tom Brown (FIAS), Jonas Hoersch (FIAS)" __copyright__ = "Copyright 2015-2017 Tom Brown (FIAS), Jonas Hoersch (FIAS), GNU GPL 3" - - - #weak references are necessary to make sure the key-value pair are #destroyed if the key object goes out of scope -from weakref import WeakKeyDictionary from collections import OrderedDict from itertools import repeat @@ -303,3 +299,77 @@ def zsum(s, *args, **kwargs): Meant to be set as pd.Series.zsum = zsum. """ return 0 if s.empty else s.sum(*args, **kwargs) + +#Perhaps this should rather go into components.py +nominal_attrs = {'Generator': 'p_nom', + 'Line': 's_nom', + 'Transformer': 's_nom', + 'Link': 'p_nom', + 'Store': 'e_nom', + 'StorageUnit': 'p_nom'} + +def expand_series(ser, columns): + """ + Helper function to quickly expand a series to a dataframe with according + column axis and every single column being the equal to the given series. + """ + return ser.to_frame(columns[0]).reindex(columns=columns).ffill(axis=1) + + +def get_extendable_i(n, c): + """ + Getter function. Get the index of extendable elements of a given component. + """ + return n.df(c)[lambda ds: ds[nominal_attrs[c] + '_extendable']].index + +def get_non_extendable_i(n, c): + """ + Getter function. Get the index of non-extendable elements of a given + component. 
+ """ + return n.df(c)[lambda ds: ~ds[nominal_attrs[c] + '_extendable']].index + +def get_bounds_pu(n, c, sns, index=slice(None), attr=None): + """ + Getter function to retrieve the per unit bounds of a given compoent for + given snapshots and possible subset of elements (e.g. non-extendables). + Depending on the attr you can further specify the bounds of the variable + you are looking at, e.g. p_store for storage units. + + Parameters + ---------- + n : pypsa.Network + c : string + Component name, e.g. "Generator", "Line". + sns : pandas.Index/pandas.DateTimeIndex + set of snapshots for the bounds + index : pd.Index, default None + Subset of the component elements. If None (default) bounds of all + elements are returned. + attr : string, default None + attribute name for the bounds, e.g. "p", "s", "p_store" + + """ + min_pu_str = nominal_attrs[c].replace('nom', 'min_pu') + max_pu_str = nominal_attrs[c].replace('nom', 'max_pu') + + max_pu = get_switchable_as_dense(n, c, max_pu_str, sns) + if c in n.passive_branch_components: + min_pu = - max_pu + elif c == 'StorageUnit': + min_pu = pd.DataFrame(0, max_pu.index, max_pu.columns) + if attr == 'p_store': + max_pu = - get_switchable_as_dense(n, c, min_pu_str, sns) + if attr == 'state_of_charge': + max_pu = expand_series(n.df(c).max_hours, sns).T + min_pu = pd.DataFrame(0, *max_pu.axes) + else: + min_pu = get_switchable_as_dense(n, c, min_pu_str, sns) + return min_pu[index], max_pu[index] + +def additional_linkports(n): + return [i[3:] for i in n.links.columns if i.startswith('bus') + and i not in ['bus0', 'bus1']] + + + diff --git a/pypsa/linopf.py b/pypsa/linopf.py new file mode 100644 index 000000000..68dde7b3d --- /dev/null +++ b/pypsa/linopf.py @@ -0,0 +1,986 @@ +## Copyright 2019 Tom Brown (KIT), Fabian Hofmann (FIAS) + +## This program is free software; you can redistribute it and/or +## modify it under the terms of the GNU General Public License as +## published by the Free Software Foundation; either version 3 of the +## License, or (at your option) any later version. + +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. + +## You should have received a copy of the GNU General Public License +## along with this program. If not, see . + +""" +Build optimisation problems from PyPSA networks without Pyomo. +Originally retrieved from nomopyomo ( -> 'no more Pyomo'). +""" + + +from .pf import (_as_snapshots, get_switchable_as_dense as get_as_dense) +from .descriptors import (get_bounds_pu, get_extendable_i, get_non_extendable_i, + expand_series, nominal_attrs, additional_linkports, Dict) + +from .linopt import (linexpr, write_bound, write_constraint, set_conref, + set_varref, get_con, get_var, join_exprs, run_and_read_cbc, + run_and_read_gurobi, run_and_read_glpk, define_constraints, + define_variables, align_with_static_component, define_binaries) + + +import pandas as pd +import numpy as np + +import gc, time, os, re, shutil +from tempfile import mkstemp + +import logging +logger = logging.getLogger(__name__) + +lookup = pd.read_csv(os.path.join(os.path.dirname(__file__), 'variables.csv'), + index_col=['component', 'variable']) + + +def define_nominal_for_extendable_variables(n, c, attr): + """ + Initializes variables for nominal capacities for a given component and a + given attribute. 
+ + Parameters + ---------- + n : pypsa.Network + c : str + network component of which the nominal capacity should be defined + attr : str + name of the variable, e.g. 'p_nom' + + """ + ext_i = get_extendable_i(n, c) + if ext_i.empty: return + lower = n.df(c)[attr+'_min'][ext_i] + upper = n.df(c)[attr+'_max'][ext_i] + define_variables(n, lower, upper, c, attr) + + +def define_dispatch_for_extendable_and_committable_variables(n, sns, c, attr): + """ + Initializes variables for power dispatch for a given component and a + given attribute. + + Parameters + ---------- + n : pypsa.Network + c : str + name of the network component + attr : str + name of the attribute, e.g. 'p' + + """ + ext_i = get_extendable_i(n, c) + if c == 'Generator': + ext_i = ext_i | n.generators.query('committable').index + if ext_i.empty: return + define_variables(n, -np.inf, np.inf, c, attr, axes=[sns, ext_i], spec='extendables') + + +def define_dispatch_for_non_extendable_variables(n, sns, c, attr): + """ + Initializes variables for power dispatch for a given component and a + given attribute. + + Parameters + ---------- + n : pypsa.Network + c : str + name of the network component + attr : str + name of the attribute, e.g. 'p' + + """ + fix_i = get_non_extendable_i(n, c) + if c == 'Generator': + fix_i = fix_i.difference(n.generators.query('committable').index) + if fix_i.empty: return + nominal_fix = n.df(c)[nominal_attrs[c]][fix_i] + min_pu, max_pu = get_bounds_pu(n, c, sns, fix_i, attr) + lower = min_pu.mul(nominal_fix) + upper = max_pu.mul(nominal_fix) + define_variables(n, lower, upper, c, attr, spec='nonextendables') + + +def define_dispatch_for_extendable_constraints(n, sns, c, attr): + """ + Sets power dispatch constraints for extendable devices for a given + component and a given attribute. + + Parameters + ---------- + n : pypsa.Network + c : str + name of the network component + attr : str + name of the attribute, e.g. 'p' + + """ + ext_i = get_extendable_i(n, c) + if ext_i.empty: return + min_pu, max_pu = get_bounds_pu(n, c, sns, ext_i, attr) + operational_ext_v = get_var(n, c, attr)[ext_i] + nominal_v = get_var(n, c, nominal_attrs[c])[ext_i] + rhs = 0 + + lhs, *axes = linexpr((max_pu, nominal_v), (-1, operational_ext_v), + return_axes=True) + define_constraints(n, lhs, '>=', rhs, c, 'mu_upper', axes=axes, spec=attr) + + lhs, *axes = linexpr((min_pu, nominal_v), (-1, operational_ext_v), + return_axes=True) + define_constraints(n, lhs, '<=', rhs, c, 'mu_lower', axes=axes, spec=attr) + + +def define_fixed_variable_constraints(n, sns, c, attr, pnl=True): + """ + Sets constraints for fixing variables of a given component and attribute + to the corresponding values in n.df(c)[attr + '_set'] if pnl is True, or + n.pnl(c)[attr + '_set'] + + Parameters + ---------- + n : pypsa.Network + c : str + name of the network component + attr : str + name of the attribute, e.g. 
'p' + pnl : bool, default True + Whether variable which should be fixed is time-dependent + + """ + + if pnl: + if attr + '_set' not in n.pnl(c): return + fix = n.pnl(c)[attr + '_set'].unstack().dropna() + if fix.empty: return + lhs = linexpr((1, get_var(n, c, attr).unstack()[fix.index]), as_pandas=False) + constraints = write_constraint(n, lhs, '=', fix).unstack().T + else: + if attr + '_set' not in n.df(c): return + fix = n.df(c)[attr + '_set'].dropna() + if fix.empty: return + lhs = linexpr((1, get_var(n, c, attr)[fix.index]), as_pandas=False) + constraints = write_constraint(n, lhs, '=', fix) + set_conref(n, constraints, c, f'mu_{attr}_set') + + +def define_generator_status_variables(n, snapshots): + com_i = n.generators.query('committable').index + ext_i = get_extendable_i(n, 'Generator') + if not (ext_i & com_i).empty: + logger.warning("The following generators have both investment optimisation" + f" and unit commitment:\n\n\t{', '.join((ext_i & com_i))}\n\nCurrently PyPSA cannot " + "do both these functions, so PyPSA is choosing investment optimisation " + "for these generators.") + com_i = com_i.difference(ext_i) + if com_i.empty: return + define_binaries(n, (snapshots, com_i), 'Generator', 'status') + + +def define_committable_generator_constraints(n, snapshots): + c, attr = 'Generator', 'status' + com_i = n.df(c).query('committable and not p_nom_extendable').index + if com_i.empty: return + nominal = n.df(c)[nominal_attrs[c]][com_i] + min_pu, max_pu = get_bounds_pu(n, c, snapshots, com_i, 'p') + lower = min_pu.mul(nominal) + upper = max_pu.mul(nominal) + + status = get_var(n, c, attr) + p = get_var(n, c, 'p')[com_i] + + lhs = linexpr((lower, status), (-1, p)) + define_constraints(n, lhs, '<=', 0, 'Generators', 'committable_lb') + + lhs = linexpr((upper, status), (-1, p)) + define_constraints(n, lhs, '>=', 0, 'Generators', 'committable_ub') + + + +def define_ramp_limit_constraints(n, sns): + """ + Defines ramp limits for generators wiht valid ramplimit + + """ + c = 'Generator' + rup_i = n.df(c).query('ramp_limit_up == ramp_limit_up').index + rdown_i = n.df(c).query('ramp_limit_down == ramp_limit_down').index + if rup_i.empty & rdown_i.empty: + return + fix_i = get_non_extendable_i(n, c) + ext_i = get_extendable_i(n, c) + com_i = n.df(c).query('committable').index.difference(ext_i) + p = get_var(n, c, 'p').loc[sns[1:]] + p_prev = get_var(n, c, 'p').shift(1).loc[sns[1:]] + + # fix up + gens_i = rup_i & fix_i + lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i])) + rhs = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom') + define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', spec='nonext.') + + # ext up + gens_i = rup_i & ext_i + limit_pu = n.df(c)['ramp_limit_up'][gens_i] + p_nom = get_var(n, c, 'p_nom')[gens_i] + lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom)) + define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', spec='ext.') + + # com up + gens_i = rup_i & com_i + if not gens_i.empty: + limit_start = n.df(c).loc[gens_i].eval('ramp_limit_start_up * p_nom') + limit_up = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom') + status = get_var(n, c, 'status').loc[sns[1:], gens_i] + status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i] + lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), + (limit_start - limit_up, status_prev), (- limit_start, status)) + define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', spec='com.') + + # fix down + gens_i = rdown_i & fix_i + lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i])) + rhs = 
n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom') + define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', spec='nonext.') + + # ext down + gens_i = rdown_i & ext_i + limit_pu = n.df(c)['ramp_limit_down'][gens_i] + p_nom = get_var(n, c, 'p_nom')[gens_i] + lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom)) + define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', spec='ext.') + + # com down + gens_i = rdown_i & com_i + if not gens_i.empty: + limit_shut = n.df(c).loc[gens_i].eval('ramp_limit_shut_down * p_nom') + limit_down = n.df(c).loc[gens_i].eval('ramp_limit_down * p_nom') + status = get_var(n, c, 'status').loc[sns[1:], gens_i] + status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i] + lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), + (limit_down - limit_shut, status), (limit_shut, status_prev)) + define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', spec='com.') + +def define_nodal_balance_constraints(n, sns): + """ + Defines nodal balance constraint. + + """ + + def bus_injection(c, attr, groupcol='bus', sign=1): + # additional sign only necessary for branches in reverse direction + if 'sign' in n.df(c): + sign = sign * n.df(c).sign + expr = linexpr((sign, get_var(n, c, attr))).rename(columns=n.df(c)[groupcol]) + # drop empty bus2, bus3 if multiline link + if c == 'Link': + expr.drop(columns='', errors='ignore', inplace=True) + return expr + + # one might reduce this a bit by using n.branches and lookup + args = [['Generator', 'p'], ['Store', 'p'], ['StorageUnit', 'p_dispatch'], + ['StorageUnit', 'p_store', 'bus', -1], ['Line', 's', 'bus0', -1], + ['Line', 's', 'bus1', 1], ['Transformer', 's', 'bus0', -1], + ['Transformer', 's', 'bus1', 1], ['Link', 'p', 'bus0', -1], + ['Link', 'p', 'bus1', get_as_dense(n, 'Link', 'efficiency', sns)]] + args = [arg for arg in args if not n.df(arg[0]).empty] + + for i in additional_linkports(n): + eff = get_as_dense(n, 'Link', f'efficiency{i}', sns) + args.append(['Link', 'p', f'bus{i}', eff]) + + lhs = (pd.concat([bus_injection(*arg) for arg in args], axis=1) + .groupby(axis=1, level=0) + .agg(lambda x: ''.join(x.values)) + .reindex(columns=n.buses.index, fill_value='')) + sense = '=' + rhs = ((- get_as_dense(n, 'Load', 'p_set', sns) * n.loads.sign) + .groupby(n.loads.bus, axis=1).sum() + .reindex(columns=n.buses.index, fill_value=0)) + define_constraints(n, lhs, sense, rhs, 'Bus', 'marginal_price') + + +def define_kirchhoff_constraints(n, sns): + """ + Defines Kirchhoff voltage constraints + + """ + comps = n.passive_branch_components & set(n.variables.index.levels[0]) + if len(comps) == 0: return + branch_vars = pd.concat({c:get_var(n, c, 's') for c in comps}, axis=1) + + def cycle_flow(ds): + ds = ds[lambda ds: ds!=0.].dropna() + vals = linexpr((ds, branch_vars[ds.index]), as_pandas=False) + return vals.sum(1) + + constraints = [] + for sub in n.sub_networks.obj: + branches = sub.branches() + C = pd.DataFrame(sub.C.todense(), index=branches.index) + if C.empty: + continue + carrier = n.sub_networks.carrier[sub.name] + weightings = branches.x_pu_eff if carrier == 'AC' else branches.r_pu_eff + C_weighted = 1e5 * C.mul(weightings, axis=0) + cycle_sum = C_weighted.apply(cycle_flow) + cycle_sum.index = sns + con = write_constraint(n, cycle_sum, '=', 0) + constraints.append(con) + constraints = pd.concat(constraints, axis=1, ignore_index=True) + set_conref(n, constraints, 'SubNetwork', 'mu_kirchhoff_voltage_law') + + +def define_storage_unit_constraints(n, sns): + """ + Defines state of 
charge (soc) constraints for storage units. In principal + the constraints states: + + previous_soc + p_store - p_dispatch + inflow - spill == soc + + """ + sus_i = n.storage_units.index + if sus_i.empty: return + c = 'StorageUnit' + # spillage + upper = get_as_dense(n, c, 'inflow', sns).loc[:, lambda df: df.max() > 0] + spill = write_bound(n, 0, upper) + set_varref(n, spill, 'StorageUnit', 'spill') + + eh = expand_series(n.snapshot_weightings[sns], sus_i) #elapsed hours + + eff_stand = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh) + eff_dispatch = expand_series(n.df(c).efficiency_dispatch, sns).T + eff_store = expand_series(n.df(c).efficiency_store, sns).T + + soc = get_var(n, c, 'state_of_charge') + cyclic_i = n.df(c).query('cyclic_state_of_charge').index + noncyclic_i = n.df(c).query('~cyclic_state_of_charge').index + + prev_soc_cyclic = soc.shift().fillna(soc.loc[sns[-1]]) + + coeff_var = [(-1, soc), + (-1/eff_dispatch * eh, get_var(n, c, 'p_dispatch')), + (eff_store * eh, get_var(n, c, 'p_store'))] + + lhs, *axes = linexpr(*coeff_var, return_axes=True) + + def masked_term(coeff, var, cols): + return linexpr((coeff[cols], var[cols]))\ + .reindex(index=axes[0], columns=axes[1], fill_value='').values + + if ('StorageUnit', 'spill') in n.variables.index: + lhs += masked_term(-eh, get_var(n, c, 'spill'), spill.columns) + lhs += masked_term(eff_stand, prev_soc_cyclic, cyclic_i) + lhs += masked_term(eff_stand.loc[sns[1:]], soc.shift().loc[sns[1:]], noncyclic_i) + + rhs = -get_as_dense(n, c, 'inflow', sns).mul(eh) + rhs.loc[sns[0], noncyclic_i] -= n.df(c).state_of_charge_initial[noncyclic_i] + + define_constraints(n, lhs, '==', rhs, c, 'mu_state_of_charge') + + +def define_store_constraints(n, sns): + """ + Defines energy balance constraints for stores. In principal this states: + + previous_e - p == e + + """ + stores_i = n.stores.index + if stores_i.empty: return + c = 'Store' + variables = write_bound(n, -np.inf, np.inf, axes=[sns, stores_i]) + set_varref(n, variables, c, 'p') + + eh = expand_series(n.snapshot_weightings[sns], stores_i) #elapsed hours + eff_stand = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh) + + e = get_var(n, c, 'e') + cyclic_i = n.df(c).query('e_cyclic').index + noncyclic_i = n.df(c).query('~e_cyclic').index + + previous_e_cyclic = e.shift().fillna(e.loc[sns[-1]]) + + coeff_var = [(-eh, get_var(n, c, 'p')), (-1, e)] + + lhs, *axes = linexpr(*coeff_var, return_axes=True) + + def masked_term(coeff, var, cols): + return linexpr((coeff[cols], var[cols]))\ + .reindex(index=axes[0], columns=axes[1], fill_value='').values + + lhs += masked_term(eff_stand, previous_e_cyclic, cyclic_i) + lhs += masked_term(eff_stand.loc[sns[1:]], e.shift().loc[sns[1:]], noncyclic_i) + + rhs = pd.DataFrame(0, sns, stores_i) + rhs.loc[sns[0], noncyclic_i] -= n.df(c)['e_initial'][noncyclic_i] + + define_constraints(n, lhs, '==', rhs, c, 'mu_state_of_charge') + + +def define_global_constraints(n, sns): + """ + Defines global constraints for the optimization. Possible types are + + 1. primary_energy + Use this to constraint the byproducts of primary energy sources as + CO2 + 2. transmission_volume_expansion_limit + Use this to set a limit for line volume expansion. Possible carriers + are 'AC' and 'DC' + 3. transmission_expansion_cost_limit + Use this to set a limit for line expansion costs. 
Possible carriers + are 'AC' and 'DC' + + """ + glcs = n.global_constraints.query('type == "primary_energy"') + for name, glc in glcs.iterrows(): + rhs = glc.constant + lhs = '' + carattr = glc.carrier_attribute + emissions = n.carriers.query(f'{carattr} != 0')[carattr] + + if emissions.empty: continue + + # generators + gens = n.generators.query('carrier in @emissions.index') + if not gens.empty: + em_pu = gens.carrier.map(emissions)/gens.efficiency + em_pu = n.snapshot_weightings.to_frame() @ em_pu.to_frame('weightings').T + vals = linexpr((em_pu, get_var(n, 'Generator', 'p')[gens.index]), + as_pandas=False) + lhs += join_exprs(vals) + + # storage units + sus = n.storage_units.query('carrier in @emissions.index and ' + 'not cyclic_state_of_charge') + sus_i = sus.index + if not sus.empty: + coeff_val = (-sus.carrier.map(emissions), get_var(n, 'StorageUnit', + 'state_of_charge').loc[sns[-1], sus_i]) + vals = linexpr(coeff_val, as_pandas=False) + lhs = lhs + '\n' + join_exprs(vals) + rhs -= sus.carrier.map(emissions) @ sus.state_of_charge_initial + + # stores + n.stores['carrier'] = n.stores.bus.map(n.buses.carrier) + stores = n.stores.query('carrier in @emissions.index and not e_cyclic') + if not stores.empty: + coeff_val = (-stores.carrier.map(emissions), get_var(n, 'Store', 'e') + .loc[sns[-1], stores.index]) + vals = linexpr(coeff_val, as_pandas=False) + lhs = lhs + '\n' + join_exprs(vals) + rhs -= stores.carrier.map(emissions) @ stores.e_initial + + con = write_constraint(n, lhs, glc.sense, rhs, axes=pd.Index([name])) + set_conref(n, con, 'GlobalConstraint', 'mu', name) + + # for the next two to we need a line carrier + if len(n.global_constraints) > len(glcs): + n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier) + # expansion limits + glcs = n.global_constraints.query('type == ' + '"transmission_volume_expansion_limit"') + substr = lambda s: re.sub('[\[\]\(\)]', '', s) + for name, glc in glcs.iterrows(): + car = [substr(c.strip()) for c in glc.carrier_attribute.split(',')] + lhs = '' + for c, attr in (('Line', 's_nom'), ('Link', 'p_nom')): + ext_i = n.df(c).query(f'carrier in @car and {attr}_extendable').index + if ext_i.empty: continue + v = linexpr((n.df(c).length[ext_i], get_var(n, c, attr)[ext_i]), + as_pandas=False) + lhs += '\n' + join_exprs(v) + if lhs == '': continue + sense = glc.sense + rhs = glc.constant + con = write_constraint(n, lhs, sense, rhs, axes=pd.Index([name])) + set_conref(n, con, 'GlobalConstraint', 'mu', name) + + # expansion cost limits + glcs = n.global_constraints.query('type == ' + '"transmission_expansion_cost_limit"') + for name, glc in glcs.iterrows(): + car = [substr(c.strip()) for c in glc.carrier_attribute.split(',')] + lhs = '' + for c, attr in (('Line', 's_nom'), ('Link', 'p_nom')): + ext_i = n.df(c).query(f'carrier in @car and {attr}_extendable').index + if ext_i.empty: continue + v = linexpr((n.df(c).capital_cost[ext_i], get_var(n, c, attr)[ext_i]), + as_pandas=False) + lhs += '\n' + join_exprs(v) + if lhs == '': continue + sense = glc.sense + rhs = glc.constant + con = write_constraint(n, lhs, sense, rhs, axes=pd.Index([name])) + set_conref(n, con, 'GlobalConstraint', 'mu', name) + + +def define_objective(n, sns): + """ + Defines and writes out the objective function + + """ + # constant for already done investment + nom_attr = nominal_attrs.items() + constant = 0 + for c, attr in nom_attr: + ext_i = get_extendable_i(n, c) + constant += n.df(c)[attr][ext_i] @ n.df(c).capital_cost[ext_i] + object_const = write_bound(n, constant, constant) + 
n.objective_f.write(linexpr((-1, object_const), as_pandas=False)[0]) + + for c, attr in lookup.query('marginal_cost').index: + cost = (get_as_dense(n, c, 'marginal_cost', sns) + .loc[:, lambda ds: (ds != 0).all()] + .mul(n.snapshot_weightings[sns], axis=0)) + if cost.empty: continue + terms = linexpr((cost, get_var(n, c, attr).loc[sns, cost.columns])) + n.objective_f.write(join_exprs(terms)) + # investment + for c, attr in nominal_attrs.items(): + cost = n.df(c)['capital_cost'][get_extendable_i(n, c)] + if cost.empty: continue + terms = linexpr((cost, get_var(n, c, attr)[cost.index])) + n.objective_f.write(join_exprs(terms)) + + +def prepare_lopf(n, snapshots=None, keep_files=False, + extra_functionality=None, solver_dir=None): + """ + Sets up the linear problem and writes it out to a lp file + + Returns + ------- + Tuple (fdp, problem_fn) indicating the file descriptor and the file name of + the lp file + + """ + n._xCounter, n._cCounter = 1, 1 + n.vars, n.cons = Dict(), Dict() + + cols = ['component', 'name', 'pnl', 'specification'] + n.variables = pd.DataFrame(columns=cols).set_index(cols[:2]) + n.constraints = pd.DataFrame(columns=cols).set_index(cols[:2]) + + snapshots = n.snapshots if snapshots is None else snapshots + start = time.time() + + tmpkwargs = dict(text=True, dir=solver_dir) + # mkstemp(suffix, prefix, **tmpkwargs) + fdo, objective_fn = mkstemp('.txt', 'pypsa-objectve-', **tmpkwargs) + fdc, constraints_fn = mkstemp('.txt', 'pypsa-constraints-', **tmpkwargs) + fdb, bounds_fn = mkstemp('.txt', 'pypsa-bounds-', **tmpkwargs) + fdi, binaries_fn = mkstemp('.txt', 'pypsa-binaries-', **tmpkwargs) + fdp, problem_fn = mkstemp('.lp', 'pypsa-problem-', **tmpkwargs) + + n.objective_f = open(objective_fn, mode='w') + n.constraints_f = open(constraints_fn, mode='w') + n.bounds_f = open(bounds_fn, mode='w') + n.binaries_f = open(binaries_fn, mode='w') + + n.objective_f.write('\* LOPF *\n\nmin\nobj:\n') + n.constraints_f.write("\n\ns.t.\n\n") + n.bounds_f.write("\nbounds\n") + n.binaries_f.write("\nbinary\n") + + for c, attr in lookup.query('nominal and not handle_separately').index: + define_nominal_for_extendable_variables(n, c, attr) + # define_fixed_variable_constraints(n, snapshots, c, attr, pnl=False) + for c, attr in lookup.query('not nominal and not handle_separately').index: + define_dispatch_for_non_extendable_variables(n, snapshots, c, attr) + define_dispatch_for_extendable_and_committable_variables(n, snapshots, c, attr) + align_with_static_component(n, c, attr) + define_dispatch_for_extendable_constraints(n, snapshots, c, attr) + # define_fixed_variable_constraints(n, snapshots, c, attr) + define_generator_status_variables(n, snapshots) + + # consider only state_of_charge_set for the moment + define_fixed_variable_constraints(n, snapshots, 'StorageUnit', 'state_of_charge') + define_fixed_variable_constraints(n, snapshots, 'Store', 'e') + + define_committable_generator_constraints(n, snapshots) + define_ramp_limit_constraints(n, snapshots) + define_storage_unit_constraints(n, snapshots) + define_store_constraints(n, snapshots) + define_kirchhoff_constraints(n, snapshots) + define_nodal_balance_constraints(n, snapshots) + define_global_constraints(n, snapshots) + define_objective(n, snapshots) + + if extra_functionality is not None: + extra_functionality(n, snapshots) + + n.binaries_f.write("end\n") + + # explicit closing with file descriptor is necessary for windows machines + for f, fd in (('bounds_f', fdb), ('constraints_f', fdc), + ('objective_f', fdo), ('binaries_f', fdi)): 
+        getattr(n, f).close(); delattr(n, f); os.close(fd)
+
+    # concatenate the partial files into the final lp file
+    with open(problem_fn, 'wb') as wfd:
+        for f in [objective_fn, constraints_fn, bounds_fn, binaries_fn]:
+            with open(f, 'rb') as fd:
+                shutil.copyfileobj(fd, wfd)
+            if not keep_files:
+                os.remove(f)
+
+    logger.info(f'Total preparation time: {round(time.time()-start, 2)}s')
+    return fdp, problem_fn
+
+
+def assign_solution(n, sns, variables_sol, constraints_dual,
+                    keep_references=False, keep_shadowprices=None):
+    """
+    Helper function. Assigns the solution of a successful optimization to the
+    network.
+
+    """
+
+    def set_from_frame(pnl, attr, df):
+        if attr not in pnl:  # use this for subnetworks_t
+            pnl[attr] = df.reindex(n.snapshots)
+        elif pnl[attr].empty:
+            pnl[attr] = df.reindex(n.snapshots)
+        else:
+            pnl[attr].loc[sns, :] = df.reindex(columns=pnl[attr].columns)
+
+    pop = not keep_references
+    def map_solution(c, attr):
+        variables = get_var(n, c, attr, pop=pop)
+        predefined = True
+        if (c, attr) not in lookup.index:
+            predefined = False
+            n.sols[c] = n.sols[c] if c in n.sols else Dict(df=pd.DataFrame(), pnl={})
+        n.solutions.at[(c, attr), 'in_comp'] = predefined
+        if isinstance(variables, pd.DataFrame):
+            # case that variables are time-dependent
+            n.solutions.at[(c, attr), 'pnl'] = True
+            pnl = n.pnl(c) if predefined else n.sols[c].pnl
+            values = variables.stack().map(variables_sol).unstack()
+            if c in n.passive_branch_components:
+                set_from_frame(pnl, 'p0', values)
+                set_from_frame(pnl, 'p1', - values)
+            elif c == 'Link':
+                set_from_frame(pnl, 'p0', values)
+                for i in ['1'] + additional_linkports(n):
+                    i_eff = '' if i == '1' else i
+                    eff = get_as_dense(n, 'Link', f'efficiency{i_eff}', sns)
+                    set_from_frame(pnl, f'p{i}', - values * eff)
+            else:
+                set_from_frame(pnl, attr, values)
+        else:
+            # case that variables are static
+            n.solutions.at[(c, attr), 'pnl'] = False
+            sol = variables.map(variables_sol)
+            if predefined:
+                non_ext = n.df(c)[attr]
+                n.df(c)[attr + '_opt'] = sol.reindex(non_ext.index).fillna(non_ext)
+            else:
+                n.sols[c].df[attr] = sol
+
+    n.sols = Dict()
+    n.solutions = pd.DataFrame(index=n.variables.index, columns=['in_comp', 'pnl'])
+    for c, attr in n.variables.index:
+        map_solution(c, attr)
+
+    # if nominal capacity was not a variable, set the optimal value to the nominal one
+    for c, attr in lookup.query('nominal').index.difference(n.variables.index):
+        n.df(c)[attr+'_opt'] = n.df(c)[attr]
+
+    # recalculate storage unit net dispatch
+    if not n.df('StorageUnit').empty:
+        c = 'StorageUnit'
+        n.pnl(c)['p'] = n.pnl(c)['p_dispatch'] - n.pnl(c)['p_store']
+
+    # duals
+    if keep_shadowprices is False:
+        keep_shadowprices = []
+
+    sp = n.constraints.index
+    if isinstance(keep_shadowprices, list):
+        sp = sp[sp.isin(keep_shadowprices, level=0)]
+
+    def map_dual(c, attr):
+        # If c is a pypsa component name the dual is stored at n.pnl(c)
+        # or n.df(c).
For the second case the index of the constraints have to + # be a subset of n.df(c).index otherwise the dual is stored at + # n.duals[c].df + constraints = get_con(n, c, attr, pop=pop) + is_pnl = isinstance(constraints, pd.DataFrame) + # TODO: setting the sign is not very clear + sign = 1 if 'upper' in attr or attr == 'marginal_price' else -1 + n.dualvalues.at[(c, attr), 'pnl'] = is_pnl + to_component = c in n.all_components + if is_pnl: + n.dualvalues.at[(c, attr), 'in_comp'] = to_component + duals = constraints.stack().map(sign * constraints_dual).unstack() + if c not in n.duals and not to_component: + n.duals[c] = Dict(df=pd.DataFrame(), pnl={}) + pnl = n.pnl(c) if to_component else n.duals[c].pnl + set_from_frame(pnl, attr, duals) + else: + # here to_component can change + duals = constraints.map(sign * constraints_dual) + if to_component: + to_component = (duals.index.isin(n.df(c).index).all()) + n.dualvalues.at[(c, attr), 'in_comp'] = to_component + if c not in n.duals and not to_component: + n.duals[c] = Dict(df=pd.DataFrame(), pnl={}) + df = n.df(c) if to_component else n.duals[c].df + df[attr] = duals + + n.duals = Dict() + n.dualvalues = pd.DataFrame(index=sp, columns=['in_comp', 'pnl']) + # extract shadow prices attached to components + for c, attr in sp: + map_dual(c, attr) + + # discard remaining if wanted + if not keep_references: + for c, attr in n.constraints.index.difference(sp): + get_con(n, c, attr, pop) + + #load + if len(n.loads): + set_from_frame(n.pnl('Load'), 'p', get_as_dense(n, 'Load', 'p_set', sns)) + + #clean up vars and cons + for c in list(n.vars): + if n.vars[c].df.empty and n.vars[c].pnl == {}: n.vars.pop(c) + for c in list(n.cons): + if n.cons[c].df.empty and n.cons[c].pnl == {}: n.cons.pop(c) + + # recalculate injection + ca = [('Generator', 'p', 'bus' ), ('Store', 'p', 'bus'), + ('Load', 'p', 'bus'), ('StorageUnit', 'p', 'bus'), + ('Link', 'p0', 'bus0'), ('Link', 'p1', 'bus1')] + for i in additional_linkports(n): + ca.append(('Link', f'p{i}', f'bus{i}')) + + sign = lambda c: n.df(c).sign if 'sign' in n.df(c) else -1 #sign for 'Link' + n.buses_t.p = pd.concat( + [n.pnl(c)[attr].mul(sign(c)).rename(columns=n.df(c)[group]) + for c, attr, group in ca], axis=1).groupby(level=0, axis=1).sum()\ + .reindex(columns=n.buses.index, fill_value=0) + + def v_ang_for_(sub): + buses_i = sub.buses_o + if len(buses_i) == 1: + return pd.DataFrame(0, index=sns, columns=buses_i) + sub.calculate_B_H(skip_pre=True) + Z = pd.DataFrame(np.linalg.pinv((sub.B).todense()), buses_i, buses_i) + Z -= Z[sub.slack_bus] + return n.buses_t.p.reindex(columns=buses_i) @ Z + n.buses_t.v_ang = (pd.concat([v_ang_for_(sub) for sub in n.sub_networks.obj], + axis=1) + .reindex(columns=n.buses.index, fill_value=0)) + + +def network_lopf(n, snapshots=None, solver_name="cbc", + solver_logfile=None, extra_functionality=None, + extra_postprocessing=None, formulation="kirchhoff", + keep_references=False, keep_files=False, + keep_shadowprices=['Bus', 'Line', 'GlobalConstraint'], + solver_options=None, warmstart=False, store_basis=False, + solver_dir=None): + """ + Linear optimal power flow for a group of snapshots. + + Parameters + ---------- + snapshots : list or index slice + A list of snapshots to optimise, must be a subset of + network.snapshots, defaults to network.snapshots + solver_name : string + Must be a solver name that pyomo recognises and that is + installed, e.g. 
"glpk", "gurobi" + pyomo : bool, default True + Whether to use pyomo for building and solving the model, setting + this to False saves a lot of memory and time. + solver_logfile : None|string + If not None, sets the logfile option of the solver. + solver_options : dictionary + A dictionary with additional options that get passed to the solver. + (e.g. {'threads':2} tells gurobi to use only 2 cpus) + solver_dir : str, default None + Path to directory where necessary files are written, default None leads + to the default temporary directory used by tempfile.mkstemp(). + keep_files : bool, default False + Keep the files that pyomo constructs from OPF problem + construction, e.g. .lp file - useful for debugging + formulation : string + Formulation of the linear power flow equations to use; must be + one of ["angles","cycles","kirchhoff","ptdf"] + extra_functionality : callable function + This function must take two arguments + `extra_functionality(network,snapshots)` and is called after + the model building is complete, but before it is sent to the + solver. It allows the user to + add/change constraints and add/change the objective function. + extra_postprocessing : callable function + This function must take three arguments + `extra_postprocessing(network,snapshots,duals)` and is called after + the model has solved and the results are extracted. It allows the user + to extract further information about the solution, such as additional + shadow prices. + warmstart : bool or string, default False + Use this to warmstart the optimization. Pass a string which gives + the path to the basis file. If set to True, a path to + a basis file must be given in network.basis_fn. + store_basis : bool, default False + Whether to store the basis of the optimization results. If True, + the path to the basis file is saved in network.basis_fn. Note that + a basis can only be stored if simplex, dual-simplex, or barrier + *with* crossover is used for solving. + keep_references : bool, default False + Keep the references of variable and constraint names withing the + network. These can be looked up in `n.vars` and `n.cons` after solving. + keep_shadowprices : bool or list of component names + Keep shadow prices for all constraints, if set to True. If a list + is passed the shadow prices will only be parsed for those constraint + names. Defaults to ['Bus', 'Line', 'GlobalConstraint']. + After solving, the shadow prices can be retrieved using + :func:`pypsa.linopt.get_dual` with corresponding name + + """ + supported_solvers = ["cbc", "gurobi", 'glpk', 'scs'] + if solver_name not in supported_solvers: + raise NotImplementedError(f"Solver {solver_name} not in " + f"supported solvers: {supported_solvers}") + + if formulation != "kirchhoff": + raise NotImplementedError("Only the kirchhoff formulation is supported") + + if n.generators.committable.any(): + logger.warn("Unit commitment is not yet completely implemented for " + "optimising without pyomo. 
Thus minimum up time, minimum down time, " + "start up costs, shut down costs will be ignored.") + + #disable logging because multiple slack bus calculations, keep output clean + snapshots = _as_snapshots(n, snapshots) + n.calculate_dependent_values() + n.determine_network_topology() + + logger.info("Prepare linear problem") + fdp, problem_fn = prepare_lopf(n, snapshots, keep_files, + extra_functionality, solver_dir) + fds, solution_fn = mkstemp(prefix='pypsa-solve', suffix='.sol', dir=solver_dir) + + if warmstart == True: + warmstart = n.basis_fn + logger.info("Solve linear problem using warmstart") + else: + logger.info(f"Solve linear problem using {solver_name.title()} solver") + + solve = eval(f'run_and_read_{solver_name}') + res = solve(n, problem_fn, solution_fn, solver_logfile, + solver_options, keep_files, warmstart, store_basis) + status, termination_condition, variables_sol, constraints_dual, obj = res + + if not keep_files: + os.close(fdp); os.remove(problem_fn) + os.close(fds); os.remove(solution_fn) + + if "optimal" not in termination_condition: + logger.warning('Problem was not solved to optimality') + return status, termination_condition + else: + logger.info('Optimization successful. Objective value: {:.2e}'.format(obj)) + + n.objective = obj + assign_solution(n, snapshots, variables_sol, constraints_dual, + keep_references=keep_references, + keep_shadowprices=keep_shadowprices) + gc.collect() + + return status,termination_condition + + +def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1, + max_iterations=100, **kwargs): + ''' + Iterative linear optimization updating the line parameters for passive + AC and DC lines. This is helpful when line expansion is enabled. After each + sucessful solving, line impedances and line resistance are recalculated + based on the optimization result. If warmstart is possible, it uses the + result from the previous iteration to fasten the optimization. + + Parameters + ---------- + snapshots : list or index slice + A list of snapshots to optimise, must be a subset of + network.snapshots, defaults to network.snapshots + msq_threshold: float, default 0.05 + Maximal mean square difference between optimized line capacity of + the current and the previous iteration. 
+
+def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1,
+          max_iterations=100, **kwargs):
+    '''
+    Iterative linear optimization updating the line parameters for passive
+    AC and DC lines. This is helpful when line expansion is enabled. After each
+    successful solve, line impedances and resistances are recalculated
+    based on the optimization result. If warmstart is possible, it uses the
+    result from the previous iteration to speed up the optimization.
+
+    Parameters
+    ----------
+    snapshots : list or index slice
+        A list of snapshots to optimise, must be a subset of
+        network.snapshots, defaults to network.snapshots
+    msq_threshold: float, default 0.05
+        Maximal mean square difference between the optimized line capacities
+        of the current and the previous iteration. As soon as this threshold
+        is undercut, and the number of iterations is bigger than
+        'min_iterations', the iterative optimization stops
+    min_iterations : integer, default 1
+        Minimal number of iterations to run regardless of whether the
+        msq_threshold is already undercut
+    max_iterations : integer, default 100
+        Maximal number of iterations to run regardless of whether the
+        msq_threshold is already undercut
+    **kwargs
+        Keyword arguments of the lopf function which runs at each iteration
+
+    '''
+
+    n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier)
+    ext_i = get_extendable_i(n, 'Line')
+    typed_i = n.lines.query('type != ""').index
+    ext_untyped_i = ext_i.difference(typed_i)
+    ext_typed_i = ext_i & typed_i
+    base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
+                  n.lines.bus0.map(n.buses.v_nom))
+    n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i]
+
+    def update_line_params(n, s_nom_prev):
+        factor = n.lines.s_nom_opt / s_nom_prev
+        for attr, carrier in (('x', 'AC'), ('r', 'DC')):
+            ln_i = (n.lines.query('carrier == @carrier').index & ext_untyped_i)
+            n.lines.loc[ln_i, attr] /= factor[ln_i]
+        ln_i = ext_i & typed_i
+        n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i]
+
+    def msq_diff(n, s_nom_prev):
+        lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \
+                    n.lines['s_nom_opt'].mean()
+        logger.info(f"Mean square difference after iteration {iteration} is "
+                    f"{lines_err}")
+        return lines_err
+
+    iteration = 0
+    kwargs['store_basis'] = True
+    diff = msq_threshold
+    while diff >= msq_threshold or iteration < min_iterations:
+        if iteration >= max_iterations:
+            logger.info(f'Iteration {iteration} beyond max_iterations '
+                        f'{max_iterations}. Stopping ...')
+            break
+
+        s_nom_prev = n.lines.s_nom_opt if iteration else n.lines.s_nom
+        kwargs['warmstart'] = bool(iteration and hasattr(n, 'basis_fn'))
+        network_lopf(n, snapshots, **kwargs)
+        update_line_params(n, s_nom_prev)
+        diff = msq_diff(n, s_nom_prev)
+        iteration += 1
+    logger.info('Running last lopf with fixed branches, overwrite p_nom '
+                'for links and s_nom for lines')
+    ext_links_i = get_extendable_i(n, 'Link')
+    n.lines[['s_nom', 's_nom_extendable']] = n.lines['s_nom_opt'], False
+    n.links[['p_nom', 'p_nom_extendable']] = n.links['p_nom_opt'], False
+    network_lopf(n, snapshots, **kwargs)
+    n.lines.loc[ext_i, 's_nom_extendable'] = True
+    n.links.loc[ext_links_i, 'p_nom_extendable'] = True
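+
+# A minimal sketch of the iterative workflow above (illustrative network path,
+# solver and thresholds are assumptions):
+#
+#     import pypsa
+#     n = pypsa.Network('examples/ac-dc-meshed/ac-dc-data')
+#     n.lines['s_nom_extendable'] = True
+#     ilopf(n, msq_threshold=0.01, min_iterations=2, max_iterations=10,
+#           solver_name='glpk')
+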
diff --git a/pypsa/linopt.py b/pypsa/linopt.py
new file mode 100644
index 000000000..fd871f799
--- /dev/null
+++ b/pypsa/linopt.py
@@ -0,0 +1,706 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Tools for fast Linear Problem file writing. This module contains
+
+- io functions for writing out variables, constraints and objective
+  into an lp file
+- functions to create lp format based linear expressions
+- solver functions which read the lp file, run the problem and return the
+  solution
+
+This module supports the linear optimal power flow calculation without using
+pyomo (see module linopf.py)
+"""
+
+from .descriptors import Dict
+import pandas as pd
+import os, logging, re, io, subprocess
+import numpy as np
+from pandas import IndexSlice as idx
+
+logger = logging.getLogger(__name__)
+
+# =============================================================================
+# Front end functions
+# =============================================================================
+
+def define_variables(n, lower, upper, name, attr='', axes=None, spec=''):
+    """
+    Defines variable(s) for pypsa-network with given lower bound(s) and upper
+    bound(s). The variables are stored in the network object under n.vars with
+    key of the variable name. If multiple variables are defined at once, at
+    least one of lower and upper has to be an array (including pandas) of
+    shape > (1,) or axes have to define the dimensions of the variables.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    lower : pd.Series/pd.DataFrame/np.array/str/float
+        lower bound(s) for the variable(s)
+    upper : pd.Series/pd.DataFrame/np.array/str/float
+        upper bound(s) for the variable(s)
+    name : str
+        general name of the variable (or component which the variable is
+        referring to). The variable will then be stored under:
+
+        * n.vars[name].pnl if the variable is two-dimensional
+        * n.vars[name].df if the variable is one-dimensional
+
+        but can easily be accessed with :func:`get_var(n, name, attr)`
+    attr : str default ''
+        Specifying name of the variable, defines under which name the variable(s)
+        are stored in n.vars[name].pnl if two-dimensional or in n.vars[name].df
+        if one-dimensional
+    axes : pd.Index or tuple of pd.Index objects, default None
+        Specifies the axes and therefore the shape of the variables if bounds
+        are single strings or floats. This is helpful when multiple variables
+        have the same upper and lower bound.
+
+
+    Example
+    --------
+
+    Let's say we want to define a demand-side-managed load at each bus of
+    network n, which has a minimum of 0 and a maximum of 10. We then define
+    lower bound (lb) and upper bound (ub) and pass it to define_variables
+
+    >>> from pypsa.linopt import define_variables, get_var
+    >>> lb = pd.DataFrame(0, index=n.snapshots, columns=n.buses.index)
+    >>> ub = pd.DataFrame(10, index=n.snapshots, columns=n.buses.index)
+    >>> define_variables(n, lb, ub, 'DSM', 'variableload')
+
+    Now the variables can be accessed by :func:`pypsa.linopt.get_var` using
+
+    >>> variables = get_var(n, 'DSM', 'variableload')
+
+    Note that this is useful for the `extra_functionality` argument.
+    """
+    var = write_bound(n, lower, upper, axes)
+    set_varref(n, var, name, attr, spec=spec)
+
+
+def define_binaries(n, axes, name, attr='', spec=''):
+    """
+    Defines binary-variable(s) for pypsa-network. The variables are stored
+    in the network object under n.vars with key of the variable name.
+    For each entry of the pd.Series or pd.DataFrame spanned by the axes
+    argument the function defines a binary.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    axes : pd.Index or tuple of pd.Index objects
+        Specifies the axes and therefore the shape of the variables.
+    name : str
+        general name of the variable (or component which the variable is
+        referring to). The variable will then be stored under:
+
+        * n.vars[name].pnl if the variable is two-dimensional
+        * n.vars[name].df if the variable is one-dimensional
+    attr : str default ''
+        Specifying name of the variable, defines under which name the variable(s)
+        are stored in n.vars[name].pnl if two-dimensional or in n.vars[name].df
+        if one-dimensional
+
+    See also
+    --------
+    define_variables
+
+    """
+    var = write_binary(n, axes)
+    set_varref(n, var, name, attr, spec=spec)
+
+
+def define_constraints(n, lhs, sense, rhs, name, attr='', axes=None, spec=''):
+    """
+    Defines constraint(s) for pypsa-network with given left hand side (lhs),
+    sense and right hand side (rhs). The constraints are stored in the network
+    object under n.cons with key of the constraint name. If multiple constraints
+    are defined at once, only using np.arrays, then the axes argument can be used
+    for defining the axes for the constraints (this is especially recommended
+    for time-dependent constraints). If one of lhs, sense and rhs is a
+    pd.Series/pd.DataFrame the axes argument is not necessary.
+
+    Parameters
+    ----------
+    n: pypsa.Network
+    lhs: pd.Series/pd.DataFrame/np.array/str/float
+        left hand side of the constraint(s), created with
+        :func:`pypsa.linopt.linexpr`.
+    sense: pd.Series/pd.DataFrame/np.array/str/float
+        sense(s) of the constraint(s)
+    rhs: pd.Series/pd.DataFrame/np.array/str/float
+        right hand side of the constraint(s), must only contain pure constants,
+        no variables
+    name: str
+        general name of the constraint (or component which the constraint is
+        referring to). The constraint will then be stored under:
+
+        * n.cons[name].pnl if the constraint is two-dimensional
+        * n.cons[name].df if the constraint is one-dimensional
+    attr: str default ''
+        Specifying name of the constraint, defines under which name the
+        constraint(s) are stored in n.cons[name].pnl if two-dimensional or in
+        n.cons[name].df if one-dimensional
+    axes: pd.Index or tuple of pd.Index objects, default None
+        Specifies the axes if all of lhs, sense and rhs are np.arrays or single
+        strings or floats.
+
+
+    Example
+    --------
+
+    Let's say we want to constrain all gas generators to a maximum of 100 MWh
+    during the first 10 snapshots. We then first get all operational variables
+    for this subset and constrain their sum to be less than or equal to 100.
+
+    >>> from pypsa.linopt import get_var, linexpr, define_constraints
+    >>> gas_i = n.generators.query('carrier == "Natural Gas"').index
+    >>> gas_vars = get_var(n, 'Generator', 'p').loc[n.snapshots[:10], gas_i]
+    >>> lhs = linexpr((1, gas_vars)).sum().sum()
+    >>> define_constraints(n, lhs, '<=', 100, 'Generator', 'gas_power_limit')
+
+    Now the constraint references can be accessed by
+    :func:`pypsa.linopt.get_con` using
+
+    >>> cons = get_con(n, 'Generator', 'gas_power_limit')
+
+    Under the hood they are stored in n.cons.Generator.pnl.gas_power_limit.
+    For retrieving their shadow prices add the general name of the constraint
+    to the keep_shadowprices argument.
+
+    Note that this is useful for the `extra_functionality` argument.
+
+    """
+    con = write_constraint(n, lhs, sense, rhs, axes)
+    set_conref(n, con, name, attr, spec=spec)
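+
+# A minimal sketch (hypothetical carrier name) of how the front end functions
+# above are combined in an `extra_functionality` hook for the pyomo-free lopf:
+#
+#     def extra_functionality(n, snapshots):
+#         gas_i = n.generators.query('carrier == "Natural Gas"').index
+#         gas_vars = get_var(n, 'Generator', 'p').loc[snapshots[:10], gas_i]
+#         lhs = linexpr((1, gas_vars)).sum().sum()
+#         define_constraints(n, lhs, '<=', 100, 'Generator', 'gas_power_limit')
+#
+#     n.lopf(pyomo=False, extra_functionality=extra_functionality)
+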
+ + """ + con = write_constraint(n, lhs, sense, rhs, axes) + set_conref(n, con, name, attr, spec=spec) + +# ============================================================================= +# writing functions +# ============================================================================= + +def _get_handlers(axes, *maybearrays): + axes = [axes] if isinstance(axes, pd.Index) else axes + if axes is None: + axes, shape = broadcasted_axes(*maybearrays) + else: + shape = tuple(map(len, axes)) + length = np.prod(shape) + return axes, shape, length + + +def write_bound(n, lower, upper, axes=None): + """ + Writer function for writing out mutliple variables at a time. If lower and + upper are floats it demands to give pass axes, a tuple of (index, columns) + or (index), for creating the variable of same upper and lower bounds. + Return a series or frame with variable references. + """ + axes, shape, length = _get_handlers(axes, lower, upper) + if not length: return pd.Series() + n._xCounter += length + variables = np.arange(n._xCounter - length, n._xCounter).reshape(shape) + lower, upper = _str_array(lower), _str_array(upper) + n.bounds_f.write(join_exprs(lower + ' <= x' + _str_array(variables, True) + + ' <= '+ upper + '\n')) + return to_pandas(variables, *axes) + +def write_constraint(n, lhs, sense, rhs, axes=None): + """ + Writer function for writing out mutliple constraints to the corresponding + constraints file. If lower and upper are numpy.ndarrays it axes must not be + None but a tuple of (index, columns) or (index). + Return a series or frame with constraint references. + """ + axes, shape, length = _get_handlers(axes, lhs, sense, rhs) + if not length: return pd.Series() + n._cCounter += length + cons = np.arange(n._cCounter - length, n._cCounter).reshape(shape) + if isinstance(sense, str): + sense = '=' if sense == '==' else sense + lhs, sense, rhs = _str_array(lhs), _str_array(sense), _str_array(rhs) + n.constraints_f.write(join_exprs('c' + _str_array(cons, True) + ':\n' + + lhs + sense + ' ' + rhs + '\n\n')) + return to_pandas(cons, *axes) + +def write_binary(n, axes): + """ + Writer function for writing out mutliple binary-variables at a time. + According to the axes it writes out binaries for each entry the pd.Series + or pd.DataFrame spanned by axes. Returns a series or frame with variable + references. + """ + axes, shape, length = _get_handlers(axes) + n._xCounter += length + variables = np.arange(n._xCounter - length, n._xCounter).reshape(shape) + n.binaries_f.write(join_exprs('x' + _str_array(variables, True) + '\n')) + return to_pandas(variables, *axes) + +# ============================================================================= +# helpers, helper functions +# ============================================================================= + +def broadcasted_axes(*dfs): + """ + Helper function which, from a collection of arrays, series, frames and other + values, retrieves the axes of series and frames which result from + broadcasting operations. It checks whether index and columns of given + series and frames, repespectively, are aligned. Using this function allows + to subsequently use pure numpy operations and keep the axes in the + background. + + """ + axes = [] + shape = (1,) + + if set(map(type, dfs)) == {tuple}: + dfs = sum(dfs, ()) + + for df in dfs: + shape = max(shape, np.asarray(df).shape) + if isinstance(df, (pd.Series, pd.DataFrame)): + if len(axes): + assert (axes[-1] == df.axes[-1]).all(), ('Series or DataFrames ' + 'are not aligned. 
+
+# =============================================================================
+# helpers, helper functions
+# =============================================================================
+
+def broadcasted_axes(*dfs):
+    """
+    Helper function which, from a collection of arrays, series, frames and other
+    values, retrieves the axes of series and frames which result from
+    broadcasting operations. It checks whether index and columns of given
+    series and frames, respectively, are aligned. Using this function allows
+    to subsequently use pure numpy operations and keep the axes in the
+    background.
+
+    """
+    axes = []
+    shape = (1,)
+
+    if set(map(type, dfs)) == {tuple}:
+        dfs = sum(dfs, ())
+
+    for df in dfs:
+        shape = max(shape, np.asarray(df).shape)
+        if isinstance(df, (pd.Series, pd.DataFrame)):
+            if len(axes):
+                assert (axes[-1] == df.axes[-1]).all(), (
+                    'Series or DataFrames are not aligned. Please make sure '
+                    'that all indexes and columns of Series and DataFrames '
+                    'going into the linear expression are equally sorted.')
+            axes = df.axes if len(df.axes) > len(axes) else axes
+    return axes, shape
+
+
+def align_with_static_component(n, c, attr):
+    """
+    Alignment of time-dependent variables with static components. If c is a
+    pypsa.component name, it will sort the columns of the variable according
+    to the static component.
+    """
+    if c in n.all_components and (c, attr) in n.variables.index:
+        if not n.variables.pnl[c, attr]: return
+        if len(n.vars[c].pnl[attr].columns) != len(n.df(c).index): return
+        n.vars[c].pnl[attr] = n.vars[c].pnl[attr].reindex(columns=n.df(c).index)
+
+
+def linexpr(*tuples, as_pandas=True, return_axes=False):
+    """
+    Elementwise concatenation of tuples in the form (coefficient, variables).
+    Coefficient and variables can be arrays, series or frames. By default
+    returns a pandas.Series or pandas.DataFrame of strings. If return_axes
+    is set to True the return value is split into values and axes, where values
+    are the numpy.array and axes a tuple containing index and column if present.
+
+    Parameters
+    ----------
+    tuples: tuple of tuples
+        Each tuple must be of the form (coeff, var), where
+
+        * coeff is a numerical value, or a numerical array, series, frame
+        * var is a str or an array, series, frame of variable strings
+    as_pandas : bool, default True
+        Whether to return the resulting array as a series, if 1-dimensional, or
+        a frame, if 2-dimensional. Supersedes return_axes argument.
+    return_axes: Boolean, default False
+        Whether to return index and column (if existent)
+
+    Example
+    -------
+    Initialize coefficients and variables
+
+    >>> coeff1 = 1
+    >>> var1 = pd.Series(['a1', 'a2', 'a3'])
+    >>> coeff2 = pd.Series([-0.5, -0.3, -1])
+    >>> var2 = pd.Series(['b1', 'b2', 'b3'])
+
+    Create the linear expression strings
+
+    >>> linexpr((coeff1, var1), (coeff2, var2))
+    0    +1.0 a1 -0.5 b1
+    1    +1.0 a2 -0.3 b2
+    2    +1.0 a3 -1.0 b3
+    dtype: object
+
+    For a further step the resulting frame can be used as the lhs of
+    :func:`pypsa.linopt.define_constraints`
+
+    For retrieving only the values:
+
+    >>> linexpr((coeff1, var1), (coeff2, var2), as_pandas=False)
+    array(['+1.0 a1 -0.5 b1', '+1.0 a2 -0.3 b2', '+1.0 a3 -1.0 b3'], dtype=object)
+
+    """
+    axes, shape = broadcasted_axes(*tuples)
+    expr = np.repeat('', np.prod(shape)).reshape(shape).astype(object)
+    if np.prod(shape):
+        for coeff, var in tuples:
+            expr = expr + _str_array(coeff) + ' x' + _str_array(var, True) + '\n'
+    if return_axes:
+        return (expr, *axes)
+    if as_pandas:
+        return to_pandas(expr, *axes)
+    return expr
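+
+# Sketch: joining the elementwise strings of linexpr into one scalar lhs, as
+# done e.g. in define_global_constraints in pypsa.linopf (join_exprs is
+# defined below):
+#
+#     vals = linexpr((coeff, variables), as_pandas=False)
+#     lhs = join_exprs(vals)  # single string summing all terms
+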
+
+
+def to_pandas(array, *axes):
+    """
+    Convert a numpy array to pandas.Series if 1-dimensional or to a
+    pandas.DataFrame if 2-dimensional. Provide index and columns if needed.
+    """
+    return pd.Series(array, *axes) if array.ndim == 1 else pd.DataFrame(array, *axes)
+
+_to_float_str = lambda f: '%+f'%f
+_v_to_float_str = np.vectorize(_to_float_str, otypes=[object])
+
+_to_int_str = lambda d: '%d'%d
+_v_to_int_str = np.vectorize(_to_int_str, otypes=[object])
+
+def _str_array(array, integer_string=False):
+    if isinstance(array, (float, int)):
+        if integer_string:
+            return _to_int_str(array)
+        return _to_float_str(array)
+    array = np.asarray(array)
+    if array.dtype < str and array.size:
+        if integer_string:
+            return _v_to_int_str(np.asarray(array))
+        return _v_to_float_str(np.asarray(array))
+    else:
+        return array
+
+def join_exprs(df):
+    """
+    Helper function to join arrays, series or frames of strings together.
+
+    """
+    return ''.join(np.asarray(df).flatten())
+
+# =============================================================================
+# references to vars and cons, rewrite this part to not store every reference
+# =============================================================================
+
+def _add_reference(ref_dict, df, attr, pnl=True):
+    if pnl:
+        if attr in ref_dict.pnl:
+            ref_dict.pnl[attr][df.columns] = df
+        else:
+            ref_dict.pnl[attr] = df
+    else:
+        if ref_dict.df.empty:
+            ref_dict.df[attr] = df
+        else:
+            ref_dict.df = pd.concat([ref_dict.df, df.to_frame(attr)])
+
+def set_varref(n, variables, c, attr, spec=''):
+    """
+    Sets variable references to the network.
+    One-dimensional variable references will be collected at n.vars[c].df,
+    two-dimensional variables in n.vars[c].pnl. For example:
+
+    * nominal capacity variables for generators are stored in
+      `n.vars.Generator.df.p_nom`
+    * operational variables for generators are stored in
+      `n.vars.Generator.pnl.p`
+    """
+    if not variables.empty:
+        pnl = variables.ndim == 2
+        if c not in n.variables.index:
+            n.vars[c] = Dict(df=pd.DataFrame(), pnl=Dict())
+        if ((c, attr) in n.variables.index) and (spec != ''):
+            n.variables.at[idx[c, attr], 'specification'] += ', ' + spec
+        else:
+            n.variables.loc[idx[c, attr], :] = [pnl, spec]
+        _add_reference(n.vars[c], variables, attr, pnl=pnl)
+
+def set_conref(n, constraints, c, attr, spec=''):
+    """
+    Sets constraint references to the network.
+    One-dimensional constraint references will be collected at n.cons[c].df,
+    two-dimensional in n.cons[c].pnl. For example:
+
+    * constraints for nominal capacity variables for generators are stored in
+      `n.cons.Generator.df.mu_upper`
+    * operational capacity limits for generators are stored in
+      `n.cons.Generator.pnl.mu_upper`
+    """
+    if not constraints.empty:
+        pnl = constraints.ndim == 2
+        if c not in n.constraints.index:
+            n.cons[c] = Dict(df=pd.DataFrame(), pnl=Dict())
+        if ((c, attr) in n.constraints.index) and (spec != ''):
+            n.constraints.at[idx[c, attr], 'specification'] += ', ' + spec
+        else:
+            n.constraints.loc[idx[c, attr], :] = [pnl, spec]
+        _add_reference(n.cons[c], constraints, attr, pnl=pnl)
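+
+# Short illustration (sketch) of the reference layout maintained by the two
+# functions above, e.g. once operational and nominal generator variables exist:
+#
+#     n.variables                      # lookup table with 'pnl'/'specification'
+#     n.vars['Generator'].pnl['p']     # frame of time-dependent variable labels
+#     n.vars['Generator'].df['p_nom']  # one-dimensional variable labels
+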
+
+def get_var(n, c, attr, pop=False):
+    '''
+    Retrieves variable references for a given static or time-dependent
+    attribute of a given component. The function looks into n.variables to
+    detect whether the variable is time-dependent or static.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    c : str
+        component name to which the variable belongs
+    attr : str
+        attribute name of the variable
+
+    Example
+    -------
+    >>> get_var(n, 'Generator', 'p')
+
+    '''
+    vvars = n.vars[c].pnl if n.variables.pnl[c, attr] else n.vars[c].df
+    return vvars.pop(attr) if pop else vvars[attr]
+
+
+def get_con(n, c, attr, pop=False):
+    """
+    Retrieves constraint references for a given static or time-dependent
+    attribute of a given component.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    c : str
+        component name to which the constraint belongs
+    attr : str
+        attribute name of the constraints
+
+    Example
+    -------
+    get_con(n, 'Generator', 'mu_upper')
+    """
+    cons = n.cons[c].pnl if n.constraints.pnl[c, attr] else n.cons[c].df
+    return cons.pop(attr) if pop else cons[attr]
+
+
+def get_sol(n, name, attr=''):
+    """
+    Retrieves the solution for a given variable. Note that a lookup of all
+    stored solutions is given in n.solutions.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    name : str
+        general variable name (or component name if variable is attached to a
+        component)
+    attr : str
+        attribute name of the variable
+
+    Example
+    -------
+    get_sol(n, 'Generator', 'p')
+    """
+    pnl = n.solutions.at[(name, attr), 'pnl']
+    if n.solutions.at[(name, attr), 'in_comp']:
+        return n.pnl(name)[attr] if pnl else n.df(name)[attr + '_opt']
+    else:
+        return n.sols[name].pnl[attr] if pnl else n.sols[name].df[attr]
+
+
+def get_dual(n, name, attr=''):
+    """
+    Retrieves the shadow price for a given constraint. Note that for retrieving
+    shadow prices of a custom constraint, its name has to be passed to
+    `keep_shadowprices` in the lopf, or `keep_shadowprices` has to be set to
+    True. Note that a lookup of all stored shadow prices is given in
+    n.dualvalues.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    name : str
+        constraint name to which the constraint belongs
+    attr : str
+        attribute name of the constraints
+
+    Example
+    -------
+    get_dual(n, 'Generator', 'mu_upper')
+    """
+    pnl = n.dualvalues.at[(name, attr), 'pnl']
+    if n.dualvalues.at[(name, attr), 'in_comp']:
+        return n.pnl(name)[attr] if pnl else n.df(name)[attr]
+    else:
+        return n.duals[name].pnl[attr] if pnl else n.duals[name].df[attr]
+
+
+# =============================================================================
+# solvers
+# =============================================================================
+
+def set_int_index(ser):
+    ser.index = ser.index.str[1:].astype(int)
+    return ser
+
+def run_and_read_cbc(n, problem_fn, solution_fn, solver_logfile,
+                     solver_options, keep_files, warmstart=None,
+                     store_basis=True):
+    """
+    Solving function. Reads the linear problem file and passes it to the cbc
+    solver. If the solution is successful it returns variable solutions and
+    constraint dual values.
+
+    For more information on the solver options, run 'cbc' in your shell
+    """
+    # printingOptions determines what goes into the solution file
+    command = f"cbc -printingOptions all -import {problem_fn} "
+    if warmstart:
+        command += f'-basisI {warmstart} '
+    if (solver_options is not None) and (solver_options != {}):
+        # assumption: solver_options maps cbc option names to their values
+        command += ' '.join(f'-{k} {v}' for k, v in solver_options.items()) + ' '
+    command += f"-solve -solu {solution_fn} "
+    if store_basis:
+        n.basis_fn = solution_fn.replace('.sol', '.bas')
+        command += f'-basisO {n.basis_fn} '
+
+    result = subprocess.run(command.split(' '), stdout=subprocess.PIPE)
+    if solver_logfile is not None:
+        with open(solver_logfile, 'w') as log:
+            print(result.stdout.decode('utf-8'), file=log)
+
+    with open(solution_fn, "r") as f:
+        data = f.readline()
+
+    if data.startswith("Optimal - objective value"):
+        status = "ok"
+        termination_condition = "optimal"
+        objective = float(data[len("Optimal - objective value "):])
+    elif "Infeasible" in data:
+        termination_condition = "infeasible"
+        status = 'infeasible'
+    else:
+        termination_condition = "other"
+        status = 'other'
+
+    if termination_condition != "optimal":
+        return status, termination_condition, None, None, None
+
+    sol = pd.read_csv(solution_fn, header=None, skiprows=[0],
+                      sep=r'\s+', usecols=[1,2,3], index_col=0)
+    variables_b = sol.index.str[0] == 'x'
+    variables_sol = sol[variables_b][2].pipe(set_int_index)
+    constraints_dual = sol[~variables_b][3].pipe(set_int_index)
+
+    return (status, termination_condition, variables_sol,
+            constraints_dual, objective)
+
+
+def run_and_read_glpk(n, problem_fn, solution_fn, solver_logfile,
+                      solver_options, keep_files, warmstart=None,
+                      store_basis=True):
+    """
+    Solving function. Reads the linear problem file and passes it to the glpk
+    solver. If the solution is successful it returns variable solutions and
+    constraint dual values.
+ + For more information on the glpk solver options: + https://kam.mff.cuni.cz/~elias/glpk.pdf + """ + # TODO use --nopresol argument for non-optimal solution output + command = (f"glpsol --lp {problem_fn} --output {solution_fn}") + if solver_logfile is not None: + command += f' --log {solver_logfile}' + if warmstart: + command += f' --ini {warmstart}' + if store_basis: + n.basis_fn = solution_fn.replace('.sol', '.bas') + command += f' -w {n.basis_fn}' + if (solver_options is not None) and (solver_options != {}): + command += solver_options + + subprocess.run(command.split(' '), stdout=subprocess.PIPE) + + f = open(solution_fn) + def read_until_break(f): + linebreak = False + while not linebreak: + line = f.readline() + linebreak = line == '\n' + yield line + + info = io.StringIO(''.join(read_until_break(f))[:-2]) + info = pd.read_csv(info, sep=':', index_col=0, header=None)[1] + status = info.Status.lower().strip() + objective = float(re.sub('[^0-9\.\+\-]+', '', info.Objective)) + termination_condition = status + + if 'optimal' not in termination_condition: + return status, termination_condition, None, None, None + else: + status = 'ok' + + duals = io.StringIO(''.join(read_until_break(f))[:-2]) + duals = pd.read_fwf(duals)[1:].set_index('Row name') + if 'Marginal' in duals: + constraints_dual = pd.to_numeric(duals['Marginal'], 'coerce')\ + .fillna(0).pipe(set_int_index) + else: + logger.warning("Shadow prices of MILP couldn't be parsed") + constraints_dual = pd.Series(index=duals.index) + + sol = io.StringIO(''.join(read_until_break(f))[:-2]) + variables_sol = (pd.read_fwf(sol)[1:].set_index('Column name') + ['Activity'].astype(float).pipe(set_int_index)) + f.close() + + return (status, termination_condition, variables_sol, + constraints_dual, objective) + + +def run_and_read_gurobi(n, problem_fn, solution_fn, solver_logfile, + solver_options, keep_files, warmstart=None, + store_basis=True): + """ + Solving function. Reads the linear problem file and passes it to the gurobi + solver. If the solution is sucessful it returns variable solutions and + constraint dual values. 
Gurobipy must be installed for using this function + + For more information on solver options: + https://www.gurobi.com/documentation/{gurobi_verion}/refman/parameter_descriptions.html + """ + import gurobipy + # disable logging for this part, as gurobi output is doubled otherwise + logging.disable(50) + + m = gurobipy.read(problem_fn) + if solver_options is not None: + for key, value in solver_options.items(): + m.setParam(key, value) + if solver_logfile is not None: + m.setParam("logfile", solver_logfile) + + if warmstart: + m.read(warmstart) + m.optimize() + logging.disable(1) + + if store_basis: + n.basis_fn = solution_fn.replace('.sol', '.bas') + try: + m.write(n.basis_fn) + except gurobipy.GurobiError: + logger.info('No model basis stored') + del n.basis_fn + + Status = gurobipy.GRB.Status + statusmap = {getattr(Status, s) : s.lower() for s in Status.__dir__() + if not s.startswith('_')} + status = statusmap[m.status] + termination_condition = status + if termination_condition != "optimal": + return status, termination_condition, None, None, None + else: + status = 'ok' + + variables_sol = pd.Series({v.VarName: v.x for v + in m.getVars()}).pipe(set_int_index) + try: + constraints_dual = pd.Series({c.ConstrName: c.Pi for c in + m.getConstrs()}).pipe(set_int_index) + except AttributeError: + logger.warning("Shadow prices of MILP couldn't be parsed") + constraints_dual = pd.Series(index=[c.ConstrName for c in m.getConstrs()]) + objective = m.ObjVal + del m + return (status, termination_condition, variables_sol, + constraints_dual, objective) diff --git a/pypsa/opf.py b/pypsa/opf.py index 8e3ec3568..ac2437d8d 100644 --- a/pypsa/opf.py +++ b/pypsa/opf.py @@ -31,9 +31,8 @@ import numpy as np import pandas as pd from scipy.sparse.linalg import spsolve -from pyomo.environ import (ConcreteModel, Var, Objective, - NonNegativeReals, Constraint, Reals, - Suffix, Expression, Binary, SolverFactory) +from pyomo.environ import (ConcreteModel, Var, NonNegativeReals, Constraint, + Reals, Suffix, Binary, SolverFactory) try: from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver @@ -41,8 +40,6 @@ # Only used in conjunction with isinstance, so we mock it to be backwards compatible class PersistentSolver(): pass -from itertools import chain - import logging logger = logging.getLogger(__name__) @@ -57,7 +54,6 @@ class PersistentSolver(): pass find_bus_controls, calculate_B_H, calculate_PTDF, find_tree, find_cycles, _as_snapshots) from .opt import (l_constraint, l_objective, LExpression, LConstraint, - patch_optsolver_free_model_before_solving, patch_optsolver_record_memusage_before_solving, empty_network, free_pyomo_initializers) from .descriptors import (get_switchable_as_dense, get_switchable_as_iter, diff --git a/pypsa/opt.py b/pypsa/opt.py index 0951c8349..3d692e324 100644 --- a/pypsa/opt.py +++ b/pypsa/opt.py @@ -52,6 +52,10 @@ __copyright__ = "Copyright 2015-2017 Tom Brown (FIAS), Jonas Hoersch (FIAS), GNU GPL 3" +# ============================================================================= +# Tools for solving with pyomo +# ============================================================================= + class LExpression(object): """Affine expression of optimisation variables. 
@@ -382,3 +386,11 @@ def wrapper(): except ImportError: logger.debug("Unable to measure memory usage, since the resource library is missing") return False + + +# ============================================================================= +# Helpers for opf_lowmemory +# ============================================================================= + + + diff --git a/pypsa/pf.py b/pypsa/pf.py index e57125b0a..24575e446 100644 --- a/pypsa/pf.py +++ b/pypsa/pf.py @@ -616,7 +616,7 @@ def find_slack_bus(sub_network): gens = sub_network.generators() if len(gens) == 0: - logger.warning("No generators in sub-network {}, better hope power is already balanced".format(sub_network.name)) +# logger.warning("No generators in sub-network {}, better hope power is already balanced".format(sub_network.name)) sub_network.slack_generator = None sub_network.slack_bus = sub_network.buses_i()[0] @@ -641,7 +641,7 @@ def find_slack_bus(sub_network): #also put it into the dataframe sub_network.network.sub_networks.at[sub_network.name,"slack_bus"] = sub_network.slack_bus - logger.info("Slack bus for sub-network {} is {}".format(sub_network.name, sub_network.slack_bus)) + logger.debug("Slack bus for sub-network {} is {}".format(sub_network.name, sub_network.slack_bus)) def find_bus_controls(sub_network): @@ -900,7 +900,7 @@ def find_tree(sub_network, weight='x_pu'): #find bus with highest degree to use as slack tree_slack_bus, slack_degree = max(degree(sub_network.tree), key=itemgetter(1)) - logger.info("Tree slack bus is %s with degree %d.", tree_slack_bus, slack_degree) + logger.debug("Tree slack bus is %s with degree %d.", tree_slack_bus, slack_degree) #determine which buses are supplied in tree through branch from slack diff --git a/pypsa/stats.py b/pypsa/stats.py new file mode 100644 index 000000000..a346aeb7d --- /dev/null +++ b/pypsa/stats.py @@ -0,0 +1,219 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Post-solving statistics of network. This module contains functions to anaylize +an optimized network. Basic information of network can be summarized as well as +constraint gaps can be double-checked. +""" + +from .descriptors import (expand_series, get_switchable_as_dense as get_as_dense, + nominal_attrs) +import pandas as pd +import logging + +idx = pd.IndexSlice + + +# ============================================================================= +# Network summary +# ============================================================================= + +opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"} + +def calculate_costs(n): + raise NotImplementedError + mc = {} + for c in n.iterate_comonents(): + if 'marginal_cost' in c.df: + + mc[c] = c.df @ c.pnl['p'] + + +def calculate_curtailment(n): + max_pu = n.generators_t.p_max_pu + avail = (max_pu.multiply(n.generators.p_nom_opt.loc[max_pu.columns]).sum() + .groupby(n.generators.carrier).sum()) + used = (n.generators_t.p[max_pu.columns].sum() + .groupby(n.generators.carrier).sum()) + return (((avail - used)/avail)*100).round(3) + + +# and others from pypsa-eur + + +# ============================================================================= +# gap analysis +# ============================================================================= + + +def describe_storage_unit_contraints(n): + """ + Checks whether all storage units are balanced over time. This function + requires the network to contain the separate variables p_store and + p_dispatch, since they cannot be reconstructed from p. 
The latter results
+    from times tau where p_store(tau) > 0 **and** p_dispatch(tau) > 0, which
+    is allowed (even though not economic). Therefore p_store is necessarily
+    equal to negative entries of p, and vice versa for p_dispatch.
+    """
+    sus = n.storage_units
+    sus_i = sus.index
+    if sus_i.empty: return
+    sns = n.snapshots
+    c = 'StorageUnit'
+    pnl = n.pnl(c)
+
+    description = {}
+
+    eh = expand_series(n.snapshot_weightings, sus_i)
+    stand_eff = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh)
+    dispatch_eff = expand_series(n.df(c).efficiency_dispatch, sns).T
+    store_eff = expand_series(n.df(c).efficiency_store, sns).T
+    inflow = get_as_dense(n, c, 'inflow') * eh
+    spill = eh[pnl.spill.columns] * pnl.spill
+
+    description['Spillage Limit'] = pd.Series({'min':
+                                    (inflow[spill.columns] - spill).min().min()})
+
+    if 'p_store' in pnl:
+        soc = pnl.state_of_charge
+
+        store = store_eff * eh * pnl.p_store
+        dispatch = 1/dispatch_eff * eh * pnl.p_dispatch
+        start = soc.iloc[-1].where(sus.cyclic_state_of_charge,
+                                   sus.state_of_charge_initial)
+        previous_soc = stand_eff * soc.shift().fillna(start)
+
+
+        reconstructed = (previous_soc.add(store, fill_value=0)
+                         .add(inflow, fill_value=0)
+                         .add(-dispatch, fill_value=0)
+                         .add(-spill, fill_value=0))
+        description['SOC Balance StorageUnit'] = ((reconstructed - soc)
+                                                  .unstack().describe())
+    else:
+        logging.info('Storage Unit SOC balance not reconstructable as no '
+                     'p_store and p_dispatch in n.storage_units_t.')
+    return pd.concat(description, axis=1, sort=False)
+
+
+def describe_nodal_balance_constraint(n):
+    """
+    Helper function to double check whether network flow is balanced
+    """
+    network_injection = pd.concat(
+        [n.pnl(c)[f'p{inout}'].rename(columns=n.df(c)[f'bus{inout}'])
+         for inout in (0, 1) for c in ('Line', 'Transformer')], axis=1)\
+        .groupby(level=0, axis=1).sum()
+    return (n.buses_t.p - network_injection).unstack().describe()\
+           .to_frame('Nodal Balance Constr.')
+
+def describe_upper_dispatch_constraints(n):
+    '''
+    Recalculates the minimum gap between the dispatch and the maximal dispatch
+    allowed by the optimised nominal capacity
+    '''
+    description = {}
+    key = ' Upper Limit'
+    for c, attr in nominal_attrs.items():
+        dispatch_attr = 'p0' if c in ['Line', 'Transformer', 'Link'] else attr[0]
+        description[c + key] = pd.Series({'min':
+                               (n.df(c)[attr + '_opt'] *
+                                get_as_dense(n, c, attr[0] + '_max_pu') -
+                                n.pnl(c)[dispatch_attr]).min().min()})
+    return pd.concat(description, axis=1)
+
+
+def describe_lower_dispatch_constraints(n):
+    description = {}
+    key = ' Lower Limit'
+    for c, attr in nominal_attrs.items():
+        if c in ['Line', 'Transformer', 'Link']:
+            dispatch_attr = 'p0'
+            description[c + key] = pd.Series({'min':
+                                   (n.df(c)[attr + '_opt'] *
+                                    get_as_dense(n, c, attr[0] + '_max_pu') +
+                                    n.pnl(c)[dispatch_attr]).min().min()})
+        else:
+            dispatch_attr = attr[0]
+            description[c + key] = pd.Series({'min':
+                                   (-n.df(c)[attr + '_opt'] *
+                                    get_as_dense(n, c, attr[0] + '_min_pu') +
+                                    n.pnl(c)[dispatch_attr]).min().min()})
+    return pd.concat(description, axis=1)
+
+
+def describe_store_contraints(n):
+    """
+    Checks whether all stores are balanced over time.
+ """ + stores = n.stores + stores_i = stores.index + if stores_i.empty: return + sns = n.snapshots + c = 'Store' + pnl = n.pnl(c) + + eh = expand_series(n.snapshot_weightings, stores_i) + stand_eff = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh) + + start = pnl.e.iloc[-1].where(stores.e_cyclic, stores.e_initial) + previous_e = stand_eff * pnl.e.shift().fillna(start) + + return (previous_e - pnl.p - pnl.e).unstack().describe()\ + .to_frame('SOC Balance Store') + + +def describe_cycle_constraints(n): + weightings = n.lines.x_pu_eff.where(n.lines.carrier == 'AC', n.lines.r_pu_eff) + + def cycle_flow(sub): + C = pd.DataFrame(sub.C.todense(), index=sub.lines_i()) + if C.empty: + return None + C_weighted = 1e5 * C.mul(weightings[sub.lines_i()], axis=0) + return C_weighted.apply(lambda ds: ds @ n.lines_t.p0[ds.index].T) + + return pd.concat([cycle_flow(sub) for sub in n.sub_networks.obj], axis=0)\ + .unstack().describe().to_frame('Cycle Constr.') + + + +def constraint_stats(n, round_digit=1e-30): + """ + Post-optimization function to recalculate gap statistics of different + constraints. For inequality constraints only the minimum of lhs - rhs, with + lhs >= rhs is returned. + """ + return pd.concat([describe_cycle_constraints(n), + describe_store_contraints(n), + describe_storage_unit_contraints(n), + describe_nodal_balance_constraint(n), + describe_lower_dispatch_constraints(n), + describe_upper_dispatch_constraints(n)], + axis=1, sort=False) + +def check_constraints(n, tol=1e-3): + """ + Post-optimization test function to double-check most of the lopf + constraints. For relevant equaility constraints, it test whether the + deviation between lhs and rhs is below the given tolerance. For inequality + constraints, it test whether the inequality is violated with a higher + value then the tolerance. + + Parameters + ---------- + n : pypsa.Network + tol : float + Gap tolerance + + Returns AssertionError if tolerance is exceeded. 
+ + """ + n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier) + stats = constraint_stats(n).rename(index=str.title) + condition = stats.T[['Min', 'Max']].query('Min < -@tol | Max > @tol').T + assert condition.empty, (f'The following constraint(s) are exceeding the ' + f'given tolerance of {tol}: \n{condition}') + + + + diff --git a/pypsa/variables.csv b/pypsa/variables.csv new file mode 100644 index 000000000..673b5dcf3 --- /dev/null +++ b/pypsa/variables.csv @@ -0,0 +1,19 @@ +component,variable,marginal_cost,nominal,handle_separately +Generator,p,True,False,False +Generator,status,False,False,True +Generator,p_nom,False,True,False +Line,s,False,False,False +Line,s_nom,False,True,False +Transformer,s,False,False,False +Transformer,s_nom,False,True,False +Link,p,True,False,False +Link,p_nom,False,True,False +Store,e,False,False,False +Store,p,True,False,True +Store,e_nom,False,True,False +StorageUnit,p_dispatch,True,False,False +StorageUnit,p_store,False,False,False +StorageUnit,state_of_charge,False,False,False +StorageUnit,p_nom,False,True,False +StorageUnit,spill,False,False,True + diff --git a/setup.py b/setup.py index 9669f3f14..17c1f81b9 100644 --- a/setup.py +++ b/setup.py @@ -29,11 +29,12 @@ 'tables', 'pyomo>=5.3', 'matplotlib', - 'networkx>=1.10', + 'networkx>=1.10' ], extras_require = { "cartopy": ['cartopy>=0.16'], - "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"] + "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"], + 'gurobipy':['gurobipy'] }, classifiers=[ 'Development Status :: 3 - Alpha', diff --git a/test/test_ac_dc_lopf.py b/test/test_ac_dc_lopf.py index 21fc9013b..7cfb831fa 100644 --- a/test/test_ac_dc_lopf.py +++ b/test/test_ac_dc_lopf.py @@ -1,54 +1,50 @@ -from __future__ import print_function, division -from __future__ import absolute_import - import pypsa - -import datetime -import pandas as pd - -import networkx as nx - -import numpy as np - -from itertools import chain, product - +from itertools import product import os +from numpy.testing import assert_array_almost_equal as equal +import sys - - -from distutils.spawn import find_executable - +solver_name = 'glpk' if sys.platform == 'win32' else 'cbc' def test_lopf(): - csv_folder_name = os.path.join(os.path.dirname(__file__), "../examples/ac-dc-meshed/ac-dc-data") + csv_folder_name = os.path.join(os.path.dirname(__file__), "..", "examples", + "ac-dc-meshed", "ac-dc-data") - network = pypsa.Network(csv_folder_name) + n = pypsa.Network(csv_folder_name) results_folder_name = os.path.join(csv_folder_name,"results-lopf") - network_r = pypsa.Network(results_folder_name) - + n_r = pypsa.Network(results_folder_name) #test results were generated with GLPK; solution should be unique, #so other solvers should not differ (tested with cbc and gurobi) - solver_name = "cbc" - snapshots = network.snapshots + snapshots = n.snapshots for formulation, free_memory in product(["angles", "cycles", "kirchhoff", "ptdf"], [{}, {"pypsa"}]): - network.lopf(snapshots=snapshots,solver_name=solver_name,formulation=formulation, free_memory=free_memory) - print(network.generators_t.p.loc[:,network.generators.index]) - print(network_r.generators_t.p.loc[:,network.generators.index]) - - np.testing.assert_array_almost_equal(network.generators_t.p.loc[:,network.generators.index],network_r.generators_t.p.loc[:,network.generators.index],decimal=4) - - np.testing.assert_array_almost_equal(network.lines_t.p0.loc[:,network.lines.index],network_r.lines_t.p0.loc[:,network.lines.index],decimal=4) - - 
-        np.testing.assert_array_almost_equal(network.links_t.p0.loc[:,network.links.index],network_r.links_t.p0.loc[:,network.links.index],decimal=4)
-
+        n.lopf(snapshots=snapshots, solver_name=solver_name,
+               formulation=formulation, free_memory=free_memory)
+
+        equal(n.generators_t.p.loc[:,n.generators.index],
+              n_r.generators_t.p.loc[:,n.generators.index], decimal=4)
+        equal(n.lines_t.p0.loc[:,n.lines.index],
+              n_r.lines_t.p0.loc[:,n.lines.index], decimal=4)
+        equal(n.links_t.p0.loc[:,n.links.index],
+              n_r.links_t.p0.loc[:,n.links.index], decimal=4)
+
+    if sys.version_info.major >= 3:
+        status, cond = n.lopf(snapshots=snapshots, solver_name=solver_name,
+                              pyomo=False)
+        assert status == 'ok'
+        equal(n.generators_t.p.loc[:,n.generators.index],
+              n_r.generators_t.p.loc[:,n.generators.index], decimal=2)
+        equal(n.lines_t.p0.loc[:,n.lines.index],
+              n_r.lines_t.p0.loc[:,n.lines.index], decimal=2)
+        equal(n.links_t.p0.loc[:,n.links.index],
+              n_r.links_t.p0.loc[:,n.links.index], decimal=2)

 if __name__ == "__main__":
diff --git a/test/test_ac_dc_lpf.py b/test/test_ac_dc_lpf.py
index d3774876f..7abd2330e 100644
--- a/test/test_ac_dc_lpf.py
+++ b/test/test_ac_dc_lpf.py
@@ -21,7 +21,7 @@ def test_lpf():

-    csv_folder_name = os.path.join(os.path.dirname(__file__), "../examples/ac-dc-meshed/ac-dc-data")
+    csv_folder_name = os.path.join(os.path.dirname(__file__), "..", "examples", "ac-dc-meshed", "ac-dc-data")

     network = pypsa.Network(csv_folder_name)
diff --git a/test/test_opf_storage.py b/test/test_opf_storage.py
index 62a21df35..320deb198 100644
--- a/test/test_opf_storage.py
+++ b/test/test_opf_storage.py
@@ -1,61 +1,39 @@
-from __future__ import print_function, division
-from __future__ import absolute_import
 import pypsa
-
-import datetime
 import pandas as pd
-
-import networkx as nx
-
-import numpy as np
-
-from itertools import chain
-
+import sys
 import os
+from numpy.testing import assert_array_almost_equal as equal
-
-from distutils.spawn import find_executable
-
+solvers = ['glpk'] if sys.platform == 'win32' else ['cbc', 'glpk']

 def test_opf():
+    csv_folder_name = os.path.join(os.path.dirname(__file__), "..", "examples",
+                                   "opf-storage-hvdc","opf-storage-data")

-    csv_folder_name = os.path.join(os.path.dirname(__file__), "../examples/opf-storage-hvdc/opf-storage-data")
-
-    network = pypsa.Network(csv_folder_name)
-
-    #test results were generated with GLPK and other solvers may differ
-    solver_name = "glpk"
-
-    snapshots = network.snapshots
-
-    network.lopf(snapshots=snapshots,solver_name=solver_name)
-
-
-    results_folder_name = "results"
-
+    n = pypsa.Network(csv_folder_name)

-    network.export_to_csv_folder(results_folder_name)
+    target_path = os.path.join(csv_folder_name,"results","generators-p.csv")

-    good_results_filename = os.path.join(csv_folder_name,"results","generators-p.csv")
+    target_gen_p = pd.read_csv(target_path, index_col=0)

-    good_arr = pd.read_csv(good_results_filename,index_col=0).values
-
-    print(good_arr)
-
-    results_filename = os.path.join(results_folder_name,"generators-p.csv")
-
-
-    arr = pd.read_csv(results_filename,index_col=0).values
+    #test results were generated with GLPK and other solvers may differ
+    for solver_name in solvers:
+        n.lopf(solver_name=solver_name, pyomo=True)

-    print(arr)
+        equal(n.generators_t.p.reindex_like(target_gen_p), target_gen_p, decimal=2)

+    if sys.version_info.major >= 3:
-    np.testing.assert_array_almost_equal(arr,good_arr)
+        for solver_name in solvers:
+            status, cond = n.lopf(solver_name=solver_name, pyomo=False)
+            assert status == 'ok'
+            equal(n.generators_t.p.reindex_like(target_gen_p), target_gen_p,
+                  decimal=2)

 if __name__ == "__main__":
diff --git a/test/test_sclopf_scigrid.py b/test/test_sclopf_scigrid.py
index ae700ed1b..1eb580d7e 100644
--- a/test/test_sclopf_scigrid.py
+++ b/test/test_sclopf_scigrid.py
@@ -1,17 +1,18 @@
-from __future__ import print_function, division
-from __future__ import absolute_import
-
 import os
 import numpy as np
 import pypsa
+import sys
+
+solver_name = 'glpk' if sys.platform == 'win32' else 'cbc'
+

 def test_sclopf():

-    csv_folder_name = os.path.join(os.path.dirname(__file__), "../examples/scigrid-de/scigrid-with-load-gen-trafos/")
+    csv_folder_name = os.path.join(os.path.dirname(__file__), "..", "examples",
+                                   "scigrid-de", "scigrid-with-load-gen-trafos")

     network = pypsa.Network(csv_folder_name)

     #test results were generated with GLPK and other solvers may differ
-    solver_name = "cbc"

     #There are some infeasibilities without line extensions
     for line_name in ["316","527","602"]:
diff --git a/test/test_unit_commitment.py b/test/test_unit_commitment.py
index 3639b3c24..9b0c0f4ad 100644
--- a/test/test_unit_commitment.py
+++ b/test/test_unit_commitment.py
@@ -49,6 +49,47 @@ def test_part_load():
     np.testing.assert_array_almost_equal(nu.generators_t.p.values,expected_dispatch)


+def test_part_load_without_pyomo():
+    """This test is based on
+https://pypsa.org/examples/unit-commitment.html
+and is not very comprehensive."""
+
+    nu = pypsa.Network()
+
+    snapshots = range(4)
+
+    nu.set_snapshots(snapshots)
+
+    nu.add("Bus", "bus")
+
+    nu.add("Generator", "coal", bus="bus",
+           committable=True,
+           p_min_pu=0.3,
+           marginal_cost=20,
+           p_nom=10000)
+
+    nu.add("Generator", "gas", bus="bus",
+           committable=True,
+           marginal_cost=70,
+           p_min_pu=0.1,
+           p_nom=1000)
+
+    nu.add("Load", "load", bus="bus", p_set=[4000, 6000, 5000, 800])
+
+    solver_name = "glpk"
+
+    nu.lopf(nu.snapshots, solver_name=solver_name, pyomo=False)
+
+    expected_status = np.array([[1, 1, 1, 0], [0, 0, 0, 1]], dtype=float).T
+
+    np.testing.assert_array_almost_equal(nu.generators_t.status.values,
+                                         expected_status)
+
+    expected_dispatch = np.array([[4000, 6000, 5000, 0], [0, 0, 0, 800]],
+                                 dtype=float).T
+
+    np.testing.assert_array_almost_equal(nu.generators_t.p.values,
+                                         expected_dispatch)
+
+
 def test_minimum_up_time():
     """This test is based on
 https://pypsa.org/examples/unit-commitment.html
@@ -139,3 +180,4 @@ def test_minimum_down_time():
     test_minimum_down_time()
     test_minimum_up_time()
     test_part_load()
+    test_part_load_without_pyomo()
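
For reference, a minimal sketch of the pyomo-free solve path that the tests
above exercise; the example dataset path is taken from the test fixtures and
the solver choice is illustrative, not prescribed by this patch:

    import pypsa

    # one of the example networks shipped with PyPSA, as used in the tests
    n = pypsa.Network("examples/ac-dc-meshed/ac-dc-data")

    # with pyomo=False, lopf returns a (status, termination condition) tuple
    status, cond = n.lopf(solver_name="glpk", pyomo=False)
    assert status == "ok"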