forked from e2nIEE/pandapower
-
Notifications
You must be signed in to change notification settings - Fork 9
/
toolbox.py
2928 lines (2452 loc) · 123 KB
/
toolbox.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import copy
import gc
from collections import defaultdict
from collections.abc import Iterable
from itertools import chain
import numpy as np
import pandas as pd
from packaging import version
from pandapower.auxiliary import get_indices, pandapowerNet, _preserve_dtypes
from pandapower.create import create_switch, create_line_from_parameters, \
create_impedance, create_empty_network, create_gen, create_ext_grid, \
create_load, create_shunt, create_bus, create_sgen, create_storage
from pandapower.opf.validate_opf_input import _check_necessary_opf_parameters
from pandapower.run import runpp
from pandapower.std_types import change_std_type
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
# --- general issues
def element_bus_tuples(bus_elements=True, branch_elements=True, res_elements=False):
    """
    Utility function
    Provides the tuples of elements and corresponding columns for buses they are connected to
    :param bus_elements: whether tuples for bus elements e.g. load, sgen, ... are included
    :param branch_elements: whether branch elements e.g. line, trafo, ... are included
    :param res_elements: whether tuples for the corresponding result tables are included as well
    :return: set of tuples with element names and column names
    """
    bus_tuples = [("sgen", "bus"), ("load", "bus"), ("ext_grid", "bus"), ("gen", "bus"),
                  ("ward", "bus"), ("xward", "bus"), ("shunt", "bus"),
                  ("storage", "bus"), ("asymmetric_load", "bus"), ("asymmetric_sgen", "bus")]
    branch_tuples = [("line", "from_bus"), ("line", "to_bus"), ("impedance", "from_bus"),
                     ("switch", "bus"), ("impedance", "to_bus"), ("trafo", "hv_bus"),
                     ("trafo", "lv_bus"), ("trafo3w", "hv_bus"), ("trafo3w", "mv_bus"),
                     ("trafo3w", "lv_bus"), ("dcline", "from_bus"), ("dcline", "to_bus")]
    ebts = set()
    if bus_elements:
        ebts |= set(bus_tuples)
    if branch_elements:
        ebts |= set(branch_tuples)
    if res_elements:
        # these element tables have no corresponding result table
        elements_without_res = {"switch", "measurement", "asymmetric_load", "asymmetric_sgen"}
        ebts |= {("res_" + elm, col) for elm, col in ebts if elm not in elements_without_res}
    return ebts
def pp_elements(bus=True, bus_elements=True, branch_elements=True, other_elements=True,
                res_elements=False):
    """
    Returns a set of pandapower elements.
    """
    # element names connected to buses, taken from element_bus_tuples()
    names = {elm for elm, _ in element_bus_tuples(
        bus_elements=bus_elements, branch_elements=branch_elements, res_elements=res_elements)}
    if bus:
        names.add("bus")
    if other_elements:
        names.add("measurement")
    return names
def branch_element_bus_dict(include_switch=False):
    """
    Returns a dict with keys of branch elements and values of bus column names as list.
    """
    bebd = defaultdict(list)
    for elm, bus_col in element_bus_tuples(bus_elements=False, branch_elements=True,
                                           res_elements=False):
        bebd[elm].append(bus_col)
    if not include_switch:
        del bebd["switch"]
    return dict(bebd)
def signing_system_value(elm):
    """
    Returns a 1 for all bus elements using the consumer viewpoint and a -1 for all bus elements
    using the generator viewpoint.
    """
    # these element tables are stated in generator viewpoint
    if elm in ("ext_grid", "gen", "sgen"):
        return -1
    if elm in pp_elements(bus=False, branch_elements=False, other_elements=False):
        return 1
    raise ValueError("This function is defined for bus elements, not for '%s'." % str(elm))
# def pq_from_cosphi(s, cosphi, qmode, pmode):
# """
# Calculates P/Q values from rated apparent power and cosine(phi) values.
# - s: rated apparent power
# - cosphi: cosine phi of the
# - qmode: "ind" for inductive or "cap" for capacitive behaviour
# - pmode: "load" for load or "gen" for generation
# As all other pandapower functions this function is based on the consumer viewpoint. For active
# power, that means that loads are positive and generation is negative. For reactive power,
# inductive behaviour is modeled with positive values, capacitive behaviour with negative values.
# """
# s = np.array(ensure_iterability(s))
# cosphi = np.array(ensure_iterability(cosphi, len(s)))
# qmode = np.array(ensure_iterability(qmode, len(s)))
# pmode = np.array(ensure_iterability(pmode, len(s)))
#
# # qmode consideration
# unknown_qmode = set(qmode) - set(["ind", "cap", "ohm"])
# if len(unknown_qmode):
# raise ValueError("Unknown qmodes: " + str(list(unknown_qmode)))
# qmode_is_ohm = qmode == "ohm"
# if any(cosphi[qmode_is_ohm] != 1):
# raise ValueError("qmode cannot be 'ohm' if cosphi is not 1.")
# qsign = np.ones(qmode.shape)
# qsign[qmode == "cap"] = -1
#
# # pmode consideration
# unknown_pmode = set(pmode) - set(["load", "gen"])
# if len(unknown_pmode):
# raise ValueError("Unknown pmodes: " + str(list(unknown_pmode)))
# psign = np.ones(pmode.shape)
# psign[pmode == "gen"] = -1
#
# # calculate p and q
# p = psign * s * cosphi
# q = qsign * np.sqrt(s ** 2 - p ** 2)
#
# if len(p) > 1:
# return p, q
# else:
# return p[0], q[0]
def pq_from_cosphi(s, cosphi, qmode, pmode):
    """
    Calculates P/Q values from rated apparent power and cosine(phi) values.
    - s: rated apparent power
    - cosphi: cosine phi of the
    - qmode: "underexcited" (Q absorption, decreases voltage) or "overexcited" (Q injection,
      increases voltage)
    - pmode: "load" for load or "gen" for generation
    As all other pandapower functions this function is based on the consumer viewpoint. For active
    power, that means that loads are positive and generation is negative. For reactive power,
    underexcited behavior (Q absorption, decreases voltage) is modeled with positive values,
    overexcited behavior (Q injection, increases voltage) with negative values.
    """
    # scalar input: delegate directly
    if not hasattr(s, "__iter__"):
        return _pq_from_cosphi(s, cosphi, qmode, pmode)
    # iterable input: broadcast all arguments to the length of s and collect results
    s = ensure_iterability(s)
    cosphi = ensure_iterability(cosphi, len(s))
    qmode = ensure_iterability(qmode, len(s))
    pmode = ensure_iterability(pmode, len(s))
    results = [_pq_from_cosphi(*args) for args in zip(s, cosphi, qmode, pmode)]
    p = np.array([res[0] for res in results])
    q = np.array([res[1] for res in results])
    return p, q
def _pq_from_cosphi(s, cosphi, qmode, pmode):
if qmode in ("ind", "cap"):
logger.warning('capacitive or inductive behavior will be replaced by more clear terms ' +
'"underexcited" (Q absorption, decreases voltage) and "overexcited" ' +
'(Q injection, increases voltage). Please use "underexcited" ' +
'in place of "ind" and "overexcited" in place of "cap".')
if qmode == "ind" or qmode == "underexcited":
qsign = 1 if pmode == "load" else -1
elif qmode == "cap" or qmode == "overexcited":
qsign = -1 if pmode == "load" else 1
else:
raise ValueError('Unknown mode %s - specify "underexcited" (Q absorption, decreases voltage'
') or "overexcited" (Q injection, increases voltage)' % qmode)
p = s * cosphi
q = qsign * np.sqrt(s ** 2 - p ** 2)
return p, q
# def cosphi_from_pq(p, q):
# """
# Analog to pq_from_cosphi, but other way around.
# In consumer viewpoint (pandapower): cap=overexcited and ind=underexcited
# """
# p = np.array(ensure_iterability(p))
# q = np.array(ensure_iterability(q, len(p)))
# if len(p) != len(q):
# raise ValueError("p and q must have the same length.")
# p_is_zero = np.array(p == 0)
# cosphi = np.empty(p.shape)
# if sum(p_is_zero):
# cosphi[p_is_zero] = np.nan
# logger.warning("A cosphi from p=0 is undefined.")
# cosphi[~p_is_zero] = np.cos(np.arctan(q[~p_is_zero] / p[~p_is_zero]))
# s = (p ** 2 + q ** 2) ** 0.5
# pmode = np.array(["undef", "load", "gen"])[np.sign(p).astype(int)]
# qmode = np.array(["ohm", "ind", "cap"])[np.sign(q).astype(int)]
# if len(p) > 1:
# return cosphi, s, qmode, pmode
# else:
# return cosphi[0], s[0], qmode[0], pmode[0]
def cosphi_from_pq(p, q):
    # vectorized wrapper around _cosphi_from_pq(); scalar input is delegated directly
    if not hasattr(p, "__iter__"):
        return _cosphi_from_pq(p, q)
    assert len(p) == len(q)
    results = [_cosphi_from_pq(p_, q_) for p_, q_ in zip(p, q)]
    cosphi = np.array([res[0] for res in results])
    s = np.array([res[1] for res in results])
    qmode = np.array([res[2] for res in results])
    pmode = np.array([res[3] for res in results])
    return cosphi, s, qmode, pmode
def _cosphi_from_pq(p, q):
"""
Analog to pq_from_cosphi, but the other way around.
In consumer viewpoint (pandapower): "underexcited" (Q absorption, decreases voltage) and
"overexcited" (Q injection, increases voltage)
"""
if p == 0:
cosphi = np.nan
logger.warning("A cosphi from p=0 is undefined.")
else:
cosphi = np.cos(np.arctan(q / p))
s = (p ** 2 + q ** 2) ** 0.5
pmode = ["undef", "load", "gen"][int(np.sign(p))]
qmode = ["ohm", "underexcited", "overexcited"][int(np.sign(q))]
return cosphi, s, qmode, pmode
def dataframes_equal(x_df, y_df, tol=1.e-14, ignore_index_order=True):
    """
    Returns a boolean whether the two DataFrames are equal within a numerical tolerance.

    INPUT:
        **x_df**, **y_df** (DataFrame) - tables to compare

    OPTIONAL:
        **tol** (float, 1e-14) - absolute tolerance for comparing numerical columns

        **ignore_index_order** (bool, True) - if True, rows and columns are aligned by sorted
        index/columns before comparing

    OUTPUT:
        **equal** (bool)
    """
    if ignore_index_order:
        # Fix: sort copies instead of sorting inplace, which mutated the caller's
        # DataFrames (and thereby the user's net tables) as a side effect.
        x_df = x_df.sort_index(axis=1).sort_index(axis=0)
        y_df = y_df.sort_index(axis=1).sort_index(axis=0)
    # eval if two DataFrames are equal, with regard to a tolerance
    if x_df.shape != y_df.shape:
        return False
    if x_df.shape[0]:
        # we use numpy.allclose to grant a tolerance on numerical values
        numerical_equal = np.allclose(x_df.select_dtypes(include=[np.number]),
                                      y_df.select_dtypes(include=[np.number]),
                                      atol=tol, equal_nan=True)
    else:
        numerical_equal = True
    # ... use pandas .equals for the rest, which also evaluates NaNs to be equal
    rest_equal = x_df.select_dtypes(exclude=[np.number]).equals(
        y_df.select_dtypes(exclude=[np.number]))
    return numerical_equal & rest_equal
def compare_arrays(x, y):
    """
    Returns an array of bools whether array x is equal to array y. Strings are allowed in x
    or y. NaN values are assumed as equal.
    """
    if x.shape != y.shape:
        raise ValueError("x and y needs to have the same shape.")
    # (x != x) is the NaN test np.isnan(x) - but it also works for strings
    both_nan = (x != x) & (y != y)
    return np.equal(x, y) | both_nan
def ensure_iterability(var, len_=None):
    """
    Ensures iterability of a variable (and optional length).
    """
    # strings count as scalars here, although they are iterable
    is_iterable = hasattr(var, "__iter__") and not isinstance(var, str)
    if not is_iterable:
        return [var] * (len_ or 1)
    if isinstance(len_, int) and len(var) != len_:
        raise ValueError("Length of variable differs from %i." % len_)
    return var
# --- Information
def lf_info(net, numv=1, numi=2):  # pragma: no cover
    """
    Prints some basic information of the results in a net
    (max/min voltage, max trafo load, max line load).
    OPTIONAL:
    **numv** (integer, 1) - maximal number of printed maximal respectively minimal voltages
    **numi** (integer, 2) - maximal number of printed maximal loading at trafos or lines
    """
    max_v = net.res_bus.sort_values("vm_pu", ascending=False).iloc[:numv]
    min_v = net.res_bus.sort_values("vm_pu").iloc[:numv]
    logger.info("Max voltage in vm_pu:")
    for _, r in max_v.iterrows():
        logger.info(" %s at busidx %s (%s)", r.vm_pu, r.name, net.bus.name.at[r.name])
    logger.info("Min voltage in vm_pu:")
    for _, r in min_v.iterrows():
        logger.info(" %s at busidx %s (%s)", r.vm_pu, r.name, net.bus.name.at[r.name])
    logger.info("Max loading trafo in %:")
    if net.res_trafo is not None:
        max_trafo = net.res_trafo.sort_values("loading_percent", ascending=False).iloc[:numi]
        for _, r in max_trafo.iterrows():
            logger.info(" %s loading at trafo %s (%s)", r.loading_percent, r.name,
                        net.trafo.name.at[r.name])
    logger.info("Max loading line in %:")
    max_line = net.res_line.sort_values("loading_percent", ascending=False).iloc[:numi]
    for _, r in max_line.iterrows():
        logger.info(" %s loading at line %s (%s)", r.loading_percent, r.name,
                    net.line.name.at[r.name])
def opf_task(net, delta_pq=1e-3, keep=False, log=True):
    """
    Collects some basic information of the optimal powerflow task and prints them.
    """
    if keep:
        # work on a copy so the input net stays untouched
        net = copy.deepcopy(net)
    _check_necessary_opf_parameters(net, logger)
    opf_task_overview = {key: dict() for key in (
        "flexibilities", "network_constraints", "flexibilities_without_costs")}
    _determine_flexibilities_dict(net, opf_task_overview["flexibilities"], delta_pq)
    _determine_network_constraints_dict(net, opf_task_overview["network_constraints"])
    _determine_costs_dict(net, opf_task_overview)
    _check_overlapping_constraints(opf_task_overview)
    if log:
        _log_opf_task_overview(opf_task_overview)
    return opf_task_overview
def _determine_flexibilities_dict(net, data, delta_pq, **kwargs):
    """
    Determines which flexibilities exists in the net.
    INPUT:
        **net** - pandapower net
        **data** (dict) - to store flexibilities information
        **delta_pq** (float) - if (abs(max - min) <= delta_pq) the variable is not assumed as
        flexible, since the range is as small as delta_pq (should be small, too).
    OPTIONAL:
        **kwargs**** - for comparing constraint columns with numpy.isclose(): rtol and atol
    """
    # the first three element types are controllable by default, the last three are not
    flex_elements = ["ext_grid", "gen", "dcline", "sgen", "load", "storage"]
    flex_tuple = tuple(zip(flex_elements, [True] * 3 + [False] * 3))
    for elm, controllable_default in flex_tuple:
        for power_type in ["P", "Q"]:
            # keys look like "Pgen", "Qsgen", ...
            key = power_type + elm
            if elm != "dcline":
                constraints = {"P": ["min_p_mw", "max_p_mw"],
                               "Q": ["min_q_mvar", "max_q_mvar"]}[power_type]
            else:
                # dclines have asymmetric constraint columns (from-/to-side reactive limits)
                constraints = {"P": ["max_p_mw"],
                               "Q": ["min_q_from_mvar", "max_q_from_mvar",
                                     "min_q_to_mvar", "max_q_to_mvar"]}[power_type]
            # determine indices of controllable elements, continue if no controllable element exists
            if elm in ["ext_grid", "dcline"]:
                controllables = net[elm].index
            elif "controllable" in net[elm].columns:
                controllables = net[elm].index[net[elm].controllable]
            elif controllable_default and net[elm].shape[0]:
                controllables = net[elm].index
            else:
                continue
            if not len(controllables):
                continue
            # consider delta_pq: drop elements whose min/max range is effectively zero
            if len(constraints) >= 2 and pd.Series(constraints[:2]).isin(net[elm].columns).all():
                controllables = _find_idx_without_numerical_difference(
                    net[elm], constraints[0], constraints[1], delta_pq, idx=controllables,
                    equal_nan=False)
            # dcline Q: also check the to-side min/max pair
            if elm == "dcline" and power_type == "Q" and len(controllables) and \
                    pd.Series(constraints[2:4]).isin(net[elm].columns).all():
                controllables = _find_idx_without_numerical_difference(
                    net[elm], constraints[2], constraints[3], delta_pq, idx=controllables,
                    equal_nan=False)
            # add missing constraint columns
            for col_to_add in set(constraints) - set(net[elm].columns):
                net[elm][col_to_add] = np.nan
            data[key] = _cluster_same_floats(net[elm].loc[controllables], constraints, **kwargs)
            # shorten column names to "min"/"max" if that keeps them unambiguous
            shorted = [col[:3] if col[:3] in ["min", "max"] else col for col in data[key].columns]
            if len(shorted) == len(set(shorted)):
                data[key].columns = shorted
def _find_idx_without_numerical_difference(df, column1, column2, delta, idx=None, equal_nan=False):
"""
Returns indices where comlumn1 and column2 have a numerical difference bigger than delta.
INPUT:
**df** (DataFrame)
**column1** (str) - name of first column within df to compare.
The values of df[column1] must be numericals.
**column2** (str) - name of second column within df to compare.
The values of df[column2] must be numericals.
**delta** (numerical) - value which defines whether indices are returned or not
OPTIONAL:
**idx** (iterable, None) - list of indices which should be considered only
**equal_nan** (bool, False) - if False, indices are included where at least one value in
df[column1] and df[column2] is NaN
OUTPUT:
**index** (pandas.Index) - index within idx where df[column1] and df[column2] deviates by
at least delta or, if equal_na is True, one value is NaN
"""
idx = idx if idx is not None else df.index
idx_isnull = df.index[df[[column1, column2]].isnull().any(axis=1)]
idx_without_null = idx.difference(idx_isnull)
idx_no_delta = idx_without_null[(df.loc[idx_without_null, column1] - df.loc[
idx_without_null, column2]).abs().values <= delta]
if equal_nan:
return idx_without_null.difference(idx_no_delta)
else:
return idx.difference(idx_no_delta)
def _determine_network_constraints_dict(net, data, **kwargs):
    """
    Determines which network constraints exist in the net.
    INPUT:
        **net** - pandapower net
        **data** (dict) - to store constraints information
    OPTIONAL:
        **kwargs**** - for comparing constraint columns with numpy.isclose(): rtol and atol
    """
    const_tuple = [("VMbus", "bus", ["min_vm_pu", "max_vm_pu"]),
                   ("LOADINGline", "line", ["max_loading_percent"]),
                   ("LOADINGtrafo", "trafo", ["max_loading_percent"]),
                   ("LOADINGtrafo3w", "trafo3w", ["max_loading_percent"])]
    for key, elm, constraints in const_tuple:
        missing_columns = set(constraints) - set(net[elm].columns)
        # skip empty tables and tables without any constraint column
        if not net[elm].shape[0] or len(missing_columns) == len(constraints):
            continue
        # add missing constraint columns
        for col_to_add in missing_columns:
            net[elm][col_to_add] = np.nan
        data[key] = _cluster_same_floats(net[elm], constraints, **kwargs)
        # shorten column names to "min"/"max" if that keeps them unambiguous
        shorted = [col[:3] if col[:3] in ["min", "max"] else col for col in data[key].columns]
        if len(shorted) == len(set(shorted)):
            data[key].columns = shorted
def _determine_costs_dict(net, opf_task_overview):
"""
Determines which flexibilities do not have costs in the net. Each element is considered as one,
i.e. if ext_grid 0, for instance, is flexible in both, P and Q, and has one cost entry for P,
it is not considered as 'flexibilities_without_costs'.
INPUT:
**net** - panpdapower net
**opf_task_overview** (dict of dicts) - both, "flexibilities_without_costs" and
"flexibilities" must be in opf_task_overview.keys()
"""
cost_dfs = [df for df in ["poly_cost", "pwl_cost"] if net[df].shape[0]]
if not len(cost_dfs):
opf_task_overview["flexibilities_without_costs"] = "all"
return
flex_elements = ["ext_grid", "gen", "sgen", "load", "dcline", "storage"]
for flex_element in flex_elements:
# determine keys of opf_task_overview["flexibilities"] ending with flex_element
keys = [power_type + flex_element for power_type in ["P", "Q"] if (
power_type + flex_element) in opf_task_overview["flexibilities"].keys()]
# determine indices of all flexibles
idx_without_cost = set()
for key in keys:
idx_without_cost |= set(chain(*opf_task_overview["flexibilities"][key]["index"]))
# simple alternative without itertools.chain():
# idx_without_cost |= {idx for idxs in opf_task_overview["flexibilities"][key][
# "index"] for idx in idxs}
for cost_df in cost_dfs:
idx_with_cost = set(net[cost_df].element[net[cost_df].et == flex_element].astype(int))
if len(idx_with_cost - idx_without_cost):
logger.warning("These " + flex_element + "s have cost data but aren't flexible or" +
" have both, poly_cost and pwl_cost: " +
str(sorted(idx_with_cost - idx_without_cost)))
idx_without_cost -= idx_with_cost
if len(idx_without_cost):
opf_task_overview["flexibilities_without_costs"][flex_element] = list(idx_without_cost)
def _cluster_same_floats(df, subset=None, **kwargs):
    """
    Clusters indices with close values. The values of df[subset] must be numericals.
    INPUT:
        **df** (DataFrame)
    OPTIONAL:
        **subset** (iterable, None) - list of columns of df which should be considered to cluster
        **kwargs**** - for numpy.isclose(): rtol and atol
    OUTPUT:
        **cluster_df** (DataFrame) - table of clustered values and corresponding lists of indices
    """
    if df.index.duplicated().any():
        logger.error("There are duplicated indices in df. Clusters will be determined but remain " +
                     "ambiguous.")
    # default subset: all numerical columns
    subset = subset if subset is not None else df.select_dtypes(include=[
        np.number]).columns.tolist()
    # uniq marks the first occurrence of each value combination
    uniq = ~df.duplicated(subset=subset).values
    # prepare cluster_df: one row per unique value combination; "index" holds lists of indices
    cluster_df = pd.DataFrame(np.empty((sum(uniq), len(subset) + 1)), columns=["index"] + subset)
    cluster_df["index"] = cluster_df["index"].astype(object)
    cluster_df[subset] = df.loc[uniq, subset].values
    if sum(uniq) == df.shape[0]:  # fast return if df has no duplicates
        for i1, idx in enumerate(df.index):
            cluster_df.at[i1, "index"] = [idx]
    else:  # determine index clusters
        i2 = 0
        for i1, uni in enumerate(uniq):
            if uni:
                # gather all df rows that are close (NaN == NaN) to this unique row
                cluster_df.at[i2, "index"] = list(df.index[np.isclose(
                    df[subset].values.astype(float),
                    df[subset].iloc[[i1]].values.astype(float),
                    equal_nan=True, **kwargs).all(axis=1)])
                i2 += 1
    return cluster_df
def _check_overlapping_constraints(opf_task_overview):
"""
Logs variables where the minimum constraint is bigger than the maximum constraint.
"""
overlap = []
for dict_key in ["flexibilities", "network_constraints"]:
for key, df in opf_task_overview[dict_key].items():
min_col = [col for col in df.columns if "min" in col]
max_col = [col for col in df.columns if "max" in col]
n_col = min(len(min_col), len(max_col))
for i_col in range(n_col):
assert min_col[i_col].replace("min", "") == max_col[i_col].replace("max", "")
if (df[min_col[i_col]] > df[max_col[i_col]]).any():
overlap.append(key)
if len(overlap):
logger.error("At these variables, there is a minimum constraint exceeding the maximum " +
"constraint value: " + str(overlap))
def _log_opf_task_overview(opf_task_overview):
    """
    Logs OPF task information.
    """
    s = ""
    for dict_key, data in opf_task_overview.items():
        if isinstance(data, str):
            # string data only occurs as "all" for flexibilities_without_costs
            assert dict_key == "flexibilities_without_costs"
            s += "\n\n%s flexibilities without costs" % data
            continue
        else:
            assert isinstance(data, dict)
        heading_logged = False
        # keys sorted by their element name (key without uppercase prefix)
        keys, elms = _get_keys_and_elements_from_opf_task_dict(data)
        for key, elm in zip(keys, elms):
            assert elm in key
            df = data[key]
            if dict_key in ["flexibilities", "network_constraints"]:
                if not df.shape[0]:
                    continue
                if not heading_logged:
                    s += "\n\n%s:" % dict_key
                    heading_logged = True
                # --- logging information
                # total number of element indices in all clusters of this key
                len_idx = len(list(chain(*df["index"])))
                if df.shape[0] > 1:
                    # multiple clusters: only log the count
                    s += "\n %ix %s" % (len_idx, key)
                else:
                    # single cluster: log the common constraint values as well
                    if not len(set(df.columns).symmetric_difference({"index", "min", "max"})):
                        s += "\n %g <= %ix %s (all) <= %g" % (
                            df.loc[0, "min"], len_idx, key, df.loc[0, "max"])
                    else:
                        s += "\n %ix %s (all) with these constraints:" % (len_idx, key)
                        for col in set(df.columns) - {"index"}:
                            s += " %s=%g" % (col, df.loc[0, col])
            elif dict_key == "flexibilities_without_costs":
                if not heading_logged:
                    s += "\n\n%s:" % dict_key
                    heading_logged = True
                s += "\n%ix %s" % (len(df), key)
            else:
                raise NotImplementedError("Key %s is unknown to this code." % dict_key)
    logger.info(s + "\n")
def _get_keys_and_elements_from_opf_task_dict(dict_):
keys = list(dict_.keys())
elms = ["".join(c for c in key if not c.isupper()) for key in keys]
keys = list(np.array(keys)[np.argsort(elms)])
elms = sorted(elms)
return keys, elms
def switch_info(net, sidx):  # pragma: no cover
    """
    Prints what buses and elements are connected by a certain switch.
    """
    switch_type = net.switch.at[sidx, "et"]
    bidx = net.switch.at[sidx, "bus"]
    bus_name = net.bus.at[bidx, "name"]
    eidx = net.switch.at[sidx, "element"]
    # "et" decides the element table the switch connects to: bus, line or trafo
    if switch_type == "b":
        logger.info("Switch %u connects bus %u (%s) with bus %u (%s)"
                    % (sidx, bidx, bus_name, eidx, net.bus.at[eidx, "name"]))
    elif switch_type == "l":
        logger.info("Switch %u connects bus %u (%s) with line %u (%s)"
                    % (sidx, bidx, bus_name, eidx, net.line.at[eidx, "name"]))
    elif switch_type == "t":
        logger.info("Switch %u connects bus %u (%s) with trafo %u (%s)"
                    % (sidx, bidx, bus_name, eidx, net.trafo.at[eidx, "name"]))
def overloaded_lines(net, max_load=100):
    """
    Returns the results for all lines with loading_percent > max_load or None, if
    there are none.
    """
    if not net.converged:
        raise UserWarning("The last loadflow terminated erratically, results are invalid!")
    line_res = net["res_line"]
    return line_res.index[line_res["loading_percent"] > max_load]
def violated_buses(net, min_vm_pu, max_vm_pu):
    """
    Returns all bus indices where vm_pu is not within min_vm_pu and max_vm_pu or returns None, if
    there are none of those buses.
    """
    if not net.converged:
        raise UserWarning("The last loadflow terminated erratically, results are invalid!")
    vm_pu = net["res_bus"]["vm_pu"]
    out_of_range = (vm_pu < min_vm_pu) | (vm_pu > max_vm_pu)
    return net["bus"].index[out_of_range]
def nets_equal(net1, net2, check_only_results=False, exclude_elms=None, **kwargs):
    """
    Compares the DataFrames of two networks. The networks are considered equal
    if they share the same keys and values, except of the
    'et' (elapsed time) entry which differs depending on
    runtime conditions and entries starting with '_'.
    """
    if not (isinstance(net1, pandapowerNet) and isinstance(net2, pandapowerNet)):
        return False
    excluded = [] if exclude_elms is None else list(exclude_elms)
    excluded += ["res_" + ex for ex in excluded]

    def relevant_keys(net):
        # keys that are neither internal ('_'-prefixed) nor explicitly excluded
        return [key for key in net.keys() if not (key.startswith("_") or key in excluded)]

    net1_keys = relevant_keys(net1)
    net2_keys = relevant_keys(net2)
    key_difference = set(net1_keys) ^ set(net2_keys)
    if len(key_difference) > 0:
        logger.info("Networks entries mismatch at: %s" % key_difference)
        if not check_only_results:
            return False
    eq = True
    not_equal = []
    # iter through the shared keys, checking for equality for each table
    for df_name in set(net1_keys) & set(net2_keys):
        # skip 'et' (elapsed time) and entries starting with '_' (internal vars)
        if df_name == 'et' or df_name.startswith("_"):
            continue
        if check_only_results and not df_name.startswith("res_"):
            continue  # skip anything that is not a result table
        if isinstance(net1[df_name], pd.DataFrame) and isinstance(net2[df_name], pd.DataFrame):
            frames_equal = dataframes_equal(net1[df_name], net2[df_name], **kwargs)
            eq &= frames_equal
            if not frames_equal:
                not_equal.append(df_name)
    if len(not_equal) > 0:
        logger.error("Networks do not match in DataFrame(s): %s" % (', '.join(not_equal)))
    return eq
def clear_result_tables(net):
    """
    Clears all 'res_...' DataFrames in net.
    """
    result_keys = [key for key in net.keys()
                   if key[:3] == "res" and isinstance(net[key], pd.DataFrame)]
    for key in result_keys:
        if net[key].shape[0]:
            # drop all rows in place, keeping columns and dtypes
            net[key].drop(net[key].index, inplace=True)
# --- Simulation setup and preparations
def add_column_from_node_to_elements(net, column, replace, elements=None, branch_bus=None,
                                     verbose=True):
    """
    Adds column data to elements, inferring them from the column data of buses they are
    connected to.
    INPUT:
        **net** (pandapowerNet) - the pandapower net that will be changed
        **column** (string) - name of column that should be copied from the bus table to the
        element table
        **replace** (boolean) - if True, an existing column in the element table will be
        overwritten
        **elements** (list) - list of elements that should get the column values from the bus
        table
        **branch_bus** (list) - defines which bus should be considered for branch elements.
        'branch_bus' must have the length of 2. One entry must be 'from_bus' or 'to_bus', the
        other 'hv_bus' or 'lv_bus'
    EXAMPLE:
        compare to add_zones_to_elements()
    """
    branch_bus = ["from_bus", "hv_bus"] if branch_bus is None else branch_bus
    if column not in net.bus.columns:
        raise ValueError("%s is not in net.bus.columns" % column)
    elements = elements if elements is not None else pp_elements(bus=False, other_elements=False)
    # without replace, only fill element tables where the column is missing or all-NaN
    elements_to_replace = elements if replace else [
        el for el in elements if column not in net[el].columns or net[el][column].isnull().all()]
    # bus elements
    for element, bus_type in element_bus_tuples(bus_elements=True, branch_elements=False):
        if element in elements_to_replace:
            net[element][column] = net["bus"][column].loc[net[element][bus_type]].values
    # branch elements
    to_validate = {}
    for element, bus_type in element_bus_tuples(bus_elements=False, branch_elements=True):
        if element in elements_to_replace:
            if bus_type in (branch_bus + ["bus"]):  # copy data, append branch_bus for switch.bus
                net[element][column] = net["bus"][column].loc[net[element][bus_type]].values
            else:  # save data for validation
                to_validate[element] = net["bus"][column].loc[net[element][bus_type]].values
    # validate branch elements, but do not validate double and switches at all
    already_validated = ["switch"]
    for element, bus_type in element_bus_tuples(bus_elements=False, branch_elements=True):
        if (element in elements_to_replace) & (element not in already_validated):
            already_validated += [element]
            # count rows where the copied side's value differs from the other side's value
            crossing = sum(~compare_arrays(net[element][column].values, to_validate[element]))
            if crossing > 0:
                if verbose:
                    logger.warning("There have been %i %ss with different " % (crossing, element) +
                                   "%s data at from-/hv- and to-/lv-bus" % column)
                else:
                    logger.debug("There have been %i %ss with different " % (crossing, element) +
                                 "%s data at from-/hv- and to-/lv-bus" % column)
def add_column_from_element_to_elements(net, column, replace, elements=None,
                                        continue_on_missing_column=True):
    """
    Adds column data to elements, inferring them from the column data of the elements linked by
    the columns "element" and "element_type" or "et".

    INPUT:
        **net** (pandapowerNet) - the pandapower net that will be changed

        **column** (string) - name of column that should be copied from the tables of the
        linked elements.

        **replace** (boolean) - if True, an existing column will be overwritten

        **elements** (list) - list of elements that should get the column values from the linked
        element tables. If None, all elements with the columns "element" and "element_type" or
        "et" are considered (these are currently "measurement" and "switch").

        **continue_on_missing_column** (Boolean, True) - If False, an error will be raised in
        case an element table has no column 'column' although this element is referred to in
        'elements'. E.g. 'measurement' is in 'elements' and net.measurement contains a trafo
        measurement, but net.trafo has no column 'name' although column == 'name' - in this
        case 'continue_on_missing_column' acts.

    EXAMPLE:
        import pandapower as pp
        import pandapower.networks as pn
        net = pn.create_cigre_network_mv()
        pp.create_measurement(net, "i", "trafo", 5, 3, 0, side="hv")
        pp.create_measurement(net, "i", "line", 5, 3, 0, side="to")
        pp.create_measurement(net, "p", "bus", 5, 3, 2)
        print(net.measurement.name.values, net.switch.name.values)
        pp.add_column_from_element_to_elements(net, "name", True)
        print(net.measurement.name.values, net.switch.name.values)
    """
    elements = elements if elements is not None else pp_elements()
    # only tables that actually link other elements can be filled
    elements_with_el_and_et_column = [el for el in elements if "element" in net[el].columns and (
        "element_type" in net[el].columns or "et" in net[el].columns)]
    elements_to_replace = elements_with_el_and_et_column if replace else [
        el for el in elements_with_el_and_et_column if column not in net[el].columns or
        net[el][column].isnull().all()]
    # mapping of short element type codes (as used e.g. in switch.et) to full table names
    short_to_full = {"t": "trafo", "t3": "trafo3w", "l": "line", "s": "switch", "b": "bus"}
    for el in elements_to_replace:
        et_col = "element_type" if "element_type" in net[el].columns else "et"
        # BUGFIX: work on a replaced copy instead of assigning into the original Series.
        # The previous in-place ``element_type.loc[element_type == short] = complete``
        # wrote through to net[el][et_col], silently rewriting the short codes
        # (e.g. switch.et "l" -> "line") and thereby corrupting the net.
        element_type = net[el][et_col].replace(short_to_full)
        element_types_without_column = [et for et in set(element_type) if column not in
                                        net[et].columns]
        if len(element_types_without_column):
            message = "%s is not in net[et].columns with et in " % column + str(
                element_types_without_column)
            if not continue_on_missing_column:
                raise KeyError(message)
            else:
                logger.debug(message)
        for et in list(set(element_type) - set(element_types_without_column)):
            idx_et = element_type.index[element_type == et]
            net[el].loc[idx_et, column] = net[et][column].loc[net[el].element[idx_et]].values
def add_zones_to_elements(net, replace=True, elements=None, **kwargs):
    """
    Adds zones to elements, inferring them from the zones of buses they are connected to.
    Convenience wrapper around add_column_from_node_to_elements for the "zone" column.
    """
    if elements is None:
        elements = ["line", "trafo", "ext_grid", "switch"]
    add_column_from_node_to_elements(net, "zone", replace=replace, elements=elements, **kwargs)
def reindex_buses(net, bus_lookup):
    """
    Changes the index of net.bus and considers the new bus indices in all other pandapower element
    tables.

    INPUT:
        **net** - pandapower network

        **bus_lookup** (dict) - the keys are the old bus indices, the values the new bus indices
    """
    unknown_keys = set(bus_lookup.keys()) - set(net.bus.index)
    if len(unknown_keys):
        logger.error("These bus indices are unknown to net. Thus, they cannot be reindexed: " +
                     str(unknown_keys))
    # complete the lookup with identity mappings for buses that were not mentioned
    for b in sorted(set(net.bus.index) - set(bus_lookup.keys())):
        bus_lookup[b] = b
    net.bus.index = get_indices(net.bus.index, bus_lookup)
    net.res_bus.index = get_indices(net.res_bus.index, bus_lookup)
    # remap every bus-referencing column of every element table
    for table, bus_col in element_bus_tuples():
        net[table][bus_col] = get_indices(net[table][bus_col], bus_lookup)
    net["bus_geodata"].set_index(get_indices(net["bus_geodata"].index, bus_lookup), inplace=True)
    # bus-bus switches reference a bus in their "element" column
    bb_idx = net.switch[net.switch.et == "b"].index
    net.switch.loc[bb_idx, "element"] = get_indices(net.switch.loc[bb_idx, "element"], bus_lookup)
    # bus measurements reference a bus in their "element" column
    is_bus_meas = net.measurement.element_type == "bus"
    net.measurement.loc[is_bus_meas, "element"] = get_indices(
        net.measurement.loc[is_bus_meas, "element"], bus_lookup)
    # branch measurements store a bus index in "side" when side is numeric
    numeric_side = pd.to_numeric(net.measurement.side, errors="coerce").notnull()
    net.measurement.loc[numeric_side, "side"] = get_indices(
        net.measurement.loc[numeric_side, "side"], bus_lookup)
    return bus_lookup
def create_continuous_bus_index(net, start=0, store_old_index=False):
    """
    Creates a continuous bus index starting at 'start' and replaces all
    references of old indices by the new ones.

    INPUT:
        **net** - pandapower network

    OPTIONAL:
        **start** - index begins with "start"

        **store_old_index** - if True, stores the old index in net.bus["old_index"]

    OUTPUT:
        **bus_lookup** - mapping of old to new index
    """
    net.bus.sort_index(inplace=True)
    if store_old_index:
        net.bus["old_index"] = net.bus.index.values
    # map the sorted old indices onto a gap-free range beginning at 'start'
    new_ids = np.arange(start, start + len(net.bus))
    bus_lookup = dict(zip(net["bus"].index.values, new_ids))
    reindex_buses(net, bus_lookup)
    return bus_lookup
def reindex_elements(net, element, new_indices, old_indices=None):
    """
    Changes the index of net[element] and adapts all links to it (measurements, switches,
    line geodata, cost tables).

    INPUT:
        **net** - pandapower network

        **element** (str) - name of the element table

        **new_indices** (iterable) - list of new indices

    OPTIONAL:
        **old_indices** (iterable) - list of old/previous indices which will be replaced.
        If None, all indices are considered.
    """
    old_indices = old_indices if old_indices is not None else net[element].index
    if not len(new_indices) or not net[element].shape[0]:
        return
    assert len(new_indices) == len(old_indices)
    lookup = dict(zip(old_indices, new_indices))
    if element == "bus":
        # buses need special handling of all bus-referencing columns
        reindex_buses(net, lookup)
        return
    # --- reindex the element table itself
    net[element]["index"] = net[element].index
    net[element].loc[old_indices, "index"] = get_indices(old_indices, lookup)
    net[element].set_index("index", inplace=True)
    # --- adapt measurement link
    if element in ["line", "trafo", "trafo3w"]:
        affected = net.measurement[(net.measurement.element_type == element) &
                                   (net.measurement.element.isin(old_indices))]
        if len(affected):
            net.measurement.loc[affected.index, "element"] = get_indices(affected.element, lookup)
    # --- adapt switch link (switch.et stores the first letter: "l"/"t")
    if element in ["line", "trafo"]:
        affected = net.switch[(net.switch.et == element[0]) &
                              (net.switch.element.isin(old_indices))]
        if len(affected):
            net.switch.loc[affected.index, "element"] = get_indices(affected.element, lookup)
    # --- adapt line_geodata index
    # BUGFIX: restrict to indices that actually exist in line_geodata - lines without
    # geodata made the former .loc[old_indices, ...] assignment fail with a KeyError
    if element == "line" and "line_geodata" in net and net["line_geodata"].shape[0]:
        geo_idx = net["line_geodata"].index.intersection(old_indices)
        net["line_geodata"]["index"] = net["line_geodata"].index
        net["line_geodata"].loc[geo_idx, "index"] = get_indices(geo_idx, lookup)
        net["line_geodata"].set_index("index", inplace=True)
    # --- adapt index in cost dataframes
    # BUGFIX: single .loc assignment instead of chained assignment
    # (net[cost_df].element.loc[...] = ...), which may silently not write back
    for cost_df in ["pwl_cost", "poly_cost"]:
        element_in_cost_df = net[cost_df].et == element
        if element_in_cost_df.any():
            net[cost_df].loc[element_in_cost_df, "element"] = get_indices(
                net[cost_df].element[element_in_cost_df], lookup)
def create_continuous_elements_index(net, start=0, add_df_to_reindex=set()):
"""
Creating a continuous index for all the elements, starting at zero and replaces all references
of old indices by the new ones.
INPUT:
**net** - pandapower network with unodered indices
OPTIONAL:
**start** - index begins with "start"
**add_df_to_reindex** - by default all useful pandapower elements for power flow will be
selected. Customized DataFrames can also be considered here.