CJahns committed
Commit 41f2cd0 · 1 Parent(s): 6313b4b

Upload 3 files

Files changed (4)
  1. .gitattributes +1 -0
  2. Input_Jahr_2021.xlsx +3 -0
  3. requirements.txt +87 -0
  4. streamlit_app.py +220 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Input_Jahr_2021.xlsx filter=lfs diff=lfs merge=lfs -text
Input_Jahr_2021.xlsx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a163d9c060e9b8c54d09281246eec1cabbce26478374efaec6429602f1f1e0a4
+ size 4545951
requirements.txt ADDED
@@ -0,0 +1,87 @@
+ altair==5.1.1
+ asttokens==2.4.0
+ attrs==23.1.0
+ backcall==0.2.0
+ blinker==1.6.2
+ Bottleneck==1.3.7
+ cachetools==5.3.1
+ certifi==2023.7.22
+ charset-normalizer==3.2.0
+ click==8.1.7
+ cloudpickle==2.2.1
+ colorama==0.4.6
+ comm==0.1.4
+ dask==2023.9.2
+ debugpy==1.8.0
+ decorator==5.1.1
+ deprecation==2.1.0
+ et-xmlfile==1.1.0
+ executing==1.2.0
+ fsspec==2023.9.2
+ gitdb==4.0.10
+ GitPython==3.1.37
+ highspy==1.5.3
+ idna==3.4
+ importlib-metadata==6.8.0
+ ipykernel==6.25.2
+ ipython==8.15.0
+ jedi==0.19.0
+ Jinja2==3.1.2
+ jsonschema==4.19.1
+ jsonschema-specifications==2023.7.1
+ jupyter_client==8.3.1
+ jupyter_core==5.3.1
+ linopy==0.2.6
+ locket==1.0.0
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.3
+ matplotlib-inline==0.1.6
+ mdurl==0.1.2
+ nest-asyncio==1.5.8
+ numexpr==2.8.6
+ numpy==1.26.0
+ openpyxl==3.1.2
+ packaging==23.1
+ pandas==2.1.1
+ parso==0.8.3
+ partd==1.4.1
+ pickleshare==0.7.5
+ Pillow==9.5.0
+ platformdirs==3.10.0
+ plotly==5.17.0
+ prompt-toolkit==3.0.39
+ protobuf==4.24.3
+ psutil==5.9.5
+ pure-eval==0.2.2
+ pyarrow==13.0.0
+ pydeck==0.8.1b0
+ Pygments==2.16.1
+ python-dateutil==2.8.2
+ pytz==2023.3.post1
+ pywin32==306
+ PyYAML==6.0.1
+ pyzmq==25.1.1
+ referencing==0.30.2
+ requests==2.31.0
+ rich==13.5.3
+ rpds-py==0.10.3
+ scipy==1.11.2
+ six==1.16.0
+ smmap==5.0.1
+ stack-data==0.6.2
+ streamlit==1.27.0
+ tenacity==8.2.3
+ toml==0.10.2
+ toolz==0.12.0
+ tornado==6.3.3
+ tqdm==4.66.1
+ traitlets==5.10.0
+ typing_extensions==4.8.0
+ tzdata==2023.3
+ tzlocal==5.0.1
+ urllib3==2.0.5
+ validators==0.22.0
+ watchdog==3.0.0
+ wcwidth==0.2.6
+ xarray==2023.8.0
+ zipp==3.17.0
streamlit_app.py ADDED
@@ -0,0 +1,220 @@
+ # %%
+ # -*- coding: utf-8 -*-
+ """
+ Spyder Editor
+
+ This is a temporary script file.
+ """
+
+ from numpy import arange
+ import xarray as xr
+ import highspy
+ import linopy
+ import openpyxl
+ from linopy import Model, EQUAL
+ import pandas as pd
+ import plotly.express as px
+ ##import gurobipy
+
+ import streamlit as st
+
+ # %%
+
+ url_excel = r'Input_Jahr_2021.xlsx'
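+ # All sets and parameters below are read from this Excel workbook.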
+
+ # %% [markdown]
+ # Read Sets
+
+ # %%
+ # Define all sets for the model
+
+ df_excel = pd.read_excel(url_excel, header=None)
+ t = pd.Index(df_excel.iloc[:,0], name = 't')[0:400]
+
+ df_excel = pd.read_excel(url_excel, header=None, sheet_name = 'Regionen')
+ r = pd.Index(df_excel.iloc[:,0], name='r')
+ rr = r.copy()
+ rr.names = ['rr']
+
+ df_excel = pd.read_excel(url_excel, sheet_name = 'Kraftwerke')
+ i = pd.Index(df_excel.iloc[:,0], name = 'i')
+ iConv = pd.Index(df_excel.iloc[1:6,2], name = 'iConv')
+ iRes = pd.Index(df_excel.iloc[0:4,4], name = 'iRes')
+ iStor = pd.Index(df_excel.iloc[0:1,6], name = 'iStor')
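+
+ # Sets: t = the first 400 time steps, r / rr = regions, i = all technologies,
+ # iConv / iRes / iStor = conventional, renewable and storage subsets of i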
+
+
+ # %%
+
+ ### Parameters
+ dt = 1   # length of one time step
+
+ df_excel = pd.read_excel(url_excel, sheet_name = 'Nachfrage')
+ df_melt = pd.melt(df_excel, id_vars='Zeit')
+
+ df_melt = df_melt.rename({'Zeit':'t', 'variable':'r'}, axis = 1)
+ df_melt = df_melt.set_index(['t','r'])
+
+ xr_D_t_r = df_melt.iloc[:,0].to_xarray()
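+ # xr_D_t_r: electricity demand per time step and region, read from sheet 'Nachfrage'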
+
+
+ # %%
+
+ ## variable costs
+ df_excel = pd.read_excel(url_excel, sheet_name = 'Kosten')
+ df_excel = df_excel.rename(columns = {'Konventionelle':'i', 'Unnamed: 1':'Costs'})
+ df_excel = i.to_frame().reset_index(drop=True).merge(df_excel, how = 'left')
+ df_excel = df_excel.fillna(0)
+ df_excel = df_excel.set_index('i')
+ c_var_i = df_excel.iloc[:,0].to_xarray()
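+ # c_var_i: variable generation costs per technology; technologies without an entry in 'Kosten' get 0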
+
+
+ ## fixed generation of RES
+ df_excel = pd.read_excel(url_excel, sheet_name = 'EE', header=[0,1])
+
+ # The table has two header rows, hence the reshaping with stack
+ df_test = df_excel.set_index([('Zeit', 'Unnamed: 0_level_1')]).stack().stack()
+ df_test.index = df_test.index.set_names(['t','i','r'])
+ s_t_r_iRes = df_test.to_xarray()
+
+ # scale the run-of-river (RoR) profile to 80 %; assign via .loc so s_t_r_iRes itself is updated
+ s_t_r_iRes.loc[dict(i = 'RoR')] = 0.8 * s_t_r_iRes.sel(i = 'RoR').values
+
+
+ # Correction: in BE and NL the complete inflow is used by RoR
+ f_t_r_i = 0.2/0.8 * s_t_r_iRes.sel(i = ['RoR'])
+ f_t_r_i['i'].values[0] = 'HydroReservior'
+
+ f_t_r_i.loc[dict(r='BE')] = f_t_r_i.loc[dict(r='BE')]*0
+ f_t_r_i.loc[dict(r='NL')] = f_t_r_i.loc[dict(r='NL')]*0
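+ # f_t_r_i: relative inflow profile for the hydro reservoirs, derived from the RoR profile
+ # and used as the inflow term in the storage balance further below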
+
+
+ ## y_max
+ df_excel = pd.read_excel(url_excel, sheet_name = 'InstallierteLeistungen', nrows = 12, usecols = 'A:G')
+ df_melt2 = pd.melt(df_excel, id_vars='2021 in MW')
+
+ df_melt2 = df_melt2.rename({'2021 in MW' : 'i', 'variable' : 'r'}, axis = 1)
+ df_melt2 = df_melt2.set_index(['i','r'])
+
+ y_max_i_r = df_melt2.iloc[:,0].to_xarray()
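+ # y_max_i_r: installed capacity per technology and region (2021, in MW, sheet 'InstallierteLeistungen')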
+
+
+ ## l_max
+ df_excel = pd.read_excel(url_excel, sheet_name = 'InstallierteLeistungen', skiprows = range(15), nrows = 1, usecols = 'A:G')
+ df_excel = df_excel.rename(columns = {'ReservoirSize (geschätzt)' : 'i'})
+ df_melt3 = pd.melt(df_excel, id_vars = 'i')
+ df_melt3 = df_melt3.rename({'variable' : 'r'}, axis = 1)
+ df_melt3 = df_melt3.set_index(['r','i'])
+ l_max_iStor_r = df_melt3.iloc[:, 0].to_xarray()
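+ # l_max_iStor_r: maximum reservoir filling level (estimated reservoir size) per region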
+
+ ## inflow of water reservoir (currently unused; the storage balance below uses f_t_r_i instead)
+ f_t_r_HydroReservoir = 0.2 * s_t_r_iRes.sel(i = ['RoR']) * y_max_i_r.sel(i = ['RoR'])
+
+
+ ## transmission capacity between region r and region rr
+ df_excel = pd.read_excel(url_excel, sheet_name = 'NTC', skiprows = range(1), nrows = 7, usecols = 'B:H')
+ df_melt4 = pd.melt(df_excel, id_vars = 'Unnamed: 1')
+ df_melt4 = df_melt4.rename({'Unnamed: 1' : 'rr' , 'variable' : 'r'}, axis = 1)
+ df_melt4 = df_melt4.set_index(['r','rr'])
+ x_cap_r_rr = df_melt4.iloc[:, 0].to_xarray()
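+ # x_cap_r_rr: NTC transmission capacity from region r to region rr (sheet 'NTC')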
+
+ #df_excel.loc[iConv]
+
+
+ # %%
+
+ ### Variables
+ m = Model()
+ k = m.add_variables(coords = [t,r], name = 'k', lower = 0)   # curtailment
+ x = m.add_variables(coords = [t,r,rr], name = 'x', lower = 0)
+ y = m.add_variables(coords = [t,r,i], name = 'y', lower = 0)
+ l = m.add_variables(coords = [t,r,i], name = 'l', lower = 0)
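+ # Decision variables (all nonnegative): k = curtailment, x = power exchange from r to rr,
+ # y = generation per technology, l = storage filling level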
+
+ C_op = m.add_variables(name = 'C_op')   # placeholder; the name is rebound to the cost expression below
+ eta_x = 0.001   # loss factor applied to exchanged power
+ ############## Start with the model
+
+ ## objective function: total variable generation costs
+ C_op = (y * c_var_i * dt).sum()
+
+ m.add_objective(C_op)
+ ## load serving
+ load_t_r = m.add_constraints((y * dt).sum(dims = 'i') - (k * dt) + \
+     (-x.sum('rr') + x.sum('r').rename({'rr':'r'})) * (1 - eta_x) == xr_D_t_r, name = 'load')
+ ## maximum capacity limit
+ maxcap_i_r_t = m.add_constraints((y <= y_max_i_r), name = 'max_cap')
+ ## infeed of renewables
+ infeed_iRes_r_t = m.add_constraints((y.sel(i = iRes) <= s_t_r_iRes.sel(i = iRes) * y_max_i_r.sel(i = iRes)), name = 'infeed')
+ ## capacity restriction storage power plant
+ maxcapsto_r_iStor_t = m.add_constraints(l.sel(i = iStor) <= l_max_iStor_r, name = 'max. filling str.')
+ ## transmission capacity
+ maxcaptrans_r_rr_t = m.add_constraints(x <= x_cap_r_rr, name = 'max. transm. cap.')
+ ## storage power plant balance
+ filling_iStor_r_t = m.add_constraints(l.sel(i = iStor).roll(t = -1) - l.sel(i = iStor) + \
+     y.sel(i = iStor) * dt == f_t_r_i.sel(i = 'HydroReservior') * dt, name = 'filling level')
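+ # roll(t = -1) links each period to the next and wraps the last period back to the first,
+ # so the reservoir filling level is balanced cyclically over the model horizon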
+
+
+
+ # %%
+
+ m.solve(solver_name = 'highs')
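+ # solve the linear program with the open-source HiGHS solver (interfaced through highspy)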
187
+
188
+
189
+
190
+
191
+
192
+ # %% [markdown]
193
+ # Results
194
+
195
+ # %%
196
+
197
+ # Read Objective from solution
198
+ m.objective_value
199
+
200
+
201
+ #pd.options.plotting.backend = "plotly"
202
+ # Read dual values and plot
203
+ df = load_t_r.dual.to_dataframe().reset_index()
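+ # the duals of the 'load' constraint can be read as the marginal cost of serving demand
+ # (i.e. the electricity price) per region and time step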
+ #df['t'] = pd.to_datetime(df['t'])
+
+
+ # %%
+ fig = px.line(df, x='t', y='dual', color='r')
+
+ tab1, tab2 = st.tabs(["Streamlit theme (default)", "Plotly native theme"])
+ with tab1:
+     st.plotly_chart(fig, theme="streamlit", use_container_width=True)
+ with tab2:
+     st.plotly_chart(fig, theme=None, use_container_width=True)
+ # %%
+
+ # Read values: dispatch per technology and time step for region DE
+ Productionlevels = m.solution['y'].sel(r = 'DE').to_dataframe()
+ #Productionlevels