-
Notifications
You must be signed in to change notification settings - Fork 0
/
build.py
132 lines (97 loc) · 3.62 KB
/
build.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
# -*- coding: utf-8 -*-
""" build.py
Does download and unzip the pre-packaged Status-quo datapackage.
Based on this reference datapackage the carpeDIEM scenario datapackages
are built.
"""
import shutil
import zipfile
import os
import operator as op
import pandas as pd
from oemof.tabular.datapackage import building, processing
from oemof.tabular.datapackage.building import \
write_elements, write_sequences
from xlrd import XLRDError
from tools import update_field
# Output-directory handling: every run starts from an empty
# './datapackages' tree; 'SQ' will later hold the unzipped reference package.
dpkg = './datapackages'
base = os.path.join(dpkg, 'SQ')

# clear out the artefacts of a previous build, then recreate the root
if os.path.exists(dpkg):
    shutil.rmtree(dpkg)
os.mkdir(dpkg)
# Download the pre-packaged Status-quo reference datapackage (cached by
# `building.download_data` inside `dpkg`) and extract it into `base`.
# Fix: the original never closed the ZipFile handle; use a context
# manager so the archive file is released after extraction.
archive = building.download_data(
    'https://github.com/ZNES-datapackages/Status-quo-2015/releases/'
    'download/v0.1-beta/Status-quo-2015.zip',
    directory=dpkg)
with zipfile.ZipFile(archive, 'r') as zf:
    zf.extractall(base)
# Load the archived scenario description workbook.
xls = './archive/data.xls'

# one row per scenario datapackage to build
datapackages = pd.read_excel(
    xls, sheet_name='scenarios', index_col='identifier')

# storage units to inject, indexed by scenario identifier
storages = pd.read_excel(xls, sheet_name='storages', index_col='name')

# single-value tweaks applied on top of each copied reference package
adaptations = pd.read_excel(
    xls, sheet_name='adaptations', index_col='scenario')
# Based on data.xls the reference datapackage is copied for each scenario
# and updated: single-value adaptations, optional battery storages,
# optional residual-load timeseries, then refreshed metadata.
for pk in datapackages.index:
    # declare paths of the scenario copy
    path = os.path.join(dpkg, pk)
    epath = os.path.join(path, 'data', 'elements')
    spath = os.path.join(path, 'data', 'sequences')

    # copy the reference datapackage as the scenario's starting point
    processing.copy_datapackage(os.path.join(base, 'datapackage.json'), path)

    # update single value entries in the copied datapackage
    if pk in adaptations.index:
        # BUGFIX: the original named this dict `op`, shadowing the
        # `operator` module alias -- every subsequent scenario with
        # adaptations then crashed on `op.sub`/`op.add`.
        # ('substract' is the spelling used in the xls 'operation' column.)
        operations = {'substract': op.sub, 'add': op.add}
        for _, r in adaptations.loc[[pk], :].iterrows():
            # bind the row's operation/value as defaults so the callable
            # does not late-bind the loop variable
            func = lambda x, f=operations[r.operation], v=r.value: f(x, v)
            update_field(r.resource, r.label, r.param, func, directory=epath)

    # try to add storages; scenarios without a matching row skip this
    try:
        building.write_elements(
            'battery.csv',
            storages.loc[[pk], :].rename(index={pk: pk + '-battery'}),
            directory=epath
        )
        print('Added storage in datapackage %s.' % pk)
    except KeyError:
        # no storage defined for this scenario -- intentional no-op
        pass

    # try to fetch timeseries data; missing sheet raises XLRDError
    try:
        # TODO: parse dates from xls file
        timesteps = pd.date_range(
            '2015-01-01 00:00:00', '2015-12-31 23:00:00', freq='H')
        ts = pd.read_excel(
            xls, sheet_name='timeseries-' + pk
        ).set_index(timesteps)['net_balance']
        df = pd.read_excel(
            xls, sheet_name='r_timeseries_components').set_index('name')

        # positive part of the net balance -> volatile source profile
        element = df.loc[['TS-pos-residual'], :]
        write_elements('volatile.csv', element, directory=epath)
        write_sequences(
            'volatile_profile.csv',
            ts.clip(lower=0).rename(element.profile[0]),
            directory=spath)

        # negative part (as magnitude) -> load profile
        element = df.loc[['TS-neg-residual'], :]
        write_elements('load.csv', element, directory=epath)
        write_sequences(
            'load_profile.csv',
            ts.clip(upper=0).abs().rename(element.profile[0]),
            directory=spath)
    except XLRDError:
        print('Warning: No timeseries data found for package %s.' % pk)

    # update metadata of the finished scenario datapackage
    building.infer_metadata(
        package_name=pk,
        foreign_keys={
            'bus': ['volatile', 'dispatchable', 'battery',
                    'load', 'excess', 'shortage', 'ror', 'phs', 'reservoir'],
            'profile': ['load', 'volatile', 'ror', 'reservoir'],
            'from_to_bus': ['grid'],
            'chp': []
        },
        path=path
    )