Skip to content

Commit

Permalink
WIP
Browse files Browse the repository at this point in the history
Started to identify where the code needs to be changed and
wrote TODOs
  • Loading branch information
Bachibouzouk committed Nov 10, 2023
1 parent 4a4ecee commit 483efd4
Show file tree
Hide file tree
Showing 3 changed files with 140 additions and 30 deletions.
7 changes: 6 additions & 1 deletion ramp/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
dest="fname_path",
nargs="+",
type=str,
help="path to the (xlsx) input files (including filename). If not provided, then legacy .py input files will be fetched",
help="path to the (xlsx) input files (including filename). Must be provided",
)
parser.add_argument(
"-o",
Expand Down Expand Up @@ -135,6 +135,7 @@ def main():
if ofnames is None:
ofnames = [None]

# TODO this should simply disappear
if fnames is None:
print(
"Please provide path to input file with option -i, \n\nDefault to old version of RAMP input files\n"
Expand All @@ -161,6 +162,10 @@ def main():
)

for i, j in enumerate(input_files_to_run):
# TODO here simply run accepted -i input file
#import os
#os.system('python filename.py')

run_usecase(
j=j,
ofname=ofnames[i],
Expand Down
149 changes: 120 additions & 29 deletions ramp/core/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def single_appliance_daily_load_profile(args):


class UseCase:
def __init__(self, name: str = "", users: Union[List, None] = None):
def __init__(self, name: str = "", users: Union[List, None] = None, date_start: str = None, date_end:str= None, parallel_processing:bool=False):
"""Creates a UseCase instance for gathering a list of User instances which own Appliance instances
Parameters
Expand All @@ -50,8 +50,18 @@ def __init__(self, name: str = "", users: Union[List, None] = None):
name of the usecase instance, by default ""
users : Union[Iterable,None], optional
a list of users to be added to the usecase instance, by default None
date_start: str, optional
date_end: str, optional
"""
self.name = name
self.date_start = date_start
self.date_end = date_end
self.parallel_processing=parallel_processing
self.peak_time_range = None
self._num_days = None
self.daily_profiles = None

self.appliances = []
if users is None:
users = []
Expand Down Expand Up @@ -84,38 +94,104 @@ def collect_appliances_from_users(self):
appliances = appliances + user.App_list
self.appliances = appliances

def generate_daily_load_profiles(self, num_profiles, peak_time_range, day_types):
profiles = []
for prof_i in range(num_profiles):
# initialise an empty daily profile (or profile load)
# that will be filled with the sum of the daily profiles of each User instance
usecase_load = np.zeros(1440)
# for each User instance generate a load profile, iterating through all user of this instance and
# all appliances they own, corresponds to step 2. of [1], p.7
for user in self.users:
user.generate_aggregated_load_profile(
prof_i, peak_time_range, day_types
)
# aggregate the user load to the usecase load
usecase_load = usecase_load + user.load
profiles.append(usecase_load)
# screen update about progress of computation
# print('Profile', prof_i+1, '/', num_profiles, 'completed')
return profiles
@property
def num_days(self):
    """Number of daily profiles to be generated (lazily initialized).

    If the value has not been set yet, initialize() is called first,
    which may derive it or fall back to prompting the user interactively.
    """
    if self._num_days is None:
        self.initialize()
    return self._num_days

def initialize(self, num_days=None, peak_enlarge=0.15):
    """Set the number of days to simulate and compute the peak time range.

    Parameters
    ----------
    num_days : int, optional
        number of daily profiles to generate; if omitted, it is derived
        from date_start/date_end when both are provided, otherwise the
        user is prompted interactively
    peak_enlarge : float, optional
        percentage random enlargement or reduction of the peak time range
        length, forwarded to calc_peak_time_range, by default 0.15
    """
    if num_days is not None:
        self._num_days = num_days
    elif self.date_start is not None and self.date_end is not None:
        # Derive the number of days from the (inclusive) date range,
        # resolving the former TODO which silently ignored the dates.
        self._num_days = pd.date_range(
            start=self.date_start, end=self.date_end
        ).size

    if self._num_days is None:
        # Last resort: ask the user how many days (i.e. code runs) they want
        self._num_days = int(
            input("please indicate the number of days to be generated: ")
        )
        print("Please wait...")

    self.peak_time_range = self.calc_peak_time_range(peak_enlarge=peak_enlarge)
def calc_peak_time_range(self, peak_enlarge=0.15):
    """
    Calculate the peak time range, which is used to discriminate between off-peak and on-peak coincident switch-on probability
    Calculate first the overall Peak Window (taking into account all User classes).
    The peak time range corresponds to `peak time frame` variable in eq. (1) of [1]
    The peak window is just a time window in which coincident switch-on of multiple appliances assumes a higher probability than off-peak
    Within the peak window, a random peak time is calculated and then enlarged into a peak_time_range following again a random procedure

    Parameters
    ----------
    peak_enlarge: float
        percentage random enlargement or reduction of peak time range length
        corresponds to \delta_{peak} in [1], p.7

    Notes
    -----
    [1] F. Lombardi, S. Balderrama, S. Quoilin, E. Colombo,
    Generating high-resolution multi-energy load profiles for remote areas with an open-source stochastic model,
    Energy, 2019, https://doi.org/10.1016/j.energy.2019.04.097.

    Returns
    -------
    peak time range: numpy array
    """

    tot_max_profile = np.zeros(1440)  # creates an empty daily profile
    # Aggregate each User's theoretical max profile to the total theoretical max
    for user in self.users:
        tot_max_profile = tot_max_profile + user.maximum_profile
    # Find the peak window within the theoretical max profile.
    # np.atleast_1d guards against the single-maximum case, where
    # np.squeeze returns a 0-d array and peak_window[-1] would raise.
    peak_window = np.atleast_1d(
        np.squeeze(np.argwhere(tot_max_profile == np.amax(tot_max_profile)))
    )
    # Within the peak_window, randomly calculate the peak_time using a gaussian distribution
    peak_time = round(
        random.normalvariate(
            mu=round(np.average(peak_window)),
            sigma=1 / 3 * (peak_window[-1] - peak_window[0]),
        )
    )
    # The peak_time is randomly enlarged based on the calibration parameter peak_enlarge
    rand_peak_enlarge = round(
        math.fabs(
            peak_time - random.gauss(mu=peak_time, sigma=peak_enlarge * peak_time)
        )
    )
    return np.arange(peak_time - rand_peak_enlarge, peak_time + rand_peak_enlarge)

def generate_daily_load_profiles(self, day_types):
    """Generate the aggregated daily load profiles of the use case.

    Parameters
    ----------
    day_types : sequence
        per-day day-type indicator (one entry per simulated day),
        forwarded to each User's profile generation

    Returns
    -------
    numpy array of shape (num_days, 1440) with the aggregated usecase
    load of each day; also stored on self.daily_profiles.
    """
    if self.parallel_processing is True:
        daily_profiles = self.generate_daily_load_profiles_parallel(
            day_types=day_types
        )
    else:
        daily_profiles = np.zeros((self.num_days, 1440))
        for day_idx in range(self.num_days):
            # initialise an empty daily profile (or profile load)
            # that will be filled with the sum of the daily profiles of each User instance
            usecase_load = np.zeros(1440)
            # for each User instance generate a load profile, iterating through all user of this instance and
            # all appliances they own, corresponds to step 2. of [1], p.7
            for user in self.users:
                user.generate_aggregated_load_profile(
                    day_idx, self.peak_time_range, day_types[day_idx]
                )
                # aggregate the user load to the usecase load
                usecase_load = usecase_load + user.load
            daily_profiles[day_idx, :] = usecase_load
    # Fix: the original discarded the result (`daily_profiles = daily_profiles`
    # was a no-op); store it on the instance and return it so callers can use it.
    self.daily_profiles = np.asarray(daily_profiles)
    return self.daily_profiles

def generate_daily_load_profiles_parallel(
self, num_profiles, peak_time_range, day_types
self, day_types
):
max_parallel_processes = multiprocessing.cpu_count()
tasks = []
t = 0
for day_id in range(num_profiles):
day_type = day_types[day_id]
for day_idx in range(self.num_days):
day_type = day_types[day_idx]
for user in self.users:
for app in user.App_list:
for _ in range(user.num_users):
t = t + 1
tasks.append((app, (day_id, peak_time_range, day_type)))
tasks.append((app, (day_idx, self.peak_time_range, day_type)))

daily_profiles_dict = {}
timeout = 1
Expand All @@ -136,10 +212,10 @@ def generate_daily_load_profiles_parallel(
daily_profiles_dict[prof_i] = [daily_load]
pbar.update()

daily_profiles = np.zeros((num_profiles, 1440))
daily_profiles = np.zeros((self.num_days, 1440))

for day_id in range(num_profiles):
daily_profiles[day_id, :] = np.vstack(daily_profiles_dict[day_id]).sum(
for day_idx in range(self.num_days):
daily_profiles[day_idx, :] = np.vstack(daily_profiles_dict[day_idx]).sum(
axis=0
)

Expand Down Expand Up @@ -271,7 +347,7 @@ def load(self, filename: str) -> None:

class User:
def __init__(
self, user_name: str = "", num_users: int = 1, user_preference: int = 0
self, usecase=None, user_name: str = "", num_users: int = 1, user_preference: int = 0
):
"""Creates a User instance (User Category)
Expand All @@ -284,6 +360,8 @@ def __init__(
user_preference : int {0,1,2,3}, optional
Related to cooking behaviour, how many types of meal a user wants a day (number of user preferences has to be defined here and will be further specified with pref_index parameter), by default 0
"""
# TODO check type of Usecase
self.usecase=usecase
self.user_name = user_name
self.num_users = num_users
self.user_preference = user_preference
Expand Down Expand Up @@ -357,6 +435,14 @@ def maximum_profile(self) -> np.array:
) # this stacks the specific App curve in an overall curve comprising all the Apps within a User class
return np.transpose(np.sum(user_max_profile, axis=0)) * self.num_users

@property
def num_days(self):
    """Number of simulated days.

    Taken from the owning UseCase when this User is attached to one,
    otherwise defaults to a full year (365 days).
    """
    if self.usecase is None:
        return 365
    return self.usecase.num_days

def save(self, filename: str = None) -> Union[pd.DataFrame, None]:
"""Saves/returns the model databas including allappliances as a single pd.DataFrame or excel file.
Expand Down Expand Up @@ -668,7 +754,9 @@ def __init__(
self.wd_we_type = wd_we_type

if isinstance(power, pd.DataFrame):
if power.shape == (365, 1):
# TODO change this automatic value depending on the range of the usecase if provided
# with self.user.usecase.num_days
if power.shape == (self.user.num_days, 1):
power = power.values[:, 0]
else:
raise ValueError("wrong size of array. array size should be (365,1).")
Expand All @@ -678,7 +766,7 @@ def __init__(

elif isinstance(power, (float, int)):
# TODO change this automatic value depending on the range of the usecase
power = power * np.ones(366)
power = power * np.ones(self.user.num_days + 1)

else:
raise ValueError("wrong data type for power.")
Expand Down Expand Up @@ -1051,6 +1139,7 @@ def update_daily_use(self, coincidence, power, indexes):
np.put(
self.daily_use,
indexes,
# TODO this is the only use of power in update_daily_use
(random_variation(var=self.thermal_p_var, norm=coincidence * power)),
)
# updates the time ranges remaining for switch on events, excluding the current switch_on event
Expand Down Expand Up @@ -1411,6 +1500,8 @@ def generate_load_profile(self, prof_i, peak_time_range, day_type, power):
Generating high-resolution multi-energy load profiles for remote areas with an open-source stochastic model,
Energy, 2019, https://doi.org/10.1016/j.energy.2019.04.097.
"""
# TODO power is only used within update_daily_use

# initialises variables for the cycle
self.daily_use = np.zeros(1440)

Expand Down
14 changes: 14 additions & 0 deletions ramp/example/input_file_1.py
Original file line number Diff line number Diff line change
Expand Up @@ -254,3 +254,17 @@

S_Stereo = School.Appliance(1, 150, 2, 90, 0.1, 5, occasional_use=0.33)
S_Stereo.windows([510, 750], [810, 1080], 0.35)

if __name__ == "__main__":
    from ramp.core.core import UseCase

    # NOTE(review): WIP sketch of the intended new API — the `...`
    # (Ellipsis) placeholders make this non-functional as written.
    # date_start and date_end are optional and a full year would be
    # automatically picked if not provided
    uc = UseCase(users=User_list, date_start=..., date_end=...)
    # This would encompass running calc_peak_time_range() as a method of UseCase instance and
    # initialise_inputs() would not really need to be called as all it does is return peak_enlarge = 0.15,
    # the profile number and the user_list which we already have here...
    # NOTE(review): UseCase.initialize() takes `num_days`, not `num_profile`;
    # as written this call would raise a TypeError — confirm intended keyword.
    uc.initialize(num_profile=10, peak_enlarge=0.15)

    # this should be a new method which would call generate_daily_load_profiles() for each day of day_types
    uc.generate_load_profiles(parallel=False, day_types=...)
    # this would be a new method using work of @mohammadamint
    results = uc.export_results()

0 comments on commit 483efd4

Please sign in to comment.