Skip to content

Commit

Permalink
Make time series possible for other attributes
Browse files Browse the repository at this point in the history
  • Loading branch information
awongel committed Apr 24, 2024
1 parent 701ad29 commit ab7fc2a
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 22 deletions.
32 changes: 16 additions & 16 deletions run_pypsa.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,31 +114,31 @@ def dicts_to_pypsa(case_dict, component_list, component_attr):
n = add_buses_to_network(n, component_list)

for component_dict in component_list:
for attribute in component_dict:
# Check if attribute is a string and csv file holding a time series
if isinstance(component_dict[attribute], str) and component_dict[attribute].endswith('.csv'):
logging.info(f"Processing time series file: {component_dict[attribute]}")
# Add time series to components
ts_file = os.path.join(case_dict["input_path"],component_dict[attribute])
# for generators and loads, add time series to components
for attr in component_dict:
# Add time series to components
if isinstance(component_dict[attr], str) and ".csv" in component_dict[attr]:
logging.info("reading time series file")
ts_file = os.path.join(case_dict["input_path"],component_dict[attr])
try:
ts = process_time_series_file(ts_file, case_dict["datetime_start"], case_dict["datetime_end"])
logging.info(f"Time series file: {component_dict[attribute]} processed successfully.")
logging.info(ts)
except Exception: # if time series not found in input path, use csv's in test directory
logging.error("Time series file not found for " + component_dict[attribute] + " of " + component_dict["name"] + ". Now exiting.")
sys.exit(1)
logging.warning("Time series file not found for " + component_dict["name"] + ". Using time series files in test directory.")
case_dict['input_path'] = "./test"
ts_file = os.path.join(case_dict["input_path"],component_dict[attr])
ts = process_time_series_file(ts_file, case_dict["datetime_start"], case_dict["datetime_end"])

if ts is not None:
# Include time series as snapshots taking every delta_t value
n.snapshots = ts.iloc[::case_dict['delta_t'], :].index if case_dict['delta_t'] else ts.index
# Add time series to component
component_dict[attribute] = ts.iloc[:, 0]
component_dict[attr] = ts.iloc[:, 0]

# Scale by numerics_scaling, this avoids rounding otherwise done in Gurobi for small numbers and normalize time series if needed
component_dict = scale_normalize_time_series(component_dict, case_dict["numerics_scaling"])
# Remove time_series_file from component_dict
component_dict = scale_normalize_time_series(component_dict, case_dict["numerics_scaling"])
else:
logging.warning("Time series not properly processed for " + component_dict[attribute] + " of " + component_dict["name"] + ". Now exiting.")
sys.exit(1)
logging.warning("Time series file not found for " + component_dict["name"] + ". Skipping component.")
continue

# Without time series file, set snapshots to number of time steps defined in the input file
if len(n.snapshots) == 1 and case_dict["no_time_steps"] is not None:
Expand Down Expand Up @@ -230,7 +230,7 @@ def postprocess_results(n, case_dict):
time_results_df = pd.concat([time_results_df, n.links_t["p0"].rename(columns=dict(
zip(n.links_t["p0"].columns.to_list(),
[name + " dispatch" for name in n.links_t["p0"].columns.to_list()])))], axis=1)

# Collect objective and system cost in one dataframe
system_cost = (n.statistics()["Capital Expenditure"].sum() + n.statistics()[
"Operational Expenditure"].sum()) / case_dict["total_hours"]
Expand Down
11 changes: 5 additions & 6 deletions utilities/read_input.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,8 +130,8 @@ def read_component_data(comp_dict, attr, val, technology, costs_df):
# if it's empty or a cost name, use read_attr to get the value from the costs dataframe.
if attr != None:
read_attr = None
# if "name", "bus", or "time_series_file" is in attr or value can be converted to a float, use that
if (val != None and (any(x in attr for x in ['name', 'bus', 'carrier']) or is_number(val) or '=' in val)):
# if "name", "bus" or "carrier" is in attr or value can be converted to a float, use that
if (val != None and (any(x in attr for x in ['name', 'bus', 'carrier']) or is_number(val) or '=' in val or '.csv' in val)):
comp_dict[attr] = val
# if otherwise value is a string, use database value if the string is just 'db'
# if first two letters are db use the rest of the string as the attribute name
Expand All @@ -147,13 +147,12 @@ def read_component_data(comp_dict, attr, val, technology, costs_df):
else:
val = val.replace('db_','')
read_attr = val
elif val.endswith('.csv'):
comp_dict[attr] = val
else:
logging.error('Failed to read in '+val + ' for attribute ' + attr + ' for component ' + comp_dict["component"] + ' ' + comp_dict["name"])
logging.error('Exiting now.')
logging.error('Tried to read in a string that is not a number, name, or contains "db" to indicate use a database value. Failed = '+val + ' for attribute ' + attr + ' for component ' + comp_dict["component"] + ' ' + comp_dict["name"])
logging.error('Terminal error. Exiting.')
exit()


# if read_attr is defined, use it to get the value from the costs dataframe
if read_attr != None:
if (technology in costs_df.index and read_attr in costs_df.columns):
Expand Down

0 comments on commit ab7fc2a

Please sign in to comment.