Skip to content

Commit

Permalink
Merge pull request #17 from carnegie/dev_atlite_cfs
Browse files Browse the repository at this point in the history
Atlite capacity factors and update to latest PyPSA
  • Loading branch information
awongel authored Nov 5, 2024
2 parents e16768f + a2f76f3 commit d3ce6f1
Show file tree
Hide file tree
Showing 7 changed files with 362 additions and 19 deletions.
2 changes: 1 addition & 1 deletion PyPSA
Submodule PyPSA updated 58 files
+92 −0 .github/workflows/codeql.yml
+44 −12 .github/workflows/test.yml
+5 −1 .pre-commit-config.yaml
+1 −1 CITATION.cff
+1 −0 codecov.yml
+3 −3 doc/api/optimization.rst
+1 −3 doc/conf.py
+0 −26 doc/contributing/mailing-list.rst
+5 −3 doc/contributing/support.rst
+20 −22 doc/contributing/troubleshooting.rst
+23 −54 doc/getting-started/installation.rst
+5 −32 doc/getting-started/introduction.rst
+0 −4 doc/index.rst
+2 −2 doc/references/api-reference.rst
+1 −8 doc/references/citing.rst
+0 −56 doc/references/comparable-software.rst
+2 −1 doc/references/developers.rst
+110 −76 doc/references/release-notes.rst
+7 −5 doc/references/users.rst
+88 −130 doc/user-guide/components.rst
+3 −3 doc/user-guide/contingency-analysis.rst
+87 −108 doc/user-guide/design.rst
+59 −56 doc/user-guide/import-export.rst
+345 −323 doc/user-guide/optimal-power-flow.rst
+10 −12 doc/user-guide/plotting.rst
+80 −82 doc/user-guide/power-flow.rst
+1 −1 examples/multi-decade-example.py
+33 −8 pyproject.toml
+3 −2 pypsa/__init__.py
+18 −9 pypsa/clustering/__init__.py
+94 −65 pypsa/clustering/spatial.py
+1 −1 pypsa/component_attrs/lines.csv
+2 −0 pypsa/component_attrs/storage_units.csv
+1 −0 pypsa/component_attrs/stores.csv
+1 −1 pypsa/component_attrs/transformers.csv
+224 −127 pypsa/components.py
+14 −4 pypsa/contingency.py
+59 −22 pypsa/descriptors.py
+16 −6 pypsa/examples.py
+6 −3 pypsa/geo.py
+28 −9 pypsa/graph.py
+384 −275 pypsa/io.py
+75 −45 pypsa/optimization/abstract.py
+12 −3 pypsa/optimization/common.py
+7 −1 pypsa/optimization/compat.py
+63 −36 pypsa/optimization/constraints.py
+24 −21 pypsa/optimization/global_constraints.py
+74 −70 pypsa/optimization/optimize.py
+15 −8 pypsa/optimization/variables.py
+216 −203 pypsa/pf.py
+235 −1 pypsa/plot.py
+0 −0 pypsa/py.typed
+220 −178 pypsa/statistics.py
+55 −0 pypsa/utils.py
+29 −29 pypsa/variables.csv
+17 −0 test/test_io.py
+82 −0 test/test_lopf_storage.py
+20 −1 test/test_plotting.py
61 changes: 61 additions & 0 deletions capacity_factors_atlite/get_US_CFs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
import atlite
import cartopy.io.shapereader as shpreader
import geopandas as gpd
from shapely.geometry import box
import pandas as pd
import logging
from argparse import ArgumentParser

def main(year):
    """
    Build an atlite ERA5 cutout for the contiguous United States and save
    wind and solar capacity-factor time series for one calendar year.

    Parameters
    ----------
    year : int
        Calendar year for which ERA5 data are downloaded and capacity
        factors computed.

    Side effects
    ------------
    Downloads ERA5 data (this can take a long time) and writes two netCDF
    files in the working directory:
    ``conus_wind_CF_timeseries_<year>.nc`` and
    ``conus_solar_CF_timeseries_<year>.nc``.
    """
    logging.basicConfig(level=logging.INFO)

    # Country outlines from Natural Earth (10m cultural, admin-0).
    shpfilename = shpreader.natural_earth(
        resolution="10m", category="cultural", name="admin_0_countries"
    )
    reader = shpreader.Reader(shpfilename)
    # NOTE: crs="EPSG:4326" replaces the deprecated {"init": "epsg:4326"}
    # dict form, which modern pyproj/geopandas no longer accept.
    US = gpd.GeoSeries(
        {r.attributes["NAME_EN"]: r.geometry for r in reader.records()},
        crs="EPSG:4326").reindex(["United States of America"])

    # Only keep contiguous US (drops Alaska, Hawaii, territories) by
    # clipping to a lon/lat bounding box of the lower 48 states.
    contiguous_48_bbox = box(minx=-125, miny=24.396308, maxx=-66.93457, maxy=49.384358)
    # Clip the US geometry to the bounding box
    region = US.geometry.intersection(contiguous_48_bbox)

    region_name = "conus"

    # Define the cutout; this will not yet trigger any major operations
    cutout = atlite.Cutout(
        path=f"{region_name}-{year}", module="era5",
        bounds=region.unary_union.bounds,
        time=f"{year}",
        chunks={"time": 100,},)
    # This is where all the work happens (this can take some time).
    cutout.prepare(
        compression={"zlib": True, "complevel": 9},
        monthly_requests=True,
        concurrent_requests=True)

    # Extract the wind power generation capacity factors
    wind_power_generation = cutout.wind(
        "Vestas_V112_3MW",
        capacity_factor_timeseries=True,
    )

    # Extract the solar power generation capacity factors
    solar_power_generation = cutout.pv(
        panel="CSi",
        orientation='latitude_optimal',
        tracking="horizontal",
        capacity_factor_timeseries=True,)

    # Save as netcdf
    wind_power_generation.to_netcdf(f"{region_name}_wind_CF_timeseries_{year}.nc")
    solar_power_generation.to_netcdf(f"{region_name}_solar_CF_timeseries_{year}.nc")

if __name__ == "__main__":
    # Command-line entry point: compute capacity factors for a single year.
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--year", type=int, help="Get data for this year")
    parsed = arg_parser.parse_args()
    main(parsed.year)
235 changes: 235 additions & 0 deletions capacity_factors_atlite/get_top_25percent_cfs.ipynb

Large diffs are not rendered by default.

6 changes: 4 additions & 2 deletions env.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ channels:
dependencies:
- python>=3.10
- pip
- pypsa>=0.24
- pypsa=0.31.1
- xlrd
- openpyxl!=3.1.1
- memory_profiler
Expand All @@ -27,8 +27,10 @@ dependencies:
- shapely>=2.0
- matplotlib<3.6
- gurobi>=10.0.1
- atlite=0.2.14
- cartopy

# Keep in conda environment when calling ipython
# Keep in conda environment when calling ipython
- ipython

- pip:
Expand Down
42 changes: 41 additions & 1 deletion run_pypsa.py
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,44 @@ def postprocess_results(n, case_dict):
return df_dict


def add_bicharger_constraint(m, n):
    """
    Add constraint requiring the same sizing for charging and discharging power.

    For every pair of links whose names share a prefix before '-bicharger'
    (e.g. 'battery-bicharger-charge' / 'battery-bicharger-discharge'), add
    the linear constraint

        p_nom(first link) == p_nom(second link) * efficiency(second link)

    Parameters
    ----------
    m : linopy model, as returned by ``network.optimize.create_model()``.
    n : pypsa.Network whose ``links`` table contains the bicharger links.

    Returns
    -------
    The model ``m`` with one 'bicharger_constraint_<prefix>' per pair added.
    """
    # Filter links with 'bicharger' in their name
    bicharger_links = [link for link in n.links.index if 'bicharger' in link]

    # Map each prefix to its matching pair of links.
    bicharger_pairs = {}

    for link in bicharger_links:
        # Extract the prefix, assuming the format is '*-bicharger'
        prefix = link.split('-bicharger')[0]
        # Compare full prefixes rather than using startswith(), which would
        # wrongly pair e.g. 'battery' with 'battery2-bicharger-...'.
        possible_matches = [
            l for l in bicharger_links
            if l != link and l.split('-bicharger')[0] == prefix
        ]

        # If a matching pair is found, store it (first hit wins)
        if possible_matches and prefix not in bicharger_pairs:
            bicharger_pairs[prefix] = (link, possible_matches[0])

    # Add constraints for each matching pair
    for prefix, (charger_link, discharger_link) in bicharger_pairs.items():
        # Get the power and efficiency values for the charger and discharger
        logging.info(f"Found bi-directional charging pair for {prefix}: {charger_link} and {discharger_link}")
        p_charger = m.variables['Link-p_nom'].at[charger_link]
        p_discharger = m.variables['Link-p_nom'].at[discharger_link]
        efficiency = n.links.at[discharger_link, 'efficiency']
        logging.info(f"Charger power: {p_charger}, discharger power: {p_discharger}, Efficiency: {efficiency}")

        # Define the symbolic expression for the constraint (without evaluation)
        # NOTE(review): which link plays "charger" depends on iteration order
        # over n.links.index — confirm the pair ordering matches the intended
        # charger/discharger roles for your naming scheme.
        constraint_expression = p_charger - p_discharger * efficiency
        # Add the constraint
        m.add_constraints(lhs=constraint_expression, sign="=", rhs=0, name="bicharger_constraint_" + prefix)

    return m

def build_network(infile):
""" infile: string path for .xlsx or .csv case file """

Expand All @@ -299,7 +337,9 @@ def build_network(infile):
def run_pypsa(network, infile, case_dict, component_list, outfile_suffix=""):

# Solve the linear optimization power flow with Gurobi
network.optimize(solver_name=case_dict['solver'])
model = network.optimize.create_model()
model = add_bicharger_constraint(model, network)
network.optimize.solve_model(solver_name=case_dict['solver'])

# Check if optimization was successful
if not hasattr(network, 'objective'):
Expand Down
8 changes: 5 additions & 3 deletions utilities/read_input.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import logging
import pypsa
from utilities.load_costs import load_costs
from utilities.utilities import is_number, remove_empty_rows, find_first_row_with_keyword, check_attributes, concatenate_list_of_strings
from utilities.utilities import is_number, remove_empty_rows, find_first_row_with_keyword, check_attributes, concatenate_list_of_strings, get_nyears
from datetime import datetime
from pathlib import Path
import pandas as pd
Expand Down Expand Up @@ -75,7 +75,8 @@ def update_component_attribute_dict(attributes_from_file):
"""
Create dictionary of allowable attributes for each component type
"""
component_attribute_dict = pypsa.descriptors.Dict({k: v.copy() for k, v in pypsa.components.component_attrs.items()})

component_attribute_dict = {k: v.copy() for k, v in pypsa.components.component_attrs.items()}

bus_numbers = [int(bus.replace("bus","")) for bus in attributes_from_file if bus is not None and bus.startswith('bus') and bus != 'bus']
# Add attributes for components that are not in default PyPSA
Expand Down Expand Up @@ -233,7 +234,8 @@ def read_input_file_to_dict(file_name):
case_data_dict['datetime_end'] = convert_slash_to_dash_dates(case_data_dict['datetime_end'])
if '/' in case_data_dict['datetime_start']:
case_data_dict['datetime_start'] = convert_slash_to_dash_dates(case_data_dict['datetime_start'])
nyears = case_data_dict['total_hours'] / 8760.

nyears = get_nyears(case_data_dict['datetime_start'], case_data_dict['datetime_end'])
case_data_dict['nyears'] = nyears

# Config file path
Expand Down
27 changes: 15 additions & 12 deletions utilities/utilities.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import os, csv
import pandas as pd


def check_directory(directory):
"""
Check if directory exists, if not create it
Expand All @@ -22,7 +23,6 @@ def strip_quotes(string):
return string



def remove_empty_rows(list_of_lists):
"""
Eliminate all lists in a list of lists that are empty or contain only empty strings
Expand All @@ -41,13 +41,6 @@ def find_first_row_with_keyword(list_of_lists, keyword):
return i
return -1

"""
List files in a directory, stripping out hidden files and eliminating file extension
def list_files_in_directory(directory):
return [f.split('.')[0] for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and not f.startswith('.')]
"""


def check_attributes(element_list, dict_of_lists):
"""
Expand Down Expand Up @@ -81,7 +74,17 @@ def is_number(s):
return False


import csv
def get_nyears(start, end):
    """
    Return the number of years spanned by an hourly time series.

    Builds an hourly index from *start* to *end* (both endpoints inclusive),
    removes any Feb 29 timestamps so leap years count the same as ordinary
    years, and divides the remaining hour count by 8760.
    """
    hours = pd.date_range(start, end, freq='h')
    # Leap days would inflate the total, so drop every Feb 29 timestamp.
    if hours.is_leap_year.any():
        leap_day = (hours.month == 2) & (hours.day == 29)
        hours = hours[~leap_day]
    return len(hours) / 8760


def skip_until_keyword(ts_file, keyword):
"""
Expand Down Expand Up @@ -138,13 +141,13 @@ def add_carrier_info(network, stats_df):
"""
# Initialize a list to hold the carrier information
carriers = []
sorted_components = sorted(network.iterate_components(), key=lambda x: x[0])
sorted_components = sorted(network.iterate_components(), key=lambda x: x.name)
# Iterate over components
for component_class in sorted_components:
if component_class[0] == "Bus":
if component_class.name == "Bus":
continue
# Collect carriers
components = getattr(network, component_class[1])
components = getattr(network, component_class.list_name)
# Sort components by index
if hasattr(components, "carrier"):
sorted_carriers = components.sort_index().carrier.tolist()
Expand Down

0 comments on commit d3ce6f1

Please sign in to comment.