Merge branch 'master' into shapes
bobbyxng committed Jan 15, 2025
2 parents 27ee50f + 62afc9e commit 32aa3a2
Showing 19 changed files with 80 additions and 92 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -14,7 +14,7 @@ repos:
# Run ruff to lint and format
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.8.6
rev: v0.9.1
hooks:
# Run the linter.
- id: ruff
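The bump from v0.8.6 to v0.9.1 picks up ruff's updated formatter style, which is presumably what drives the purely mechanical reformattings in the Python files further down: long assert statements keep the condition on one line and parenthesize the message instead, and expressions inside f-strings are normalized. A minimal hedged sketch of the assert change, with invented checksum values:

# Hedged illustration only; not part of this commit.
calculated_checksum = "abc123"
checksum = "abc123"

# Layout preferred by older ruff releases: the condition is wrapped.
assert (
    calculated_checksum == checksum
), "Checksum is invalid. Delete the file and re-execute the rule."

# Layout after the 0.9.x bump: the condition stays flat, the message is wrapped.
assert calculated_checksum == checksum, (
    "Checksum is invalid. Delete the file and re-execute the rule."
)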
3 changes: 3 additions & 0 deletions .readthedocs.yml
@@ -4,6 +4,9 @@

version: 2

sphinx:
configuration: doc/conf.py

build:
os: ubuntu-22.04
tools:
6 changes: 3 additions & 3 deletions Makefile
@@ -41,13 +41,13 @@ install: _conda_check
$(CONDA_OR_MAMBA) run -n $(or $(name), pypsa-eur) pre-commit install
# Install pinned environment
install-pinned-linux: _conda_check
$(CONDA_OR_MAMBA) env create -f envs/pinned-linux.yaml -n $(or $(name), pypsa-eur)
$(CONDA_OR_MAMBA) env create -f envs/linux-pinned.yaml -n $(or $(name), pypsa-eur)
$(CONDA_OR_MAMBA) run -n $(or $(name), pypsa-eur) pre-commit install
install-pinned-windows: _conda_check
$(CONDA_OR_MAMBA) env create -f envs/pinned-windows.yaml -n $(or $(name), pypsa-eur)
$(CONDA_OR_MAMBA) env create -f envs/windows-pinned.yaml -n $(or $(name), pypsa-eur)
$(CONDA_OR_MAMBA) run -n $(or $(name), pypsa-eur) pre-commit install
install-pinned-macos: _conda_check
$(CONDA_OR_MAMBA) env create -f envs/pinned-macos.yaml -n $(or $(name), pypsa-eur)
$(CONDA_OR_MAMBA) env create -f envs/macos-pinned.yaml -n $(or $(name), pypsa-eur)
$(CONDA_OR_MAMBA) run -n $(or $(name), pypsa-eur) pre-commit install


1 change: 0 additions & 1 deletion matplotlibrc
@@ -2,6 +2,5 @@
#
# SPDX-License-Identifier: CC0-1.0
font.family: sans-serif
font.sans-serif: Ubuntu, DejaVu Sans
image.cmap: viridis
figure.autolayout : True
42 changes: 15 additions & 27 deletions rules/retrieve.smk
@@ -545,8 +545,11 @@ if config["enable"]["retrieve"]:
if config["enable"]["retrieve"] and (
config["electricity"]["base_network"] == "osm-prebuilt"
):
# Dictionary of prebuilt versions, e.g. 0.3 : "13358976"
osm_prebuilt_version = {
OSM_VERSION = config["electricity"]["osm-prebuilt-version"]
OSM_COMPONENTS = ["buses", "converters", "lines", "links", "transformers"]
if OSM_VERSION >= 0.6:
OSM_COMPONENTS.append("map")
OSM_ZENODO_IDS = {
0.1: "12799202",
0.2: "13342577",
0.3: "13358976",
@@ -558,37 +561,22 @@
# update rule to use the correct version
rule retrieve_osm_prebuilt:
input:
buses=storage(
f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/buses.csv"
),
converters=storage(
f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/converters.csv"
),
lines=storage(
f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/lines.csv"
),
links=storage(
f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/links.csv"
),
transformers=storage(
f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/transformers.csv"
),
map=storage(
f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/map.html"
),
[
storage(
f"https://zenodo.org/records/{OSM_ZENODO_IDS[OSM_VERSION]}/files/{component}.csv"
)
for component in OSM_COMPONENTS
],
output:
buses=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/buses.csv",
converters=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/converters.csv",
lines=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/lines.csv",
links=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/links.csv",
transformers=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/transformers.csv",
map=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/map.html",
[
f"data/osm-prebuilt/{OSM_VERSION}/{component}.csv"
for component in OSM_COMPONENTS
],
log:
"logs/retrieve_osm_prebuilt.log",
threads: 1
resources:
mem_mb=500,
retries: 2
run:
for key in input.keys():
move(input[key], output[key])
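The refactor above collapses six near-identical storage() inputs into a comprehension over OSM_COMPONENTS, keyed by the Zenodo record ID for the configured version. A rough Python sketch of the resulting URL and output lists, using only the version/record pair visible in this hunk (0.3 → 13358976); later record IDs sit in the collapsed part of the dictionary:

# Hedged sketch of the list-building pattern; values taken from the visible hunk.
OSM_VERSION = 0.3
OSM_ZENODO_IDS = {0.1: "12799202", 0.2: "13342577", 0.3: "13358976"}

OSM_COMPONENTS = ["buses", "converters", "lines", "links", "transformers"]
if OSM_VERSION >= 0.6:  # per the condition above, "map" is only added from 0.6 on
    OSM_COMPONENTS.append("map")

inputs = [
    f"https://zenodo.org/records/{OSM_ZENODO_IDS[OSM_VERSION]}/files/{component}.csv"
    for component in OSM_COMPONENTS
]
outputs = [
    f"data/osm-prebuilt/{OSM_VERSION}/{component}.csv" for component in OSM_COMPONENTS
]

print(inputs[0])   # https://zenodo.org/records/13358976/files/buses.csv
print(outputs[0])  # data/osm-prebuilt/0.3/buses.csv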
6 changes: 3 additions & 3 deletions scripts/_helpers.py
@@ -858,9 +858,9 @@ def validate_checksum(file_path, zenodo_url=None, checksum=None):
for chunk in iter(lambda: f.read(65536), b""): # 64kb chunks
hasher.update(chunk)
calculated_checksum = hasher.hexdigest()
assert (
calculated_checksum == checksum
), "Checksum is invalid. This may be due to an incomplete download. Delete the file and re-execute the rule."
assert calculated_checksum == checksum, (
"Checksum is invalid. This may be due to an incomplete download. Delete the file and re-execute the rule."
)


def get_snapshots(snapshots, drop_leap_day=False, freq="h", **kwargs):
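The hunk above is formatter-only, but the surrounding chunked-hashing pattern is worth a sketch: reading in 64 kB chunks keeps memory use flat for large downloads. The function name, hash algorithm and file path below are assumptions for illustration, not taken from _helpers.py:

import hashlib

def file_checksum(file_path: str) -> str:
    # Hedged sketch; validate_checksum uses whichever hash the Zenodo
    # metadata specifies, sha256 is assumed here.
    hasher = hashlib.sha256()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b""):  # 64 kB chunks
            hasher.update(chunk)
    return hasher.hexdigest()

# calculated_checksum = file_checksum("data/some_download.csv")  # hypothetical path
# assert calculated_checksum == checksum, (
#     "Checksum is invalid. This may be due to an incomplete download. "
#     "Delete the file and re-execute the rule."
# )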
4 changes: 2 additions & 2 deletions scripts/add_electricity.py
@@ -710,7 +710,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
)
if not missing_countries.empty:
logger.warning(
f'Assuming max_hours=6 for hydro reservoirs in the countries: {", ".join(missing_countries)}'
f"Assuming max_hours=6 for hydro reservoirs in the countries: {', '.join(missing_countries)}"
)
hydro_max_hours = hydro.max_hours.where(
(hydro.max_hours > 0) & ~hydro.index.isin(missing_mh_single_i),
@@ -833,7 +833,7 @@ def estimate_renewable_capacities(
if expansion_limit:
assert np.isscalar(expansion_limit)
logger.info(
f"Reducing capacity expansion limit to {expansion_limit*100:.2f}% of installed capacity."
f"Reducing capacity expansion limit to {expansion_limit * 100:.2f}% of installed capacity."
)
n.generators.loc[tech_i, "p_nom_max"] = (
expansion_limit * n.generators.loc[tech_i, "p_nom_min"]
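Both hunks in this file are formatter-only: ruff 0.9 appears to format the expressions inside f-strings, flipping the quote style and spacing out operators without changing the rendered text. A small hedged illustration with invented values:

missing_countries = ["AL", "BA"]
expansion_limit = 0.5

# Old style: outer single quotes because the expression needs double quotes inside.
msg_old = f'Assuming max_hours=6 for hydro reservoirs in the countries: {", ".join(missing_countries)}'
# New style: inner quotes are flipped so the outer quotes can stay double,
# and operators inside the replacement field get surrounding spaces.
msg_new = f"Assuming max_hours=6 for hydro reservoirs in the countries: {', '.join(missing_countries)}"
pct = f"Reducing capacity expansion limit to {expansion_limit * 100:.2f}% of installed capacity."

assert msg_old == msg_new  # identical output, only the source formatting differs
print(pct)                 # Reducing capacity expansion limit to 50.00% of installed capacity.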
21 changes: 10 additions & 11 deletions scripts/base_network.py
@@ -587,9 +587,9 @@ def prefer_voltage(x, which):
.join(n.buses.country)
.dropna()
)
assert (
not df.empty
), f"No buses with defined country within 200km of bus `{b}`"
assert not df.empty, (
f"No buses with defined country within 200km of bus `{b}`"
)
n.buses.at[b, "country"] = df.loc[df.pathlength.idxmin(), "country"]

logger.warning(
@@ -720,14 +720,13 @@ def base_network(
):
base_network = config["electricity"].get("base_network")
osm_prebuilt_version = config["electricity"].get("osm-prebuilt-version")
assert (
base_network
in {
"entsoegridkit",
"osm-raw",
"osm-prebuilt",
}
), f"base_network must be either 'entsoegridkit', 'osm-raw' or 'osm-prebuilt', but got '{base_network}'"
assert base_network in {
"entsoegridkit",
"osm-raw",
"osm-prebuilt",
}, (
f"base_network must be either 'entsoegridkit', 'osm-raw' or 'osm-prebuilt', but got '{base_network}'"
)
if base_network == "entsoegridkit":
warnings.warn(
"The 'entsoegridkit' base network is deprecated and will be removed in future versions. Please use 'osm-raw' or 'osm-prebuilt' instead.",
7 changes: 2 additions & 5 deletions scripts/build_electricity_demand.py
@@ -93,8 +93,7 @@ def fill_large_gaps(ds, shift):
nhours = shift / np.timedelta64(1, "h")
if (consecutive_nans(ds) > nhours).any():
logger.warning(
"There exist gaps larger then the time shift used for "
"copying time slices."
"There exist gaps larger then the time shift used for copying time slices."
)
time_shift = pd.Series(ds.values, ds.index + shift)
return ds.where(ds.notnull(), time_shift.reindex_like(ds))
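Besides the string merge, fill_large_gaps is unchanged; its shift-and-reindex trick fills missing hours with the value recorded one shift earlier. A hedged toy example with invented values:

import numpy as np
import pandas as pd

idx = pd.date_range("2025-01-01", periods=6, freq="h")
ds = pd.Series([1.0, 2.0, np.nan, np.nan, 5.0, 6.0], index=idx)
shift = pd.Timedelta("2h")

time_shift = pd.Series(ds.values, ds.index + shift)            # same values, index moved forward
filled = ds.where(ds.notnull(), time_shift.reindex_like(ds))   # gaps take the shifted values

print(filled.tolist())  # [1.0, 2.0, 1.0, 2.0, 5.0, 6.0]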
@@ -301,9 +300,7 @@ def manual_adjustment(load, fn_load, countries):
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
load = load.interpolate(method="linear", limit=interpolate_limit)

logger.info(
"Filling larger gaps by copying time-slices of period " f"'{time_shift}'."
)
logger.info(f"Filling larger gaps by copying time-slices of period '{time_shift}'.")
load = load.apply(fill_large_gaps, shift=time_shift)

if snakemake.params.load["supplement_synthetic"]:
6 changes: 3 additions & 3 deletions scripts/build_heat_source_potentials/run.py
@@ -40,13 +40,13 @@ def get_unit_conversion_factor(
) -> float:
if input_unit not in unit_scaling.keys():
raise ValueError(
f"Input unit {input_unit} not allowed. Must be one of {
unit_scaling.keys()}"
f"Input unit {input_unit} not allowed. Must be one of {unit_scaling.keys()}"
)
elif output_unit not in unit_scaling.keys():
raise ValueError(
f"Output unit {output_unit} not allowed. Must be one of {
unit_scaling.keys()}"
unit_scaling.keys()
}"
)

return unit_scaling[input_unit] / unit_scaling[output_unit]
2 changes: 1 addition & 1 deletion scripts/build_osm_network.py
@@ -1356,7 +1356,7 @@ def _map_links_to_dc_buses(links, dc_buses, distance_crs=DISTANCE_CRS):
dc_buses_all.reset_index(inplace=True)

logger.info(
f"Mapped {len(links_all)} links to {len(dc_buses_all)} DC buses. Dropping {len(dc_buses)-len(dc_buses_all)} DC buses."
f"Mapped {len(links_all)} links to {len(dc_buses_all)} DC buses. Dropping {len(dc_buses) - len(dc_buses_all)} DC buses."
)

return links_all, dc_buses_all
2 changes: 1 addition & 1 deletion scripts/build_population_layouts.py
@@ -72,7 +72,7 @@

for ct in countries:
logger.debug(
f"The urbanization rate for {ct} is {round(urban_fraction[ct]*100)}%"
f"The urbanization rate for {ct} is {round(urban_fraction[ct] * 100)}%"
)

indicator_nuts3_ct = nuts3.country.apply(lambda x: 1.0 if x == ct else 0.0)
26 changes: 13 additions & 13 deletions scripts/clean_osm_data.py
@@ -423,7 +423,7 @@ def _import_lines_and_cables(path_lines):
country = os.path.basename(os.path.dirname(path_lines[key][idx]))

logger.info(
f" - Importing {key} {str(idx+1).zfill(2)}/{str(len(path_lines[key])).zfill(2)}: {ip}"
f" - Importing {key} {str(idx + 1).zfill(2)}/{str(len(path_lines[key])).zfill(2)}: {ip}"
)
with open(ip) as f:
data = json.load(f)
@@ -458,7 +458,7 @@ def _import_lines_and_cables(path_lines):

else:
logger.info(
f" - Skipping {key} {str(idx+1).zfill(2)}/{str(len(path_lines[key])).zfill(2)} (empty): {ip}"
f" - Skipping {key} {str(idx + 1).zfill(2)}/{str(len(path_lines[key])).zfill(2)} (empty): {ip}"
)
continue
logger.info("---")
@@ -493,7 +493,7 @@ def _import_routes_relation(path_relation):
country = os.path.basename(os.path.dirname(path_relation[key][idx]))

logger.info(
f" - Importing {key} {str(idx+1).zfill(2)}/{str(len(path_relation[key])).zfill(2)}: {ip}"
f" - Importing {key} {str(idx + 1).zfill(2)}/{str(len(path_relation[key])).zfill(2)}: {ip}"
)
with open(ip) as f:
data = json.load(f)
@@ -528,7 +528,7 @@ def _import_routes_relation(path_relation):

else:
logger.info(
f" - Skipping {key} {str(idx+1).zfill(2)}/{str(len(path_relation[key])).zfill(2)} (empty): {ip}"
f" - Skipping {key} {str(idx + 1).zfill(2)}/{str(len(path_relation[key])).zfill(2)} (empty): {ip}"
)
continue

@@ -753,9 +753,9 @@ def _clean_substations(df_substations, list_voltages):
)

df_substations.loc[bool_frequency_len & bool_split, "frequency"] = (
df_substations.loc[
bool_frequency_len & bool_split,
].apply(lambda row: row["frequency"].split(";")[row["split_count"] - 1], axis=1)
df_substations.loc[bool_frequency_len & bool_split,].apply(
lambda row: row["frequency"].split(";")[row["split_count"] - 1], axis=1
)
)

df_substations = _split_cells(df_substations, cols=["frequency"])
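The reflowed .loc/.apply chain above is behaviour-neutral: rows whose frequency field holds several ";"-separated values pick the entry matching their split_count. A hedged toy example with an invented frame:

import pandas as pd

df = pd.DataFrame({"frequency": ["50;0", "50;0"], "split_count": [1, 2]})
picked = df.apply(lambda row: row["frequency"].split(";")[row["split_count"] - 1], axis=1)
print(picked.tolist())  # ['50', '0']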
@@ -1260,7 +1260,7 @@ def _import_substations(path_substations):
): # unpopulated OSM json is about 51 bytes
country = os.path.basename(os.path.dirname(path_substations[key][idx]))
logger.info(
f" - Importing {key} {str(idx+1).zfill(2)}/{str(len(path_substations[key])).zfill(2)}: {ip}"
f" - Importing {key} {str(idx + 1).zfill(2)}/{str(len(path_substations[key])).zfill(2)}: {ip}"
)
with open(ip) as f:
data = json.load(f)
@@ -1302,7 +1302,7 @@ def _import_substations(path_substations):

else:
logger.info(
f" - Skipping {key} {str(idx+1).zfill(2)}/{str(len(path_substations[key])).zfill(2)} (empty): {ip}"
f" - Skipping {key} {str(idx + 1).zfill(2)}/{str(len(path_substations[key])).zfill(2)} (empty): {ip}"
)
continue
logger.info("---")
@@ -1344,9 +1344,9 @@ def _import_substations(path_substations):
.reset_index()
)
df_substations_relation_members_grouped["geometry"] = (
df_substations_relation_members_grouped[
"linestring"
].apply(lambda x: x.convex_hull)
df_substations_relation_members_grouped["linestring"].apply(
lambda x: x.convex_hull
)
)

df_substations_relation = (
@@ -1802,7 +1802,7 @@ def _check_if_ways_in_multi(list, longer_list):
df_links = df_links.dropna(subset=["rating"])
len_after = len(df_links)
logger.info(
f"Dropped {len_before-len_after} elements without rating. "
f"Dropped {len_before - len_after} elements without rating. "
+ f"Imported {len_after} elements."
)

18 changes: 9 additions & 9 deletions scripts/cluster_network.py
@@ -184,16 +184,16 @@ def distribute_n_clusters_to_countries(

N = n.buses.groupby(["country", "sub_network"]).size()[L.index]

assert (
n_clusters >= len(N) and n_clusters <= N.sum()
), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
assert n_clusters >= len(N) and n_clusters <= N.sum(), (
f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
)

if isinstance(focus_weights, dict):
total_focus = sum(list(focus_weights.values()))

assert (
total_focus <= 1.0
), "The sum of focus weights must be less than or equal to 1."
assert total_focus <= 1.0, (
"The sum of focus weights must be less than or equal to 1."
)

for country, weight in focus_weights.items():
L[country] = weight / len(L[country])
@@ -205,9 +205,9 @@

logger.warning("Using custom focus weights for determining number of clusters.")

assert np.isclose(
L.sum(), 1.0, rtol=1e-3
), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
assert np.isclose(L.sum(), 1.0, rtol=1e-3), (
f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
)

m = linopy.Model()
clusters = m.add_variables(
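The reworked asserts in this hunk encode a simple feasibility check: the requested number of clusters must lie between the number of country/sub-network groups and the total number of buses. A hedged numeric illustration with invented bus counts:

import pandas as pd

N = pd.Series({"DE": 3, "FR": 5, "BE": 2})  # buses per country/sub-network (invented)
n_clusters = 7
assert n_clusters >= len(N) and n_clusters <= N.sum(), (
    f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
)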
4 changes: 2 additions & 2 deletions scripts/plot_summary.py
@@ -233,14 +233,14 @@ def plot_balances():

units = "MtCO2/a" if v[0] in co2_carriers else "TWh/a"
logger.debug(
f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold']/10} {units}"
f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold'] / 10} {units}"
)
logger.debug(df.loc[to_drop])

df = df.drop(to_drop)

logger.debug(
f"Total energy balance for {v} of {round(df.sum().iloc[0],2)} {units}"
f"Total energy balance for {v} of {round(df.sum().iloc[0], 2)} {units}"
)

if df.empty:
2 changes: 1 addition & 1 deletion scripts/prepare_network.py
@@ -240,7 +240,7 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
import tsam.timeseriesaggregation as tsam
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
"Optional dependency 'tsam' not found.Install via 'pip install tsam'"
)

p_max_pu_norm = n.generators_t.p_max_pu.max()
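The formatter appears to merge the two adjacent string literals above into one; implicit concatenation joins them with no separator, which is why the combined message reads 'found.Install'. A hedged one-liner showing the mechanism:

msg = "Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
print(msg)  # Optional dependency 'tsam' not found.Install via 'pip install tsam'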
2 changes: 1 addition & 1 deletion scripts/prepare_perfect_foresight.py
@@ -438,7 +438,7 @@ def apply_time_segmentation_perfect(
import tsam.timeseriesaggregation as tsam
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
"Optional dependency 'tsam' not found.Install via 'pip install tsam'"
)

# get all time-dependent data
