diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0d94a2904..9caf25f57 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -14,7 +14,7 @@ repos:
   # Run ruff to lint and format
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.8.6
+    rev: v0.9.1
     hooks:
       # Run the linter.
       - id: ruff
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 5accba36d..000f013f2 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -4,6 +4,9 @@
 
 version: 2
 
+sphinx:
+  configuration: doc/conf.py
+
 build:
   os: ubuntu-22.04
   tools:
diff --git a/Makefile b/Makefile
index c4b9ad82f..bf87579d6 100755
--- a/Makefile
+++ b/Makefile
@@ -41,13 +41,13 @@ install: _conda_check
 	$(CONDA_OR_MAMBA) run -n $(or $(name), pypsa-eur) pre-commit install
 
 # Install pinned environment
 install-pinned-linux: _conda_check
-	$(CONDA_OR_MAMBA) env create -f envs/pinned-linux.yaml -n $(or $(name), pypsa-eur)
+	$(CONDA_OR_MAMBA) env create -f envs/linux-pinned.yaml -n $(or $(name), pypsa-eur)
 	$(CONDA_OR_MAMBA) run -n $(or $(name), pypsa-eur) pre-commit install
 install-pinned-windows: _conda_check
-	$(CONDA_OR_MAMBA) env create -f envs/pinned-windows.yaml -n $(or $(name), pypsa-eur)
+	$(CONDA_OR_MAMBA) env create -f envs/windows-pinned.yaml -n $(or $(name), pypsa-eur)
 	$(CONDA_OR_MAMBA) run -n $(or $(name), pypsa-eur) pre-commit install
 install-pinned-macos: _conda_check
-	$(CONDA_OR_MAMBA) env create -f envs/pinned-macos.yaml -n $(or $(name), pypsa-eur)
+	$(CONDA_OR_MAMBA) env create -f envs/macos-pinned.yaml -n $(or $(name), pypsa-eur)
 	$(CONDA_OR_MAMBA) run -n $(or $(name), pypsa-eur) pre-commit install
 
diff --git a/matplotlibrc b/matplotlibrc
index 2928a0306..ca963f969 100644
--- a/matplotlibrc
+++ b/matplotlibrc
@@ -2,6 +2,5 @@
 #
 # SPDX-License-Identifier: CC0-1.0
 font.family: sans-serif
-font.sans-serif: Ubuntu, DejaVu Sans
 image.cmap: viridis
 figure.autolayout : True
diff --git a/rules/retrieve.smk b/rules/retrieve.smk
index ce72c45e2..190cb23f7 100755
--- a/rules/retrieve.smk
+++ b/rules/retrieve.smk
@@ -545,8 +545,11 @@ if config["enable"]["retrieve"]:
 if config["enable"]["retrieve"] and (
     config["electricity"]["base_network"] == "osm-prebuilt"
 ):
-    # Dictionary of prebuilt versions, e.g. 0.3 : "13358976"
-    osm_prebuilt_version = {
+    OSM_VERSION = config["electricity"]["osm-prebuilt-version"]
+    OSM_COMPONENTS = ["buses", "converters", "lines", "links", "transformers"]
+    if OSM_VERSION >= 0.6:
+        OSM_COMPONENTS.append("map")
+    OSM_ZENODO_IDS = {
         0.1: "12799202",
         0.2: "13342577",
         0.3: "13358976",
@@ -558,37 +561,22 @@
     # update rule to use the correct version
     rule retrieve_osm_prebuilt:
         input:
-            buses=storage(
-                f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/buses.csv"
-            ),
-            converters=storage(
-                f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/converters.csv"
-            ),
-            lines=storage(
-                f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/lines.csv"
-            ),
-            links=storage(
-                f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/links.csv"
-            ),
-            transformers=storage(
-                f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/transformers.csv"
-            ),
-            map=storage(
-                f"https://zenodo.org/records/{osm_prebuilt_version[config['electricity']['osm-prebuilt-version']]}/files/map.html"
-            ),
+            [
+                storage(
+                    f"https://zenodo.org/records/{OSM_ZENODO_IDS[OSM_VERSION]}/files/{component}.csv"
+                )
+                for component in OSM_COMPONENTS
+            ],
         output:
-            buses=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/buses.csv",
-            converters=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/converters.csv",
-            lines=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/lines.csv",
-            links=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/links.csv",
-            transformers=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/transformers.csv",
-            map=f"data/osm-prebuilt/{config['electricity']['osm-prebuilt-version']}/map.html",
+            [
+                f"data/osm-prebuilt/{OSM_VERSION}/{component}.csv"
+                for component in OSM_COMPONENTS
+            ],
         log:
             "logs/retrieve_osm_prebuilt.log",
         threads: 1
         resources:
             mem_mb=500,
-        retries: 2
         run:
-            for key in input.keys():
-                move(input[key], output[key])
+            for infile, outfile in zip(input, output):
+                move(infile, outfile)
diff --git a/scripts/_helpers.py b/scripts/_helpers.py
index 4d0bdf905..1251e2ab3 100644
--- a/scripts/_helpers.py
+++ b/scripts/_helpers.py
@@ -858,9 +858,9 @@ def validate_checksum(file_path, zenodo_url=None, checksum=None):
            for chunk in iter(lambda: f.read(65536), b""):  # 64kb chunks
                hasher.update(chunk)
        calculated_checksum = hasher.hexdigest()
-        assert (
-            calculated_checksum == checksum
-        ), "Checksum is invalid. This may be due to an incomplete download. Delete the file and re-execute the rule."
+        assert calculated_checksum == checksum, (
+            "Checksum is invalid. This may be due to an incomplete download. Delete the file and re-execute the rule."
+        )
 
 
 def get_snapshots(snapshots, drop_leap_day=False, freq="h", **kwargs):
diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py
index 5a23e4d74..4b99596eb 100755
--- a/scripts/add_electricity.py
+++ b/scripts/add_electricity.py
@@ -710,7 +710,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
     )
     if not missing_countries.empty:
         logger.warning(
-            f'Assuming max_hours=6 for hydro reservoirs in the countries: {", ".join(missing_countries)}'
+            f"Assuming max_hours=6 for hydro reservoirs in the countries: {', '.join(missing_countries)}"
         )
     hydro_max_hours = hydro.max_hours.where(
         (hydro.max_hours > 0) & ~hydro.index.isin(missing_mh_single_i),
@@ -833,7 +833,7 @@ def estimate_renewable_capacities(
     if expansion_limit:
         assert np.isscalar(expansion_limit)
         logger.info(
-            f"Reducing capacity expansion limit to {expansion_limit*100:.2f}% of installed capacity."
+            f"Reducing capacity expansion limit to {expansion_limit * 100:.2f}% of installed capacity."
         )
         n.generators.loc[tech_i, "p_nom_max"] = (
             expansion_limit * n.generators.loc[tech_i, "p_nom_min"]
diff --git a/scripts/base_network.py b/scripts/base_network.py
index 20d5df5ca..ca2d8e5b9 100644
--- a/scripts/base_network.py
+++ b/scripts/base_network.py
@@ -587,9 +587,9 @@ def prefer_voltage(x, which):
             .join(n.buses.country)
             .dropna()
         )
-        assert (
-            not df.empty
-        ), f"No buses with defined country within 200km of bus `{b}`"
+        assert not df.empty, (
+            f"No buses with defined country within 200km of bus `{b}`"
+        )
         n.buses.at[b, "country"] = df.loc[df.pathlength.idxmin(), "country"]
 
     logger.warning(
@@ -720,14 +720,13 @@ def base_network(
 ):
     base_network = config["electricity"].get("base_network")
     osm_prebuilt_version = config["electricity"].get("osm-prebuilt-version")
-    assert (
-        base_network
-        in {
-            "entsoegridkit",
-            "osm-raw",
-            "osm-prebuilt",
-        }
-    ), f"base_network must be either 'entsoegridkit', 'osm-raw' or 'osm-prebuilt', but got '{base_network}'"
+    assert base_network in {
+        "entsoegridkit",
+        "osm-raw",
+        "osm-prebuilt",
+    }, (
+        f"base_network must be either 'entsoegridkit', 'osm-raw' or 'osm-prebuilt', but got '{base_network}'"
+    )
     if base_network == "entsoegridkit":
         warnings.warn(
             "The 'entsoegridkit' base network is deprecated and will be removed in future versions. Please use 'osm-raw' or 'osm-prebuilt' instead.",
diff --git a/scripts/build_electricity_demand.py b/scripts/build_electricity_demand.py
index 7949d9026..33dc4b8b7 100755
--- a/scripts/build_electricity_demand.py
+++ b/scripts/build_electricity_demand.py
@@ -93,8 +93,7 @@ def fill_large_gaps(ds, shift):
     nhours = shift / np.timedelta64(1, "h")
     if (consecutive_nans(ds) > nhours).any():
         logger.warning(
-            "There exist gaps larger then the time shift used for "
-            "copying time slices."
+            "There exist gaps larger than the time shift used for copying time slices."
         )
     time_shift = pd.Series(ds.values, ds.index + shift)
     return ds.where(ds.notnull(), time_shift.reindex_like(ds))
@@ -301,9 +300,7 @@ def manual_adjustment(load, fn_load, countries):
 
     logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
     load = load.interpolate(method="linear", limit=interpolate_limit)
-    logger.info(
-        "Filling larger gaps by copying time-slices of period " f"'{time_shift}'."
-    )
+    logger.info(f"Filling larger gaps by copying time-slices of period '{time_shift}'.")
 
     load = load.apply(fill_large_gaps, shift=time_shift)
 
     if snakemake.params.load["supplement_synthetic"]:
diff --git a/scripts/build_heat_source_potentials/run.py b/scripts/build_heat_source_potentials/run.py
index f5d107609..4126fcc51 100644
--- a/scripts/build_heat_source_potentials/run.py
+++ b/scripts/build_heat_source_potentials/run.py
@@ -40,13 +40,13 @@ def get_unit_conversion_factor(
 ) -> float:
     if input_unit not in unit_scaling.keys():
         raise ValueError(
-            f"Input unit {input_unit} not allowed. Must be one of {
-                unit_scaling.keys()}"
+            f"Input unit {input_unit} not allowed. Must be one of {unit_scaling.keys()}"
         )
     elif output_unit not in unit_scaling.keys():
         raise ValueError(
             f"Output unit {output_unit} not allowed. Must be one of {
-                unit_scaling.keys()}"
+                unit_scaling.keys()
+            }"
         )
 
     return unit_scaling[input_unit] / unit_scaling[output_unit]
diff --git a/scripts/build_osm_network.py b/scripts/build_osm_network.py
index 1bdf3f551..2c3d13ece 100644
--- a/scripts/build_osm_network.py
+++ b/scripts/build_osm_network.py
@@ -1356,7 +1356,7 @@ def _map_links_to_dc_buses(links, dc_buses, distance_crs=DISTANCE_CRS):
     dc_buses_all.reset_index(inplace=True)
 
     logger.info(
-        f"Mapped {len(links_all)} links to {len(dc_buses_all)} DC buses. Dropping {len(dc_buses)-len(dc_buses_all)} DC buses."
+        f"Mapped {len(links_all)} links to {len(dc_buses_all)} DC buses. Dropping {len(dc_buses) - len(dc_buses_all)} DC buses."
     )
 
     return links_all, dc_buses_all
diff --git a/scripts/build_population_layouts.py b/scripts/build_population_layouts.py
index 3ffe06d1d..143ee6bae 100644
--- a/scripts/build_population_layouts.py
+++ b/scripts/build_population_layouts.py
@@ -72,7 +72,7 @@ for ct in countries:
 
     logger.debug(
-        f"The urbanization rate for {ct} is {round(urban_fraction[ct]*100)}%"
+        f"The urbanization rate for {ct} is {round(urban_fraction[ct] * 100)}%"
     )
 
     indicator_nuts3_ct = nuts3.country.apply(lambda x: 1.0 if x == ct else 0.0)
 
diff --git a/scripts/clean_osm_data.py b/scripts/clean_osm_data.py
index cd38053e7..459680409 100644
--- a/scripts/clean_osm_data.py
+++ b/scripts/clean_osm_data.py
@@ -423,7 +423,7 @@ def _import_lines_and_cables(path_lines):
                 country = os.path.basename(os.path.dirname(path_lines[key][idx]))
 
                 logger.info(
-                    f" - Importing {key} {str(idx+1).zfill(2)}/{str(len(path_lines[key])).zfill(2)}: {ip}"
+                    f" - Importing {key} {str(idx + 1).zfill(2)}/{str(len(path_lines[key])).zfill(2)}: {ip}"
                 )
                 with open(ip) as f:
                     data = json.load(f)
@@ -458,7 +458,7 @@ def _import_lines_and_cables(path_lines):
             else:
                 logger.info(
-                    f" - Skipping {key} {str(idx+1).zfill(2)}/{str(len(path_lines[key])).zfill(2)} (empty): {ip}"
+                    f" - Skipping {key} {str(idx + 1).zfill(2)}/{str(len(path_lines[key])).zfill(2)} (empty): {ip}"
                 )
                 continue
 
     logger.info("---")
@@ -493,7 +493,7 @@ def _import_routes_relation(path_relation):
                 country = os.path.basename(os.path.dirname(path_relation[key][idx]))
 
                 logger.info(
-                    f" - Importing {key} {str(idx+1).zfill(2)}/{str(len(path_relation[key])).zfill(2)}: {ip}"
+                    f" - Importing {key} {str(idx + 1).zfill(2)}/{str(len(path_relation[key])).zfill(2)}: {ip}"
                 )
                 with open(ip) as f:
                     data = json.load(f)
@@ -528,7 +528,7 @@ def _import_routes_relation(path_relation):
 
             else:
                 logger.info(
-                    f" - Skipping {key} {str(idx+1).zfill(2)}/{str(len(path_relation[key])).zfill(2)} (empty): {ip}"
+                    f" - Skipping {key} {str(idx + 1).zfill(2)}/{str(len(path_relation[key])).zfill(2)} (empty): {ip}"
                 )
                 continue
 
@@ -753,9 +753,9 @@ def _clean_substations(df_substations, list_voltages):
     )
 
     df_substations.loc[bool_frequency_len & bool_split, "frequency"] = (
-        df_substations.loc[
-            bool_frequency_len & bool_split,
-        ].apply(lambda row: row["frequency"].split(";")[row["split_count"] - 1], axis=1)
+        df_substations.loc[bool_frequency_len & bool_split,].apply(
+            lambda row: row["frequency"].split(";")[row["split_count"] - 1], axis=1
+        )
     )
 
     df_substations = _split_cells(df_substations, cols=["frequency"])
@@ -1260,7 +1260,7 @@ def _import_substations(path_substations):
             ):  # unpopulated OSM json is about 51 bytes
                 country = os.path.basename(os.path.dirname(path_substations[key][idx]))
                 logger.info(
-                    f" - Importing {key} {str(idx+1).zfill(2)}/{str(len(path_substations[key])).zfill(2)}: {ip}"
+                    f" - Importing {key} {str(idx + 1).zfill(2)}/{str(len(path_substations[key])).zfill(2)}: {ip}"
                 )
                 with open(ip) as f:
                     data = json.load(f)
@@ -1302,7 +1302,7 @@ def _import_substations(path_substations):
             else:
                 logger.info(
-                    f" - Skipping {key} {str(idx+1).zfill(2)}/{str(len(path_substations[key])).zfill(2)} (empty): {ip}"
+                    f" - Skipping {key} {str(idx + 1).zfill(2)}/{str(len(path_substations[key])).zfill(2)} (empty): {ip}"
                 )
                 continue
 
     logger.info("---")
@@ -1344,9 +1344,9 @@ def _import_substations(path_substations):
         .reset_index()
     )
     df_substations_relation_members_grouped["geometry"] = (
-        df_substations_relation_members_grouped[
-            "linestring"
-        ].apply(lambda x: x.convex_hull)
+        df_substations_relation_members_grouped["linestring"].apply(
+            lambda x: x.convex_hull
+        )
     )
 
     df_substations_relation = (
@@ -1802,7 +1802,7 @@ def _check_if_ways_in_multi(list, longer_list):
     df_links = df_links.dropna(subset=["rating"])
     len_after = len(df_links)
     logger.info(
-        f"Dropped {len_before-len_after} elements without rating. "
+        f"Dropped {len_before - len_after} elements without rating. "
         + f"Imported {len_after} elements."
     )
 
diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py
index ba3cdb315..1ea72d66c 100644
--- a/scripts/cluster_network.py
+++ b/scripts/cluster_network.py
@@ -184,16 +184,16 @@ def distribute_n_clusters_to_countries(
 
     N = n.buses.groupby(["country", "sub_network"]).size()[L.index]
 
-    assert (
-        n_clusters >= len(N) and n_clusters <= N.sum()
-    ), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
+    assert n_clusters >= len(N) and n_clusters <= N.sum(), (
+        f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
+    )
 
     if isinstance(focus_weights, dict):
         total_focus = sum(list(focus_weights.values()))
 
-        assert (
-            total_focus <= 1.0
-        ), "The sum of focus weights must be less than or equal to 1."
+        assert total_focus <= 1.0, (
+            "The sum of focus weights must be less than or equal to 1."
+        )
 
         for country, weight in focus_weights.items():
             L[country] = weight / len(L[country])
@@ -205,9 +205,9 @@ def distribute_n_clusters_to_countries(
 
         logger.warning("Using custom focus weights for determining number of clusters.")
 
-    assert np.isclose(
-        L.sum(), 1.0, rtol=1e-3
-    ), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
+    assert np.isclose(L.sum(), 1.0, rtol=1e-3), (
+        f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
+    )
 
     m = linopy.Model()
     clusters = m.add_variables(
diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py
index 03cfab3a7..36e447003 100644
--- a/scripts/plot_summary.py
+++ b/scripts/plot_summary.py
@@ -233,14 +233,14 @@ def plot_balances():
         units = "MtCO2/a" if v[0] in co2_carriers else "TWh/a"
 
         logger.debug(
-            f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold']/10} {units}"
+            f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold'] / 10} {units}"
         )
         logger.debug(df.loc[to_drop])
 
         df = df.drop(to_drop)
 
         logger.debug(
-            f"Total energy balance for {v} of {round(df.sum().iloc[0],2)} {units}"
+            f"Total energy balance for {v} of {round(df.sum().iloc[0], 2)} {units}"
         )
 
         if df.empty:
diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py
index 91cbb4e61..88df8b0ad 100755
--- a/scripts/prepare_network.py
+++ b/scripts/prepare_network.py
@@ -240,7 +240,7 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
         import tsam.timeseriesaggregation as tsam
     except ImportError:
         raise ModuleNotFoundError(
-            "Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
+            "Optional dependency 'tsam' not found. Install via 'pip install tsam'"
         )
 
     p_max_pu_norm = n.generators_t.p_max_pu.max()
diff --git a/scripts/prepare_perfect_foresight.py b/scripts/prepare_perfect_foresight.py
index c351a8f2f..e6838124f 100644
--- a/scripts/prepare_perfect_foresight.py
+++ b/scripts/prepare_perfect_foresight.py
@@ -438,7 +438,7 @@ def apply_time_segmentation_perfect(
         import tsam.timeseriesaggregation as tsam
     except ImportError:
         raise ModuleNotFoundError(
-            "Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
+            "Optional dependency 'tsam' not found. Install via 'pip install tsam'"
         )
 
     # get all time-dependent data
diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py
index 3eac7ee0e..3593b0bfe 100755
--- a/scripts/prepare_sector_network.py
+++ b/scripts/prepare_sector_network.py
@@ -1218,7 +1218,7 @@ def insert_electricity_distribution_grid(n, costs):
         .get("efficiency_static")
     ):
         logger.info(
-            f"Deducting distribution losses from electricity demand: {np.around(100*(1-efficiency), decimals=2)}%"
+            f"Deducting distribution losses from electricity demand: {np.around(100 * (1 - efficiency), decimals=2)}%"
        )
         n.loads_t.p_set.loc[:, n.loads.carrier == "electricity"] *= efficiency
 
@@ -1959,7 +1959,7 @@ def add_land_transport(n, costs):
     shares = pd.Series()
     for engine in engine_types:
         shares[engine] = get(options[f"land_transport_{engine}_share"], investment_year)
-        logger.info(f"{engine} share: {shares[engine]*100}%")
+        logger.info(f"{engine} share: {shares[engine] * 100}%")
 
     check_land_transport_shares(shares)
 
@@ -3617,7 +3617,7 @@ def add_industry(n, costs):
     # naphtha
     demand_factor = options["HVC_demand_factor"]
     if demand_factor != 1:
-        logger.warning(f"Changing HVC demand by {demand_factor*100-100:+.2f}%.")
+        logger.warning(f"Changing HVC demand by {demand_factor * 100 - 100:+.2f}%.")
 
     p_set_naphtha = (
         demand_factor
@@ -3785,7 +3785,9 @@ def add_industry(n, costs):
     # aviation
     demand_factor = options["aviation_demand_factor"]
     if demand_factor != 1:
-        logger.warning(f"Changing aviation demand by {demand_factor*100-100:+.2f}%.")
+        logger.warning(
+            f"Changing aviation demand by {demand_factor * 100 - 100:+.2f}%."
+        )
 
     all_aviation = ["total international aviation", "total domestic aviation"]
 
@@ -4442,9 +4444,9 @@ def add_enhanced_geothermal(n, egs_potentials, egs_overlap, costs):
         * Nyears
     )
 
-    assert (
-        egs_potentials["capital_cost"] > 0
-    ).all(), "Error in EGS cost, negative values found."
+    assert (egs_potentials["capital_cost"] > 0).all(), (
+        "Error in EGS cost, negative values found."
+    )
 
     orc_annuity = calculate_annuity(costs.at["organic rankine cycle", "lifetime"], dr)
     orc_capital_cost = (orc_annuity + FOM / (1 + FOM)) * orc_capex * Nyears
diff --git a/scripts/retrieve_osm_data.py b/scripts/retrieve_osm_data.py
index 5e32da60d..b50a7497a 100644
--- a/scripts/retrieve_osm_data.py
+++ b/scripts/retrieve_osm_data.py
@@ -80,7 +80,7 @@ def retrieve_osm_data(
     retries = 3
     for attempt in range(retries):
         logger.info(
-            f" - Fetching OSM data for feature '{f}' in {country} (Attempt {attempt+1})..."
+            f" - Fetching OSM data for feature '{f}' in {country} (Attempt {attempt + 1})..."
         )
 
         # Build the overpass query
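
Note on the retrieve_osm_prebuilt refactor above: the rule now declares its inputs and outputs as positional lists built from OSM_COMPONENTS, so the two lists must stay index-aligned and the run block pairs them with zip rather than by key. The sketch below replays that pairing outside of Snakemake. It is a minimal sketch under stated assumptions: the osm_filename() helper is hypothetical, the "map" component is assumed to still be published as map.html (as in the pre-refactor rule, where every other component is a CSV table), and "00000000" stands in for a Zenodo record ID that is not given here.

    # Standalone sketch of the positional input/output pairing in retrieve_osm_prebuilt.
    # Assumptions, not confirmed by the diff: osm_filename() is hypothetical, "map" is
    # taken to be an HTML export, and the Zenodo record ID is a placeholder.
    OSM_VERSION = 0.6
    OSM_COMPONENTS = ["buses", "converters", "lines", "links", "transformers"]
    if OSM_VERSION >= 0.6:
        OSM_COMPONENTS.append("map")
    OSM_ZENODO_IDS = {0.6: "00000000"}  # placeholder record ID


    def osm_filename(component: str) -> str:
        # Every prebuilt component is a CSV table except the HTML map export.
        return f"{component}.html" if component == "map" else f"{component}.csv"


    inputs = [
        f"https://zenodo.org/records/{OSM_ZENODO_IDS[OSM_VERSION]}/files/{osm_filename(c)}"
        for c in OSM_COMPONENTS
    ]
    outputs = [
        f"data/osm-prebuilt/{OSM_VERSION}/{osm_filename(c)}" for c in OSM_COMPONENTS
    ]

    # Positional (unnamed) Snakemake inputs carry no keys, so pairing is by index.
    for src, dst in zip(inputs, outputs):
        print(f"{src} -> {dst}")

If the v0.6 Zenodo record in fact ships map.csv, the plain "{component}.csv" comprehension in the diff is already correct and no extension helper is needed.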