From 2729b4562522f461cf02367a7901012feb28bf86 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Tue, 20 Jun 2023 19:33:19 +0200 Subject: [PATCH 001/103] Adds a public API to convert and convert_results methods In previous versions of otoole, the internal read and write strategies were exposed to the user. This was not ideal as it meant that the user had to know the internal structure of the model. This commit adds a public API to convert and convert_results methods. These methods are used to convert between different file formats. This commit also: - adds tests for the convert and convert_results methods - adds a new module to the otoole package called convert.py - adds a test fixture for the super_simple model - fixes typing errors --- setup.cfg | 2 +- src/otoole/__init__.py | 21 +- src/otoole/cli.py | 163 +----- src/otoole/convert.py | 210 +++++++ src/otoole/input.py | 13 +- src/otoole/read_strategies.py | 36 +- src/otoole/utils.py | 2 +- .../csv/AccumulatedAnnualDemand.csv | 1 + .../super_simple/csv/AnnualEmissionLimit.csv | 1 + .../csv/AnnualExogenousEmission.csv | 1 + .../super_simple/csv/AvailabilityFactor.csv | 1 + .../super_simple/csv/CapacityFactor.csv | 1 + .../csv/CapacityOfOneTechnologyUnit.csv | 1 + .../csv/CapacityToActivityUnit.csv | 2 + .../fixtures/super_simple/csv/CapitalCost.csv | 2 + .../super_simple/csv/CapitalCostStorage.csv | 1 + .../super_simple/csv/Conversionld.csv | 1 + .../super_simple/csv/Conversionlh.csv | 1 + .../super_simple/csv/Conversionls.csv | 1 + .../super_simple/csv/DAILYTIMEBRACKET.csv | 1 + tests/fixtures/super_simple/csv/DAYTYPE.csv | 1 + tests/fixtures/super_simple/csv/DaySplit.csv | 1 + .../super_simple/csv/DaysInDayType.csv | 1 + .../super_simple/csv/DepreciationMethod.csv | 1 + .../super_simple/csv/DiscountRate.csv | 1 + .../super_simple/csv/DiscountRateIdv.csv | 1 + .../super_simple/csv/DiscountRateStorage.csv | 1 + tests/fixtures/super_simple/csv/EMISSION.csv | 1 + .../csv/EmissionActivityRatio.csv | 1 + .../super_simple/csv/EmissionsPenalty.csv | 1 + tests/fixtures/super_simple/csv/FUEL.csv | 3 + tests/fixtures/super_simple/csv/FixedCost.csv | 2 + .../super_simple/csv/InputActivityRatio.csv | 2 + .../super_simple/csv/MODE_OF_OPERATION.csv | 2 + .../super_simple/csv/MinStorageCharge.csv | 1 + .../csv/ModelPeriodEmissionLimit.csv | 1 + .../csv/ModelPeriodExogenousEmission.csv | 1 + .../super_simple/csv/OperationalLife.csv | 1 + .../csv/OperationalLifeStorage.csv | 1 + .../super_simple/csv/OutputActivityRatio.csv | 3 + tests/fixtures/super_simple/csv/REGION.csv | 2 + .../csv/REMinProductionTarget.csv | 1 + tests/fixtures/super_simple/csv/RETagFuel.csv | 1 + .../super_simple/csv/RETagTechnology.csv | 1 + .../super_simple/csv/ReserveMargin.csv | 1 + .../super_simple/csv/ReserveMarginTagFuel.csv | 1 + .../csv/ReserveMarginTagTechnology.csv | 1 + .../super_simple/csv/ResidualCapacity.csv | 2 + .../csv/ResidualStorageCapacity.csv | 1 + tests/fixtures/super_simple/csv/SEASON.csv | 1 + tests/fixtures/super_simple/csv/STORAGE.csv | 1 + .../csv/SpecifiedAnnualDemand.csv | 2 + .../csv/SpecifiedDemandProfile.csv | 2 + .../super_simple/csv/StorageLevelStart.csv | 1 + .../super_simple/csv/StorageMaxChargeRate.csv | 1 + .../csv/StorageMaxDischargeRate.csv | 1 + .../fixtures/super_simple/csv/TECHNOLOGY.csv | 3 + tests/fixtures/super_simple/csv/TIMESLICE.csv | 2 + .../csv/TechnologyFromStorage.csv | 1 + .../super_simple/csv/TechnologyToStorage.csv | 1 + .../csv/TotalAnnualMaxCapacity.csv | 1 + .../csv/TotalAnnualMaxCapacityInvestment.csv | 1 + 
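In caller terms the change described above looks roughly like the following sketch (file names such as config.yaml, model.txt and output_csvs are illustrative, and user_config stands for the parsed configuration dictionary; this is not code from the patch itself)::

    # Before this patch: the internal read/write strategies were part of the
    # public interface and had to be wired together by the user.
    from otoole import Context, ReadDatafile, WriteCsv

    context = Context(ReadDatafile(user_config=user_config),
                      WriteCsv(user_config=user_config))
    context.convert('model.txt', 'output_csvs')

    # With this patch: a single public function wraps that plumbing.
    from otoole import convert

    convert('config.yaml', 'datafile', 'csv', 'model.txt', 'output_csvs')
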
.../csv/TotalAnnualMinCapacity.csv | 1 + .../csv/TotalAnnualMinCapacityInvestment.csv | 1 + ...otalTechnologyAnnualActivityLowerLimit.csv | 1 + ...otalTechnologyAnnualActivityUpperLimit.csv | 1 + ...echnologyModelPeriodActivityLowerLimit.csv | 1 + ...echnologyModelPeriodActivityUpperLimit.csv | 1 + .../fixtures/super_simple/csv/TradeRoute.csv | 1 + .../super_simple/csv/VariableCost.csv | 2 + tests/fixtures/super_simple/csv/YEAR.csv | 2 + tests/fixtures/super_simple/csv/YearSplit.csv | 2 + tests/fixtures/super_simple/csv/_REGION.csv | 2 + tests/fixtures/super_simple/super_simple.txt | 153 ++++++ tests/fixtures/super_simple/super_simple.yaml | 520 ++++++++++++++++++ .../fixtures/super_simple/super_simple_gnu.lp | 222 ++++++++ .../super_simple/super_simple_gnu.sol | 48 ++ tests/test_convert.py | 129 +++-- tests/test_read_strategies.py | 2 +- 79 files changed, 1399 insertions(+), 208 deletions(-) create mode 100644 src/otoole/convert.py create mode 100644 tests/fixtures/super_simple/csv/AccumulatedAnnualDemand.csv create mode 100644 tests/fixtures/super_simple/csv/AnnualEmissionLimit.csv create mode 100644 tests/fixtures/super_simple/csv/AnnualExogenousEmission.csv create mode 100644 tests/fixtures/super_simple/csv/AvailabilityFactor.csv create mode 100644 tests/fixtures/super_simple/csv/CapacityFactor.csv create mode 100644 tests/fixtures/super_simple/csv/CapacityOfOneTechnologyUnit.csv create mode 100644 tests/fixtures/super_simple/csv/CapacityToActivityUnit.csv create mode 100644 tests/fixtures/super_simple/csv/CapitalCost.csv create mode 100644 tests/fixtures/super_simple/csv/CapitalCostStorage.csv create mode 100644 tests/fixtures/super_simple/csv/Conversionld.csv create mode 100644 tests/fixtures/super_simple/csv/Conversionlh.csv create mode 100644 tests/fixtures/super_simple/csv/Conversionls.csv create mode 100644 tests/fixtures/super_simple/csv/DAILYTIMEBRACKET.csv create mode 100644 tests/fixtures/super_simple/csv/DAYTYPE.csv create mode 100644 tests/fixtures/super_simple/csv/DaySplit.csv create mode 100644 tests/fixtures/super_simple/csv/DaysInDayType.csv create mode 100644 tests/fixtures/super_simple/csv/DepreciationMethod.csv create mode 100644 tests/fixtures/super_simple/csv/DiscountRate.csv create mode 100644 tests/fixtures/super_simple/csv/DiscountRateIdv.csv create mode 100644 tests/fixtures/super_simple/csv/DiscountRateStorage.csv create mode 100644 tests/fixtures/super_simple/csv/EMISSION.csv create mode 100644 tests/fixtures/super_simple/csv/EmissionActivityRatio.csv create mode 100644 tests/fixtures/super_simple/csv/EmissionsPenalty.csv create mode 100644 tests/fixtures/super_simple/csv/FUEL.csv create mode 100644 tests/fixtures/super_simple/csv/FixedCost.csv create mode 100644 tests/fixtures/super_simple/csv/InputActivityRatio.csv create mode 100644 tests/fixtures/super_simple/csv/MODE_OF_OPERATION.csv create mode 100644 tests/fixtures/super_simple/csv/MinStorageCharge.csv create mode 100644 tests/fixtures/super_simple/csv/ModelPeriodEmissionLimit.csv create mode 100644 tests/fixtures/super_simple/csv/ModelPeriodExogenousEmission.csv create mode 100644 tests/fixtures/super_simple/csv/OperationalLife.csv create mode 100644 tests/fixtures/super_simple/csv/OperationalLifeStorage.csv create mode 100644 tests/fixtures/super_simple/csv/OutputActivityRatio.csv create mode 100644 tests/fixtures/super_simple/csv/REGION.csv create mode 100644 tests/fixtures/super_simple/csv/REMinProductionTarget.csv create mode 100644 tests/fixtures/super_simple/csv/RETagFuel.csv create mode 100644 
tests/fixtures/super_simple/csv/RETagTechnology.csv create mode 100644 tests/fixtures/super_simple/csv/ReserveMargin.csv create mode 100644 tests/fixtures/super_simple/csv/ReserveMarginTagFuel.csv create mode 100644 tests/fixtures/super_simple/csv/ReserveMarginTagTechnology.csv create mode 100644 tests/fixtures/super_simple/csv/ResidualCapacity.csv create mode 100644 tests/fixtures/super_simple/csv/ResidualStorageCapacity.csv create mode 100644 tests/fixtures/super_simple/csv/SEASON.csv create mode 100644 tests/fixtures/super_simple/csv/STORAGE.csv create mode 100644 tests/fixtures/super_simple/csv/SpecifiedAnnualDemand.csv create mode 100644 tests/fixtures/super_simple/csv/SpecifiedDemandProfile.csv create mode 100644 tests/fixtures/super_simple/csv/StorageLevelStart.csv create mode 100644 tests/fixtures/super_simple/csv/StorageMaxChargeRate.csv create mode 100644 tests/fixtures/super_simple/csv/StorageMaxDischargeRate.csv create mode 100644 tests/fixtures/super_simple/csv/TECHNOLOGY.csv create mode 100644 tests/fixtures/super_simple/csv/TIMESLICE.csv create mode 100644 tests/fixtures/super_simple/csv/TechnologyFromStorage.csv create mode 100644 tests/fixtures/super_simple/csv/TechnologyToStorage.csv create mode 100644 tests/fixtures/super_simple/csv/TotalAnnualMaxCapacity.csv create mode 100644 tests/fixtures/super_simple/csv/TotalAnnualMaxCapacityInvestment.csv create mode 100644 tests/fixtures/super_simple/csv/TotalAnnualMinCapacity.csv create mode 100644 tests/fixtures/super_simple/csv/TotalAnnualMinCapacityInvestment.csv create mode 100644 tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityLowerLimit.csv create mode 100644 tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityUpperLimit.csv create mode 100644 tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityLowerLimit.csv create mode 100644 tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityUpperLimit.csv create mode 100644 tests/fixtures/super_simple/csv/TradeRoute.csv create mode 100644 tests/fixtures/super_simple/csv/VariableCost.csv create mode 100644 tests/fixtures/super_simple/csv/YEAR.csv create mode 100644 tests/fixtures/super_simple/csv/YearSplit.csv create mode 100644 tests/fixtures/super_simple/csv/_REGION.csv create mode 100644 tests/fixtures/super_simple/super_simple.txt create mode 100644 tests/fixtures/super_simple/super_simple.yaml create mode 100644 tests/fixtures/super_simple/super_simple_gnu.lp create mode 100644 tests/fixtures/super_simple/super_simple_gnu.sol diff --git a/setup.cfg b/setup.cfg index b410ffb8..81aa8fcd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -47,7 +47,7 @@ install_requires = pydot importlib_resources; python_version<'3.7' pandas>=1.1 - amply>=0.1.4 + amply>=0.1.6 networkx flatten_dict openpyxl diff --git a/src/otoole/__init__.py b/src/otoole/__init__.py index 88e521e6..57365b0a 100644 --- a/src/otoole/__init__.py +++ b/src/otoole/__init__.py @@ -1,10 +1,7 @@ # -*- coding: utf-8 -*- import sys -from otoole.input import Context -from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel, ReadMemory -from otoole.results.results import ReadCbc, ReadCplex, ReadGurobi -from otoole.write_strategies import WriteCsv, WriteDatafile, WriteExcel +from otoole.convert import convert, convert_results if sys.version_info[:2] >= (3, 8): # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8` @@ -21,17 +18,7 @@ finally: del version, PackageNotFoundError +convert = convert +convert_results = convert_results -__all__ = [ - 
"Context", - "ReadCbc", - "ReadCsv", - "ReadCplex", - "ReadDatafile", - "ReadExcel", - "ReadGurobi", - "ReadMemory", - "WriteCsv", - "WriteDatafile", - "WriteExcel", -] +__all__ = ["convert" "convert_results"] diff --git a/src/otoole/cli.py b/src/otoole/cli.py index 8d0ce16c..9ff2066a 100644 --- a/src/otoole/cli.py +++ b/src/otoole/cli.py @@ -44,21 +44,10 @@ import shutil import sys -from otoole import ( - ReadCbc, - ReadCplex, - ReadCsv, - ReadDatafile, - ReadExcel, - ReadGurobi, - WriteCsv, - WriteDatafile, - WriteExcel, - __version__, -) +from otoole import __version__, convert, convert_results from otoole.exceptions import OtooleSetupError -from otoole.input import Context from otoole.preprocess.setup import get_config_setup_data, get_csv_setup_data +from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel from otoole.utils import ( _read_file, read_deprecated_datapackage, @@ -67,6 +56,7 @@ ) from otoole.validate import main as validate from otoole.visualise import create_res +from otoole.write_strategies import WriteCsv logger = logging.getLogger(__name__) @@ -104,70 +94,21 @@ def validate_model(args): validate(input_data) -def cplex2cbc(args): - ReadCplex()._convert_cplex_file( - args.cplex_file, - args.output_file, - args.start_year, - args.end_year, - args.output_format, +def _result_matrix(args): + convert_results( + args.config, + args.input_datapackage, + args.input_csvs, + args.input_datafile, + args.to_path, + args.from_path, + args.from_format, + args.to_format, + args.write_defaults, ) -def result_matrix(args): - """Post-process results from CBC solution file into CSV format""" - msg = "Conversion from {} to {} is not yet implemented".format( - args.from_format, args.to_format - ) - - read_strategy = None - write_strategy = None - - config = None - if args.config: - _, ending = os.path.splitext(args.config) - with open(args.config, "r") as config_file: - config = _read_file(config_file, ending) - logger.info("Reading config from {}".format(args.config)) - logger.info("Validating config from {}".format(args.config)) - validate_config(config) - - # set read strategy - - if args.from_format == "cbc": - read_strategy = ReadCbc(user_config=config) - elif args.from_format == "cplex": - read_strategy = ReadCplex(user_config=config) - elif args.from_format == "gurobi": - read_strategy = ReadGurobi(user_config=config) - - # set write strategy - - write_defaults = True if args.write_defaults else False - - if args.to_format == "csv": - write_strategy = WriteCsv(user_config=config, write_defaults=write_defaults) - - if args.input_datapackage: - logger.warning( - "Reading from datapackage is deprecated, trying to read from CSVs" - ) - input_csvs = read_deprecated_datapackage(args.input_datapackage) - logger.info("Successfully read folder of CSVs") - input_data, _ = ReadCsv(user_config=config).read(input_csvs) - elif args.input_datafile: - input_data, _ = ReadDatafile(user_config=config).read(args.input_datafile) - else: - input_data = {} - - if read_strategy and write_strategy: - context = Context(read_strategy, write_strategy) - context.convert(args.from_path, args.to_path, input_data=input_data) - else: - raise NotImplementedError(msg) - - -def conversion_matrix(args): +def _conversion_matrix(args): """Convert from one format to another Implemented conversion functions:: @@ -179,71 +120,15 @@ def conversion_matrix(args): datafile nn -- -- """ - - msg = "Conversion from {} to {} is not yet implemented".format( - args.from_format, args.to_format + convert( + args.config, + 
args.from_format, + args.to_format, + args.from_path, + args.to_path, + args.write_defaults, ) - read_strategy = None - write_strategy = None - - from_path = args.from_path - to_path = args.to_path - - config = None - if args.config: - _, ending = os.path.splitext(args.config) - with open(args.config, "r") as config_file: - config = _read_file(config_file, ending) - logger.info("Reading config from {}".format(args.config)) - logger.info("Validating config from {}".format(args.config)) - validate_config(config) - - # set read strategy - - keep_whitespace = True if args.keep_whitespace else False - - if args.from_format == "datafile": - read_strategy = ReadDatafile(user_config=config) - elif args.from_format == "datapackage": - logger.warning( - "Reading from datapackage is deprecated, trying to read from CSVs" - ) - from_path = read_deprecated_datapackage(from_path) - logger.info("Successfully read folder of CSVs") - read_strategy = ReadCsv(user_config=config, keep_whitespace=keep_whitespace) - elif args.from_format == "csv": - read_strategy = ReadCsv(user_config=config, keep_whitespace=keep_whitespace) - elif args.from_format == "excel": - read_strategy = ReadExcel(user_config=config, keep_whitespace=keep_whitespace) - - input_data, _ = read_strategy.read(args.from_path) - - # set write strategy - - write_defaults = True if args.write_defaults else False - - if args.to_format == "datapackage": - logger.warning("Writing to datapackage is deprecated, writing to CSVs") - to_path = os.path.join(os.path.dirname(to_path), "data") - write_strategy = WriteCsv(user_config=config, write_defaults=write_defaults) - elif args.to_format == "excel": - write_strategy = WriteExcel( - user_config=config, write_defaults=write_defaults, input_data=input_data - ) - elif args.to_format == "datafile": - write_strategy = WriteDatafile( - user_config=config, write_defaults=write_defaults - ) - elif args.to_format == "csv": - write_strategy = WriteCsv(user_config=config, write_defaults=write_defaults) - - if read_strategy and write_strategy: - context = Context(read_strategy, write_strategy) - context.convert(from_path, to_path) - else: - raise NotImplementedError(msg) - def data2res(args): """Get input data and call res creation.""" @@ -349,7 +234,7 @@ def get_parser(): default=False, action="store_true", ) - result_parser.set_defaults(func=result_matrix) + result_parser.set_defaults(func=_result_matrix) # Parser for conversion convert_parser = subparsers.add_parser( @@ -382,7 +267,7 @@ def get_parser(): default=False, action="store_true", ) - convert_parser.set_defaults(func=conversion_matrix) + convert_parser.set_defaults(func=_conversion_matrix) # Parser for validation valid_parser = subparsers.add_parser("validate", help="Validate an OSeMOSYS model") diff --git a/src/otoole/convert.py b/src/otoole/convert.py new file mode 100644 index 00000000..068a66cd --- /dev/null +++ b/src/otoole/convert.py @@ -0,0 +1,210 @@ +"""This module implements the public API of the otoole package + +Use the otoole ``convert`` function to convert between different file formats. 
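+
+Results written by a solver can similarly be post-processed with the
+``convert_results`` function; the file and folder names in this example are
+purely illustrative::
+
+>>> from otoole import convert_results
+>>> convert_results('config.yaml', 'cbc', 'csv', 'solution.sol', 'results',
+...                 input_csvs='model_csvs')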
+Import the convert function from the otoole package:: + +>>> from otoole import convert +>>> convert('config.yaml', 'excel', 'datafile', 'input.xlsx', 'output.dat') + +""" +import logging +import os +from typing import Union + +from otoole.input import Context, ReadStrategy, WriteStrategy +from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel +from otoole.results.results import ReadCbc, ReadCplex, ReadGurobi +from otoole.utils import _read_file, read_deprecated_datapackage, validate_config +from otoole.write_strategies import WriteCsv, WriteDatafile, WriteExcel + +logger = logging.getLogger(__name__) + + +def convert_results( + config, + from_format, + to_format, + from_path, + to_path, + input_datapackage=None, + input_csvs=None, + input_datafile=None, + write_defaults=False, +): + """Post-process results from a CBC, CPLEX or Gurobi solution file into CSV format + + Arguments + --------- + config : str + input_datapackage : str + input_csvs : str + input_datafile : str + to_path : str + from_path : str + from_format : str + to_format : str + write_defaults : str + """ + msg = "Conversion from {} to {} is not yet implemented".format( + from_format, to_format + ) + + read_strategy = None + write_strategy = None + + if config: + _, ending = os.path.splitext(config) + with open(config, "r") as config_file: + user_config = _read_file(config_file, ending) + logger.info("Reading config from {}".format(config)) + logger.info("Validating config from {}".format(config)) + validate_config(user_config) + + # set read strategy + + if from_format == "cbc": + read_strategy = ReadCbc(user_config=user_config) + elif from_format == "cplex": + read_strategy = ReadCplex(user_config=user_config) + elif from_format == "gurobi": + read_strategy = ReadGurobi(user_config=user_config) + + # set write strategy + + write_defaults = True if write_defaults else False + + if to_format == "csv": + write_strategy = WriteCsv( + user_config=user_config, write_defaults=write_defaults + ) + + if input_datapackage: + logger.warning( + "Reading from datapackage is deprecated, trying to read from CSVs" + ) + input_csvs = read_deprecated_datapackage(input_datapackage) + logger.info("Successfully read folder of CSVs") + input_data, _ = ReadCsv(user_config=user_config).read(input_csvs) + elif input_datafile: + input_data, _ = ReadDatafile(user_config=user_config).read(input_datafile) + elif input_csvs: + input_data, _ = ReadCsv(user_config=user_config).read(input_csvs) + else: + input_data = {} + + if read_strategy and write_strategy: + context = Context(read_strategy, write_strategy) + context.convert(from_path, to_path, input_data=input_data) + else: + raise NotImplementedError(msg) + return False + + return True + + +def convert( + config, + from_format, + to_format, + from_path, + to_path, + write_defaults=False, + keep_whitespace=False, +) -> bool: + """Convert OSeMOSYS data from/to datafile, csv and Excel formats + + Arguments + --------- + config : str + Path to config file + from_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + to_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + from_path : str + Path to destination file (if datafile or excel) or folder (csv or datapackage) + write_defaults: bool, default: False + keep_whitespace: bool, default: False + + Returns + ------- + bool + True if conversion was successful + """ + + msg = "Conversion from {} to {} is not yet implemented".format( + from_format, to_format + ) + + if config: + _, 
ending = os.path.splitext(config) + with open(config, "r") as config_file: + user_config = _read_file(config_file, ending) + logger.info("Reading config from {}".format(config)) + logger.info("Validating config from {}".format(config)) + validate_config(user_config) + + # set read strategy + + keep_whitespace = True if keep_whitespace else False + + if from_format == "datafile": + read_strategy: Union[ReadStrategy, None] = ReadDatafile(user_config=user_config) + elif from_format == "datapackage": + logger.warning( + "Reading from datapackage is deprecated, trying to read from CSVs" + ) + from_path = read_deprecated_datapackage(from_path) + logger.info("Successfully read folder of CSVs") + read_strategy = ReadCsv( + user_config=user_config, keep_whitespace=keep_whitespace + ) # typing: ReadStrategy + elif from_format == "csv": + read_strategy = ReadCsv( + user_config=user_config, keep_whitespace=keep_whitespace + ) # typing: ReadStrategy + elif from_format == "excel": + read_strategy = ReadExcel( + user_config=user_config, keep_whitespace=keep_whitespace + ) # typing: ReadStrategy + else: + read_strategy = None + + if read_strategy: + input_data, _ = read_strategy.read(from_path) + + # set write strategy + + write_defaults = True if write_defaults else False + + if to_format == "datapackage": + logger.warning("Writing to datapackage is deprecated, writing to CSVs") + to_path = os.path.join(os.path.dirname(to_path), "data") + write_strategy: Union[WriteStrategy, None] = WriteCsv( + user_config=user_config, write_defaults=write_defaults + ) + elif to_format == "excel": + write_strategy = WriteExcel( + user_config=user_config, + write_defaults=write_defaults, + input_data=input_data, + ) + elif to_format == "datafile": + write_strategy = WriteDatafile( + user_config=user_config, write_defaults=write_defaults + ) + elif to_format == "csv": + write_strategy = WriteCsv( + user_config=user_config, write_defaults=write_defaults + ) + else: + write_strategy = None + + if read_strategy and write_strategy: + context = Context(read_strategy, write_strategy) + context.convert(from_path, to_path) + else: + raise NotImplementedError(msg) + return False + + return True diff --git a/src/otoole/input.py b/src/otoole/input.py index fcdc4e6e..fe04b1a4 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -174,9 +174,11 @@ class WriteStrategy(Strategy): Arguments --------- + user_config: dict, default=None filepath: str, default=None default_values: dict, default=None - user_config: dict, default=None + write_defaults: bool, default=False + input_data: dict, default=None """ @@ -296,7 +298,7 @@ def _expand_defaults( Raises ------ KeyError - If set defenitons are not in input_data and input_data is not supplied + If set definitons are not in input_data and input_data is not supplied """ sets = [x for x in self.user_config if self.user_config[x]["type"] == "set"] @@ -389,7 +391,10 @@ def _check_index( elif details["type"] == "set": self._check_set_index_names(name=name, df=df) - df = self._check_index_dtypes(name=name, config=details, df=df) + try: + df = self._check_index_dtypes(name=name, config=details, df=df) + except ValueError as ex: + raise ValueError(f"{name}: {ex}") input_data[name] = df @@ -454,7 +459,7 @@ def _check_set_index_names(name: str, df: pd.DataFrame) -> None: OtooleIndexError If actual indices do not match expected indices """ - if not df.columns == ["VALUE"]: + if not list(df.columns) == ["VALUE"]: raise OtooleIndexError( resource=name, config_indices=["VALUE"], diff --git 
a/src/otoole/read_strategies.py b/src/otoole/read_strategies.py index 2d680efb..01f05f5a 100644 --- a/src/otoole/read_strategies.py +++ b/src/otoole/read_strategies.py @@ -58,6 +58,7 @@ def _convert_wide_2_narrow(self, df: pd.DataFrame, name: str): if "MODEOFOPERATION" in actual_headers: df = df.rename(columns={"MODEOFOPERATION": "MODE_OF_OPERATION"}) + actual_headers = list(df.columns) if actual_headers[-1] == "VALUE": logger.info( @@ -87,6 +88,11 @@ def _convert_wide_2_narrow(self, df: pd.DataFrame, name: str): except IndexError as ex: logger.debug(f"Could not reshape {name}") raise ex + except KeyError as ex: + logger.debug( + f"Actual headers: {actual_headers}\nConverted headers: {converted_headers}" + ) + raise ex all_headers = converted_headers + ["VALUE"] return narrow[all_headers].set_index(converted_headers) @@ -166,9 +172,13 @@ def read( input_data = {} self._check_for_default_values_csv(filepath) - self._compare_read_to_expected( - names=[f.split(".csv")[0] for f in os.listdir(filepath)] - ) + names = [ + f.split(".csv")[0] + for f in os.listdir(filepath) + if f.split(".")[-1] == "csv" + ] + logger.debug(names) + self._compare_read_to_expected(names=names) default_values = self._read_default_values(self.user_config) @@ -278,12 +288,19 @@ def read( config = self.user_config default_values = self._read_default_values(config) - amply_datafile = self.read_in_datafile(filepath, config) - inputs = self._convert_amply_to_dataframe(amply_datafile, config) - for config_type in ["param", "set"]: - inputs = self._get_missing_input_dataframes(inputs, config_type=config_type) - inputs = self._check_index(inputs) - return inputs, default_values + + # Check filepath exists + if os.path.exists(filepath): + amply_datafile = self.read_in_datafile(filepath, config) + inputs = self._convert_amply_to_dataframe(amply_datafile, config) + for config_type in ["param", "set"]: + inputs = self._get_missing_input_dataframes( + inputs, config_type=config_type + ) + inputs = self._check_index(inputs) + return inputs, default_values + else: + raise FileNotFoundError(f"File not found: {filepath}") def read_in_datafile(self, path_to_datafile: str, config: Dict) -> Amply: """Read in a datafile using the Amply parsing class @@ -322,6 +339,7 @@ def _load_parameter_definitions(self, config: dict) -> str: elif attributes["type"] == "set": elements += "set {};\n".format(name) + logger.debug("Amply Elements: %s", elements) return elements def _convert_amply_to_dataframe( diff --git a/src/otoole/utils.py b/src/otoole/utils.py index 99fc7a0e..3fedc367 100644 --- a/src/otoole/utils.py +++ b/src/otoole/utils.py @@ -26,7 +26,7 @@ def _read_file(open_file, ending): if ending == ".yaml" or ending == ".yml": - contents = load(open_file, Loader=UniqueKeyLoader) # typing: Dict + contents = load(open_file, Loader=UniqueKeyLoader) # typing: Dict[str, Any] elif ending == ".json": contents = json.load(open_file) # typing: Dict else: diff --git a/tests/fixtures/super_simple/csv/AccumulatedAnnualDemand.csv b/tests/fixtures/super_simple/csv/AccumulatedAnnualDemand.csv new file mode 100644 index 00000000..326b28c6 --- /dev/null +++ b/tests/fixtures/super_simple/csv/AccumulatedAnnualDemand.csv @@ -0,0 +1 @@ +REGION,FUEL,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/AnnualEmissionLimit.csv b/tests/fixtures/super_simple/csv/AnnualEmissionLimit.csv new file mode 100644 index 00000000..1fa535a0 --- /dev/null +++ b/tests/fixtures/super_simple/csv/AnnualEmissionLimit.csv @@ -0,0 +1 @@ +REGION,EMISSION,YEAR,VALUE diff --git 
a/tests/fixtures/super_simple/csv/AnnualExogenousEmission.csv b/tests/fixtures/super_simple/csv/AnnualExogenousEmission.csv new file mode 100644 index 00000000..1fa535a0 --- /dev/null +++ b/tests/fixtures/super_simple/csv/AnnualExogenousEmission.csv @@ -0,0 +1 @@ +REGION,EMISSION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/AvailabilityFactor.csv b/tests/fixtures/super_simple/csv/AvailabilityFactor.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/AvailabilityFactor.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/CapacityFactor.csv b/tests/fixtures/super_simple/csv/CapacityFactor.csv new file mode 100644 index 00000000..ba3be6ef --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapacityFactor.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,TIMESLICE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/CapacityOfOneTechnologyUnit.csv b/tests/fixtures/super_simple/csv/CapacityOfOneTechnologyUnit.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapacityOfOneTechnologyUnit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/CapacityToActivityUnit.csv b/tests/fixtures/super_simple/csv/CapacityToActivityUnit.csv new file mode 100644 index 00000000..98b90656 --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapacityToActivityUnit.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,VALUE +BB,gas_plant,1.0 diff --git a/tests/fixtures/super_simple/csv/CapitalCost.csv b/tests/fixtures/super_simple/csv/CapitalCost.csv new file mode 100644 index 00000000..95879aba --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapitalCost.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,YEAR,VALUE +BB,gas_plant,2016,1.03456 diff --git a/tests/fixtures/super_simple/csv/CapitalCostStorage.csv b/tests/fixtures/super_simple/csv/CapitalCostStorage.csv new file mode 100644 index 00000000..a7bcbd7f --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapitalCostStorage.csv @@ -0,0 +1 @@ +REGION,STORAGE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/Conversionld.csv b/tests/fixtures/super_simple/csv/Conversionld.csv new file mode 100644 index 00000000..360887ad --- /dev/null +++ b/tests/fixtures/super_simple/csv/Conversionld.csv @@ -0,0 +1 @@ +TIMESLICE,DAYTYPE,VALUE diff --git a/tests/fixtures/super_simple/csv/Conversionlh.csv b/tests/fixtures/super_simple/csv/Conversionlh.csv new file mode 100644 index 00000000..6fc0a297 --- /dev/null +++ b/tests/fixtures/super_simple/csv/Conversionlh.csv @@ -0,0 +1 @@ +TIMESLICE,DAILYTIMEBRACKET,VALUE diff --git a/tests/fixtures/super_simple/csv/Conversionls.csv b/tests/fixtures/super_simple/csv/Conversionls.csv new file mode 100644 index 00000000..47b6ebde --- /dev/null +++ b/tests/fixtures/super_simple/csv/Conversionls.csv @@ -0,0 +1 @@ +TIMESLICE,SEASON,VALUE diff --git a/tests/fixtures/super_simple/csv/DAILYTIMEBRACKET.csv b/tests/fixtures/super_simple/csv/DAILYTIMEBRACKET.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/DAILYTIMEBRACKET.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/DAYTYPE.csv b/tests/fixtures/super_simple/csv/DAYTYPE.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/DAYTYPE.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/DaySplit.csv b/tests/fixtures/super_simple/csv/DaySplit.csv new file mode 100644 index 00000000..83dab5c1 --- 
/dev/null +++ b/tests/fixtures/super_simple/csv/DaySplit.csv @@ -0,0 +1 @@ +DAILYTIMEBRACKET,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/DaysInDayType.csv b/tests/fixtures/super_simple/csv/DaysInDayType.csv new file mode 100644 index 00000000..7e5dd712 --- /dev/null +++ b/tests/fixtures/super_simple/csv/DaysInDayType.csv @@ -0,0 +1 @@ +SEASON,DAYTYPE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/DepreciationMethod.csv b/tests/fixtures/super_simple/csv/DepreciationMethod.csv new file mode 100644 index 00000000..8f1fa36c --- /dev/null +++ b/tests/fixtures/super_simple/csv/DepreciationMethod.csv @@ -0,0 +1 @@ +REGION,VALUE diff --git a/tests/fixtures/super_simple/csv/DiscountRate.csv b/tests/fixtures/super_simple/csv/DiscountRate.csv new file mode 100644 index 00000000..8f1fa36c --- /dev/null +++ b/tests/fixtures/super_simple/csv/DiscountRate.csv @@ -0,0 +1 @@ +REGION,VALUE diff --git a/tests/fixtures/super_simple/csv/DiscountRateIdv.csv b/tests/fixtures/super_simple/csv/DiscountRateIdv.csv new file mode 100644 index 00000000..1ca1a8e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/DiscountRateIdv.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,VALUE diff --git a/tests/fixtures/super_simple/csv/DiscountRateStorage.csv b/tests/fixtures/super_simple/csv/DiscountRateStorage.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/DiscountRateStorage.csv @@ -0,0 +1 @@ +REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/EMISSION.csv b/tests/fixtures/super_simple/csv/EMISSION.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/EMISSION.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/EmissionActivityRatio.csv b/tests/fixtures/super_simple/csv/EmissionActivityRatio.csv new file mode 100644 index 00000000..7c1c3ffc --- /dev/null +++ b/tests/fixtures/super_simple/csv/EmissionActivityRatio.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,EMISSION,MODE_OF_OPERATION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/EmissionsPenalty.csv b/tests/fixtures/super_simple/csv/EmissionsPenalty.csv new file mode 100644 index 00000000..1fa535a0 --- /dev/null +++ b/tests/fixtures/super_simple/csv/EmissionsPenalty.csv @@ -0,0 +1 @@ +REGION,EMISSION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/FUEL.csv b/tests/fixtures/super_simple/csv/FUEL.csv new file mode 100644 index 00000000..0173ebb5 --- /dev/null +++ b/tests/fixtures/super_simple/csv/FUEL.csv @@ -0,0 +1,3 @@ +VALUE +natural_gas +electricity diff --git a/tests/fixtures/super_simple/csv/FixedCost.csv b/tests/fixtures/super_simple/csv/FixedCost.csv new file mode 100644 index 00000000..eff99453 --- /dev/null +++ b/tests/fixtures/super_simple/csv/FixedCost.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,YEAR,VALUE +BB,gas_plant,2016,9.1101 diff --git a/tests/fixtures/super_simple/csv/InputActivityRatio.csv b/tests/fixtures/super_simple/csv/InputActivityRatio.csv new file mode 100644 index 00000000..cc36f0b5 --- /dev/null +++ b/tests/fixtures/super_simple/csv/InputActivityRatio.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,FUEL,MODE_OF_OPERATION,YEAR,VALUE +BB,gas_plant,natural_gas,1,2016,1.1101 diff --git a/tests/fixtures/super_simple/csv/MODE_OF_OPERATION.csv b/tests/fixtures/super_simple/csv/MODE_OF_OPERATION.csv new file mode 100644 index 00000000..69e52e5d --- /dev/null +++ b/tests/fixtures/super_simple/csv/MODE_OF_OPERATION.csv @@ -0,0 +1,2 @@ +VALUE +1 diff --git a/tests/fixtures/super_simple/csv/MinStorageCharge.csv 
b/tests/fixtures/super_simple/csv/MinStorageCharge.csv new file mode 100644 index 00000000..a7bcbd7f --- /dev/null +++ b/tests/fixtures/super_simple/csv/MinStorageCharge.csv @@ -0,0 +1 @@ +REGION,STORAGE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ModelPeriodEmissionLimit.csv b/tests/fixtures/super_simple/csv/ModelPeriodEmissionLimit.csv new file mode 100644 index 00000000..ccd4bcb3 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ModelPeriodEmissionLimit.csv @@ -0,0 +1 @@ +REGION,EMISSION,VALUE diff --git a/tests/fixtures/super_simple/csv/ModelPeriodExogenousEmission.csv b/tests/fixtures/super_simple/csv/ModelPeriodExogenousEmission.csv new file mode 100644 index 00000000..ccd4bcb3 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ModelPeriodExogenousEmission.csv @@ -0,0 +1 @@ +REGION,EMISSION,VALUE diff --git a/tests/fixtures/super_simple/csv/OperationalLife.csv b/tests/fixtures/super_simple/csv/OperationalLife.csv new file mode 100644 index 00000000..1ca1a8e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/OperationalLife.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,VALUE diff --git a/tests/fixtures/super_simple/csv/OperationalLifeStorage.csv b/tests/fixtures/super_simple/csv/OperationalLifeStorage.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/OperationalLifeStorage.csv @@ -0,0 +1 @@ +REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/OutputActivityRatio.csv b/tests/fixtures/super_simple/csv/OutputActivityRatio.csv new file mode 100644 index 00000000..37406935 --- /dev/null +++ b/tests/fixtures/super_simple/csv/OutputActivityRatio.csv @@ -0,0 +1,3 @@ +REGION,TECHNOLOGY,FUEL,MODE_OF_OPERATION,YEAR,VALUE +BB,gas_import,natural_gas,1,2016,1.0 +BB,gas_plant,electricity,1,2016,1.0 diff --git a/tests/fixtures/super_simple/csv/REGION.csv b/tests/fixtures/super_simple/csv/REGION.csv new file mode 100644 index 00000000..016ac8fc --- /dev/null +++ b/tests/fixtures/super_simple/csv/REGION.csv @@ -0,0 +1,2 @@ +VALUE +BB diff --git a/tests/fixtures/super_simple/csv/REMinProductionTarget.csv b/tests/fixtures/super_simple/csv/REMinProductionTarget.csv new file mode 100644 index 00000000..b55c2264 --- /dev/null +++ b/tests/fixtures/super_simple/csv/REMinProductionTarget.csv @@ -0,0 +1 @@ +REGION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/RETagFuel.csv b/tests/fixtures/super_simple/csv/RETagFuel.csv new file mode 100644 index 00000000..326b28c6 --- /dev/null +++ b/tests/fixtures/super_simple/csv/RETagFuel.csv @@ -0,0 +1 @@ +REGION,FUEL,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/RETagTechnology.csv b/tests/fixtures/super_simple/csv/RETagTechnology.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/RETagTechnology.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ReserveMargin.csv b/tests/fixtures/super_simple/csv/ReserveMargin.csv new file mode 100644 index 00000000..b55c2264 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ReserveMargin.csv @@ -0,0 +1 @@ +REGION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ReserveMarginTagFuel.csv b/tests/fixtures/super_simple/csv/ReserveMarginTagFuel.csv new file mode 100644 index 00000000..326b28c6 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ReserveMarginTagFuel.csv @@ -0,0 +1 @@ +REGION,FUEL,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ReserveMarginTagTechnology.csv b/tests/fixtures/super_simple/csv/ReserveMarginTagTechnology.csv new 
file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ReserveMarginTagTechnology.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ResidualCapacity.csv b/tests/fixtures/super_simple/csv/ResidualCapacity.csv new file mode 100644 index 00000000..1b3716cf --- /dev/null +++ b/tests/fixtures/super_simple/csv/ResidualCapacity.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,YEAR,VALUE +BB,gas_plant,2016,3.1101 diff --git a/tests/fixtures/super_simple/csv/ResidualStorageCapacity.csv b/tests/fixtures/super_simple/csv/ResidualStorageCapacity.csv new file mode 100644 index 00000000..a7bcbd7f --- /dev/null +++ b/tests/fixtures/super_simple/csv/ResidualStorageCapacity.csv @@ -0,0 +1 @@ +REGION,STORAGE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/SEASON.csv b/tests/fixtures/super_simple/csv/SEASON.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/SEASON.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/STORAGE.csv b/tests/fixtures/super_simple/csv/STORAGE.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/STORAGE.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/SpecifiedAnnualDemand.csv b/tests/fixtures/super_simple/csv/SpecifiedAnnualDemand.csv new file mode 100644 index 00000000..b19cdc44 --- /dev/null +++ b/tests/fixtures/super_simple/csv/SpecifiedAnnualDemand.csv @@ -0,0 +1,2 @@ +REGION,FUEL,YEAR,VALUE +BB,electricity,2016,2.1101 diff --git a/tests/fixtures/super_simple/csv/SpecifiedDemandProfile.csv b/tests/fixtures/super_simple/csv/SpecifiedDemandProfile.csv new file mode 100644 index 00000000..dc17f3e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/SpecifiedDemandProfile.csv @@ -0,0 +1,2 @@ +REGION,FUEL,TIMESLICE,YEAR,VALUE +BB,electricity,x,2016,1.0 diff --git a/tests/fixtures/super_simple/csv/StorageLevelStart.csv b/tests/fixtures/super_simple/csv/StorageLevelStart.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/StorageLevelStart.csv @@ -0,0 +1 @@ +REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/StorageMaxChargeRate.csv b/tests/fixtures/super_simple/csv/StorageMaxChargeRate.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/StorageMaxChargeRate.csv @@ -0,0 +1 @@ +REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/StorageMaxDischargeRate.csv b/tests/fixtures/super_simple/csv/StorageMaxDischargeRate.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/StorageMaxDischargeRate.csv @@ -0,0 +1 @@ +REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/TECHNOLOGY.csv b/tests/fixtures/super_simple/csv/TECHNOLOGY.csv new file mode 100644 index 00000000..f563cf92 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TECHNOLOGY.csv @@ -0,0 +1,3 @@ +VALUE +gas_import +gas_plant diff --git a/tests/fixtures/super_simple/csv/TIMESLICE.csv b/tests/fixtures/super_simple/csv/TIMESLICE.csv new file mode 100644 index 00000000..9480ca01 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TIMESLICE.csv @@ -0,0 +1,2 @@ +VALUE +x diff --git a/tests/fixtures/super_simple/csv/TechnologyFromStorage.csv b/tests/fixtures/super_simple/csv/TechnologyFromStorage.csv new file mode 100644 index 00000000..384c871b --- /dev/null +++ b/tests/fixtures/super_simple/csv/TechnologyFromStorage.csv @@ -0,0 +1 @@ 
+REGION,TECHNOLOGY,STORAGE,MODE_OF_OPERATION,VALUE diff --git a/tests/fixtures/super_simple/csv/TechnologyToStorage.csv b/tests/fixtures/super_simple/csv/TechnologyToStorage.csv new file mode 100644 index 00000000..384c871b --- /dev/null +++ b/tests/fixtures/super_simple/csv/TechnologyToStorage.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,STORAGE,MODE_OF_OPERATION,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacity.csv b/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacity.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacity.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacityInvestment.csv b/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacityInvestment.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacityInvestment.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalAnnualMinCapacity.csv b/tests/fixtures/super_simple/csv/TotalAnnualMinCapacity.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalAnnualMinCapacity.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalAnnualMinCapacityInvestment.csv b/tests/fixtures/super_simple/csv/TotalAnnualMinCapacityInvestment.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalAnnualMinCapacityInvestment.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityLowerLimit.csv b/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityLowerLimit.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityLowerLimit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityUpperLimit.csv b/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityUpperLimit.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityUpperLimit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityLowerLimit.csv b/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityLowerLimit.csv new file mode 100644 index 00000000..1ca1a8e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityLowerLimit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityUpperLimit.csv b/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityUpperLimit.csv new file mode 100644 index 00000000..1ca1a8e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityUpperLimit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,VALUE diff --git a/tests/fixtures/super_simple/csv/TradeRoute.csv b/tests/fixtures/super_simple/csv/TradeRoute.csv new file mode 100644 index 00000000..11316319 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TradeRoute.csv @@ -0,0 +1 @@ +REGION,_REGION,FUEL,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/VariableCost.csv b/tests/fixtures/super_simple/csv/VariableCost.csv new file mode 100644 index 00000000..6948a628 --- /dev/null +++ 
b/tests/fixtures/super_simple/csv/VariableCost.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,MODE_OF_OPERATION,YEAR,VALUE +BB,gas_plant,1,2016,9.1202 diff --git a/tests/fixtures/super_simple/csv/YEAR.csv b/tests/fixtures/super_simple/csv/YEAR.csv new file mode 100644 index 00000000..55c26cd2 --- /dev/null +++ b/tests/fixtures/super_simple/csv/YEAR.csv @@ -0,0 +1,2 @@ +VALUE +2016 diff --git a/tests/fixtures/super_simple/csv/YearSplit.csv b/tests/fixtures/super_simple/csv/YearSplit.csv new file mode 100644 index 00000000..9656554c --- /dev/null +++ b/tests/fixtures/super_simple/csv/YearSplit.csv @@ -0,0 +1,2 @@ +TIMESLICE,YEAR,VALUE +x,2016,1.0 diff --git a/tests/fixtures/super_simple/csv/_REGION.csv b/tests/fixtures/super_simple/csv/_REGION.csv new file mode 100644 index 00000000..016ac8fc --- /dev/null +++ b/tests/fixtures/super_simple/csv/_REGION.csv @@ -0,0 +1,2 @@ +VALUE +BB diff --git a/tests/fixtures/super_simple/super_simple.txt b/tests/fixtures/super_simple/super_simple.txt new file mode 100644 index 00000000..6bde7831 --- /dev/null +++ b/tests/fixtures/super_simple/super_simple.txt @@ -0,0 +1,153 @@ +# Model file written by *otoole* +param default 0 : AccumulatedAnnualDemand := +; +param default -1 : AnnualEmissionLimit := +; +param default 0 : AnnualExogenousEmission := +; +param default 1 : AvailabilityFactor := +; +param default 1 : CapacityFactor := +; +param default 0 : CapacityOfOneTechnologyUnit := +; +param default 1 : CapacityToActivityUnit := +; +param default 0 : CapitalCost := +BB gas_plant 2016 1.03456 +; +param default 0 : CapitalCostStorage := +; +param default 0 : Conversionld := +; +param default 0 : Conversionlh := +; +param default 0 : Conversionls := +; +set DAILYTIMEBRACKET := +; +set DAYTYPE := +; +param default 0.00137 : DaySplit := +; +param default 7 : DaysInDayType := +; +param default 1 : DepreciationMethod := +; +param default 0.05 : DiscountRate := +; +param default 0.05 : DiscountRateIdv := +; +param default 0.05 : DiscountRateStorage := +; +set EMISSION := +; +param default 0 : EmissionActivityRatio := +; +param default 0 : EmissionsPenalty := +; +set FUEL := +natural_gas +electricity +; +param default 0 : FixedCost := +BB gas_plant 2016 9.1101 +; +param default 0 : InputActivityRatio := +BB gas_plant natural_gas 1 2016 1.1101 +; +set MODE_OF_OPERATION := +1 +; +param default 0 : MinStorageCharge := +; +param default -1 : ModelPeriodEmissionLimit := +; +param default 0 : ModelPeriodExogenousEmission := +; +param default 1 : OperationalLife := +; +param default 0 : OperationalLifeStorage := +; +param default 0 : OutputActivityRatio := +BB gas_import natural_gas 1 2016 1 +BB gas_plant electricity 1 2016 1 +; +set REGION := +BB +; +param default 0 : REMinProductionTarget := +; +param default 0 : RETagFuel := +; +param default 0 : RETagTechnology := +; +param default 1 : ReserveMargin := +; +param default 0 : ReserveMarginTagFuel := +; +param default 0 : ReserveMarginTagTechnology := +; +param default 0 : ResidualCapacity := +BB gas_plant 2016 3.1101 +; +param default 999 : ResidualStorageCapacity := +; +set SEASON := +; +set STORAGE := +; +param default 0 : SpecifiedAnnualDemand := +BB electricity 2016 2.1101 +; +param default 0 : SpecifiedDemandProfile := +BB electricity x 2016 1 +; +param default 0 : StorageLevelStart := +; +param default 0 : StorageMaxChargeRate := +; +param default 0 : StorageMaxDischargeRate := +; +set TECHNOLOGY := +gas_import +gas_plant +; +set TIMESLICE := +x +; +param default 0 : TechnologyFromStorage := +; +param default 0 : 
TechnologyToStorage := +; +param default -1 : TotalAnnualMaxCapacity := +; +param default -1 : TotalAnnualMaxCapacityInvestment := +; +param default 0 : TotalAnnualMinCapacity := +; +param default 0 : TotalAnnualMinCapacityInvestment := +; +param default 0 : TotalTechnologyAnnualActivityLowerLimit := +; +param default -1 : TotalTechnologyAnnualActivityUpperLimit := +; +param default 0 : TotalTechnologyModelPeriodActivityLowerLimit := +; +param default -1 : TotalTechnologyModelPeriodActivityUpperLimit := +; +param default 0 : TradeRoute := +; +param default 0 : VariableCost := +BB gas_plant 1 2016 9.1202 +; +set YEAR := +2016 +; +param default 0 : YearSplit := +x 2016 1 +; +set _REGION := +BB +; +end; diff --git a/tests/fixtures/super_simple/super_simple.yaml b/tests/fixtures/super_simple/super_simple.yaml new file mode 100644 index 00000000..6fcf713e --- /dev/null +++ b/tests/fixtures/super_simple/super_simple.yaml @@ -0,0 +1,520 @@ +AccumulatedAnnualDemand: + indices: [REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +AnnualEmissionLimit: + indices: [REGION,EMISSION,YEAR] + type: param + dtype: float + default: -1 +AnnualExogenousEmission: + indices: [REGION,EMISSION,YEAR] + type: param + dtype: float + default: 0 +AvailabilityFactor: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 1 +CapacityFactor: + indices: [REGION,TECHNOLOGY,TIMESLICE,YEAR] + type: param + dtype: float + default: 1 +CapacityOfOneTechnologyUnit: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +CapacityToActivityUnit: + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: 1 +CapitalCost: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +CapitalCostStorage: + indices: [REGION,STORAGE,YEAR] + type: param + dtype: float + default: 0 +Conversionld: + indices: [TIMESLICE,DAYTYPE] + type: param + dtype: float + default: 0 +Conversionlh: + indices: [TIMESLICE,DAILYTIMEBRACKET] + type: param + dtype: float + default: 0 +Conversionls: + indices: [TIMESLICE,SEASON] + type: param + dtype: float + default: 0 +DAILYTIMEBRACKET: + dtype: int + type: set +DaysInDayType: + indices: [SEASON,DAYTYPE,YEAR] + type: param + dtype: float + default: 7 +DaySplit: + indices: [DAILYTIMEBRACKET,YEAR] + type: param + dtype: float + default: 0.00137 +DAYTYPE: + dtype: int + type: set +DepreciationMethod: + indices: [REGION] + type: param + dtype: float + default: 1 +DiscountRate: + indices: [REGION] + type: param + dtype: float + default: 0.05 +DiscountRateIdv: + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: 0.05 +DiscountRateStorage: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0.05 +EMISSION: + dtype: str + type: set +EmissionActivityRatio: + indices: [REGION,TECHNOLOGY,EMISSION,MODE_OF_OPERATION,YEAR] + type: param + dtype: float + default: 0 +EmissionsPenalty: + indices: [REGION,EMISSION,YEAR] + type: param + dtype: float + default: 0 +FixedCost: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +FUEL: + dtype: str + type: set +InputActivityRatio: + indices: [REGION,TECHNOLOGY,FUEL,MODE_OF_OPERATION,YEAR] + type: param + dtype: float + default: 0 +MinStorageCharge: + indices: [REGION,STORAGE,YEAR] + type: param + dtype: float + default: 0 +MODE_OF_OPERATION: + dtype: int + type: set +ModelPeriodEmissionLimit: + indices: [REGION,EMISSION] + type: param + dtype: float + default: -1 +ModelPeriodExogenousEmission: + indices: [REGION,EMISSION] + type: param + 
dtype: float + default: 0 +OperationalLife: + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: 1 +OperationalLifeStorage: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0 +OutputActivityRatio: + indices: [REGION,TECHNOLOGY,FUEL,MODE_OF_OPERATION,YEAR] + type: param + dtype: float + default: 0 +REGION: + dtype: str + type: set +_REGION: + dtype: str + type: set +REMinProductionTarget: + indices: [REGION,YEAR] + type: param + dtype: float + default: 0 +ReserveMargin: + indices: [REGION,YEAR] + type: param + dtype: float + default: 1 +ReserveMarginTagFuel: + indices: [REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +ReserveMarginTagTechnology: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +ResidualCapacity: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +ResidualStorageCapacity: + indices: [REGION,STORAGE,YEAR] + type: param + dtype: float + default: 999 +RETagFuel: + indices: [REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +RETagTechnology: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +SEASON: + dtype: int + type: set +SpecifiedAnnualDemand: + indices: [REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +SpecifiedDemandProfile: + indices: [REGION,FUEL,TIMESLICE,YEAR] + type: param + dtype: float + default: 0 +STORAGE: + dtype: str + type: set +StorageLevelStart: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0 +StorageMaxChargeRate: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0 +StorageMaxDischargeRate: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0 +TECHNOLOGY: + dtype: str + type: set +TechnologyFromStorage: + indices: [REGION,TECHNOLOGY,STORAGE,MODE_OF_OPERATION] + type: param + dtype: float + default: 0 +TechnologyToStorage: + indices: [REGION,TECHNOLOGY,STORAGE,MODE_OF_OPERATION] + type: param + dtype: float + default: 0 +TIMESLICE: + dtype: str + type: set +TotalAnnualMaxCapacity: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: -1 +TotalAnnualMaxCapacityInvestment: + short_name: TotalAnnualMaxCapacityInvestmen + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: -1 +TotalAnnualMinCapacity: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +TotalAnnualMinCapacityInvestment: + short_name: TotalAnnualMinCapacityInvestmen + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +TotalTechnologyAnnualActivityLowerLimit: + short_name: TotalTechnologyAnnualActivityLo + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +TotalTechnologyAnnualActivityUpperLimit: + short_name: TotalTechnologyAnnualActivityUp + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: -1 +TotalTechnologyModelPeriodActivityLowerLimit: + short_name: TotalTechnologyModelPeriodActLo + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: 0 +TotalTechnologyModelPeriodActivityUpperLimit: + short_name: TotalTechnologyModelPeriodActUp + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: -1 +TradeRoute: + indices: [REGION,_REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +VariableCost: + indices: [REGION,TECHNOLOGY,MODE_OF_OPERATION,YEAR] + type: param + dtype: float + default: 0 +YEAR: + dtype: int + type: set +YearSplit: + indices: [TIMESLICE,YEAR] + type: param + dtype: float + default: 0 
+AnnualEmissions: + indices: [REGION,EMISSION,YEAR] + type: result + dtype: float + default: 0 + calculated: True +AccumulatedNewCapacity: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +AnnualFixedOperatingCost: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +AnnualTechnologyEmission: + indices: [REGION, TECHNOLOGY, EMISSION, YEAR] + type: result + dtype: float + default: 0 + calculated: True +AnnualTechnologyEmissionByMode: + indices: [REGION, TECHNOLOGY, EMISSION, MODE_OF_OPERATION, YEAR] + type: result + dtype: float + default: 0 + calculated: True +AnnualVariableOperatingCost: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +CapitalInvestment: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +Demand: + indices: [REGION, TIMESLICE, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +DiscountedSalvageValue: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +DiscountedTechnologyEmissionsPenalty: + short_name: DiscountedTechEmissionsPenalty + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +NewCapacity: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +NewStorageCapacity: + indices: [REGION, STORAGE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +NumberOfNewTechnologyUnits: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +ProductionByTechnology: + indices: [REGION, TIMESLICE, TECHNOLOGY, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +ProductionByTechnologyAnnual: + indices: [REGION, TECHNOLOGY, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +RateOfActivity: + indices: [REGION, TIMESLICE, TECHNOLOGY, MODE_OF_OPERATION, YEAR] + type: result + dtype: float + default: 0 + calculated: False +RateOfProductionByTechnology: + indices: [REGION, TIMESLICE, TECHNOLOGY, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +RateOfProductionByTechnologyByMode: + short_name: RateOfProductionByTechByMode + indices: [REGION, TIMESLICE, TECHNOLOGY, MODE_OF_OPERATION, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +RateOfUseByTechnology: + indices: [REGION, TIMESLICE, TECHNOLOGY, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +RateOfUseByTechnologyByMode: + indices: [REGION, TIMESLICE, TECHNOLOGY, MODE_OF_OPERATION, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +SalvageValue: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +SalvageValueStorage: + indices: [REGION, STORAGE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +StorageLevelDayTypeFinish: + indices: [REGION, STORAGE, SEASON, DAYTYPE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +StorageLevelDayTypeStart: + indices: [REGION, STORAGE, SEASON, DAYTYPE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +StorageLevelSeasonStart: + indices: [REGION, STORAGE, SEASON, YEAR] + type: result + dtype: float + default: 0 + calculated: False +StorageLevelYearStart: + indices: [REGION, STORAGE, YEAR] + type: result + dtype: float + default: 
0 + calculated: False +StorageLevelYearFinish: + indices: [REGION, STORAGE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +TotalAnnualTechnologyActivityByMode: + short_name: TotalAnnualTechActivityByMode + indices: [REGION, TECHNOLOGY, MODE_OF_OPERATION, YEAR] + type: result + dtype: float + default: 0 + calculated: True +TotalCapacityAnnual: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +TotalDiscountedCost: + indices: [REGION,YEAR] + type: result + dtype: float + default: 0 + calculated: True +TotalTechnologyAnnualActivity: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +TotalTechnologyModelPeriodActivity: + short_name: TotalTechModelPeriodActivity + indices: [REGION, TECHNOLOGY] + type: result + dtype: float + default: 0 + calculated: True +Trade: + indices: [REGION,TIMESLICE,FUEL,YEAR] + type: result + dtype: float + default: 0 + calculated: False +UseByTechnology: + indices: [REGION,TIMESLICE,TECHNOLOGY,FUEL,YEAR] + type: result + dtype: float + default: 0 + calculated: False diff --git a/tests/fixtures/super_simple/super_simple_gnu.lp b/tests/fixtures/super_simple/super_simple_gnu.lp new file mode 100644 index 00000000..ba9e2917 --- /dev/null +++ b/tests/fixtures/super_simple/super_simple_gnu.lp @@ -0,0 +1,222 @@ +\* Problem: OSeMOSYS *\ + +Minimize + cost: + TotalDiscountedCost(BB,2016) + +Subject To + EQ_SpecifiedDemand(BB,x,electricity,2016): + - RateOfDemand(BB,x,electricity,2016) = -2.1101 + CAa1_TotalNewCapacity(BB,gas_import,2016): + - NewCapacity(BB,gas_import,2016) + + AccumulatedNewCapacity(BB,gas_import,2016) = -0 + CAa1_TotalNewCapacity(BB,gas_plant,2016): + - NewCapacity(BB,gas_plant,2016) + + AccumulatedNewCapacity(BB,gas_plant,2016) = -0 + CAa2_TotalAnnualCapacity(BB,gas_import,2016): + + AccumulatedNewCapacity(BB,gas_import,2016) + - TotalCapacityAnnual(BB,gas_import,2016) = -0 + CAa2_TotalAnnualCapacity(BB,gas_plant,2016): + + AccumulatedNewCapacity(BB,gas_plant,2016) + - TotalCapacityAnnual(BB,gas_plant,2016) = -3.1101 + CAa3_TotalActivityOfEachTechnology(BB,gas_import,x,2016): + + RateOfActivity(BB,x,gas_import,1,2016) + - RateOfTotalActivity(BB,gas_import,x,2016) = -0 + CAa3_TotalActivityOfEachTechnology(BB,gas_plant,x,2016): + + RateOfActivity(BB,x,gas_plant,1,2016) + - RateOfTotalActivity(BB,gas_plant,x,2016) = -0 + CAa4_Constraint_Capacity(BB,x,gas_import,2016): + - TotalCapacityAnnual(BB,gas_import,2016) + + RateOfTotalActivity(BB,gas_import,x,2016) <= -0 + CAa4_Constraint_Capacity(BB,x,gas_plant,2016): + - TotalCapacityAnnual(BB,gas_plant,2016) + + RateOfTotalActivity(BB,gas_plant,x,2016) <= -0 + EBa1_RateOfFuelProduction1(BB,x,natural_gas,gas_import,1,2016): + + RateOfActivity(BB,x,gas_import,1,2016) + - RateOfProductionByTechnologyByMode(BB,x,gas_import,1,natural_gas,2016) + = -0 + EBa1_RateOfFuelProduction1(BB,x,electricity,gas_plant,1,2016): + + RateOfActivity(BB,x,gas_plant,1,2016) + - RateOfProductionByTechnologyByMode(BB,x,gas_plant,1,electricity,2016) + = -0 + EBa2_RateOfFuelProduction2(BB,x,natural_gas,gas_import,2016): + + RateOfProductionByTechnologyByMode(BB,x,gas_import,1,natural_gas,2016) + - RateOfProductionByTechnology(BB,x,gas_import,natural_gas,2016) = -0 + EBa2_RateOfFuelProduction2(BB,x,electricity,gas_plant,2016): + + RateOfProductionByTechnologyByMode(BB,x,gas_plant,1,electricity,2016) + - RateOfProductionByTechnology(BB,x,gas_plant,electricity,2016) = -0 + EBa3_RateOfFuelProduction3(BB,x,natural_gas,2016): + 
+ RateOfProductionByTechnology(BB,x,gas_import,natural_gas,2016) + - RateOfProduction(BB,x,natural_gas,2016) = -0 + EBa3_RateOfFuelProduction3(BB,x,electricity,2016): + + RateOfProductionByTechnology(BB,x,gas_plant,electricity,2016) + - RateOfProduction(BB,x,electricity,2016) = -0 + EBa4_RateOfFuelUse1(BB,x,natural_gas,gas_plant,1,2016): + + 1.1101 RateOfActivity(BB,x,gas_plant,1,2016) + - RateOfUseByTechnologyByMode(BB,x,gas_plant,1,natural_gas,2016) = -0 + EBa5_RateOfFuelUse2(BB,x,natural_gas,gas_plant,2016): + + RateOfUseByTechnologyByMode(BB,x,gas_plant,1,natural_gas,2016) + - RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) = -0 + EBa6_RateOfFuelUse3(BB,x,natural_gas,2016): + + RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) + + RateOfUseByTechnology(BB,x,gas_import,natural_gas,2016) + - RateOfUse(BB,x,natural_gas,2016) = -0 + EBa7_EnergyBalanceEachTS1(BB,x,natural_gas,2016): + + RateOfProduction(BB,x,natural_gas,2016) + - Production(BB,x,natural_gas,2016) = -0 + EBa7_EnergyBalanceEachTS1(BB,x,electricity,2016): + + RateOfProduction(BB,x,electricity,2016) + - Production(BB,x,electricity,2016) = -0 + EBa8_EnergyBalanceEachTS2(BB,x,natural_gas,2016): + + RateOfUse(BB,x,natural_gas,2016) - Use(BB,x,natural_gas,2016) = -0 + EBa9_EnergyBalanceEachTS3(BB,x,electricity,2016): + + RateOfDemand(BB,x,electricity,2016) - Demand(BB,x,electricity,2016) + = -0 + EBa11_EnergyBalanceEachTS5(BB,x,natural_gas,2016): + - Demand(BB,x,natural_gas,2016) + Production(BB,x,natural_gas,2016) + - Use(BB,x,natural_gas,2016) >= -0 + EBa11_EnergyBalanceEachTS5(BB,x,electricity,2016): + - Demand(BB,x,electricity,2016) + Production(BB,x,electricity,2016) + - Use(BB,x,electricity,2016) >= -0 + EBb1_EnergyBalanceEachYear1(BB,natural_gas,2016): + + Production(BB,x,natural_gas,2016) + - ProductionAnnual(BB,natural_gas,2016) = -0 + EBb1_EnergyBalanceEachYear1(BB,electricity,2016): + + Production(BB,x,electricity,2016) + - ProductionAnnual(BB,electricity,2016) = -0 + EBb2_EnergyBalanceEachYear2(BB,natural_gas,2016): + + Use(BB,x,natural_gas,2016) - UseAnnual(BB,natural_gas,2016) = -0 + EBb2_EnergyBalanceEachYear2(BB,electricity,2016): + + Use(BB,x,electricity,2016) - UseAnnual(BB,electricity,2016) = -0 + EBb4_EnergyBalanceEachYear4(BB,natural_gas,2016): + + ProductionAnnual(BB,natural_gas,2016) + - UseAnnual(BB,natural_gas,2016) >= -0 + EBb4_EnergyBalanceEachYear4(BB,electricity,2016): + + ProductionAnnual(BB,electricity,2016) + - UseAnnual(BB,electricity,2016) >= -0 + Acc1_FuelProductionByTechnology(BB,x,gas_import,natural_gas,2016): + + RateOfProductionByTechnology(BB,x,gas_import,natural_gas,2016) + - ProductionByTechnology(BB,x,gas_import,natural_gas,2016) = -0 + Acc1_FuelProductionByTechnology(BB,x,gas_plant,electricity,2016): + + RateOfProductionByTechnology(BB,x,gas_plant,electricity,2016) + - ProductionByTechnology(BB,x,gas_plant,electricity,2016) = -0 + Acc2_FuelUseByTechnology(BB,x,gas_plant,natural_gas,2016): + + RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) + - UseByTechnology(BB,x,gas_plant,natural_gas,2016) = -0 + Acc3_AverageAnnualRateOfActivity(BB,gas_import,1,2016): + + RateOfActivity(BB,x,gas_import,1,2016) + - TotalAnnualTechnologyActivityByMode(BB,gas_import,1,2016) = -0 + Acc3_AverageAnnualRateOfActivity(BB,gas_plant,1,2016): + + RateOfActivity(BB,x,gas_plant,1,2016) + - TotalAnnualTechnologyActivityByMode(BB,gas_plant,1,2016) = -0 + Acc4_ModelPeriodCostByRegion(BB): + TotalDiscountedCost(BB,2016) + - ModelPeriodCostByRegion(BB) = -0 + 
CC1_UndiscountedCapitalInvestment(BB,gas_import,2016): + + 1e-05 NewCapacity(BB,gas_import,2016) + - CapitalInvestment(BB,gas_import,2016) = -0 + CC1_UndiscountedCapitalInvestment(BB,gas_plant,2016): + + 1.03456 NewCapacity(BB,gas_plant,2016) + - CapitalInvestment(BB,gas_plant,2016) = -0 + CC2_DiscountingCapitalInvestment(BB,gas_import,2016): + + CapitalInvestment(BB,gas_import,2016) + - DiscountedCapitalInvestment(BB,gas_import,2016) = -0 + CC2_DiscountingCapitalInvestment(BB,gas_plant,2016): + + CapitalInvestment(BB,gas_plant,2016) + - DiscountedCapitalInvestment(BB,gas_plant,2016) = -0 + SV3_SalvageValueAtEndOfPeriod3(BB,gas_import,2016): + + SalvageValue(BB,gas_import,2016) = -0 + SV3_SalvageValueAtEndOfPeriod3(BB,gas_plant,2016): + + SalvageValue(BB,gas_plant,2016) = -0 + SV4_SalvageValueDiscountedToStartYear(BB,gas_import,2016): + - 0.952380952380952 SalvageValue(BB,gas_import,2016) + + DiscountedSalvageValue(BB,gas_import,2016) = -0 + SV4_SalvageValueDiscountedToStartYear(BB,gas_plant,2016): + - 0.952380952380952 SalvageValue(BB,gas_plant,2016) + + DiscountedSalvageValue(BB,gas_plant,2016) = -0 + OC1_OperatingCostsVariable(BB,gas_plant,x,2016): + + 9.1202 TotalAnnualTechnologyActivityByMode(BB,gas_plant,1,2016) + - AnnualVariableOperatingCost(BB,gas_plant,2016) = -0 + OC2_OperatingCostsFixedAnnual(BB,gas_import,2016): + - AnnualFixedOperatingCost(BB,gas_import,2016) = -0 + OC2_OperatingCostsFixedAnnual(BB,gas_plant,2016): + + 9.1101 TotalCapacityAnnual(BB,gas_plant,2016) + - AnnualFixedOperatingCost(BB,gas_plant,2016) = -0 + OC3_OperatingCostsTotalAnnual(BB,gas_import,2016): + - OperatingCost(BB,gas_import,2016) + + AnnualVariableOperatingCost(BB,gas_import,2016) + + AnnualFixedOperatingCost(BB,gas_import,2016) = -0 + OC3_OperatingCostsTotalAnnual(BB,gas_plant,2016): + - OperatingCost(BB,gas_plant,2016) + + AnnualVariableOperatingCost(BB,gas_plant,2016) + + AnnualFixedOperatingCost(BB,gas_plant,2016) = -0 + OC4_DiscountedOperatingCostsTotalAnnual(BB,gas_import,2016): + + 0.975900072948533 OperatingCost(BB,gas_import,2016) + - DiscountedOperatingCost(BB,gas_import,2016) = -0 + OC4_DiscountedOperatingCostsTotalAnnual(BB,gas_plant,2016): + + 0.975900072948533 OperatingCost(BB,gas_plant,2016) + - DiscountedOperatingCost(BB,gas_plant,2016) = -0 + TDC1_TotalDiscountedCostByTechnology(BB,gas_import,2016): + + DiscountedCapitalInvestment(BB,gas_import,2016) + - DiscountedSalvageValue(BB,gas_import,2016) + + DiscountedOperatingCost(BB,gas_import,2016) + - TotalDiscountedCostByTechnology(BB,gas_import,2016) + + DiscountedTechnologyEmissionsPenalty(BB,gas_import,2016) = -0 + TDC1_TotalDiscountedCostByTechnology(BB,gas_plant,2016): + + DiscountedCapitalInvestment(BB,gas_plant,2016) + - DiscountedSalvageValue(BB,gas_plant,2016) + + DiscountedOperatingCost(BB,gas_plant,2016) + - TotalDiscountedCostByTechnology(BB,gas_plant,2016) + + DiscountedTechnologyEmissionsPenalty(BB,gas_plant,2016) = -0 + TDC2_TotalDiscountedCost(BB,2016): + + TotalDiscountedCostByTechnology(BB,gas_import,2016) + + TotalDiscountedCostByTechnology(BB,gas_plant,2016) + - TotalDiscountedCost(BB,2016) = -0 + AAC1_TotalAnnualTechnologyActivity(BB,gas_import,2016): + + RateOfTotalActivity(BB,gas_import,x,2016) + - TotalTechnologyAnnualActivity(BB,gas_import,2016) = -0 + AAC1_TotalAnnualTechnologyActivity(BB,gas_plant,2016): + + RateOfTotalActivity(BB,gas_plant,x,2016) + - TotalTechnologyAnnualActivity(BB,gas_plant,2016) = -0 + TAC1_TotalModelHorizonTechnologyActivity(BB,gas_import): + + 
TotalTechnologyAnnualActivity(BB,gas_import,2016) + - TotalTechnologyModelPeriodActivity(BB,gas_import) = -0 + TAC1_TotalModelHorizonTechnologyActivity(BB,gas_plant): + + TotalTechnologyAnnualActivity(BB,gas_plant,2016) + - TotalTechnologyModelPeriodActivity(BB,gas_plant) = -0 + RM1_ReserveMargin_TechnologiesIncluded_In_Activity_Units(BB,x,2016): + - TotalCapacityInReserveMargin(BB,2016) = -0 + RM2_ReserveMargin_FuelsIncluded(BB,x,2016): + - DemandNeedingReserveMargin(BB,x,2016) = -0 + RM3_ReserveMargin_Constraint(BB,x,2016): + - TotalCapacityInReserveMargin(BB,2016) + + DemandNeedingReserveMargin(BB,x,2016) <= -0 + RE1_FuelProductionByTechnologyAnnual(BB,gas_import,natural_gas,2016): + + ProductionByTechnology(BB,x,gas_import,natural_gas,2016) + - ProductionByTechnologyAnnual(BB,gas_import,natural_gas,2016) = -0 + RE1_FuelProductionByTechnologyAnnual(BB,gas_plant,electricity,2016): + + ProductionByTechnology(BB,x,gas_plant,electricity,2016) + - ProductionByTechnologyAnnual(BB,gas_plant,electricity,2016) = -0 + RE2_TechIncluded(BB,2016): - TotalREProductionAnnual(BB,2016) = -0 + RE3_FuelIncluded(BB,2016): + - RETotalProductionOfTargetFuelAnnual(BB,2016) = -0 + RE4_EnergyConstraint(BB,2016): - TotalREProductionAnnual(BB,2016) <= -0 + RE5_FuelUseByTechnologyAnnual(BB,gas_plant,natural_gas,2016): + + RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) + - UseByTechnologyAnnual(BB,gas_plant,natural_gas,2016) = -0 + E4_EmissionsPenaltyByTechnology(BB,gas_import,2016): + - AnnualTechnologyEmissionsPenalty(BB,gas_import,2016) = -0 + E4_EmissionsPenaltyByTechnology(BB,gas_plant,2016): + - AnnualTechnologyEmissionsPenalty(BB,gas_plant,2016) = -0 + E5_DiscountedEmissionsPenaltyByTechnology(BB,gas_import,2016): + + 0.975900072948533 AnnualTechnologyEmissionsPenalty(BB,gas_import,2016) + - DiscountedTechnologyEmissionsPenalty(BB,gas_import,2016) = -0 + E5_DiscountedEmissionsPenaltyByTechnology(BB,gas_plant,2016): + + 0.975900072948533 AnnualTechnologyEmissionsPenalty(BB,gas_plant,2016) + - DiscountedTechnologyEmissionsPenalty(BB,gas_plant,2016) = -0 + +Bounds + TotalTechnologyModelPeriodActivity(BB,gas_import) free + TotalTechnologyModelPeriodActivity(BB,gas_plant) free + TotalREProductionAnnual(BB,2016) free + RETotalProductionOfTargetFuelAnnual(BB,2016) free + +End diff --git a/tests/fixtures/super_simple/super_simple_gnu.sol b/tests/fixtures/super_simple/super_simple_gnu.sol new file mode 100644 index 00000000..f87af1b1 --- /dev/null +++ b/tests/fixtures/super_simple/super_simple_gnu.sol @@ -0,0 +1,48 @@ +Optimal - objective value 46.43125659 + 0 TotalDiscountedCost(BB,2016) 46.431257 0 + 1 RateOfDemand(BB,x,electricity,2016) 2.1101 0 + 2 NewCapacity(BB,gas_import,2016) 2.342422 0 + 3 AccumulatedNewCapacity(BB,gas_import,2016) 2.342422 0 + 6 TotalCapacityAnnual(BB,gas_import,2016) 2.342422 0 + 7 TotalCapacityAnnual(BB,gas_plant,2016) 3.1101 0 + 8 RateOfActivity(BB,x,gas_import,1,2016) 2.342422 0 + 9 RateOfTotalActivity(BB,gas_import,x,2016) 2.342422 0 + 10 RateOfActivity(BB,x,gas_plant,1,2016) 2.1101 0 + 11 RateOfTotalActivity(BB,gas_plant,x,2016) 2.1101 0 + 12 RateOfProductionByTechnologyByMode(BB,x,gas_import,1,natural_gas,2016) 2.342422 0 + 13 RateOfProductionByTechnologyByMode(BB,x,gas_plant,1,electricity,2016) 2.1101 0 + 14 RateOfProductionByTechnology(BB,x,gas_import,natural_gas,2016) 2.342422 0 + 15 RateOfProductionByTechnology(BB,x,gas_plant,electricity,2016) 2.1101 0 + 16 RateOfProduction(BB,x,natural_gas,2016) 2.342422 0 + 17 RateOfProduction(BB,x,electricity,2016) 2.1101 0 + 18 
RateOfUseByTechnologyByMode(BB,x,gas_plant,1,natural_gas,2016) 2.342422 0 + 19 RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) 2.342422 0 + 21 RateOfUse(BB,x,natural_gas,2016) 2.342422 0 + 22 Production(BB,x,natural_gas,2016) 2.342422 0 + 23 Production(BB,x,electricity,2016) 2.1101 0 + 24 Use(BB,x,natural_gas,2016) 2.342422 0 + 25 Demand(BB,x,electricity,2016) 2.1101 0 + 28 ProductionAnnual(BB,natural_gas,2016) 2.342422 0 + 29 ProductionAnnual(BB,electricity,2016) 2.1101 0 + 30 UseAnnual(BB,natural_gas,2016) 2.342422 0 + 32 ProductionByTechnology(BB,x,gas_import,natural_gas,2016) 2.342422 0 + 33 ProductionByTechnology(BB,x,gas_plant,electricity,2016) 2.1101 0 + 34 UseByTechnology(BB,x,gas_plant,natural_gas,2016) 2.342422 0 + 35 TotalAnnualTechnologyActivityByMode(BB,gas_import,1,2016) 2.342422 0 + 36 TotalAnnualTechnologyActivityByMode(BB,gas_plant,1,2016) 2.1101 0 + 37 ModelPeriodCostByRegion(BB) 46.431257 0 + 38 CapitalInvestment(BB,gas_import,2016) 2.342422e-05 0 + 40 DiscountedCapitalInvestment(BB,gas_import,2016) 2.342422e-05 0 + 46 AnnualVariableOperatingCost(BB,gas_plant,2016) 19.244534 0 + 48 AnnualFixedOperatingCost(BB,gas_plant,2016) 28.333322 0 + 51 OperatingCost(BB,gas_plant,2016) 47.577856 0 + 53 DiscountedOperatingCost(BB,gas_plant,2016) 46.431233 1.110223e-16 + 54 TotalDiscountedCostByTechnology(BB,gas_import,2016) 2.342422e-05 0 + 56 TotalDiscountedCostByTechnology(BB,gas_plant,2016) 46.431233 0 + 58 TotalTechnologyAnnualActivity(BB,gas_import,2016) 2.342422 0 + 59 TotalTechnologyAnnualActivity(BB,gas_plant,2016) 2.1101 0 + 60 TotalTechnologyModelPeriodActivity(BB,gas_import) 2.342422 0 + 61 TotalTechnologyModelPeriodActivity(BB,gas_plant) 2.1101 0 + 64 ProductionByTechnologyAnnual(BB,gas_import,natural_gas,2016) 2.342422 0 + 65 ProductionByTechnologyAnnual(BB,gas_plant,electricity,2016) 2.1101 0 + 68 UseByTechnologyAnnual(BB,gas_plant,natural_gas,2016) 2.342422 0 diff --git a/tests/test_convert.py b/tests/test_convert.py index 39b3ed69..00a95249 100644 --- a/tests/test_convert.py +++ b/tests/test_convert.py @@ -1,46 +1,25 @@ -""" - -read_strategy = None -write_strategy = None - -if args.from_format == "datafile": - read_strategy = ReadDatafile() -elif args.from_format == "csv": - read_strategy = ReadCsv() -elif args.from_format == "excel": - read_strategy = ReadExcel() - -if args.to_format == "excel": - write_strategy = WriteExcel() -elif args.to_format == "datafile": - write_strategy = WriteDatafile() -elif args.to_format == "csv": - write_strategy = WriteCsv() - -if read_strategy and write_strategy: - context = Context(read_strategy, write_strategy) - context.convert(args.from_path, args.to_path) -else: - raise NotImplementedError(msg) +"""This module tests the public API of the otoole package """ import os from tempfile import NamedTemporaryFile, TemporaryDirectory -from otoole import Context, ReadExcel, WriteCsv, WriteDatafile +from pytest import raises + +from otoole import convert, convert_results class TestConvert: - def test_convert_excel_to_datafile(self, user_config): + """Test the convert function""" - read_strategy = ReadExcel(user_config) - write_strategy = WriteDatafile(user_config) - context = Context(read_strategy, write_strategy) + def test_convert_excel_to_datafile(self): + """Test converting from Excel to datafile""" + user_config = os.path.join("tests", "fixtures", "config.yaml") tmpfile = NamedTemporaryFile() from_path = os.path.join("tests", "fixtures", "combined_inputs.xlsx") - context.convert(from_path, tmpfile.name) + convert(user_config, 
"excel", "datafile", from_path, tmpfile.name) tmpfile.seek(0) actual = tmpfile.readlines() @@ -51,16 +30,14 @@ def test_convert_excel_to_datafile(self, user_config): assert actual[2] == b"09_ROK d_bld_2_coal_products 2017 20.8921\n" assert actual[8996] == b"param default 1 : DepreciationMethod :=\n" - def test_convert_excel_to_csv(self, user_config): - - read_strategy = ReadExcel(user_config) - write_strategy = WriteCsv(user_config) - context = Context(read_strategy, write_strategy) + def test_convert_excel_to_csv(self): + """Test converting from Excel to CSV""" tmpfile = TemporaryDirectory() + user_config = os.path.join("tests", "fixtures", "config.yaml") from_path = os.path.join("tests", "fixtures", "combined_inputs.xlsx") - context.convert(from_path, tmpfile.name) + convert(user_config, "excel", "csv", from_path, tmpfile.name) with open(os.path.join(tmpfile.name, "EMISSION.csv")) as csv_file: csv_file.seek(0) @@ -69,3 +46,83 @@ def test_convert_excel_to_csv(self, user_config): assert actual[-1] == "NOX\n" assert actual[0] == "VALUE\n" assert actual[1] == "CO2\n" + + +class TestConvertResults: + """Test the convert_results function""" + + def test_convert_results_cbc_csv(self): + """Test converting CBC solution file to folder of CSVs""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + from_format = "cbc" + to_format = "csv" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" + ) + tmpfile = TemporaryDirectory() + to_path = tmpfile.name + input_csvs = os.path.join("tests", "fixtures", "super_simple", "csv") + + result = convert_results( + config, from_format, to_format, from_path, to_path, input_csvs=input_csvs + ) + assert result is True + + with open(os.path.join(tmpfile.name, "NewCapacity.csv")) as csv_file: + csv_file.seek(0) + actual = csv_file.readlines() + + assert actual[0] == "REGION,TECHNOLOGY,YEAR,VALUE\n" + assert actual[-1] == "BB,gas_import,2016,2.342422\n" + + def test_convert_results_cbc_csv_datafile(self): + """Test converting CBC solution file to folder of CSVs""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + from_format = "cbc" + to_format = "csv" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" + ) + tmpfile = TemporaryDirectory() + to_path = tmpfile.name + input_datafile = os.path.join( + "tests", "fixtures", "super_simple", "super_simple.txt" + ) + + result = convert_results( + config, + from_format, + to_format, + from_path, + to_path, + input_datafile=input_datafile, + ) + assert result is True + + with open(os.path.join(tmpfile.name, "NewCapacity.csv")) as csv_file: + csv_file.seek(0) + actual = csv_file.readlines() + + assert actual[0] == "REGION,TECHNOLOGY,YEAR,VALUE\n" + assert actual[-1] == "BB,gas_import,2016,2.342422\n" + + def test_convert_results_cbc_csv_raises(self): + """Test converting CBC solution file to folder of CSVs""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + from_format = "cbc" + to_format = "csv" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" + ) + tmpfile = TemporaryDirectory() + to_path = tmpfile.name + + with raises(FileNotFoundError): + convert_results( + config, + from_format, + to_format, + from_path, + to_path, + input_csvs="not_a_path", + ) diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index b962a157..39cbd5a9 100644 --- a/tests/test_read_strategies.py +++ 
b/tests/test_read_strategies.py @@ -7,9 +7,9 @@ from amply import Amply from pytest import mark, raises -from otoole import ReadCsv, ReadDatafile, ReadExcel, ReadMemory from otoole.exceptions import OtooleDeprecationError, OtooleError from otoole.preprocess.longify_data import check_datatypes +from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel, ReadMemory from otoole.results.results import ( ReadCbc, ReadCplex, From ba4ac35c9e1f9bf717485106a5595f75fa5e1fcb Mon Sep 17 00:00:00 2001 From: Will Usher Date: Tue, 20 Jun 2023 22:01:22 +0200 Subject: [PATCH 002/103] Added a test for cli call to convert_results --- src/otoole/cli.py | 22 ++++++++++++++-------- tests/test_cli.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/src/otoole/cli.py b/src/otoole/cli.py index 9ff2066a..45fff461 100644 --- a/src/otoole/cli.py +++ b/src/otoole/cli.py @@ -97,14 +97,14 @@ def validate_model(args): def _result_matrix(args): convert_results( args.config, - args.input_datapackage, - args.input_csvs, - args.input_datafile, - args.to_path, - args.from_path, args.from_format, args.to_format, - args.write_defaults, + args.from_path, + args.to_path, + input_datapackage=args.input_datapackage, + input_csvs=args.input_csvs, + input_datafile=args.input_datafile, + write_defaults=args.write_defaults, ) @@ -126,7 +126,8 @@ def _conversion_matrix(args): args.to_format, args.from_path, args.to_path, - args.write_defaults, + write_defaults=args.write_defaults, + keep_whitespace=args.keep_whitespace, ) @@ -222,9 +223,14 @@ def get_parser(): help="Input GNUMathProg datafile required for OSeMOSYS short or fast results", default=None, ) + result_parser.add_argument( + "--input_csvs", + help="Input folder of CSVs required for OSeMOSYS short or fast results", + default=None, + ) result_parser.add_argument( "--input_datapackage", - help="Deprecated", + help="Deprecated. 
Use --input_csvs instead", default=None, ) result_parser.add_argument("config", help="Path to config YAML file") diff --git a/tests/test_cli.py b/tests/test_cli.py index 779af14e..b7b6294f 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -7,6 +7,35 @@ from otoole import __version__ +class TestResults: + """Test the conversion of results via the command line interface""" + + def test_convert_results(self): + """Test converting CBC solution file to folder of CSVs""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + super_simple_csvs = os.path.join("tests", "fixtures", "super_simple", "csv") + from_format = "cbc" + to_format = "csv" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" + ) + to_path = mkdtemp() + commands = [ + "otoole", + "results", + from_format, + to_format, + from_path, + to_path, + config, + "--input_csvs", + super_simple_csvs, + ] + actual = run(commands, capture_output=True) + assert actual.returncode == 0, print(actual.stdout) + assert os.path.exists(os.path.join(to_path, "NewCapacity.csv")) + + class TestConvert: def test_version(self): result = run(["otoole", "--version"], capture_output=True) From 19b78394fc3a6fe052532e33b36e62c5abdce1fa Mon Sep 17 00:00:00 2001 From: Will Usher Date: Tue, 20 Jun 2023 22:39:21 +0200 Subject: [PATCH 003/103] Add documentation for Python API --- docs/convert.rst | 37 +++++++++++++++++++++++++++++++++++++ docs/index.rst | 5 +++-- src/otoole/convert.py | 29 ++++++++++++++++++++++------- 3 files changed, 62 insertions(+), 9 deletions(-) create mode 100644 docs/convert.rst diff --git a/docs/convert.rst b/docs/convert.rst new file mode 100644 index 00000000..16111aec --- /dev/null +++ b/docs/convert.rst @@ -0,0 +1,37 @@ +.. _convert: + +========== +Python API +========== + +otoole also provides a Python API to access all the features available from the command line tool. + +Converting between formats +-------------------------- + +``otoole`` currently supports conversion between the following formats: + +- Excel +- A folder of CSV files +- GNU MathProg datafile + +>>> from otoole import convert +>>> convert('my_model.yaml', 'excel', 'csv', 'my_model.xlsx', 'my_model_csvs') + +See :py:func:`otoole.convert.convert` for more details + +Converting solver results to a folder of CSV files +-------------------------------------------------- + +The ``convert_results`` function creates a folder of CSV result files from a CBC_, CLP_, +Gurobi_ or CPLEX_ solution file:: + +>>> from otoole import convert_results +>>> convert_results('my_model.yaml', 'cbc', 'csv', 'my_model.sol', 'my_model_csvs', input_datafile='my_model.dat') + +See :func:`otoole.convert.convert_results` for more details + +.. _CBC: https://github.com/coin-or/Cbc +.. _CLP: https://github.com/coin-or/Clp +.. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer +.. _Gurobi: https://www.gurobi.com/ diff --git a/docs/index.rst b/docs/index.rst index 568e5ed6..2cc3d0cd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -5,8 +5,8 @@ Welcome to the documentation of ``otoole``! =========================================== **otoole**, or **O**\ SeMOSYS **tool**\ s for **e**\ nergy work, is a Python package -which provides a command-line interface for users of OSeMOSYS. The aim of the package is -to provide commonly used pre- and post-processing steps when working with OSeMOSYS models. +which provides a command-line interface and Python API for users of OSeMOSYS. 
The aim of the +package is to provide commonly used pre- and post-processing steps when working with OSeMOSYS models. Specifically, ``otoole`` allows the user to convert between data formats, process solutions, and visualise the reference energy system. @@ -28,6 +28,7 @@ Contents Core Functionality Data Formats Examples + Python API Contributing License Authors diff --git a/src/otoole/convert.py b/src/otoole/convert.py index 068a66cd..f3a95063 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -36,14 +36,29 @@ def convert_results( Arguments --------- config : str + Path to config file + from_format : str + Available options are 'cbc', 'cplex' and 'gurobi' + to_format : str + Available options are 'csv' + from_path : str + Path to cbc, cplex or gurobi solution file + to_path : str + Path to destination folder input_datapackage : str + Path to folder containing datapackage.json input_csvs : str + Path to folder containing CSVs input_datafile : str - to_path : str - from_path : str - from_format : str - to_format : str + Path to datafile write_defaults : str + Write default values to CSVs + + Returns + ------- + bool + True if conversion was successful, False otherwise + """ msg = "Conversion from {} to {} is not yet implemented".format( from_format, to_format @@ -124,12 +139,14 @@ def convert( from_path : str Path to destination file (if datafile or excel) or folder (csv or datapackage) write_defaults: bool, default: False + Write default values to CSVs keep_whitespace: bool, default: False + Keep whitespace in CSVs Returns ------- bool - True if conversion was successful + True if conversion was successful, False otherwise """ msg = "Conversion from {} to {} is not yet implemented".format( @@ -144,8 +161,6 @@ def convert( logger.info("Validating config from {}".format(config)) validate_config(user_config) - # set read strategy - keep_whitespace = True if keep_whitespace else False if from_format == "datafile": From 2e99548b9802258e3da9ee178ce2b66dac69c2f9 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Wed, 21 Jun 2023 12:52:48 +0200 Subject: [PATCH 004/103] Refactored WriteStrategy to remove kwargs and use class variable --- src/otoole/input.py | 41 ++++++++++++++++++++++------------------- tests/test_input.py | 6 ++++-- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/src/otoole/input.py b/src/otoole/input.py index fe04b1a4..073f1c01 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -247,13 +247,14 @@ def write( handle = self._header() logger.debug(default_values) + self.input_data = inputs if self.write_defaults: try: - inputs = self._expand_defaults(inputs, default_values, **kwargs) + self.input_data = self._expand_defaults(inputs, default_values) except KeyError as ex: logger.debug(ex) - for name, df in sorted(inputs.items()): + for name, df in sorted(self.input_data.items()): logger.debug("%s has %s columns: %s", name, len(df.index.names), df.columns) try: @@ -278,37 +279,27 @@ def write( handle.close() def _expand_defaults( - self, - data_to_expand: Dict[str, pd.DataFrame], - default_values: Dict[str, float], - **kwargs, + self, data_to_expand: Dict[str, pd.DataFrame], default_values: Dict[str, float] ) -> Dict[str, pd.DataFrame]: """Populates default value entry rows in dataframes Parameters ---------- - input_data : Dict[str, pd.DataFrame], + data_to_expand : Dict[str, pd.DataFrame], default_values : Dict[str, float] Returns ------- - results : Dict[str, pd.DataFrame] - Updated available reults dictionary + Dict[str, pd.DataFrame] + Input 
data with expanded default values replacing missing entries Raises ------ KeyError - If set definitons are not in input_data and input_data is not supplied + If set definitions are not in input_data and input_data is not supplied """ sets = [x for x in self.user_config if self.user_config[x]["type"] == "set"] - - # if expanding results, input data is needed for set defenitions - if "input_data" in kwargs: - model_data = kwargs["input_data"] - else: - model_data = data_to_expand - output = {} for name, data in data_to_expand.items(): logger.info(f"Writing defaults for {name}") @@ -320,7 +311,7 @@ def _expand_defaults( # TODO # Issue with how otoole handles trade route right now. - # The double defenition of REGION throws an error. + # The double definition of REGION throws an error. if name == "TradeRoute": output[name] = data continue @@ -329,7 +320,7 @@ def _expand_defaults( index_data = {} for index in data.index.names: try: - index_data[index] = model_data[index]["VALUE"].to_list() + index_data[index] = self.input_data[index]["VALUE"].to_list() except KeyError as ex: logger.info("Can not write default values. Supply input data") raise KeyError(ex) @@ -587,4 +578,16 @@ def _compare_read_to_expected( def read( self, filepath: Union[str, TextIO], **kwargs ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, Any]]: + """Reads in data from file + + Arguments + --------- + filepath: Union[str, TextIO] + + Returns + ------- + Tuple[Dict[str, pd.DataFrame], Dict[str, Any]] + tuple of input_data as a dictionary of pandas DataFrames and + dictionary of default values + """ raise NotImplementedError() diff --git a/tests/test_input.py b/tests/test_input.py index feb6decb..3358aefc 100644 --- a/tests/test_input.py +++ b/tests/test_input.py @@ -277,12 +277,13 @@ def result_data(region): parameter_test_data, ids=parameter_test_data_ids, ) - def test_expand_parmaters_defaults( + def test_expand_parameters_defaults( self, user_config, simple_default_values, input_data, parameter, expected ): write_strategy = DummyWriteStrategy( user_config=user_config, default_values=simple_default_values ) + write_strategy.input_data = input_data actual = write_strategy._expand_defaults( input_data, write_strategy.default_values ) @@ -294,8 +295,9 @@ def test_expand_result_defaults( write_strategy = DummyWriteStrategy( user_config=user_config, default_values=simple_default_values ) + write_strategy.input_data = simple_input_data actual = write_strategy._expand_defaults( - result_data[0], write_strategy.default_values, input_data=simple_input_data + result_data[0], write_strategy.default_values ) assert_frame_equal(actual[result_data[1]], result_data[2]) From 7c6750e538a34f36a29e3f9f2bbb0cd225decd98 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Wed, 21 Jun 2023 12:53:22 +0200 Subject: [PATCH 005/103] Added docstrings to all strategies --- src/otoole/read_strategies.py | 42 +++++++++++++++++++++++++++++++--- src/otoole/write_strategies.py | 3 +++ 2 files changed, 42 insertions(+), 3 deletions(-) diff --git a/src/otoole/read_strategies.py b/src/otoole/read_strategies.py index 01f05f5a..7f5805c6 100644 --- a/src/otoole/read_strategies.py +++ b/src/otoole/read_strategies.py @@ -15,7 +15,16 @@ class ReadMemory(ReadStrategy): - """Read a dict of OSeMOSYS parameters from memory""" + """Read a dict of OSeMOSYS parameters from memory + + Arguments + --------- + parameters : Dict[str, pd.DataFrame] + Dictionary of OSeMOSYS parameters + user_config : Dict[str, Dict] + User configuration + + """ def __init__( self, parameters: Dict[str, 
pd.DataFrame], user_config: Dict[str, Dict] @@ -117,7 +126,15 @@ def _whitespace_converter(self, indices: List[str]) -> Dict[str, Any]: class ReadExcel(_ReadTabular): - """Read in an Excel spreadsheet in wide format to a dict of Pandas DataFrames""" + """Read in an Excel spreadsheet in wide format to a dict of Pandas DataFrames + + Arguments + --------- + user_config : Dict[str, Dict] + User configuration + keep_whitespace : bool + Whether to keep whitespace in the dataframes + """ def read( self, filepath: Union[str, TextIO], **kwargs @@ -163,7 +180,15 @@ def read( class ReadCsv(_ReadTabular): - """Read in a folder of CSV files""" + """Read in a folder of CSV files to a dict of Pandas DataFrames + + Arguments + --------- + user_config : Dict[str, Dict] + User configuration + keep_whitespace : bool + Whether to keep whitespace in the dataframes + """ def read( self, filepath, **kwargs @@ -282,6 +307,17 @@ def _check_for_default_values_csv(filepath: str) -> None: class ReadDatafile(ReadStrategy): + """Read in a datafile to a dict of Pandas DataFrames + + Arguments + --------- + user_config : Dict[str, Dict] + User configuration + keep_whitespace : bool + Whether to keep whitespace in the dataframes + + """ + def read( self, filepath, **kwargs ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, Any]]: diff --git a/src/otoole/write_strategies.py b/src/otoole/write_strategies.py index cfe8a2e6..c329d8db 100644 --- a/src/otoole/write_strategies.py +++ b/src/otoole/write_strategies.py @@ -57,6 +57,9 @@ def _form_parameter( def _form_parameter_template(self, parameter_name: str, **kwargs) -> pd.DataFrame: """Creates wide format excel template + Pivots the data to wide format using the data from the YEAR set as the columns. + This requires input data to be passed into this function. 
+ Arguments --------- parameter_name: str From 25d617627123fe79b77b401ba8b887b84b766e6c Mon Sep 17 00:00:00 2001 From: Will Usher Date: Wed, 21 Jun 2023 12:55:09 +0200 Subject: [PATCH 006/103] Factor out obtaining strategies in preparation for read and write only API functions --- src/otoole/convert.py | 116 ++++++++++++++++++++++++++++++------------ 1 file changed, 83 insertions(+), 33 deletions(-) diff --git a/src/otoole/convert.py b/src/otoole/convert.py index f3a95063..141137f3 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -117,42 +117,19 @@ def convert_results( return True -def convert( - config, - from_format, - to_format, - from_path, - to_path, - write_defaults=False, - keep_whitespace=False, -) -> bool: - """Convert OSeMOSYS data from/to datafile, csv and Excel formats +def _get_user_config(config) -> dict: + """Read in the configuration file Arguments --------- config : str Path to config file - from_format : str - Available options are 'datafile', 'datapackage', 'csv' and 'excel' - to_format : str - Available options are 'datafile', 'datapackage', 'csv' and 'excel' - from_path : str - Path to destination file (if datafile or excel) or folder (csv or datapackage) - write_defaults: bool, default: False - Write default values to CSVs - keep_whitespace: bool, default: False - Keep whitespace in CSVs Returns ------- - bool - True if conversion was successful, False otherwise + dict + A dictionary containing the user configuration """ - - msg = "Conversion from {} to {} is not yet implemented".format( - from_format, to_format - ) - if config: _, ending = os.path.splitext(config) with open(config, "r") as config_file: @@ -160,7 +137,31 @@ def convert( logger.info("Reading config from {}".format(config)) logger.info("Validating config from {}".format(config)) validate_config(user_config) + return user_config + + +def _get_read_strategy( + user_config, from_format, from_path, keep_whitespace=False +) -> Union[ReadStrategy, None]: + """Read OSeMOSYS parameter data from csv/datafile/excel format + + Arguments + --------- + config : dict + User configuration describing parameters and sets + from_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + from_path : str + Path to destination file (if datafile or excel) or folder (csv or datapackage) + keep_whitespace: bool, default: False + Keep whitespace in CSVs + Returns + ------- + dict[str, pandas.DataFrame] + Dictionary of pandas DataFrames containing the data + + """ keep_whitespace = True if keep_whitespace else False if from_format == "datafile": @@ -185,11 +186,14 @@ def convert( else: read_strategy = None - if read_strategy: - input_data, _ = read_strategy.read(from_path) + return read_strategy - # set write strategy +def _get_write_strategy( + user_config, to_format, to_path, write_defaults=False +) -> Union[WriteStrategy, None]: + """ """ + # set write strategy write_defaults = True if write_defaults else False if to_format == "datapackage": @@ -200,9 +204,7 @@ def convert( ) elif to_format == "excel": write_strategy = WriteExcel( - user_config=user_config, - write_defaults=write_defaults, - input_data=input_data, + user_config=user_config, write_defaults=write_defaults ) elif to_format == "datafile": write_strategy = WriteDatafile( @@ -215,6 +217,54 @@ def convert( else: write_strategy = None + return write_strategy + + +def convert( + config, + from_format, + to_format, + from_path, + to_path, + write_defaults=False, + keep_whitespace=False, +) -> bool: + """Convert OSeMOSYS data 
from/to datafile, csv and Excel formats + + Arguments + --------- + config : str + Path to config file + from_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + to_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + from_path : str + Path to destination file (if datafile or excel) or folder (csv or datapackage) + write_defaults: bool, default: False + Write default values to CSVs + keep_whitespace: bool, default: False + Keep whitespace in CSVs + + Returns + ------- + bool + True if conversion was successful, False otherwise + """ + + msg = "Conversion from {} to {} is not yet implemented".format( + from_format, to_format + ) + + user_config = _get_user_config(config) + read_strategy = _get_read_strategy( + user_config, from_format, from_path, keep_whitespace=keep_whitespace + ) + + write_strategy = _get_write_strategy( + user_config, to_format, to_path, write_defaults=write_defaults + ) + if read_strategy and write_strategy: context = Context(read_strategy, write_strategy) context.convert(from_path, to_path) From 189545fa2259004b5760b87c97866488d9fece60 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Wed, 21 Jun 2023 12:55:27 +0200 Subject: [PATCH 007/103] Update amply version qualifier --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 81aa8fcd..eb6e129a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -47,7 +47,7 @@ install_requires = pydot importlib_resources; python_version<'3.7' pandas>=1.1 - amply>=0.1.6 + Amply>=0.1.6 networkx flatten_dict openpyxl From c1dd1a683af8047f7931ca54da0b3d4f0d6fa3d2 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Wed, 21 Jun 2023 13:28:29 +0200 Subject: [PATCH 008/103] Remove redundant arguments, move errors to get strategies --- src/otoole/convert.py | 132 ++++++++++++++++++++++++++++++++---------- 1 file changed, 101 insertions(+), 31 deletions(-) diff --git a/src/otoole/convert.py b/src/otoole/convert.py index 141137f3..f9e7160d 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -9,7 +9,8 @@ """ import logging import os -from typing import Union + +import pandas as pd from otoole.input import Context, ReadStrategy, WriteStrategy from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel @@ -140,10 +141,8 @@ def _get_user_config(config) -> dict: return user_config -def _get_read_strategy( - user_config, from_format, from_path, keep_whitespace=False -) -> Union[ReadStrategy, None]: - """Read OSeMOSYS parameter data from csv/datafile/excel format +def _get_read_strategy(user_config, from_format, keep_whitespace=False) -> ReadStrategy: + """Get ``ReadStrategy`` for csv/datafile/excel format Arguments --------- @@ -151,26 +150,23 @@ def _get_read_strategy( User configuration describing parameters and sets from_format : str Available options are 'datafile', 'datapackage', 'csv' and 'excel' - from_path : str - Path to destination file (if datafile or excel) or folder (csv or datapackage) keep_whitespace: bool, default: False Keep whitespace in CSVs Returns ------- - dict[str, pandas.DataFrame] - Dictionary of pandas DataFrames containing the data + ReadStrategy or None + A ReadStrategy object. 
Returns None if from_format is not recognised """ keep_whitespace = True if keep_whitespace else False if from_format == "datafile": - read_strategy: Union[ReadStrategy, None] = ReadDatafile(user_config=user_config) + read_strategy: ReadStrategy = ReadDatafile(user_config=user_config) elif from_format == "datapackage": logger.warning( "Reading from datapackage is deprecated, trying to read from CSVs" ) - from_path = read_deprecated_datapackage(from_path) logger.info("Successfully read folder of CSVs") read_strategy = ReadCsv( user_config=user_config, keep_whitespace=keep_whitespace @@ -184,22 +180,35 @@ def _get_read_strategy( user_config=user_config, keep_whitespace=keep_whitespace ) # typing: ReadStrategy else: - read_strategy = None + msg = f"Conversion from {from_format} is not supported" + raise NotImplementedError(msg) return read_strategy -def _get_write_strategy( - user_config, to_format, to_path, write_defaults=False -) -> Union[WriteStrategy, None]: - """ """ +def _get_write_strategy(user_config, to_format, write_defaults=False) -> WriteStrategy: + """Get ``WriteStrategy`` for csv/datafile/excel format + + Arguments + --------- + user_config : dict + User configuration describing parameters and sets + to_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + write_defaults: bool, default: False + Write default values to output format + + Returns + ------- + WriteStrategy or None + A ReadStrategy object. Returns None if to_format is not recognised + + """ # set write strategy write_defaults = True if write_defaults else False if to_format == "datapackage": - logger.warning("Writing to datapackage is deprecated, writing to CSVs") - to_path = os.path.join(os.path.dirname(to_path), "data") - write_strategy: Union[WriteStrategy, None] = WriteCsv( + write_strategy: WriteStrategy = WriteCsv( user_config=user_config, write_defaults=write_defaults ) elif to_format == "excel": @@ -215,7 +224,8 @@ def _get_write_strategy( user_config=user_config, write_defaults=write_defaults ) else: - write_strategy = None + msg = f"Conversion to {to_format} is not supported" + raise NotImplementedError(msg) return write_strategy @@ -252,24 +262,84 @@ def convert( True if conversion was successful, False otherwise """ - msg = "Conversion from {} to {} is not yet implemented".format( - from_format, to_format - ) - user_config = _get_user_config(config) read_strategy = _get_read_strategy( - user_config, from_format, from_path, keep_whitespace=keep_whitespace + user_config, from_format, keep_whitespace=keep_whitespace ) write_strategy = _get_write_strategy( - user_config, to_format, to_path, write_defaults=write_defaults + user_config, to_format, write_defaults=write_defaults ) - if read_strategy and write_strategy: - context = Context(read_strategy, write_strategy) - context.convert(from_path, to_path) + if from_format == "datapackage": + logger.warning("Writing to datapackage is deprecated, writing to CSVs") + from_path = read_deprecated_datapackage(from_path) + to_path = os.path.join(os.path.dirname(to_path), "data") + + context = Context(read_strategy, write_strategy) + context.convert(from_path, to_path) + + return True + + +def read( + config, from_format, from_path, keep_whitespace=False +) -> tuple[dict[str, pd.DataFrame], dict[str, float]]: + """Read OSeMOSYS data from datafile, csv or Excel formats + + Arguments + --------- + config : str + Path to config file + from_format : str + Available options are 'datafile', 'csv', 'excel' and 'datapackage' [deprecated] + 
from_path : str + Path to source file (if datafile or excel) or folder (csv) + keep_whitespace: bool, default: False + Keep whitespace in source files + + Returns + ------- + tuple[dict[str, pd.DataFrame], dict[str, float]] + Dictionary of parameter and set data and dictionary of default values + """ + user_config = _get_user_config(config) + read_strategy = _get_read_strategy( + user_config, from_format, keep_whitespace=keep_whitespace + ) + + if from_format == "datapackage": + from_path = read_deprecated_datapackage(from_path) + + return read_strategy.read(from_path) + + +def write(config, to_format, to_path, inputs, default_values=None) -> bool: + """Write OSeMOSYS data to datafile, csv or Excel formats + + Arguments + --------- + config : str + Path to config file + to_format : str + Available options are 'datafile', 'csv', 'excel' and 'datapackage' [deprecated], + to_path : str + Path to destination file (if datafile or excel) or folder (csv)) + inputs : dict[str, pd.DataFrame] + Dictionary of pandas data frames to write + default_values: dict[str, float], default: None + Dictionary of default values to write to datafile + + """ + user_config = _get_user_config(config) + if default_values is None: + write_defaults = False else: - raise NotImplementedError(msg) - return False + write_defaults = True + + write_strategy = _get_write_strategy( + user_config, to_format, write_defaults=write_defaults + ) + write_strategy.write(inputs, to_path, default_values=default_values) return True From bb7d6748bb7cbbe1582c2997d83675a35bda33d1 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Wed, 21 Jun 2023 13:54:15 +0200 Subject: [PATCH 009/103] Add tests for read and write functions --- src/otoole/__init__.py | 6 ++-- tests/test_convert.py | 79 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 82 insertions(+), 3 deletions(-) diff --git a/src/otoole/__init__.py b/src/otoole/__init__.py index 57365b0a..323f4e78 100644 --- a/src/otoole/__init__.py +++ b/src/otoole/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import sys -from otoole.convert import convert, convert_results +from otoole.convert import convert, convert_results, read, write if sys.version_info[:2] >= (3, 8): # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8` @@ -20,5 +20,7 @@ convert = convert convert_results = convert_results +read = read +write = write -__all__ = ["convert" "convert_results"] +__all__ = ["convert" "convert_results", "read", "write"] diff --git a/tests/test_convert.py b/tests/test_convert.py index 00a95249..2d87c901 100644 --- a/tests/test_convert.py +++ b/tests/test_convert.py @@ -4,9 +4,86 @@ import os from tempfile import NamedTemporaryFile, TemporaryDirectory +import pandas as pd from pytest import raises -from otoole import convert, convert_results +from otoole import convert, convert_results, read, write + + +class TestRead: + """Tests the public api for reading data""" + + def test_read_datafile(self): + """Test reading data from a file""" + data, defaults = read( + os.path.join("tests", "fixtures", "config.yaml"), + "datafile", + os.path.join("tests", "fixtures", "simplicity.txt"), + ) + + assert isinstance(data, dict) + assert isinstance(defaults, dict) + + def test_read_excel(self): + """Test reading data from an Excel file""" + data, defaults = read( + os.path.join("tests", "fixtures", "config.yaml"), + "excel", + os.path.join("tests", "fixtures", "combined_inputs.xlsx"), + ) + + assert isinstance(data, dict) + assert isinstance(defaults, dict) + + def 
test_read_csv(self): + """Test reading data from a CSV file""" + data, defaults = read( + os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml"), + "csv", + os.path.join("tests", "fixtures", "super_simple", "csv"), + ) + + assert isinstance(data, dict) + assert "REGION" in data + pd.testing.assert_frame_equal(data["REGION"], pd.DataFrame({"VALUE": ["BB"]})) + assert "TECHNOLOGY" in data + assert "MODE_OF_OPERATION" in data + assert "YEAR" in data + assert isinstance(defaults, dict) + + +class TestWrite: + """Tests the public api for writing data""" + + def test_write_datafile(self): + """Test writing data to a file""" + data = {"REGION": pd.DataFrame({"VALUE": ["BB"]})} + temp = NamedTemporaryFile() + assert write( + os.path.join("tests", "fixtures", "config.yaml"), + "datafile", + temp.name, + data, + ) + + def test_write_excel(self): + """Test writing data to an Excel file""" + data = {"REGION": pd.DataFrame({"VALUE": ["BB"]})} + temp = NamedTemporaryFile(suffix=".xlsx") + assert write( + os.path.join("tests", "fixtures", "config.yaml"), "excel", temp.name, data + ) + + def test_write_csv(self): + """Test writing data to a CSV file""" + data = {"REGION": pd.DataFrame({"VALUE": ["BB"]})} + temp = TemporaryDirectory() + assert write( + os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml"), + "csv", + temp.name, + data, + ) class TestConvert: From aad666bfa85d9fce3156ba90e865b69d66640293 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Wed, 21 Jun 2023 14:14:32 +0200 Subject: [PATCH 010/103] Typing --- src/otoole/convert.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/otoole/convert.py b/src/otoole/convert.py index f9e7160d..db094327 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -9,6 +9,7 @@ """ import logging import os +from typing import Dict, Tuple import pandas as pd @@ -284,7 +285,7 @@ def convert( def read( config, from_format, from_path, keep_whitespace=False -) -> tuple[dict[str, pd.DataFrame], dict[str, float]]: +) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float]]: """Read OSeMOSYS data from datafile, csv or Excel formats Arguments @@ -300,7 +301,7 @@ def read( Returns ------- - tuple[dict[str, pd.DataFrame], dict[str, float]] + Tuple[dict[str, pd.DataFrame], dict[str, float]] Dictionary of parameter and set data and dictionary of default values """ user_config = _get_user_config(config) From 20f547f577c584974feaeba154f8b1eefc02979b Mon Sep 17 00:00:00 2001 From: Will Usher Date: Wed, 21 Jun 2023 14:16:41 +0200 Subject: [PATCH 011/103] Add read and write to docs --- docs/convert.rst | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/convert.rst b/docs/convert.rst index 16111aec..46adc077 100644 --- a/docs/convert.rst +++ b/docs/convert.rst @@ -31,6 +31,28 @@ Gurobi_ or CPLEX_ solution file:: See :func:`otoole.convert.convert_results` for more details +Read in data from different Formats +----------------------------------- + +You can use the :py:func:`otoole.convert.read` function to read data in from different formats to a Python object. +This allows you to then use all the features offered by Python to manipulate the data. 
+ +>>> from otoole import read +>>> data, defaults = read('my_model.yaml', 'csv', 'my_model_csvs') # read from a folder of csv files +>>> data, defaults = read('my_model.yaml', 'excel', 'my_model.xlsx') # read from an Excel file +>>> data, defaults = read('my_model.yaml', 'datafile', 'my_model.dat') # read from a GNU MathProg datafile + +Write out data to different Formats +----------------------------------- + +You can use the :py:func:`otoole.convert.write` function to write data out to different formats from a Python object. + +>>> from otoole import read, write +>>> data, defaults = read('my_model.yaml', 'csv', 'my_model_csvs') # read from a folder of csv files +>>> write('my_model.yaml', 'excel', 'my_model.xlsx', data, defaults) # write to an Excel file +>>> write('my_model.yaml', 'datafile', 'my_model.dat', data, defaults) # write to a GNU MathProg datafile + + .. _CBC: https://github.com/coin-or/Cbc .. _CLP: https://github.com/coin-or/Clp .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer From 00a41440eef6534223bab84e9484927c35286ed4 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 09:19:41 +0200 Subject: [PATCH 012/103] Pin pydantic to less than v2 --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index b410ffb8..0395c3ee 100644 --- a/setup.cfg +++ b/setup.cfg @@ -51,7 +51,7 @@ install_requires = networkx flatten_dict openpyxl - pydantic + pydantic<2 [options.packages.find] where = src exclude = From 5e32250e6868d178f653dd5aad68b30b56b39e4e Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 09:37:12 +0200 Subject: [PATCH 013/103] Updated call to resources library --- src/otoole/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/otoole/utils.py b/src/otoole/utils.py index 99fc7a0e..db697ce7 100644 --- a/src/otoole/utils.py +++ b/src/otoole/utils.py @@ -42,7 +42,7 @@ def read_packaged_file(filename: str, module_name: str = None): with open(filename, "r") as open_file: contents = _read_file(open_file, ending) else: - with resources.open_text(module_name, filename) as open_file: + with resources.files(module_name).joinpath(filename).open("r") as open_file: contents = _read_file(open_file, ending) return contents From 9a7d7620023cebfdd91a3e3a208cd33f757237ec Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 09:48:27 +0200 Subject: [PATCH 014/103] Simplify dependencies for reading packaged files --- .github/workflows/python.yaml | 2 +- setup.cfg | 1 - src/otoole/utils.py | 9 ++------- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 3a14dcd9..1139cf11 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -25,7 +25,7 @@ jobs: - name: Lint with flake8 run: | tox -e lint - - name: Test with pytest + - name: Run tests using tox run: | tox - name: Upload coverage data to converalls.io diff --git a/setup.cfg b/setup.cfg index 0395c3ee..e9682a3a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -45,7 +45,6 @@ install_requires = xlrd pyyaml pydot - importlib_resources; python_version<'3.7' pandas>=1.1 amply>=0.1.4 networkx diff --git a/src/otoole/utils.py b/src/otoole/utils.py index db697ce7..251bb5f8 100644 --- a/src/otoole/utils.py +++ b/src/otoole/utils.py @@ -1,6 +1,7 @@ import json import logging import os +from importlib.resources import files from typing import Any, Dict, List, Union import pandas as pd @@ -15,12 +16,6 @@ 
UserDefinedValue, ) -try: - import importlib.resources as resources -except ImportError: - # Try backported to PY<37 `importlib_resources`. - import importlib_resources as resources # type: ignore - logger = logging.getLogger(__name__) @@ -42,7 +37,7 @@ def read_packaged_file(filename: str, module_name: str = None): with open(filename, "r") as open_file: contents = _read_file(open_file, ending) else: - with resources.files(module_name).joinpath(filename).open("r") as open_file: + with files(module_name).joinpath(filename).open("r") as open_file: contents = _read_file(open_file, ending) return contents From 33f5a2103768753df3d78be70581e980bac446e6 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 10:01:10 +0200 Subject: [PATCH 015/103] Drop support for Python 3.8 --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index e9682a3a..934adb1f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -35,7 +35,7 @@ package_dir = =src # Require a min/specific Python version (comma-separated conditions) -python_requires = >=3.8 +python_requires = >=3.9 # Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0. # Version specifiers like >=2.2,<3.0 avoid problems due to API changes in From 3ef9bdc38a031a2c5c57ca90ff9c7e107b026906 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 10:08:01 +0200 Subject: [PATCH 016/103] Drop Python 3.8 from test matrix --- .github/workflows/python.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 1139cf11..0bd4d0df 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11"] steps: - uses: actions/checkout@v3 @@ -20,8 +20,6 @@ jobs: run: | python -m pip install --upgrade pip pip install tox - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - pip install -e . - name: Lint with flake8 run: | tox -e lint From 872d2e68b4948beb83f30fa7595bb08179042353 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 10:15:18 +0200 Subject: [PATCH 017/103] Create python-publish.yml --- .github/workflows/python-publish.yml | 39 ++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 .github/workflows/python-publish.yml diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 00000000..bdaab28a --- /dev/null +++ b/.github/workflows/python-publish.yml @@ -0,0 +1,39 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. 
+ +name: Upload Python Package + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + deploy: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: '3.x' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build + - name: Build package + run: python -m build + - name: Publish package + uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} From f420d3dc67d39efb16af5a499a7de6792e148010 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 10:34:13 +0200 Subject: [PATCH 018/103] Use tox config for publishing --- .github/workflows/python-publish.yml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index bdaab28a..fc3dac1a 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -29,11 +29,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install build + pip install tox - name: Build package - run: python -m build + run: tox -e build - name: Publish package - uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} + run: tox -e publish From b628302e628785f05b8574422da334097771da2e Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 10:56:02 +0200 Subject: [PATCH 019/103] Define environment variables passed to tox --- .github/workflows/python-publish.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index fc3dac1a..d5a5e6f1 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -34,3 +34,7 @@ jobs: run: tox -e build - name: Publish package run: tox -e publish + env: + TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }} + TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} + TWINE_REPOSITORY: ${{ secrets.TWINE_REPOSITORY }} From 973c44ad66c15b3b0e48b1ac2562a7fe615a2c29 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 11:01:08 +0200 Subject: [PATCH 020/103] Use vars instead of secrets --- .github/workflows/python-publish.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index d5a5e6f1..e034a0c8 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -35,6 +35,6 @@ jobs: - name: Publish package run: tox -e publish env: - TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }} + TWINE_USERNAME: ${{ vars.TWINE_USERNAME }} TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} - TWINE_REPOSITORY: ${{ secrets.TWINE_REPOSITORY }} + TWINE_REPOSITORY: ${{ vars.TWINE_REPOSITORY }} From ad8522ddc1869d9c83c2568354dbda5c2318656f Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 11:05:58 +0200 Subject: [PATCH 021/103] Use Python 3.11 for ReadTheDocs --- .readthedocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 21b08145..16065bdd 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -17,7 +17,7 @@ formats: - pdf python: - version: 3.8 + version: 3.11 install: - requirements: docs/requirements.txt - {path: ., method: pip} From 
721e0553ddc93962b644b45775580e3d04f237cf Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 11:17:40 +0200 Subject: [PATCH 022/103] Update configuration for documentation --- .readthedocs.yml | 10 +++++----- docs/requirements.txt | 10 ++++++++++ setup.cfg | 2 ++ 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 16065bdd..d7cd0bd5 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -8,16 +8,16 @@ version: 2 sphinx: configuration: docs/conf.py -# Build documentation with MkDocs -#mkdocs: -# configuration: mkdocs.yml - # Optionally build your docs in additional formats such as PDF formats: - pdf python: - version: 3.11 install: - requirements: docs/requirements.txt - {path: ., method: pip} + +build: + os: ubuntu-22.04 + tools: + python: "3.11" diff --git a/docs/requirements.txt b/docs/requirements.txt index 4afe7af2..a575fdd3 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,13 @@ +amply>=0.1.4 docutils<0.18 +flatten_dict Jinja2<3.1 +networkx +openpyxl +pandas>=1.1 +pydantic<2 +pydot +pyyaml # Requirements file for ReadTheDocs, check .readthedocs.yml. # To build the module reference correctly, make sure every external package # under `install_requires` in `setup.cfg` is also listed here! @@ -7,3 +15,5 @@ sphinx>=3.2.1 sphinx-book-theme urllib3<2 # sphinx_rtd_theme +#otoole dependencies from setup.cfg +xlrd diff --git a/setup.cfg b/setup.cfg index 934adb1f..00cc9ce3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,6 +41,8 @@ python_requires = >=3.9 # Version specifiers like >=2.2,<3.0 avoid problems due to API changes in # new major versions. This works if the required packages follow Semantic Versioning. # For more information, check out https://semver.org/. + +# If this list changes, update docs/requirements.txt as well. install_requires = xlrd pyyaml From 4ecb8da0c8356270ebbdb2b0b93cd6faecadb8a3 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Mon, 10 Jul 2023 11:21:43 +0200 Subject: [PATCH 023/103] Update changelog --- CHANGELOG.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 497a5e39..970c3ea5 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,11 @@ Changelog ========= +Version 1.0.4 +============= +- Fixed issue with pydantic v2.0.0 +- Dropped support for Python 3.8. 
Otoole now requires Python 3.9 or later + Version 1.0.3 ============= - Improved error message for multiple names mismatches From e96cfc503ec3b6f88769b989affc334661d7c657 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 11 Aug 2023 01:39:20 -0700 Subject: [PATCH 024/103] glpk parsing logic and tests --- src/otoole/results/results.py | 210 +++++++++++++++++++++++++++++++++- tests/test_read_strategies.py | 86 ++++++++++++++ 2 files changed, 295 insertions(+), 1 deletion(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index e4757f9b..42892922 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -8,6 +8,7 @@ from otoole.input import ReadStrategy from otoole.preprocess.longify_data import check_datatypes from otoole.results.result_package import ResultsPackage +from otoole.exceptions import OtooleError LOGGER = logging.getLogger(__name__) @@ -37,7 +38,7 @@ def read( input_data = None available_results = self.get_results_from_file( - filepath, input_data + filepath, input_data, kwargs ) # type: Dict[str, pd.DataFrame] default_values = self._read_default_values(self.results_config) # type: Dict @@ -328,3 +329,210 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: df["Index"] = df["Index"].str.replace(")", "", regex=False) df = df.drop(columns=["indexvalue"]) return df[["Variable", "Index", "Value"]].astype({"Value": float}) + +class ReadGlpk(ReadResultsCBC): + """Reads a GLPK Solution file into memory + + The user must provide both the solution file (results.sol) and the glpk + model file (model.lp). These can be generated from the following command + + glpsol --wglp model.lp -m osemosys.txt -d simplicity.txt --write results.sol + + Arguments + --------- + user_config + glpk model file + """ + + def _convert_to_dataframe(self, glpk_model: str, glpk_sol: str) -> pd.DataFrame: + """Creates a wide formatted dataframe from GLPK solution + + Arguments + --------- + glpk_model: str + Path to GLPK model file. Can be created using the `--wglp` flag + glpk_sol: str + Path to GLPK solution file. Can be created using the `--write` flag + + Returns + ------- + pd.DataFrame + """ + + model = self.read_model(glpk_model) + _, sol = self.read_solution(glpk_sol) + return self.merge_model_sol(model, sol) + + def read_solution(self, file_path: str) -> Tuple[Dict[str,Union[str, float]], pd.DataFrame]: + """Reads a GLPK solution file + + Arguments + --------- + file_path: str + Path to GLPK solution file. Can be created using the `--write` flag + + Returns + ------- + Tuple[Dict[str,Union[str, float]], pd.DataFrame] + Dict[str,Union[str, float]] -> Problem name, status, and objective value + pd.DataFrame -> Variables and constraints + + + {"name":"osemosys", "status":"OPTIMAL", "objective":4497.31976} + + ID NUM STATUS PRIM DUAL + 0 i 1 b 5 0 + 1 j 2 l 0 2 + + Notes + ----- + + -> ROWS IN SOLUTION FILE + + i ROW ST PRIM DUAL + + ROW is the ordinal number of the row + ST is one of: + b = inactive constraint; + l = inequality constraint active on its lower bound; + u = inequality constraint active on its upper bound; + f = active free (unounded) row; + s = active equality constraint. 
+ PRIM specifies the row primal value (float) + DUAL specifies the row dual value (float) + + -> COLUMNS IN SOLUTION FILE + + j COL ST PRIM DUAL + + COL specifies the column ordinal number + ST contains one of the following lower-case letters that specifies the column status in the basic solution: + b = basic variable + l = non-basic variable having its lower bound active + u = non-basic variable having its upper bound active + f = non-basic free (unbounded) variable + s = non-basic fixed variable. + PRIM field contains column primal value (float) + DUAL field contains the column dual value (float) + """ + + data = [] + status = {} + + with open(file_path, "r") as f: + for line in f: + parts = line.strip().split() + if parts[0] in ("i", "j"): + data.append([ + parts[0], int(parts[1]), parts[2], float(parts[3]), float(parts[4]) + ]) + elif len(parts) > 1: + if parts[1] == "Problem:": + status["name"] = parts[2] + elif parts[1] == "Status:": + status["status"] = parts[2] + elif parts[1] == "Objective:": + status["objective"] = float(parts[4]) + + df = pd.DataFrame(data, columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"]) + + for info in ["name", "status", "objective"]: + if info not in status: + LOGGER.warning(f"No {info} extracted from the GLPK solution") + + return status, df + + def read_model(self, file_path: str) -> pd.DataFrame: + """Reads in a GLPK Model File + + Arguments + --------- + file_path: str + Path to GLPK model file. Can be created using the `--wglp` flag. + + Returns + ------- + pd.DataFrame + + ID NUM NAME INDEX + 0 i 1 CAa4_Constraint_Capacity "SIMPLICITY,ID,BACKSTOP1,2015" + 1 j 2 NewCapacity "SIMPLICITY,WINDPOWER,2039" + + Notes + ----- + + -> GENERAL LAYOUT OF SOLUTION FILE + + n p NAME # p = problem instance + n z NAME # z = objective function + n i ROW NAME # i = constraint name, ROW is the row ordinal number + n j COL NAME # j = variable name, COL is the column ordinal number + """ + + data = [] + + with open(file_path, "r") as f: + for line in f: + parts = line.strip().split() + if not parts[0] == "n": + continue + if len(parts) < 4: + continue + data.append([parts[1], int(parts[2]), parts[3]]) + + df = pd.DataFrame(data, columns=["ID", "NUM", "INDEX_LIST"]) + df = df.loc[df["INDEX_LIST"].str.contains("\[")] # removes "n i 1 cost" row + + df[["NAME", "INDEX"]] = df["INDEX_LIST"].str.split("[", expand=True) + df["INDEX"] = df["INDEX"].map(lambda x: x.split("]")[0]) + + df = df[["ID", "NUM", "NAME", "INDEX"]].reset_index(drop=True) + return df + + def merge_model_sol(self, model: pd.DataFrame, sol: pd.DataFrame) -> pd.DataFrame: + """Merges GLPK model and solution file into one dataframe + + Arguments + --------- + model: pd.DataFrame, + see output from ReadGlpk.read_model(...) + sol: pd.DataFrame + see output from ReadGlpk.read_solution(...) 
+ + Returns + ------- + pd.DataFrame + + >>> pd.DataFrame(data=[ + ['TotalDiscountedCost', "SIMPLICITY,2015", 187.01576], + ['TotalDiscountedCost', "SIMPLICITY,2016", 183.30788]], + columns=['Variable', 'Index', 'Value']) + """ + + # create lookup ids using the id and num columns to coordinate merge + model["lookup"] = model["ID"].str.cat(model["NUM"].astype(str)) + model = model.set_index("lookup") + model_lookup = model.to_dict(orient="index") + + sol = sol.loc[sol["ID"]=="j"] # remove constraints and leave variables + sol["lookup"] = sol["ID"].str.cat(sol["NUM"].astype(str)) + sol = sol.set_index("lookup") + sol_lookup = sol.to_dict(orient="index") + + # assemble dataframe + data = [] + for lookup_id, lookup_values in sol_lookup.items(): + try: + data.append([ + model_lookup[lookup_id]["NAME"], + model_lookup[lookup_id]["INDEX"], + lookup_values["PRIM"] + ]) + except KeyError: + raise OtooleError( + resource=lookup_id, + message=f"No corresponding id for {lookup_id} in the GLPK model file" + ) + + return pd.DataFrame(data, columns=["Variable", "Index", "Value"]) + \ No newline at end of file diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index b962a157..76a3b8fd 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -14,6 +14,7 @@ ReadCbc, ReadCplex, ReadGurobi, + ReadGlpk, check_for_duplicates, identify_duplicate, rename_duplicate_column, @@ -610,6 +611,91 @@ def test_manage_infeasible_variables(self, user_config): ) pd.testing.assert_frame_equal(actual, expected) +class TestReadGlpk: + """Use fixtures instead of StringIO due to the use of context managers in the logic""" + + expected_sol_data = pd.DataFrame([ + ["i", 1, "b", 3942.19479265207, 0], + ["i", 2, "b", 0, 0], + ["i", 3, "b", 0, 0], + ["i", 300, "b", 37.499, 0], + ["i", 301, "b", 31.7309999999999, 0], + ["j", 1, "b", 0, 0], + ["j", 2, "b", 0, 0], + ["j", 130, "l", 0, 0.282765294823514], + ["j", 131, "l", 0, 0.601075755990521], + ["j", 1025, "b", 0.0305438002923389, 0], + ["j", 1026, "b", 0.0422503416065477, 0], + ["j", 1027, "l", 0, 162679.693161095], + ["j", 1028, "l", 0, 81291.0524314291], + ], columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"]) + + expected_model_data = pd.DataFrame([ + ["i", 2, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2014"], + ["i", 3, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2015"], + ["i", 300, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,LNDFORCOV,2015"], + ["i", 301, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,LNDFORCOV,2016"], + ["j", 1, "SalvageValueStorage", "SIMPLICITY,DAM,2014"], + ["j", 2, "SalvageValueStorage", "SIMPLICITY,DAM,2015"], + ["j", 130, "StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2035"], + ["j", 131, "StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2036"], + ["j", 1025, "NewCapacity", "SIMPLICITY,WINDPOWER,2039"], + ["j", 1026, "NewCapacity", "SIMPLICITY,WINDPOWER,2040"], + ["j", 1027, "RateOfActivity", "SIMPLICITY,ID,BACKSTOP1,1,2014"], + ["j", 1028, "RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014"], + ], columns=["ID", "NUM", "NAME", "INDEX"]) + + def test_read_solution(self, user_config): + input_file = os.path.join("tests", "fixtures", "glpk_sol.txt") + reader = ReadGlpk(user_config) + actual_status, actual_data = reader.read_solution(input_file) + expected_status = { + "name":"osemosys_fast", + "status":"OPTIMAL", + "objective":4497.31967 + } + assert actual_status == expected_status + pd.testing.assert_frame_equal(actual_data, self.expected_sol_data) + + def test_read_model(self, user_config): + input_file 
= os.path.join("tests", "fixtures", "glpk_model.txt") + reader = ReadGlpk(user_config) + actual = reader.read_model(input_file) + + pd.testing.assert_frame_equal(actual, self.expected_model_data) + + def test_merge_model_sol(self, user_config): + reader = ReadGlpk(user_config) + actual = reader.merge_model_sol(self.expected_model_data, self.expected_sol_data) + expected = pd.DataFrame([ + ["SalvageValueStorage", "SIMPLICITY,DAM,2014", 0], + ["SalvageValueStorage", "SIMPLICITY,DAM,2015", 0], + ["StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2035", 0], + ["StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2036", 0], + ["NewCapacity", "SIMPLICITY,WINDPOWER,2039", 0.0305438002923389], + ["NewCapacity", "SIMPLICITY,WINDPOWER,2040", 0.0422503416065477], + ["RateOfActivity", "SIMPLICITY,ID,BACKSTOP1,1,2014", 0], + ["RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014", 0], + ], columns=['Variable', 'Index', 'Value']) + + pd.testing.assert_frame_equal(actual, expected) + + def test_merge_model_sol_error(self, user_config): + reader = ReadGlpk(user_config) + + model = pd.DataFrame([ + ["j", 1, "SalvageValueStorage", "SIMPLICITY,DAM,2014"], + ["j", 2, "SalvageValueStorage", "SIMPLICITY,DAM,2015"], + ], columns=["ID", "NUM", "NAME", "INDEX"]) + + sol = pd.DataFrame([ + ["j", 1025, "b", 0.0305438002923389, 0], + ["j", 1026, "b", 0.0422503416065477, 0], + ], columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"]) + + with raises(OtooleError): + reader.merge_model_sol(model, sol) + class TestCleanOnRead: """Tests that a data is cleaned and indexed upon reading""" From 8a077c0d727a69ad1ea99792f5c21fe6458e4e79 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 11 Aug 2023 01:45:02 -0700 Subject: [PATCH 025/103] fixed warnings --- src/otoole/results/results.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index 42892922..8028809c 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -481,7 +481,7 @@ def read_model(self, file_path: str) -> pd.DataFrame: data.append([parts[1], int(parts[2]), parts[3]]) df = pd.DataFrame(data, columns=["ID", "NUM", "INDEX_LIST"]) - df = df.loc[df["INDEX_LIST"].str.contains("\[")] # removes "n i 1 cost" row + df = df.loc[df["INDEX_LIST"].str.contains(r"\[")] # removes "n i 1 cost" row df[["NAME", "INDEX"]] = df["INDEX_LIST"].str.split("[", expand=True) df["INDEX"] = df["INDEX"].map(lambda x: x.split("]")[0]) @@ -515,13 +515,14 @@ def merge_model_sol(self, model: pd.DataFrame, sol: pd.DataFrame) -> pd.DataFram model_lookup = model.to_dict(orient="index") sol = sol.loc[sol["ID"]=="j"] # remove constraints and leave variables - sol["lookup"] = sol["ID"].str.cat(sol["NUM"].astype(str)) - sol = sol.set_index("lookup") - sol_lookup = sol.to_dict(orient="index") + vars = sol.copy() # setting with copy warning + vars["lookup"] = vars["ID"].str.cat(vars["NUM"].astype(str)) + vars = vars.set_index("lookup") + vars_lookup = vars.to_dict(orient="index") # assemble dataframe data = [] - for lookup_id, lookup_values in sol_lookup.items(): + for lookup_id, lookup_values in vars_lookup.items(): try: data.append([ model_lookup[lookup_id]["NAME"], From 588ea912696b8c5c05b813be0a1d2e0417e57751 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 11 Aug 2023 05:23:25 -0700 Subject: [PATCH 026/103] glpk results cli update --- src/otoole/__init__.py | 3 ++- src/otoole/cli.py | 12 +++++++++-- src/otoole/results/results.py | 38 ++++++++++++++++++++++------------- tests/fixtures/glpk_model.txt | 21 
+++++++++++++++++++ tests/fixtures/glpk_sol.txt | 21 +++++++++++++++++++ tests/test_read_strategies.py | 9 +++++++-- 6 files changed, 85 insertions(+), 19 deletions(-) create mode 100644 tests/fixtures/glpk_model.txt create mode 100644 tests/fixtures/glpk_sol.txt diff --git a/src/otoole/__init__.py b/src/otoole/__init__.py index 88e521e6..a5ede3f7 100644 --- a/src/otoole/__init__.py +++ b/src/otoole/__init__.py @@ -3,7 +3,7 @@ from otoole.input import Context from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel, ReadMemory -from otoole.results.results import ReadCbc, ReadCplex, ReadGurobi +from otoole.results.results import ReadCbc, ReadCplex, ReadGurobi, ReadGlpk from otoole.write_strategies import WriteCsv, WriteDatafile, WriteExcel if sys.version_info[:2] >= (3, 8): @@ -29,6 +29,7 @@ "ReadCplex", "ReadDatafile", "ReadExcel", + "ReadGlpk", "ReadGurobi", "ReadMemory", "WriteCsv", diff --git a/src/otoole/cli.py b/src/otoole/cli.py index 8d0ce16c..ae116dfe 100644 --- a/src/otoole/cli.py +++ b/src/otoole/cli.py @@ -50,13 +50,14 @@ ReadCsv, ReadDatafile, ReadExcel, + ReadGlpk, ReadGurobi, WriteCsv, WriteDatafile, WriteExcel, __version__, ) -from otoole.exceptions import OtooleSetupError +from otoole.exceptions import OtooleSetupError, OtooleError from otoole.input import Context from otoole.preprocess.setup import get_config_setup_data, get_csv_setup_data from otoole.utils import ( @@ -140,6 +141,8 @@ def result_matrix(args): read_strategy = ReadCplex(user_config=config) elif args.from_format == "gurobi": read_strategy = ReadGurobi(user_config=config) + elif args.from_format == "glpk": + read_strategy = ReadGlpk(user_config=config, glpk_model=args.glpk_model) # set write strategy @@ -321,7 +324,7 @@ def get_parser(): result_parser.add_argument( "from_format", help="Result data format to convert from", - choices=sorted(["cbc", "cplex", "gurobi"]), + choices=sorted(["cbc", "cplex", "gurobi", "glpk"]), ) result_parser.add_argument( "to_format", @@ -343,6 +346,11 @@ def get_parser(): default=None, ) result_parser.add_argument("config", help="Path to config YAML file") + result_parser.add_argument( + "--glpk_model", + help="GLPK model file required for processing GLPK results", + default=None, + ) result_parser.add_argument( "--write_defaults", help="Writes default values", diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index 8028809c..adce34cb 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -38,8 +38,7 @@ def read( input_data = None available_results = self.get_results_from_file( - filepath, input_data, kwargs - ) # type: Dict[str, pd.DataFrame] + filepath, input_data) # type: Dict[str, pd.DataFrame] default_values = self._read_default_values(self.results_config) # type: Dict @@ -334,23 +333,26 @@ class ReadGlpk(ReadResultsCBC): """Reads a GLPK Solution file into memory The user must provide both the solution file (results.sol) and the glpk - model file (model.lp). These can be generated from the following command + model file (model.lp) to generate the complete solution. glpsol --wglp model.lp -m osemosys.txt -d simplicity.txt --write results.sol - - Arguments - --------- - user_config - glpk model file """ - def _convert_to_dataframe(self, glpk_model: str, glpk_sol: str) -> pd.DataFrame: + def __init__(self, user_config: Dict[str, Dict], glpk_model: str = None): + """ + glpk_model: str + Path to GLPK model file. Can be created using the `--wglp` flag. 
+ If not provided, the solution file will be processed without + corresponding english names or index defenitions. + """ + super().__init__(user_config) + self.glpk_model = glpk_model + + def _convert_to_dataframe(self, glpk_sol: str) -> pd.DataFrame: """Creates a wide formatted dataframe from GLPK solution Arguments --------- - glpk_model: str - Path to GLPK model file. Can be created using the `--wglp` flag glpk_sol: str Path to GLPK solution file. Can be created using the `--write` flag @@ -359,7 +361,7 @@ def _convert_to_dataframe(self, glpk_model: str, glpk_sol: str) -> pd.DataFrame: pd.DataFrame """ - model = self.read_model(glpk_model) + model = self.read_model() _, sol = self.read_solution(glpk_sol) return self.merge_model_sol(model, sol) @@ -442,7 +444,7 @@ def read_solution(self, file_path: str) -> Tuple[Dict[str,Union[str, float]], pd return status, df - def read_model(self, file_path: str) -> pd.DataFrame: + def read_model(self) -> pd.DataFrame: """Reads in a GLPK Model File Arguments @@ -469,9 +471,17 @@ def read_model(self, file_path: str) -> pd.DataFrame: n j COL NAME # j = variable name, COL is the column ordinal number """ + if not self.glpk_model: + raise OtooleError( + resource="GLPK.lp", + message="No GLPK model file provided. This can be generated via the `--wglp` command." + ) + else: + model_path = self.glpk_model + data = [] - with open(file_path, "r") as f: + with open(model_path, "r") as f: for line in f: parts = line.strip().split() if not parts[0] == "n": diff --git a/tests/fixtures/glpk_model.txt b/tests/fixtures/glpk_model.txt new file mode 100644 index 00000000..a22b8e98 --- /dev/null +++ b/tests/fixtures/glpk_model.txt @@ -0,0 +1,21 @@ +p lp min 12665 9450 82606 +n p osemosys_fast +n z cost +i 1 f +n i 1 cost +i 2 u -0 +n i 2 CAa4_Constraint_Capacity[SIMPLICITY,ID,BACKSTOP1,2014] +i 3 u -0 +n i 3 CAa4_Constraint_Capacity[SIMPLICITY,ID,BACKSTOP1,2015] +i 300 u 147.115 +n i 300 CAa4_Constraint_Capacity[SIMPLICITY,ID,LNDFORCOV,2015] +i 301 u 144.231 +n i 301 CAa4_Constraint_Capacity[SIMPLICITY,ID,LNDFORCOV,2016] +n j 1 SalvageValueStorage[SIMPLICITY,DAM,2014] +n j 2 SalvageValueStorage[SIMPLICITY,DAM,2015] +n j 130 StorageLevelSeasonStart[SIMPLICITY,DAM,2,2035] +n j 131 StorageLevelSeasonStart[SIMPLICITY,DAM,2,2036] +n j 1025 NewCapacity[SIMPLICITY,WINDPOWER,2039] +n j 1026 NewCapacity[SIMPLICITY,WINDPOWER,2040] +n j 1027 RateOfActivity[SIMPLICITY,ID,BACKSTOP1,1,2014] +n j 1028 RateOfActivity[SIMPLICITY,IN,BACKSTOP1,1,2014] \ No newline at end of file diff --git a/tests/fixtures/glpk_sol.txt b/tests/fixtures/glpk_sol.txt new file mode 100644 index 00000000..6af27eea --- /dev/null +++ b/tests/fixtures/glpk_sol.txt @@ -0,0 +1,21 @@ +c Problem: osemosys_fast +c Rows: 12665 +c Columns: 9450 +c Non-zeros: 82606 +c Status: OPTIMAL +c Objective: cost = 4497.31967 (MINimum) +c +s bas 12665 9450 f f 4497.31967015205 +i 1 b 3942.19479265207 0 +i 2 b 0 0 +i 3 b 0 0 +i 300 b 37.499 0 +i 301 b 31.7309999999999 0 +j 1 b 0 0 +j 2 b 0 0 +j 130 l 0 0.282765294823514 +j 131 l 0 0.601075755990521 +j 1025 b 0.0305438002923389 0 +j 1026 b 0.0422503416065477 0 +j 1027 l 0 162679.693161095 +j 1028 l 0 81291.0524314291 \ No newline at end of file diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index 76a3b8fd..a214b4b7 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -659,8 +659,8 @@ def test_read_solution(self, user_config): def test_read_model(self, user_config): input_file = os.path.join("tests", "fixtures", 
"glpk_model.txt") - reader = ReadGlpk(user_config) - actual = reader.read_model(input_file) + reader = ReadGlpk(user_config=user_config, glpk_model=input_file) + actual = reader.read_model() pd.testing.assert_frame_equal(actual, self.expected_model_data) @@ -695,6 +695,11 @@ def test_merge_model_sol_error(self, user_config): with raises(OtooleError): reader.merge_model_sol(model, sol) + + def test_read_model_error(self, user_config): + reader = ReadGlpk(user_config) + with raises(OtooleError): + reader.read_model() class TestCleanOnRead: From fa6d9e59b26afb08025c48f9fac64a5c585f1b1c Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 11 Aug 2023 07:53:32 -0700 Subject: [PATCH 027/103] glpk reading bug --- docs/examples.rst | 23 +++++ docs/functionality.rst | 14 +-- src/otoole/__init__.py | 2 +- src/otoole/cli.py | 2 +- src/otoole/results/results.py | 189 ++++++++++++++++++---------------- tests/fixtures/glpk_model.txt | 2 +- tests/fixtures/glpk_sol.txt | 2 +- tests/test_read_strategies.py | 121 +++++++++++++++------- 8 files changed, 220 insertions(+), 135 deletions(-) diff --git a/docs/examples.rst b/docs/examples.rst index 9e192332..a259a674 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -107,6 +107,29 @@ Use ``otoole``'s ``result`` package to generate the result CSVs:: $ otoole results cbc csv simplicity.sol results config.yaml +Data Processing with GLPK +------------------------- + +Objective +~~~~~~~~~ + +Build and solve a model using only GLPK and otoole + +1. Build the solve the model using GLPK +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use GLPK_ to build the model, save it as ``simplicity.lp``, solve the model, +and save the solution as ``simplicity.sol```:: + + $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wglp simplicity.lp --write simplicity.sol + +2. Use otoole to process the solution in CSVs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +When processing solutions from GLPK, both the model file (``*.lp``) and solution +file (``*.sol``) must be passed:: + + $ otoole results glpk csv simplicity.sol results config.yaml --glpk_model simplicity.lp --input_datafile simplicity.txt + + Model Visualization ------------------- diff --git a/docs/functionality.rst b/docs/functionality.rst index 7a027bf2..d966c431 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -75,7 +75,7 @@ apparent. CBC_ is an alternative open-source solver which offers better performa GLPK_ and can handle much larger models. However, CBC_ has no way of knowing how to write out the CSV files you were used to dealing with when using GLPK_. ``otoole`` to the rescue! -``otoole`` currently supports using CBC_, CPLEX_ or Gurobi_ with all three versions of +``otoole`` currently supports using GLPK_, CBC_, CPLEX_ or Gurobi_ with all versions of GNU MathProg OSeMOSYS - the long, short and fast versions. The long version includes all results as variables within the formulation, so the @@ -90,13 +90,13 @@ so as to speed up the model matrix generation and solution times. 
~~~~~~~~~~~~~~~~~~ The ``results`` command creates a folder of CSV result files from a CBC_, CLP_, -Gurobi_ or CPLEX_ solution file:: +GLPK_, Gurobi_ or CPLEX_ solution file:: $ otoole results --help - usage: otoole results [-h] [--input_datafile INPUT_DATAFILE] [--input_datapackage INPUT_DATAPACKAGE] [--write_defaults] {cbc,cplex,gurobi} {csv} from_path to_path config + usage: otoole results [-h] [--input_datafile INPUT_DATAFILE] [--input_datapackage INPUT_DATAPACKAGE] [--glpk_model GLPK_MODEL] [--write_defaults] {cbc,cplex,glpk,gurobi} {csv} from_path to_path config positional arguments: - {cbc,cplex,gurobi} Result data format to convert from + {cbc,cplex,glpk,gurobi} Result data format to convert from {csv} Result data format to convert to from_path Path to file or folder to convert from to_path Path to file or folder to convert to @@ -105,9 +105,11 @@ Gurobi_ or CPLEX_ solution file:: optional arguments: -h, --help show this help message and exit --input_datafile INPUT_DATAFILE - Input GNUMathProg datafile required for OSeMOSYS short or fast results + Input GNUMathProg datafile required for OSeMOSYS short or fast results --input_datapackage INPUT_DATAPACKAGE - Deprecated + Deprecated + --glpk_model GLPK_MODEL + GLPK model file required for processing GLPK results --write_defaults Writes default values .. versionadded:: v1.0.0 diff --git a/src/otoole/__init__.py b/src/otoole/__init__.py index a5ede3f7..6e07f615 100644 --- a/src/otoole/__init__.py +++ b/src/otoole/__init__.py @@ -3,7 +3,7 @@ from otoole.input import Context from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel, ReadMemory -from otoole.results.results import ReadCbc, ReadCplex, ReadGurobi, ReadGlpk +from otoole.results.results import ReadCbc, ReadCplex, ReadGlpk, ReadGurobi from otoole.write_strategies import WriteCsv, WriteDatafile, WriteExcel if sys.version_info[:2] >= (3, 8): diff --git a/src/otoole/cli.py b/src/otoole/cli.py index ae116dfe..c9772a49 100644 --- a/src/otoole/cli.py +++ b/src/otoole/cli.py @@ -57,7 +57,7 @@ WriteExcel, __version__, ) -from otoole.exceptions import OtooleSetupError, OtooleError +from otoole.exceptions import OtooleSetupError from otoole.input import Context from otoole.preprocess.setup import get_config_setup_data, get_csv_setup_data from otoole.utils import ( diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index adce34cb..bd8b04cb 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -5,10 +5,10 @@ import pandas as pd +from otoole.exceptions import OtooleError from otoole.input import ReadStrategy from otoole.preprocess.longify_data import check_datatypes from otoole.results.result_package import ResultsPackage -from otoole.exceptions import OtooleError LOGGER = logging.getLogger(__name__) @@ -38,7 +38,8 @@ def read( input_data = None available_results = self.get_results_from_file( - filepath, input_data) # type: Dict[str, pd.DataFrame] + filepath, input_data + ) # type: Dict[str, pd.DataFrame] default_values = self._read_default_values(self.results_config) # type: Dict @@ -329,70 +330,80 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: df = df.drop(columns=["indexvalue"]) return df[["Variable", "Index", "Value"]].astype({"Value": float}) + class ReadGlpk(ReadResultsCBC): - """Reads a GLPK Solution file into memory - - The user must provide both the solution file (results.sol) and the glpk - model file (model.lp) to generate the complete solution. 
- + """Reads a GLPK Solution file into memory + + The user must provide both the solution file (results.sol) and the glpk + model file (model.lp) to generate the complete solution. + glpsol --wglp model.lp -m osemosys.txt -d simplicity.txt --write results.sol """ - + def __init__(self, user_config: Dict[str, Dict], glpk_model: str = None): """ glpk_model: str Path to GLPK model file. Can be created using the `--wglp` flag. - If not provided, the solution file will be processed without - corresponding english names or index defenitions. + If not provided, the solution file will be processed without + corresponding english names or index defenitions. """ super().__init__(user_config) self.glpk_model = glpk_model - - def _convert_to_dataframe(self, glpk_sol: str) -> pd.DataFrame: + + def _convert_to_dataframe(self, glpk_sol: Union[str, TextIO]) -> pd.DataFrame: """Creates a wide formatted dataframe from GLPK solution - + Arguments --------- glpk_sol: str Path to GLPK solution file. Can be created using the `--write` flag - + Returns ------- pd.DataFrame """ - + model = self.read_model() - _, sol = self.read_solution(glpk_sol) + + if isinstance(glpk_sol, str): + with open(glpk_sol, "r") as sol_file: + _, sol = self.read_solution(sol_file) + elif isinstance(glpk_sol, StringIO): + _, sol = self.read_solution(glpk_sol) + else: + raise TypeError("Argument filepath type must be a string or an open file") + return self.merge_model_sol(model, sol) - - def read_solution(self, file_path: str) -> Tuple[Dict[str,Union[str, float]], pd.DataFrame]: - """Reads a GLPK solution file - + + def read_solution( + self, file_path: Union[str, TextIO] + ) -> Tuple[Dict[str, Union[str, float]], pd.DataFrame]: + """Reads a GLPK solution file + Arguments --------- file_path: str Path to GLPK solution file. Can be created using the `--write` flag - + Returns ------- Tuple[Dict[str,Union[str, float]], pd.DataFrame] - Dict[str,Union[str, float]] -> Problem name, status, and objective value - pd.DataFrame -> Variables and constraints - - + Dict[str,Union[str, float]] -> Problem name, status, and objective value + pd.DataFrame -> Variables and constraints + {"name":"osemosys", "status":"OPTIMAL", "objective":4497.31976} - + ID NUM STATUS PRIM DUAL 0 i 1 b 5 0 1 j 2 l 0 2 - + Notes ----- - - -> ROWS IN SOLUTION FILE - + + -> ROWS IN SOLUTION FILE + i ROW ST PRIM DUAL - + ROW is the ordinal number of the row ST is one of: b = inactive constraint; @@ -402,9 +413,9 @@ def read_solution(self, file_path: str) -> Tuple[Dict[str,Union[str, float]], pd s = active equality constraint. 
PRIM specifies the row primal value (float) DUAL specifies the row dual value (float) - + -> COLUMNS IN SOLUTION FILE - + j COL ST PRIM DUAL COL specifies the column ordinal number @@ -417,70 +428,75 @@ def read_solution(self, file_path: str) -> Tuple[Dict[str,Union[str, float]], pd PRIM field contains column primal value (float) DUAL field contains the column dual value (float) """ - + data = [] status = {} - with open(file_path, "r") as f: - for line in f: - parts = line.strip().split() - if parts[0] in ("i", "j"): - data.append([ - parts[0], int(parts[1]), parts[2], float(parts[3]), float(parts[4]) - ]) - elif len(parts) > 1: - if parts[1] == "Problem:": - status["name"] = parts[2] - elif parts[1] == "Status:": - status["status"] = parts[2] - elif parts[1] == "Objective:": - status["objective"] = float(parts[4]) - + for line in file_path: + parts = line.strip().split() + if parts[0] in ("i", "j"): + data.append( + [ + parts[0], + int(parts[1]), + parts[2], + float(parts[3]), + float(parts[4]), + ] + ) + elif len(parts) > 1: + if parts[1] == "Problem:": + status["name"] = parts[2] + elif parts[1] == "Status:": + status["status"] = parts[2] + elif parts[1] == "Objective:": + status["objective"] = float(parts[4]) + df = pd.DataFrame(data, columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"]) - + for info in ["name", "status", "objective"]: if info not in status: LOGGER.warning(f"No {info} extracted from the GLPK solution") - + return status, df - + def read_model(self) -> pd.DataFrame: - """Reads in a GLPK Model File - + """Reads in a GLPK Model File + Arguments --------- file_path: str Path to GLPK model file. Can be created using the `--wglp` flag. - + Returns ------- pd.DataFrame - - ID NUM NAME INDEX + + ID NUM NAME INDEX 0 i 1 CAa4_Constraint_Capacity "SIMPLICITY,ID,BACKSTOP1,2015" 1 j 2 NewCapacity "SIMPLICITY,WINDPOWER,2039" - + Notes ----- - - -> GENERAL LAYOUT OF SOLUTION FILE - + + -> GENERAL LAYOUT OF SOLUTION FILE + n p NAME # p = problem instance n z NAME # z = objective function n i ROW NAME # i = constraint name, ROW is the row ordinal number n j COL NAME # j = variable name, COL is the column ordinal number """ - + if not self.glpk_model: raise OtooleError( resource="GLPK.lp", - message="No GLPK model file provided. This can be generated via the `--wglp` command." + message="No GLPK model file provided. This can be generated via the `--wglp` command.", ) else: model_path = self.glpk_model - + data = [] - + with open(model_path, "r") as f: for line in f: parts = line.strip().split() @@ -489,22 +505,22 @@ def read_model(self) -> pd.DataFrame: if len(parts) < 4: continue data.append([parts[1], int(parts[2]), parts[3]]) - + df = pd.DataFrame(data, columns=["ID", "NUM", "INDEX_LIST"]) - df = df.loc[df["INDEX_LIST"].str.contains(r"\[")] # removes "n i 1 cost" row - + df = df.loc[df["INDEX_LIST"].str.contains(r"\[")] # removes "n i 1 cost" row + df[["NAME", "INDEX"]] = df["INDEX_LIST"].str.split("[", expand=True) df["INDEX"] = df["INDEX"].map(lambda x: x.split("]")[0]) - + df = df[["ID", "NUM", "NAME", "INDEX"]].reset_index(drop=True) return df - + def merge_model_sol(self, model: pd.DataFrame, sol: pd.DataFrame) -> pd.DataFrame: """Merges GLPK model and solution file into one dataframe - + Arguments --------- - model: pd.DataFrame, + model: pd.DataFrame, see output from ReadGlpk.read_model(...) sol: pd.DataFrame see output from ReadGlpk.read_solution(...) 
@@ -512,38 +528,39 @@ def merge_model_sol(self, model: pd.DataFrame, sol: pd.DataFrame) -> pd.DataFram Returns ------- pd.DataFrame - + >>> pd.DataFrame(data=[ ['TotalDiscountedCost', "SIMPLICITY,2015", 187.01576], ['TotalDiscountedCost', "SIMPLICITY,2016", 183.30788]], columns=['Variable', 'Index', 'Value']) """ - + # create lookup ids using the id and num columns to coordinate merge model["lookup"] = model["ID"].str.cat(model["NUM"].astype(str)) model = model.set_index("lookup") model_lookup = model.to_dict(orient="index") - - sol = sol.loc[sol["ID"]=="j"] # remove constraints and leave variables - vars = sol.copy() # setting with copy warning + + sol = sol.loc[sol["ID"] == "j"] # remove constraints and leave variables + vars = sol.copy() # setting with copy warning vars["lookup"] = vars["ID"].str.cat(vars["NUM"].astype(str)) vars = vars.set_index("lookup") vars_lookup = vars.to_dict(orient="index") - + # assemble dataframe data = [] for lookup_id, lookup_values in vars_lookup.items(): try: - data.append([ - model_lookup[lookup_id]["NAME"], - model_lookup[lookup_id]["INDEX"], - lookup_values["PRIM"] - ]) + data.append( + [ + model_lookup[lookup_id]["NAME"], + model_lookup[lookup_id]["INDEX"], + lookup_values["PRIM"], + ] + ) except KeyError: raise OtooleError( resource=lookup_id, - message=f"No corresponding id for {lookup_id} in the GLPK model file" + message=f"No corresponding id for {lookup_id} in the GLPK model file", ) - + return pd.DataFrame(data, columns=["Variable", "Index", "Value"]) - \ No newline at end of file diff --git a/tests/fixtures/glpk_model.txt b/tests/fixtures/glpk_model.txt index a22b8e98..63cddbf7 100644 --- a/tests/fixtures/glpk_model.txt +++ b/tests/fixtures/glpk_model.txt @@ -18,4 +18,4 @@ n j 131 StorageLevelSeasonStart[SIMPLICITY,DAM,2,2036] n j 1025 NewCapacity[SIMPLICITY,WINDPOWER,2039] n j 1026 NewCapacity[SIMPLICITY,WINDPOWER,2040] n j 1027 RateOfActivity[SIMPLICITY,ID,BACKSTOP1,1,2014] -n j 1028 RateOfActivity[SIMPLICITY,IN,BACKSTOP1,1,2014] \ No newline at end of file +n j 1028 RateOfActivity[SIMPLICITY,IN,BACKSTOP1,1,2014] diff --git a/tests/fixtures/glpk_sol.txt b/tests/fixtures/glpk_sol.txt index 6af27eea..37fbe7f1 100644 --- a/tests/fixtures/glpk_sol.txt +++ b/tests/fixtures/glpk_sol.txt @@ -18,4 +18,4 @@ j 131 l 0 0.601075755990521 j 1025 b 0.0305438002923389 0 j 1026 b 0.0422503416065477 0 j 1027 l 0 162679.693161095 -j 1028 l 0 81291.0524314291 \ No newline at end of file +j 1028 l 0 81291.0524314291 diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index a214b4b7..90b6316e 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -13,8 +13,8 @@ from otoole.results.results import ( ReadCbc, ReadCplex, - ReadGurobi, ReadGlpk, + ReadGurobi, check_for_duplicates, identify_duplicate, rename_duplicate_column, @@ -611,10 +611,36 @@ def test_manage_infeasible_variables(self, user_config): ) pd.testing.assert_frame_equal(actual, expected) + class TestReadGlpk: """Use fixtures instead of StringIO due to the use of context managers in the logic""" - expected_sol_data = pd.DataFrame([ + sol_data = dedent( + """c Problem: osemosys_fast +c Rows: 12665 +c Columns: 9450 +c Non-zeros: 82606 +c Status: OPTIMAL +c Objective: cost = 4497.31967 (MINimum) +c +s bas 12665 9450 f f 4497.31967015205 +i 1 b 3942.19479265207 0 +i 2 b 0 0 +i 3 b 0 0 +i 300 b 37.499 0 +i 301 b 31.7309999999999 0 +j 1 b 0 0 +j 2 b 0 0 +j 130 l 0 0.282765294823514 +j 131 l 0 0.601075755990521 +j 1025 b 0.0305438002923389 0 +j 1026 b 
0.0422503416065477 0 +j 1027 l 0 162679.693161095 +j 1028 l 0 81291.0524314291""" + ) + + expected_sol_data = pd.DataFrame( + [ ["i", 1, "b", 3942.19479265207, 0], ["i", 2, "b", 0, 0], ["i", 3, "b", 0, 0], @@ -628,9 +654,12 @@ class TestReadGlpk: ["j", 1026, "b", 0.0422503416065477, 0], ["j", 1027, "l", 0, 162679.693161095], ["j", 1028, "l", 0, 81291.0524314291], - ], columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"]) - - expected_model_data = pd.DataFrame([ + ], + columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], + ) + + expected_model_data = pd.DataFrame( + [ ["i", 2, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2014"], ["i", 3, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2015"], ["i", 300, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,LNDFORCOV,2015"], @@ -643,64 +672,78 @@ class TestReadGlpk: ["j", 1026, "NewCapacity", "SIMPLICITY,WINDPOWER,2040"], ["j", 1027, "RateOfActivity", "SIMPLICITY,ID,BACKSTOP1,1,2014"], ["j", 1028, "RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014"], - ], columns=["ID", "NUM", "NAME", "INDEX"]) + ], + columns=["ID", "NUM", "NAME", "INDEX"], + ) def test_read_solution(self, user_config): - input_file = os.path.join("tests", "fixtures", "glpk_sol.txt") + input_file = self.sol_data reader = ReadGlpk(user_config) - actual_status, actual_data = reader.read_solution(input_file) + with StringIO(input_file) as file_buffer: + actual_status, actual_data = reader.read_solution(file_buffer) expected_status = { - "name":"osemosys_fast", - "status":"OPTIMAL", - "objective":4497.31967 + "name": "osemosys_fast", + "status": "OPTIMAL", + "objective": 4497.31967, } assert actual_status == expected_status pd.testing.assert_frame_equal(actual_data, self.expected_sol_data) - + def test_read_model(self, user_config): input_file = os.path.join("tests", "fixtures", "glpk_model.txt") reader = ReadGlpk(user_config=user_config, glpk_model=input_file) actual = reader.read_model() - + pd.testing.assert_frame_equal(actual, self.expected_model_data) - + def test_merge_model_sol(self, user_config): reader = ReadGlpk(user_config) - actual = reader.merge_model_sol(self.expected_model_data, self.expected_sol_data) - expected = pd.DataFrame([ - ["SalvageValueStorage", "SIMPLICITY,DAM,2014", 0], - ["SalvageValueStorage", "SIMPLICITY,DAM,2015", 0], - ["StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2035", 0], - ["StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2036", 0], - ["NewCapacity", "SIMPLICITY,WINDPOWER,2039", 0.0305438002923389], - ["NewCapacity", "SIMPLICITY,WINDPOWER,2040", 0.0422503416065477], - ["RateOfActivity", "SIMPLICITY,ID,BACKSTOP1,1,2014", 0], - ["RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014", 0], - ], columns=['Variable', 'Index', 'Value']) - + actual = reader.merge_model_sol( + self.expected_model_data, self.expected_sol_data + ) + expected = pd.DataFrame( + [ + ["SalvageValueStorage", "SIMPLICITY,DAM,2014", 0], + ["SalvageValueStorage", "SIMPLICITY,DAM,2015", 0], + ["StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2035", 0], + ["StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2036", 0], + ["NewCapacity", "SIMPLICITY,WINDPOWER,2039", 0.0305438002923389], + ["NewCapacity", "SIMPLICITY,WINDPOWER,2040", 0.0422503416065477], + ["RateOfActivity", "SIMPLICITY,ID,BACKSTOP1,1,2014", 0], + ["RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014", 0], + ], + columns=["Variable", "Index", "Value"], + ) + pd.testing.assert_frame_equal(actual, expected) def test_merge_model_sol_error(self, user_config): reader = ReadGlpk(user_config) - - model = pd.DataFrame([ - ["j", 1, "SalvageValueStorage", 
"SIMPLICITY,DAM,2014"], - ["j", 2, "SalvageValueStorage", "SIMPLICITY,DAM,2015"], - ], columns=["ID", "NUM", "NAME", "INDEX"]) - - sol = pd.DataFrame([ - ["j", 1025, "b", 0.0305438002923389, 0], - ["j", 1026, "b", 0.0422503416065477, 0], - ], columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"]) - + + model = pd.DataFrame( + [ + ["j", 1, "SalvageValueStorage", "SIMPLICITY,DAM,2014"], + ["j", 2, "SalvageValueStorage", "SIMPLICITY,DAM,2015"], + ], + columns=["ID", "NUM", "NAME", "INDEX"], + ) + + sol = pd.DataFrame( + [ + ["j", 1025, "b", 0.0305438002923389, 0], + ["j", 1026, "b", 0.0422503416065477, 0], + ], + columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], + ) + with raises(OtooleError): reader.merge_model_sol(model, sol) - + def test_read_model_error(self, user_config): reader = ReadGlpk(user_config) with raises(OtooleError): reader.read_model() - + class TestCleanOnRead: """Tests that a data is cleaned and indexed upon reading""" From 464241b635c391a3d7ff15ae378ab130aaab6794 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Sat, 12 Aug 2023 04:32:35 -0700 Subject: [PATCH 028/103] update to pydantic 2 --- docs/requirements.txt | 2 +- setup.cfg | 2 +- src/otoole/preprocess/validate_config.py | 106 +++++++++++------------ 3 files changed, 54 insertions(+), 56 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index a575fdd3..5e98bc20 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -5,7 +5,7 @@ Jinja2<3.1 networkx openpyxl pandas>=1.1 -pydantic<2 +pydantic>=2 pydot pyyaml # Requirements file for ReadTheDocs, check .readthedocs.yml. diff --git a/setup.cfg b/setup.cfg index 00cc9ce3..d14c3729 100644 --- a/setup.cfg +++ b/setup.cfg @@ -52,7 +52,7 @@ install_requires = networkx flatten_dict openpyxl - pydantic<2 + pydantic>=2 [options.packages.find] where = src exclude = diff --git a/src/otoole/preprocess/validate_config.py b/src/otoole/preprocess/validate_config.py index b6d8d17b..7903b0ab 100644 --- a/src/otoole/preprocess/validate_config.py +++ b/src/otoole/preprocess/validate_config.py @@ -3,7 +3,10 @@ import logging from typing import List, Optional, Union -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +# from pydantic import FieldValidationInfo + logger = logging.getLogger(__name__) @@ -11,39 +14,41 @@ class UserDefinedValue(BaseModel): """Represents any user defined value""" + model_config = ConfigDict(extra="forbid") + name: str type: str dtype: str - defined_sets: Optional[List[str]] - indices: Optional[List[str]] - default: Optional[Union[int, float]] - calculated: Optional[bool] - short_name: Optional[str] + defined_sets: Optional[List[str]] = None + indices: Optional[List[str]] = None + default: Optional[Union[int, float]] = None + calculated: Optional[bool] = None + short_name: Optional[str] = None - @validator("type") + @field_validator("type") @classmethod - def check_param_type(cls, value, values): + def check_param_type(cls, value, info): if value not in ["param", "result", "set"]: raise ValueError( - f"{values['name']} -> Type must be 'param', 'result', or 'set'" + f"{info.field_name} -> Type must be 'param', 'result', or 'set'" ) return value - @validator("name", "short_name") + @field_validator("name", "short_name") @classmethod # for linting purposes def check_name_for_spaces(cls, value): if " " in value: raise ValueError(f"{value} -> Name can not have spaces") return value - @validator("name", "short_name") + @field_validator("name", "short_name") 
@classmethod def check_name_for_numbers(cls, value): if any(char.isdigit() for char in value): raise ValueError(f"{value} -> Name can not have digits") return value - @validator("name", "short_name") + @field_validator("name", "short_name") @classmethod def check_name_for_special_chars(cls, value): # removed underscore from the recommeded special char list @@ -54,7 +59,7 @@ def check_name_for_special_chars(cls, value): ) return value - @root_validator(pre=True) + @model_validator(mode="before") @classmethod def check_name_length(cls, values): if len(values["name"]) > 31: @@ -69,32 +74,29 @@ def check_name_length(cls, values): ) return values - class Config: - extra = "forbid" - class UserDefinedSet(UserDefinedValue): """Represents a set""" - @validator("dtype") + @field_validator("dtype") @classmethod - def check_dtype(cls, value, values): + def check_dtype(cls, value, info): if value not in ["str", "int"]: - raise ValueError(f"{values['name']} -> Value must be a 'str' or 'int'") + raise ValueError(f"{info.field_name} -> Value must be a 'str' or 'int'") return value class UserDefinedParameter(UserDefinedValue): """Represents a parameter""" - @validator("dtype") + @field_validator("dtype") @classmethod - def check_dtype(cls, value, values): + def check_dtype(cls, value, info): if value not in ["float", "int"]: - raise ValueError(f"{values['name']} -> Value must be an 'int' or 'float'") + raise ValueError(f"{info.field_name} -> Value must be an 'int' or 'float'") return value - @root_validator(pre=True) + @model_validator(mode="before") @classmethod def check_required_inputs(cls, values): required = ["default", "defined_sets", "indices"] @@ -104,38 +106,36 @@ def check_required_inputs(cls, values): ) return values - @root_validator(pre=True) - @classmethod - def check_index_in_set(cls, values): - if not all(i in values["defined_sets"] for i in values["indices"]): - raise ValueError(f"{values['name']} -> Index not in user supplied sets") - return values + @model_validator(mode="after") + def check_index_in_set(self): + if not all(i in self.defined_sets for i in self.indices): + raise ValueError(f"{self.name} -> Index not in user supplied sets") + return self - @root_validator(pre=True) - @classmethod - def check_dtype_default(cls, values): - dtype_input = values["dtype"] - dtype_default = type(values["default"]).__name__ + @model_validator(mode="after") + def check_dtype_default(self): + dtype_input = self.dtype + dtype_default = type(self.default).__name__ if dtype_input != dtype_default: # allow ints to be cast as floats if not ((dtype_default == "int") and (dtype_input == "float")): raise ValueError( - f"{values['name']} -> User dtype is {dtype_input} while default value dtype is {dtype_default}" + f"{self.name} -> User dtype is {dtype_input} while default value dtype is {dtype_default}" ) - return values + return self class UserDefinedResult(UserDefinedValue): """Represents a result""" - @validator("dtype") + @field_validator("dtype") @classmethod - def check_dtype(cls, value, values): + def check_dtype(cls, value, info): if value not in ["float", "int"]: - raise ValueError(f"{values['name']} -> Value must be an 'int' or 'float'") + raise ValueError(f"{info.field_name} -> Value must be an 'int' or 'float'") return value - @root_validator(pre=True) + @model_validator(mode="before") @classmethod def check_required_inputs(cls, values): required = ["default", "defined_sets", "indices"] @@ -145,7 +145,7 @@ def check_required_inputs(cls, values): ) return values - @root_validator(pre=True) + 
@model_validator(mode="before") @classmethod def check_deprecated_values(cls, values): deprecated = ["calculated", "Calculated"] @@ -156,22 +156,20 @@ def check_deprecated_values(cls, values): ) return values - @root_validator(pre=True) - @classmethod - def check_index_in_set(cls, values): - if not all(i in values["defined_sets"] for i in values["indices"]): - raise ValueError(f"{values['name']} -> Index not in user supplied sets") - return values + @model_validator(mode="after") + def check_index_in_set(self): + if not all(i in self.defined_sets for i in self.indices): + raise ValueError(f"{self.name} -> Index not in user supplied sets") + return self - @root_validator(pre=True) - @classmethod - def check_dtype_default(cls, values): - dtype_input = values["dtype"] - dtype_default = type(values["default"]).__name__ + @model_validator(mode="after") + def check_dtype_default(self): + dtype_input = self.dtype + dtype_default = type(self.default).__name__ if dtype_input != dtype_default: # allow ints to be cast as floats if not ((dtype_default == "int") and (dtype_input == "float")): raise ValueError( - f"{values['name']} -> User dtype is {dtype_input} while default value dtype is {dtype_default}" + f"{self.name} -> User dtype is {dtype_input} while default value dtype is {dtype_default}" ) - return values + return self From 58c30211a37dad866e42cef8dbbb29af06b34447 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 14 Aug 2023 07:49:26 -0700 Subject: [PATCH 029/103] spelling fixes --- docs/data.rst | 4 ++-- docs/examples.rst | 6 +++--- docs/functionality.rst | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/data.rst b/docs/data.rst index 4328c3f9..6d144aa7 100644 --- a/docs/data.rst +++ b/docs/data.rst @@ -68,7 +68,7 @@ Sets are defined as follows:: .. NOTE:: It's convention in OSeMOSYS to capitalize set names -Parmaters Foramt +Parameters Format ~~~~~~~~~~~~~~~~ Parameters are defined as follows. When referencing set indices use the full @@ -112,7 +112,7 @@ repository for a complete example. dtype: str type: set -2. Parmater definition of ``AccumulatedAnnualDemand``:: +2. Parameter definition of ``AccumulatedAnnualDemand``:: AccumulatedAnnualDemand: short_name: AccAnnualDemand diff --git a/docs/examples.rst b/docs/examples.rst index 9e192332..233712d6 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -200,7 +200,7 @@ Model Validation .. NOTE:: In this example, we will use a very simple model instead of the Simplicity_ demonstration model. This way the user does not need to be - familar with the naming convenations of the model. + familiar with the naming conventions of the model. Objective ~~~~~~~~~ @@ -281,8 +281,8 @@ against the validation configuration file:: $ otoole validate datafile data.txt config.yaml --validate_config validate.yaml .. WARNING:: - Do not confuse the user configuation file (``config.yaml``) and the - validation configuation file (``validate.yaml``). Both configuartion files + Do not confuse the user configuration file (``config.yaml``) and the + validation configuration file (``validate.yaml``). Both configuration files are required for validation functionality. The final validation configuration file in this example will look like:: diff --git a/docs/functionality.rst b/docs/functionality.rst index 7a027bf2..720c646e 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -17,7 +17,7 @@ Gurobi_, is called results and post-processing. .. image:: _static/workflow.png .. 
NOTE::
-    While ``otoole`` is targetted at OSeMOSYS users, the functionality can eaisly be extended
+    While ``otoole`` is targeted at OSeMOSYS users, the functionality can easily be extended
     to work with any workflow that involves the use of a MathProg file!

 Data Conversion
@@ -38,7 +38,7 @@ conversion between the following formats:

 ``otoole convert``
 ~~~~~~~~~~~~~~~~~~

-THe ``otoole convert``` command allows you to convert between various different
+The ``otoole convert`` command allows you to convert between various different
 input formats::

     $ otoole convert --help

From b844570ca29aeb501307ef4081faa514c632a5b4 Mon Sep 17 00:00:00 2001
From: trevorb1
Date: Mon, 14 Aug 2023 13:32:20 -0700
Subject: [PATCH 030/103] update install instructions

---
 docs/examples.rst | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/docs/examples.rst b/docs/examples.rst
index 233712d6..fe97fe14 100644
--- a/docs/examples.rst
+++ b/docs/examples.rst
@@ -334,17 +334,23 @@ Install GLPK_ and CBC_ to use in the otoole examples.

 GLPK_ is a free and open-source linear program solver.

-To install it on **Linux**, run the command::
+To install GLPK on **Linux**, run the command::

     sudo apt-get update
     sudo apt-get install glpk glpk-utils

-To install it on **Mac**, run the command::
+To install GLPK on **Mac**, run the command::

     brew install glpk

-.. To install it on **Windows**, follow the install instruction on the GLPK_
-.. website, and/or follow the instructions_ from the OSeMOSYS community
+To install GLPK on **Windows**, follow the instructions on the
+`GLPK Website `. Be sure to add GLPK to
+your environment variables if installing on Windows.
+
+Alternatively, if you use `Anaconda ` to manage
+your Python packages, you can install GLPK via the command::
+
+    conda install -c conda-forge glpk

 2. Test the GLPK install
 ~~~~~~~~~~~~~~~~~~~~~~~~
@@ -361,16 +367,21 @@ CBC_ is a free and open-source mixed integer linear programming solver. Full
 install instructions can be found on the CBC_ website. However, the
 abbreviated instructions are shown below

-To install it on **Linux**, run the command::
+To install CBC on **Linux**, run the command::

     sudo apt-get install coinor-cbc coinor-libcbc-dev

-To install it on **Mac**, run the command::
+To install CBC on **Mac**, run the command::

     brew install coin-or-tools/coinor/cbc

-.. To install it on **Windows**, follow the install instruction on the CBC_
-.. website by downloading a binary
+To install CBC on **Windows**, follow the install instructions on the CBC_
+website.
+
+Alternatively, if you use `Anaconda ` to manage
+your Python packages, you can install CBC via the command::
+
+    conda install -c conda-forge coincbc

 4. Test the CBC install
 ~~~~~~~~~~~~~~~~~~~~~~~

From a5ee3e0a40fde8631371469e290bd0fb7cb68cd2 Mon Sep 17 00:00:00 2001
From: trevorb1
Date: Mon, 14 Aug 2023 13:33:52 -0700
Subject: [PATCH 031/103] fixed typo in docs

---
 docs/examples.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/examples.rst b/docs/examples.rst
index fe97fe14..2638f7ae 100644
--- a/docs/examples.rst
+++ b/docs/examples.rst
@@ -327,7 +327,7 @@ Solver Setup

 Objective
 ~~~~~~~~~

-Install GLPK_ and CBC_ to use in the otoole examples.
+Install GLPK_ (required) and CBC_ (optional) to use in the otoole examples.

 1.
Install GLPK ~~~~~~~~~~~~~~~~ From 01e9f06cfcb1472f26e21c534955dff6a61c958f Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 14 Aug 2023 19:08:19 -0700 Subject: [PATCH 032/103] validation docs update --- docs/_static/validation-data.txt | 202 +++++++++++++++++++++++++ docs/conf.py | 10 +- docs/data.rst | 2 +- docs/examples-validation.rst | 213 +++++++++++++++++++++++++++ docs/examples.rst | 244 ++++++++++++++++++++----------- 5 files changed, 576 insertions(+), 95 deletions(-) create mode 100644 docs/_static/validation-data.txt create mode 100644 docs/examples-validation.rst diff --git a/docs/_static/validation-data.txt b/docs/_static/validation-data.txt new file mode 100644 index 00000000..2f0f8132 --- /dev/null +++ b/docs/_static/validation-data.txt @@ -0,0 +1,202 @@ +# Model file written by *otoole* +param default 0 : AccumulatedAnnualDemand := +; +param default -1 : AnnualEmissionLimit := +; +param default 0 : AnnualExogenousEmission := +; +param default 1 : AvailabilityFactor := +; +param default 1 : CapacityFactor := +; +param default 0 : CapacityOfOneTechnologyUnit := +; +param default 1 : CapacityToActivityUnit := +R PWRWND 31.536 +R PWRCOA 31.536 +R TRNELC 31.536 +; +param default 0 : CapitalCost := +R PWRWND 2020 1500 +R PWRWND 2021 1500 +R PWRWND 2022 1500 +R PWRCOA 2020 5000 +R PWRCOA 2021 5000 +R PWRCOA 2022 5000 +; +param default 0 : CapitalCostStorage := +; +param default 0 : Conversionld := +; +param default 0 : Conversionlh := +; +param default 0 : Conversionls := +; +set DAILYTIMEBRACKET := +; +set DAYTYPE := +; +param default 0.00137 : DaySplit := +; +param default 7 : DaysInDayType := +; +param default 1 : DepreciationMethod := +; +param default 0.05 : DiscountRate := +; +param default 0.05 : DiscountRateStorage := +; +set EMISSION := +; +param default 0 : EmissionActivityRatio := +; +param default 0 : EmissionsPenalty := +; +set FUEL := +WND00 +COA00 +ELC01 +ELC02 +; +param default 0 : FixedCost := +; +param default 0 : InputActivityRatio := +R PWRWND WND00 1 2020 1 +R PWRWND WND00 1 2021 1 +R PWRWND WND00 1 2022 1 +R PWRCOA COA00 1 2020 1 +R PWRCOA COA00 1 2021 1 +R PWRCOA COA00 1 2022 1 +R TRNELC ELC01 1 2020 1 +R TRNELC ELC01 1 2021 1 +R TRNELC ELC01 1 2022 1 +; +set MODE_OF_OPERATION := +1 +; +param default 0 : MinStorageCharge := +; +param default -1 : ModelPeriodEmissionLimit := +; +param default 0 : ModelPeriodExogenousEmission := +; +param default 1 : OperationalLife := +R PWRWND 20 +R PWRCOA 30 +; +param default 0 : OperationalLifeStorage := +; +param default 0 : OutputActivityRatio := +R MINWND WND00 1 2020 1 +R MINWND WND00 1 2021 1 +R MINWND WND00 1 2022 1 +R MINCOA COA00 1 2020 1 +R MINCOA COA00 1 2021 1 +R MINCOA COA00 1 2022 1 +R PWRWND ELC01 1 2020 1 +R PWRWND ELC01 1 2021 1 +R PWRWND ELC01 1 2022 1 +R PWRCOA ELC01 1 2020 1 +R PWRCOA ELC01 1 2021 1 +R PWRCOA ELC01 1 2022 1 +R TRNELC ELC02 1 2020 1 +R TRNELC ELC02 1 2021 1 +R TRNELC ELC02 1 2022 1 +; +set REGION := +R +; +param default 0 : REMinProductionTarget := +; +param default 0 : RETagFuel := +; +param default 0 : RETagTechnology := +; +param default 1 : ReserveMargin := +; +param default 0 : ReserveMarginTagFuel := +; +param default 0 : ReserveMarginTagTechnology := +; +param default 0 : ResidualCapacity := +R PWRCOA 2020 0.25 +R PWRCOA 2021 0.25 +R PWRCOA 2022 0.25 +; +param default 999 : ResidualStorageCapacity := +; +set SEASON := +; +set STORAGE := +; +param default 0 : SpecifiedAnnualDemand := +R ELC02 2020 10 +R ELC02 2021 15 +R ELC02 2022 20 +; +param default 0 : SpecifiedDemandProfile := 
+R ELC02 S 2020 0.5 +R ELC02 W 2020 0.5 +R ELC02 S 2021 0.5 +R ELC02 W 2021 0.5 +R ELC02 S 2022 0.5 +R ELC02 W 2022 0.5 +; +param default 0 : StorageLevelStart := +; +param default 0 : StorageMaxChargeRate := +; +param default 0 : StorageMaxDischargeRate := +; +set TECHNOLOGY := +MINWND +MINCOA +PWRWND +PWRCOA +TRNELC +; +set TIMESLICE := +S +W +; +param default 0 : TechnologyFromStorage := +; +param default 0 : TechnologyToStorage := +; +param default -1 : TotalAnnualMaxCapacity := +; +param default -1 : TotalAnnualMaxCapacityInvestment := +; +param default 0 : TotalAnnualMinCapacity := +; +param default 0 : TotalAnnualMinCapacityInvestment := +; +param default 0 : TotalTechnologyAnnualActivityLowerLimit := +; +param default -1 : TotalTechnologyAnnualActivityUpperLimit := +; +param default 0 : TotalTechnologyModelPeriodActivityLowerLimit := +; +param default -1 : TotalTechnologyModelPeriodActivityUpperLimit := +; +param default 0 : TradeRoute := +; +param default 0 : VariableCost := +R MINCOA 1 2020 5 +R MINCOA 1 2021 5 +R MINCOA 1 2022 5 +; +set YEAR := +2020 +2021 +2022 +; +param default 0 : YearSplit := +S 2020 0.5 +W 2020 0.5 +S 2021 0.5 +W 2021 0.5 +S 2022 0.5 +W 2022 0.5 +; +end; diff --git a/docs/conf.py b/docs/conf.py index c005bcc8..2a5fbc5c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -172,11 +172,11 @@ "path_to_docs": "docs", "use_repository_button": True, "use_edit_page_button": True, - "extra_navbar": - """ -
-        Theme by the Executable Book Project
-
-        Logo by looka.com
-
-    """,
+    # "extra_navbar":
+    # """
+    #     Theme by the Executable Book Project
+    #
+    #     Logo by looka.com
+    #
+ # """, "icon_links": [], } diff --git a/docs/data.rst b/docs/data.rst index 6d144aa7..c6f68974 100644 --- a/docs/data.rst +++ b/docs/data.rst @@ -69,7 +69,7 @@ Sets are defined as follows:: It's convention in OSeMOSYS to capitalize set names Parameters Format -~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~ Parameters are defined as follows. When referencing set indices use the full name, **not** the ``short_name``:: diff --git a/docs/examples-validation.rst b/docs/examples-validation.rst new file mode 100644 index 00000000..03988427 --- /dev/null +++ b/docs/examples-validation.rst @@ -0,0 +1,213 @@ +:orphan: + +.. _examples-validation: + +----------------------- +Example Validation File +----------------------- + +This page holds the datafile used in the validation example. The file can +either be copy/pasted from below, or directly downloaded from :download:`here <_static/validation-data.txt>` :: + + # Model file written by *otoole* + param default 0 : AccumulatedAnnualDemand := + ; + param default -1 : AnnualEmissionLimit := + ; + param default 0 : AnnualExogenousEmission := + ; + param default 1 : AvailabilityFactor := + ; + param default 1 : CapacityFactor := + ; + param default 0 : CapacityOfOneTechnologyUnit := + ; + param default 1 : CapacityToActivityUnit := + R PWRWND 31.536 + R PWRCOA 31.536 + R TRNELC 31.536 + ; + param default 0 : CapitalCost := + R PWRWND 2020 1500 + R PWRWND 2021 1500 + R PWRWND 2022 1500 + R PWRCOA 2020 5000 + R PWRCOA 2021 5000 + R PWRCOA 2022 5000 + ; + param default 0 : CapitalCostStorage := + ; + param default 0 : Conversionld := + ; + param default 0 : Conversionlh := + ; + param default 0 : Conversionls := + ; + set DAILYTIMEBRACKET := + ; + set DAYTYPE := + ; + param default 0.00137 : DaySplit := + ; + param default 7 : DaysInDayType := + ; + param default 1 : DepreciationMethod := + ; + param default 0.05 : DiscountRate := + ; + param default 0.05 : DiscountRateStorage := + ; + set EMISSION := + ; + param default 0 : EmissionActivityRatio := + ; + param default 0 : EmissionsPenalty := + ; + set FUEL := + WND00 + COA00 + ELC01 + ELC02 + ; + param default 0 : FixedCost := + ; + param default 0 : InputActivityRatio := + R PWRWND WND00 1 2020 1 + R PWRWND WND00 1 2021 1 + R PWRWND WND00 1 2022 1 + R PWRCOA COA00 1 2020 1 + R PWRCOA COA00 1 2021 1 + R PWRCOA COA00 1 2022 1 + R TRNELC ELC01 1 2020 1 + R TRNELC ELC01 1 2021 1 + R TRNELC ELC01 1 2022 1 + ; + set MODE_OF_OPERATION := + 1 + ; + param default 0 : MinStorageCharge := + ; + param default -1 : ModelPeriodEmissionLimit := + ; + param default 0 : ModelPeriodExogenousEmission := + ; + param default 1 : OperationalLife := + R PWRWND 20 + R PWRCOA 30 + ; + param default 0 : OperationalLifeStorage := + ; + param default 0 : OutputActivityRatio := + R MINWND WND00 1 2020 1 + R MINWND WND00 1 2021 1 + R MINWND WND00 1 2022 1 + R MINCOA COA00 1 2020 1 + R MINCOA COA00 1 2021 1 + R MINCOA COA00 1 2022 1 + R PWRWND ELC01 1 2020 1 + R PWRWND ELC01 1 2021 1 + R PWRWND ELC01 1 2022 1 + R PWRCOA ELC01 1 2020 1 + R PWRCOA ELC01 1 2021 1 + R PWRCOA ELC01 1 2022 1 + R TRNELC ELC02 1 2020 1 + R TRNELC ELC02 1 2021 1 + R TRNELC ELC02 1 2022 1 + ; + set REGION := + R + ; + param default 0 : REMinProductionTarget := + ; + param default 0 : RETagFuel := + ; + param default 0 : RETagTechnology := + ; + param default 1 : ReserveMargin := + ; + param default 0 : ReserveMarginTagFuel := + ; + param default 0 : ReserveMarginTagTechnology := + ; + param default 0 : ResidualCapacity := + R PWRCOA 2020 0.25 + R PWRCOA 2021 0.25 + R 
PWRCOA 2022 0.25 + ; + param default 999 : ResidualStorageCapacity := + ; + set SEASON := + ; + set STORAGE := + ; + param default 0 : SpecifiedAnnualDemand := + R ELC02 2020 10 + R ELC02 2021 15 + R ELC02 2022 20 + ; + param default 0 : SpecifiedDemandProfile := + R ELC02 S 2020 0.5 + R ELC02 W 2020 0.5 + R ELC02 S 2021 0.5 + R ELC02 W 2021 0.5 + R ELC02 S 2022 0.5 + R ELC02 W 2022 0.5 + ; + param default 0 : StorageLevelStart := + ; + param default 0 : StorageMaxChargeRate := + ; + param default 0 : StorageMaxDischargeRate := + ; + set TECHNOLOGY := + MINWND + MINCOA + PWRWND + PWRCOA + TRNELC + ; + set TIMESLICE := + S + W + ; + param default 0 : TechnologyFromStorage := + ; + param default 0 : TechnologyToStorage := + ; + param default -1 : TotalAnnualMaxCapacity := + ; + param default -1 : TotalAnnualMaxCapacityInvestment := + ; + param default 0 : TotalAnnualMinCapacity := + ; + param default 0 : TotalAnnualMinCapacityInvestment := + ; + param default 0 : TotalTechnologyAnnualActivityLowerLimit := + ; + param default -1 : TotalTechnologyAnnualActivityUpperLimit := + ; + param default 0 : TotalTechnologyModelPeriodActivityLowerLimit := + ; + param default -1 : TotalTechnologyModelPeriodActivityUpperLimit := + ; + param default 0 : TradeRoute := + ; + param default 0 : VariableCost := + R MINCOA 1 2020 5 + R MINCOA 1 2021 5 + R MINCOA 1 2022 5 + ; + set YEAR := + 2020 + 2021 + 2022 + ; + param default 0 : YearSplit := + S 2020 0.5 + W 2020 0.5 + S 2021 0.5 + W 2021 0.5 + S 2022 0.5 + W 2022 0.5 + ; + end; diff --git a/docs/examples.rst b/docs/examples.rst index 2638f7ae..c4f2aab8 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -15,10 +15,86 @@ functionality in seperate simple use cases. git clone https://github.com/OSeMOSYS/simplicity.git cd simplicity -.. CAUTION:: - While ``otoole`` does not require a solver, these examples - will use the free and open source solvers GLPK_ and CBC_. - Installation instructions are described in the `Solver Setup`_ section. +Solver Setup +------------ + +Objective +~~~~~~~~~ + +Install GLPK_ (required) and CBC_ (optional) to use in the otoole examples. +While ``otoole`` does not require a solver, these examples will use the free +and open source solvers GLPK_ and CBC_. + +1. Install GLPK +~~~~~~~~~~~~~~~~ + +GLPK_ is a free and open-source linear program solver. Full +install instructions can be found on the `GLPK Website`_, however, the +abbreviated instructions are shown below + +To install GLPK on **Linux**, run the command:: + + sudo apt-get update + sudo apt-get install glpk glpk-utils + +To install GLPK on **Mac**, run the command:: + + brew install glpk + +To install GLPK on **Windows**, follow the instructions on the +`GLPK Website`_. Be sure to add GLPK to +your environment variables after installation + +Alternatively, if you use Anaconda_ to manage +your Python packages, you can install GLPK via the command:: + + conda install -c conda-forge glpk + +2. Test the GLPK install +~~~~~~~~~~~~~~~~~~~~~~~~ +Once installed, you should be able to call the ``glpsol`` command:: + + $ glpsol + GLPSOL: GLPK LP/MIP Solver, v4.65 + No input problem file specified; try glpsol --help + +3. Install CBC +~~~~~~~~~~~~~~ + +CBC_ is a free and open-source mixed integer linear programming solver. 
Full +install instructions can be found on the CBC_ website, however, the abbreviated +instructions are shown below + +To install CBC on **Linux**, run the command:: + + sudo apt-get install coinor-cbc coinor-libcbc-dev + +To install CBC on **Mac**, run the command:: + + brew install coin-or-tools/coinor/cbc + +To install CBC on **Windows**, follow the install instruction on the CBC_ +website. + +Alternatively, if you use Anaconda_ to manage +your Python packages, you can install CBC via the command:: + + conda install -c conda-forge coincbc + +4. Test the CBC install +~~~~~~~~~~~~~~~~~~~~~~~ +Once installed, you should be able to directly call CBC:: + + $ cbc + Welcome to the CBC MILP Solver + Version: 2.10.3 + Build Date: Mar 24 2020 + + CoinSolver takes input from arguments ( - switches to stdin) + Enter ? for list of commands or help + Coin: + +You can exit the solver by typing ``quit`` Data Conversion with CSVs ------------------------- @@ -211,13 +287,18 @@ codes are shown in bold face. .. image:: _static/validataion_model.png -1. Create the Validation File +1. Download the example datafile +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The MathProg datafile describing this model can be found on the +:ref:`examples-validation` page. Download the file and save it as ``data.txt`` + +2. Create the Validation File ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a configuration validation ``yaml`` file:: $ touch validate.yaml -2. Create ``FUEL`` Codes +3. Create ``FUEL`` Codes ~~~~~~~~~~~~~~~~~~~~~~~~ Create the fuel codes and descriptions in the validation configuration file:: @@ -227,11 +308,11 @@ Create the fuel codes and descriptions in the validation configuration file:: 'COA': Coal 'ELC': Electricity indetifiers: - '00': Raw Resource + '00': Primary Resource '01': Intermediate '02': End Use -3. Create ``TECHNOLOGY`` Codes +4. Create ``TECHNOLOGY`` Codes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add the technology codes to the validation configuration file. Note that the powerplant types are the same codes as the fuels, so there is no need to @@ -243,49 +324,40 @@ redefine these codes:: 'PWR': Generator 'TRN': Transmission -4. Create ``FUEL`` Schema +5. Create ``FUEL`` Schema ~~~~~~~~~~~~~~~~~~~~~~~~~ Use the defined codes to create a schema for the fuel codes:: schema: FUEL: - name: fuel_name - items: - - name: fuels + items: + - name: type valid: fuels position: (1, 3) - - name: indetifiers + - name: indentifier valid: indetifiers position: (4, 5) -5. Create ``TECHNOLOGY`` Schema +6. Create ``TECHNOLOGY`` Schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use the defined codes to create a schema for the technology codes:: schema: TECHNOLOGY: - name: technology_name - items: - - name: techs + items: + - name: tech valid: techs position: (1, 3) - - name: fuels + - name: fuel valid: fuels position: (4, 6) -6. ``otoole`` validate -~~~~~~~~~~~~~~~~~~~~~~ -Use otoole to validate the input data (can be any of a ``datafile``, ``csv``, or ``excel``) -against the validation configuration file:: - - $ otoole validate datafile data.txt config.yaml --validate_config validate.yaml - -.. WARNING:: - Do not confuse the user configuration file (``config.yaml``) and the - validation configuration file (``validate.yaml``). Both configuration files - are required for validation functionality. +7. 
Save changes +~~~~~~~~~~~~~~~ -The final validation configuration file in this example will look like:: +The final validation configuration file for this example will look like:: codes: fuels: @@ -293,7 +365,7 @@ The final validation configuration file in this example will look like:: 'COA': Coal 'ELC': Electricity indetifiers: - '00': Raw Resource + '00': Primary Resource '01': Intermediate '02': End Use techs: @@ -304,103 +376,97 @@ The final validation configuration file in this example will look like:: schema: FUEL: - name: fuel_name - items: - - name: fuels + items: + - name: type valid: fuels position: (1, 3) - - name: indetifiers + - name: indentifier valid: indetifiers position: (4, 5) TECHNOLOGY: - name: technology_name - items: - - name: techs + items: + - name: tech valid: techs position: (1, 3) - - name: fuels + - name: fuel valid: fuels position: (4, 6) -Solver Setup ------------- - -Objective -~~~~~~~~~ - -Install GLPK_ (required) and CBC_ (optional) to use in the otoole examples. - -1. Install GLPK -~~~~~~~~~~~~~~~~ +8. ``otoole validate`` +~~~~~~~~~~~~~~~~~~~~~~ +Use otoole to validate the input data (can be any of a ``datafile``, ``csv``, or ``excel``) +against the validation configuration file:: -GLPK_ is a free and open-source linear program solver. + $ otoole validate datafile data.txt config.yaml --validate_config validate.yaml -To install GLPK on **Linux**, run the command:: + ***Beginning validation*** - sudo apt-get update - sudo apt-get install glpk glpk-utils + Validating FUEL with fuel_name -To install GLPK on **Mac**, run the command:: + ^(WND|COA|ELC)(00|01|02) + 4 valid names: + WND00, COA00, ELC01, ELC02 - brew install glpk + Validating TECHNOLOGY with technology_name -To install GLPK on **Windows**, follow the instructions on the -`GLPK Website `. Be sure to add GLPK to -your environment variables if installing on Windows. + ^(MIN|PWR|TRN)(WND|COA|ELC) + 5 valid names: + MINWND, MINCOA, PWRWND, PWRCOA, TRNELC -Alternatively, if you use `Anaconda ` to manage -your Python packages, you can install GLPK via the command:: - conda install -c conda-forge glpk + ***Checking graph structure*** -2. Test the GLPK install -~~~~~~~~~~~~~~~~~~~~~~~~ -Once installed, you should be able to call the ``glpsol`` command:: +.. WARNING:: + Do not confuse the user configuration file (``config.yaml``) and the + validation configuration file (``validate.yaml``). Both configuration files + are required for validation functionality. - $ glpsol - GLPSOL: GLPK LP/MIP Solver, v4.65 - No input problem file specified; try glpsol --help +9. Use ``otoole validate`` to identify an issue +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +In the datafile create a new technology that does not follow the specified schema. +For example, add the value ``ELC03`` to the ``FUEL`` set:: -3. Install CBC -~~~~~~~~~~~~~~ + set FUEL := + WND00 + COA00 + ELC01 + ELC02 + ELC03 -CBC_ is a free and open-source mixed integer linear programming solver. Full -install instructions can be found on the CBC_ website. However, the abbreviated -instructions are shown below +Running ``otoole validate`` again will flag this improperly named value. Moreover it +will also flag it as an isolated fuel. 
This means the fuel is unconnected from the model:: -To install CBC on **Linux**, run the command:: + $ otoole validate datafile data.txt config.yaml --validate_config validate.yaml - sudo apt-get install coinor-cbc coinor-libcbc-dev + ***Beginning validation*** -To install CBC on **Mac**, run the command:: + Validating FUEL with fuel_name - brew install coin-or-tools/coinor/cbc + ^(WND|COA|ELC)(00|01|02) + 1 invalid names: + ELC03 -To install CBC on **Windows**, follow the install instruction on the CBC_ -website. + 4 valid names: + WND00, COA00, ELC01, ELC02 -Alternatively, if you use `Anaconda ` to manage -your Python packages, you can install CBC via the command:: + Validating TECHNOLOGY with technology_name - conda install -c conda-forge coincbc + ^(MIN|PWR|TRN)(WND|COA|ELC) + 5 valid names: + MINWND, MINCOA, PWRWND, PWRCOA, TRNELC -4. Test the CBC install -~~~~~~~~~~~~~~~~~~~~~~~ -Once installed, you should be able to directly call CBC:: - $ cbc - Welcome to the CBC MILP Solver - Version: 2.10.3 - Build Date: Mar 24 2020 + ***Checking graph structure*** - CoinSolver takes input from arguments ( - switches to stdin) - Enter ? for list of commands or help - Coin: + 1 'fuel' nodes are isolated: + ELC03 -You can exit the solver by typing ``quit`` .. _Simplicity: https://github.com/OSeMOSYS/simplicity .. _GLPK: https://www.gnu.org/software/glpk/ .. _GLPK Wiki: https://en.wikibooks.org/wiki/GLPK/Using_GLPSOL +.. _GLPK Website: https://winglpk.sourceforge.net/ .. _CBC: https://github.com/coin-or/Cbc .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer -.. _instructions: http://www.osemosys.org/uploads/1/8/5/0/18504136/glpk_installation_guide_for_windows10_-_201702.pdf +.. _Anaconda: https://www.anaconda.com/ From 1700224fa54beac1257c8183d05942367b12a342 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 14 Aug 2023 19:17:47 -0700 Subject: [PATCH 033/103] fix linting issue --- docs/examples-validation.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/examples-validation.rst b/docs/examples-validation.rst index 03988427..ee6d87c1 100644 --- a/docs/examples-validation.rst +++ b/docs/examples-validation.rst @@ -3,10 +3,10 @@ .. _examples-validation: ----------------------- -Example Validation File +Example Validation File ----------------------- -This page holds the datafile used in the validation example. The file can +This page holds the datafile used in the validation example. 
The file can either be copy/pasted from below, or directly downloaded from :download:`here <_static/validation-data.txt>` :: # Model file written by *otoole* From 3758c44458bf3c30386c8ff6d361eb792dfcf748 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Tue, 15 Aug 2023 12:47:49 +0200 Subject: [PATCH 034/103] Refactor cli functions to use python interface --- src/otoole/cli.py | 55 ++++++----------------------------------------- 1 file changed, 7 insertions(+), 48 deletions(-) diff --git a/src/otoole/cli.py b/src/otoole/cli.py index 45fff461..a34a103a 100644 --- a/src/otoole/cli.py +++ b/src/otoole/cli.py @@ -44,16 +44,10 @@ import shutil import sys -from otoole import __version__, convert, convert_results +from otoole import __version__, convert, convert_results, read from otoole.exceptions import OtooleSetupError from otoole.preprocess.setup import get_config_setup_data, get_csv_setup_data -from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel -from otoole.utils import ( - _read_file, - read_deprecated_datapackage, - read_packaged_file, - validate_config, -) +from otoole.utils import read_packaged_file from otoole.validate import main as validate from otoole.visualise import create_res from otoole.write_strategies import WriteCsv @@ -66,26 +60,7 @@ def validate_model(args): data_file = args.data_file user_config = args.user_config - _, ending = os.path.splitext(user_config) - with open(user_config, "r") as user_config_file: - config = _read_file(user_config_file, ending) - validate_config(config) - - if data_format == "datafile": - read_strategy = ReadDatafile(user_config=config) - elif data_format == "datapackage": - logger.warning( - "Reading from datapackage is deprecated, trying to read from CSVs" - ) - data_file = read_deprecated_datapackage(data_file) - logger.info("Successfully read folder of CSVs") - read_strategy = ReadCsv(user_config=config) - elif data_format == "csv": - read_strategy = ReadCsv(user_config=config) - elif data_format == "excel": - read_strategy = ReadExcel(user_config=config) - - input_data, _ = read_strategy.read(data_file) + input_data, _ = read(user_config, data_format, data_file) if args.validate_config: validation_config = read_packaged_file(args.validate_config) @@ -136,28 +111,12 @@ def data2res(args): data_format = args.data_format data_path = args.data_path + config = args.config + resfile = args.resfile - _, ending = os.path.splitext(args.config) - with open(args.config, "r") as config_file: - config = _read_file(config_file, ending) - validate_config(config) - - if data_format == "datafile": - read_strategy = ReadDatafile(user_config=config) - elif data_format == "datapackage": - logger.warning( - "Reading from datapackage is deprecated, trying to read from CSVs" - ) - data_path = read_deprecated_datapackage(data_path) - read_strategy = ReadCsv(user_config=config) - elif data_format == "csv": - read_strategy = ReadCsv(user_config=config) - elif data_format == "excel": - read_strategy = ReadExcel(user_config=config) - - input_data, _ = read_strategy.read(data_path) + input_data, _ = read(config, data_format, data_path) - create_res(input_data, args.resfile) + create_res(input_data, resfile) def setup(args): From c1f8865bb975a3e978ab07ef4ea7652224a8c119 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 15 Aug 2023 08:56:29 -0700 Subject: [PATCH 035/103] update ReadGlpk logic --- src/otoole/results/results.py | 175 ++++++++++++++++----------------- tests/fixtures/glpk_model.txt | 21 ---- tests/fixtures/glpk_sol.txt | 21 ---- 
tests/test_read_strategies.py | 180 ++++++++++++++++++++-------------- 4 files changed, 192 insertions(+), 205 deletions(-) delete mode 100644 tests/fixtures/glpk_model.txt delete mode 100644 tests/fixtures/glpk_sol.txt diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index bd8b04cb..d06a79a5 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -340,15 +340,22 @@ class ReadGlpk(ReadResultsCBC): glpsol --wglp model.lp -m osemosys.txt -d simplicity.txt --write results.sol """ - def __init__(self, user_config: Dict[str, Dict], glpk_model: str = None): + def __init__(self, user_config: Dict[str, Dict], glpk_model: Union[str, TextIO]): """ - glpk_model: str + glpk_model: Union[str, TextIO] Path to GLPK model file. Can be created using the `--wglp` flag. If not provided, the solution file will be processed without corresponding english names or index defenitions. """ super().__init__(user_config) - self.glpk_model = glpk_model + + if isinstance(glpk_model, str): + with open(glpk_model, "r") as model_file: + self.model = self.read_model(model_file) + elif isinstance(glpk_model, StringIO): + self.model = self.read_model(glpk_model) + else: + raise TypeError("Argument filepath type must be a string or an open file") def _convert_to_dataframe(self, glpk_sol: Union[str, TextIO]) -> pd.DataFrame: """Creates a wide formatted dataframe from GLPK solution @@ -363,17 +370,62 @@ def _convert_to_dataframe(self, glpk_sol: Union[str, TextIO]) -> pd.DataFrame: pd.DataFrame """ - model = self.read_model() - if isinstance(glpk_sol, str): - with open(glpk_sol, "r") as sol_file: - _, sol = self.read_solution(sol_file) + with open(glpk_sol, "r"): + _, sol = self.read_solution(glpk_sol) elif isinstance(glpk_sol, StringIO): _, sol = self.read_solution(glpk_sol) else: raise TypeError("Argument filepath type must be a string or an open file") - return self.merge_model_sol(model, sol) + return self._merge_model_sol(sol) + + def read_model(self, file_path: Union[str, TextIO]) -> pd.DataFrame: + """Reads in a GLPK Model File + + Arguments + --------- + file_path: str + Path to GLPK model file. Can be created using the `--wglp` flag. 
+ + Returns + ------- + pd.DataFrame + + ID NUM NAME INDEX + 0 i 1 CAa4_Constraint_Capacity "SIMPLICITY,ID,BACKSTOP1,2015" + 1 j 2 NewCapacity "SIMPLICITY,WINDPOWER,2039" + + Notes + ----- + + -> GENERAL LAYOUT OF SOLUTION FILE + + n p NAME # p = problem instance + n z NAME # z = objective function + n i ROW NAME # i = constraint name, ROW is the row ordinal number + n j COL NAME # j = variable name, COL is the column ordinal number + """ + + df = pd.read_csv( + file_path, + header=None, + sep=r"\s+", + index_col=0, + names=["ID", "NUM", "value", 4, 5], + ).drop(columns=[4, 5]) + + df = df[(df["ID"].isin(["i", "j"])) & (df["value"] != "cost")] + + df[["NAME", "INDEX"]] = df["value"].str.split("[", expand=True) + df["INDEX"] = df["INDEX"].map(lambda x: x.split("]")[0]) + df = ( + df[["ID", "NUM", "NAME", "INDEX"]] + .astype({"ID": str, "NUM": int, "NAME": str, "INDEX": str}) + .reset_index(drop=True) + ) + + return df def read_solution( self, file_path: Union[str, TextIO] @@ -429,99 +481,36 @@ def read_solution( DUAL field contains the column dual value (float) """ - data = [] - status = {} + df = pd.read_csv(file_path, header=None, sep=":") - for line in file_path: - parts = line.strip().split() - if parts[0] in ("i", "j"): - data.append( - [ - parts[0], - int(parts[1]), - parts[2], - float(parts[3]), - float(parts[4]), - ] - ) - elif len(parts) > 1: - if parts[1] == "Problem:": - status["name"] = parts[2] - elif parts[1] == "Status:": - status["status"] = parts[2] - elif parts[1] == "Objective:": - status["objective"] = float(parts[4]) - - df = pd.DataFrame(data, columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"]) - - for info in ["name", "status", "objective"]: - if info not in status: - LOGGER.warning(f"No {info} extracted from the GLPK solution") - - return status, df - - def read_model(self) -> pd.DataFrame: - """Reads in a GLPK Model File - - Arguments - --------- - file_path: str - Path to GLPK model file. Can be created using the `--wglp` flag. - - Returns - ------- - pd.DataFrame - - ID NUM NAME INDEX - 0 i 1 CAa4_Constraint_Capacity "SIMPLICITY,ID,BACKSTOP1,2015" - 1 j 2 NewCapacity "SIMPLICITY,WINDPOWER,2039" - - Notes - ----- - - -> GENERAL LAYOUT OF SOLUTION FILE - - n p NAME # p = problem instance - n z NAME # z = objective function - n i ROW NAME # i = constraint name, ROW is the row ordinal number - n j COL NAME # j = variable name, COL is the column ordinal number - """ + # get status information + status = {} + df_status = df.loc[:8].set_index(0) + status["name"] = df_status.loc["c Problem", 1].strip() + status["status"] = df_status.loc["c Status", 1].strip() + status["objective"] = float(df_status.loc["c Objective", 1].split()[2]) + + # get solution infromation + data = df.iloc[8:-1].copy() + data[["ID", "NUM", "STATUS", "PRIM", "DUAL"]] = data[0].str.split( + " ", expand=True + ) - if not self.glpk_model: - raise OtooleError( - resource="GLPK.lp", - message="No GLPK model file provided. 
This can be generated via the `--wglp` command.", + data = ( + data[["ID", "NUM", "STATUS", "PRIM", "DUAL"]] + .astype( + {"ID": str, "NUM": int, "STATUS": str, "PRIM": float, "DUAL": float} ) - else: - model_path = self.glpk_model - - data = [] - - with open(model_path, "r") as f: - for line in f: - parts = line.strip().split() - if not parts[0] == "n": - continue - if len(parts) < 4: - continue - data.append([parts[1], int(parts[2]), parts[3]]) - - df = pd.DataFrame(data, columns=["ID", "NUM", "INDEX_LIST"]) - df = df.loc[df["INDEX_LIST"].str.contains(r"\[")] # removes "n i 1 cost" row + .reset_index(drop=True) + ) - df[["NAME", "INDEX"]] = df["INDEX_LIST"].str.split("[", expand=True) - df["INDEX"] = df["INDEX"].map(lambda x: x.split("]")[0]) + return status, data - df = df[["ID", "NUM", "NAME", "INDEX"]].reset_index(drop=True) - return df - - def merge_model_sol(self, model: pd.DataFrame, sol: pd.DataFrame) -> pd.DataFrame: + def _merge_model_sol(self, sol: pd.DataFrame) -> pd.DataFrame: """Merges GLPK model and solution file into one dataframe Arguments --------- - model: pd.DataFrame, - see output from ReadGlpk.read_model(...) sol: pd.DataFrame see output from ReadGlpk.read_solution(...) @@ -535,6 +524,8 @@ def merge_model_sol(self, model: pd.DataFrame, sol: pd.DataFrame) -> pd.DataFram columns=['Variable', 'Index', 'Value']) """ + model = self.model.copy() + # create lookup ids using the id and num columns to coordinate merge model["lookup"] = model["ID"].str.cat(model["NUM"].astype(str)) model = model.set_index("lookup") diff --git a/tests/fixtures/glpk_model.txt b/tests/fixtures/glpk_model.txt deleted file mode 100644 index 63cddbf7..00000000 --- a/tests/fixtures/glpk_model.txt +++ /dev/null @@ -1,21 +0,0 @@ -p lp min 12665 9450 82606 -n p osemosys_fast -n z cost -i 1 f -n i 1 cost -i 2 u -0 -n i 2 CAa4_Constraint_Capacity[SIMPLICITY,ID,BACKSTOP1,2014] -i 3 u -0 -n i 3 CAa4_Constraint_Capacity[SIMPLICITY,ID,BACKSTOP1,2015] -i 300 u 147.115 -n i 300 CAa4_Constraint_Capacity[SIMPLICITY,ID,LNDFORCOV,2015] -i 301 u 144.231 -n i 301 CAa4_Constraint_Capacity[SIMPLICITY,ID,LNDFORCOV,2016] -n j 1 SalvageValueStorage[SIMPLICITY,DAM,2014] -n j 2 SalvageValueStorage[SIMPLICITY,DAM,2015] -n j 130 StorageLevelSeasonStart[SIMPLICITY,DAM,2,2035] -n j 131 StorageLevelSeasonStart[SIMPLICITY,DAM,2,2036] -n j 1025 NewCapacity[SIMPLICITY,WINDPOWER,2039] -n j 1026 NewCapacity[SIMPLICITY,WINDPOWER,2040] -n j 1027 RateOfActivity[SIMPLICITY,ID,BACKSTOP1,1,2014] -n j 1028 RateOfActivity[SIMPLICITY,IN,BACKSTOP1,1,2014] diff --git a/tests/fixtures/glpk_sol.txt b/tests/fixtures/glpk_sol.txt deleted file mode 100644 index 37fbe7f1..00000000 --- a/tests/fixtures/glpk_sol.txt +++ /dev/null @@ -1,21 +0,0 @@ -c Problem: osemosys_fast -c Rows: 12665 -c Columns: 9450 -c Non-zeros: 82606 -c Status: OPTIMAL -c Objective: cost = 4497.31967 (MINimum) -c -s bas 12665 9450 f f 4497.31967015205 -i 1 b 3942.19479265207 0 -i 2 b 0 0 -i 3 b 0 0 -i 300 b 37.499 0 -i 301 b 31.7309999999999 0 -j 1 b 0 0 -j 2 b 0 0 -j 130 l 0 0.282765294823514 -j 131 l 0 0.601075755990521 -j 1025 b 0.0305438002923389 0 -j 1026 b 0.0422503416065477 0 -j 1027 l 0 162679.693161095 -j 1028 l 0 81291.0524314291 diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index 90b6316e..ddcd1823 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -615,6 +615,31 @@ def test_manage_infeasible_variables(self, user_config): class TestReadGlpk: """Use fixtures instead of StringIO due to the use of context 
managers in the logic""" + model_data = dedent( + """p lp min 12665 9450 82606 +n p osemosys_fast +n z cost +i 1 f +n i 1 cost +i 2 u -0 +n i 2 CAa4_Constraint_Capacity[SIMPLICITY,ID,BACKSTOP1,2014] +i 3 u -0 +n i 3 CAa4_Constraint_Capacity[SIMPLICITY,ID,BACKSTOP1,2015] +i 300 u 147.115 +n i 300 CAa4_Constraint_Capacity[SIMPLICITY,ID,LNDFORCOV,2015] +i 301 u 144.231 +n i 301 CAa4_Constraint_Capacity[SIMPLICITY,ID,LNDFORCOV,2016] +n j 1 SalvageValueStorage[SIMPLICITY,DAM,2014] +n j 2 SalvageValueStorage[SIMPLICITY,DAM,2015] +n j 130 StorageLevelSeasonStart[SIMPLICITY,DAM,2,2035] +n j 131 StorageLevelSeasonStart[SIMPLICITY,DAM,2,2036] +n j 1025 NewCapacity[SIMPLICITY,WINDPOWER,2039] +n j 1026 NewCapacity[SIMPLICITY,WINDPOWER,2040] +n j 1027 RateOfActivity[SIMPLICITY,ID,BACKSTOP1,1,2014] +n j 1028 RateOfActivity[SIMPLICITY,IN,BACKSTOP1,1,2014] +""" + ) + sol_data = dedent( """c Problem: osemosys_fast c Rows: 12665 @@ -636,71 +661,97 @@ class TestReadGlpk: j 1025 b 0.0305438002923389 0 j 1026 b 0.0422503416065477 0 j 1027 l 0 162679.693161095 -j 1028 l 0 81291.0524314291""" +j 1028 l 0 81291.0524314291 +e o f +""" ) - expected_sol_data = pd.DataFrame( - [ - ["i", 1, "b", 3942.19479265207, 0], - ["i", 2, "b", 0, 0], - ["i", 3, "b", 0, 0], - ["i", 300, "b", 37.499, 0], - ["i", 301, "b", 31.7309999999999, 0], - ["j", 1, "b", 0, 0], - ["j", 2, "b", 0, 0], - ["j", 130, "l", 0, 0.282765294823514], - ["j", 131, "l", 0, 0.601075755990521], - ["j", 1025, "b", 0.0305438002923389, 0], - ["j", 1026, "b", 0.0422503416065477, 0], - ["j", 1027, "l", 0, 162679.693161095], - ["j", 1028, "l", 0, 81291.0524314291], - ], - columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], - ) + def test_read_model(self, user_config): + model_data = self.model_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) + actual = reader.model - expected_model_data = pd.DataFrame( - [ - ["i", 2, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2014"], - ["i", 3, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2015"], - ["i", 300, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,LNDFORCOV,2015"], - ["i", 301, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,LNDFORCOV,2016"], - ["j", 1, "SalvageValueStorage", "SIMPLICITY,DAM,2014"], - ["j", 2, "SalvageValueStorage", "SIMPLICITY,DAM,2015"], - ["j", 130, "StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2035"], - ["j", 131, "StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2036"], - ["j", 1025, "NewCapacity", "SIMPLICITY,WINDPOWER,2039"], - ["j", 1026, "NewCapacity", "SIMPLICITY,WINDPOWER,2040"], - ["j", 1027, "RateOfActivity", "SIMPLICITY,ID,BACKSTOP1,1,2014"], - ["j", 1028, "RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014"], - ], - columns=["ID", "NUM", "NAME", "INDEX"], - ) + expected = pd.DataFrame( + [ + ["i", 2, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2014"], + ["i", 3, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2015"], + ["i", 300, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,LNDFORCOV,2015"], + ["i", 301, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,LNDFORCOV,2016"], + ["j", 1, "SalvageValueStorage", "SIMPLICITY,DAM,2014"], + ["j", 2, "SalvageValueStorage", "SIMPLICITY,DAM,2015"], + ["j", 130, "StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2035"], + ["j", 131, "StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2036"], + ["j", 1025, "NewCapacity", "SIMPLICITY,WINDPOWER,2039"], + ["j", 1026, "NewCapacity", "SIMPLICITY,WINDPOWER,2040"], + ["j", 1027, "RateOfActivity", "SIMPLICITY,ID,BACKSTOP1,1,2014"], + ["j", 1028, 
"RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014"], + ], + columns=["ID", "NUM", "NAME", "INDEX"], + ) + + pd.testing.assert_frame_equal(actual, expected) def test_read_solution(self, user_config): - input_file = self.sol_data - reader = ReadGlpk(user_config) - with StringIO(input_file) as file_buffer: + model_data = self.model_data + sol_data = self.sol_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) + with StringIO(sol_data) as file_buffer: actual_status, actual_data = reader.read_solution(file_buffer) + expected_status = { "name": "osemosys_fast", "status": "OPTIMAL", "objective": 4497.31967, } assert actual_status == expected_status - pd.testing.assert_frame_equal(actual_data, self.expected_sol_data) - - def test_read_model(self, user_config): - input_file = os.path.join("tests", "fixtures", "glpk_model.txt") - reader = ReadGlpk(user_config=user_config, glpk_model=input_file) - actual = reader.read_model() - pd.testing.assert_frame_equal(actual, self.expected_model_data) + expected_data = pd.DataFrame( + [ + ["i", 1, "b", 3942.19479265207, 0], + ["i", 2, "b", 0, 0], + ["i", 3, "b", 0, 0], + ["i", 300, "b", 37.499, 0], + ["i", 301, "b", 31.7309999999999, 0], + ["j", 1, "b", 0, 0], + ["j", 2, "b", 0, 0], + ["j", 130, "l", 0, 0.282765294823514], + ["j", 131, "l", 0, 0.601075755990521], + ["j", 1025, "b", 0.0305438002923389, 0], + ["j", 1026, "b", 0.0422503416065477, 0], + ["j", 1027, "l", 0, 162679.693161095], + ["j", 1028, "l", 0, 81291.0524314291], + ], + columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], + ) + pd.testing.assert_frame_equal(actual_data, expected_data) def test_merge_model_sol(self, user_config): - reader = ReadGlpk(user_config) - actual = reader.merge_model_sol( - self.expected_model_data, self.expected_sol_data + model_data = self.model_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) + + sol_data = pd.DataFrame( + [ + ["i", 1, "b", 3942.19479265207, 0], + ["i", 2, "b", 0, 0], + ["i", 3, "b", 0, 0], + ["i", 300, "b", 37.499, 0], + ["i", 301, "b", 31.7309999999999, 0], + ["j", 1, "b", 0, 0], + ["j", 2, "b", 0, 0], + ["j", 130, "l", 0, 0.282765294823514], + ["j", 131, "l", 0, 0.601075755990521], + ["j", 1025, "b", 0.0305438002923389, 0], + ["j", 1026, "b", 0.0422503416065477, 0], + ["j", 1027, "l", 0, 162679.693161095], + ["j", 1028, "l", 0, 81291.0524314291], + ], + columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], ) + + actual = reader._merge_model_sol(sol_data) expected = pd.DataFrame( [ ["SalvageValueStorage", "SIMPLICITY,DAM,2014", 0], @@ -717,32 +768,19 @@ def test_merge_model_sol(self, user_config): pd.testing.assert_frame_equal(actual, expected) - def test_merge_model_sol_error(self, user_config): - reader = ReadGlpk(user_config) - - model = pd.DataFrame( - [ - ["j", 1, "SalvageValueStorage", "SIMPLICITY,DAM,2014"], - ["j", 2, "SalvageValueStorage", "SIMPLICITY,DAM,2015"], - ], - columns=["ID", "NUM", "NAME", "INDEX"], - ) + def test_convert_to_dataframe_error(self, user_config): + model_data = self.model_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) - sol = pd.DataFrame( - [ - ["j", 1025, "b", 0.0305438002923389, 0], - ["j", 1026, "b", 0.0422503416065477, 0], - ], - columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], - ) + sol = pd.DataFrame() - with raises(OtooleError): - reader.merge_model_sol(model, sol) + with raises(TypeError): + 
reader._convert_to_dataframe(sol) def test_read_model_error(self, user_config): - reader = ReadGlpk(user_config) - with raises(OtooleError): - reader.read_model() + with raises(TypeError): + ReadGlpk(user_config) class TestCleanOnRead: From 0756c9d6e84af9d0b94ecf80e039b6c879ee50ec Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 15 Aug 2023 08:59:35 -0700 Subject: [PATCH 036/103] update ReadGlpk docstrings --- src/otoole/results/results.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index d06a79a5..15ec93c6 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -344,8 +344,6 @@ def __init__(self, user_config: Dict[str, Dict], glpk_model: Union[str, TextIO]) """ glpk_model: Union[str, TextIO] Path to GLPK model file. Can be created using the `--wglp` flag. - If not provided, the solution file will be processed without - corresponding english names or index defenitions. """ super().__init__(user_config) @@ -362,7 +360,7 @@ def _convert_to_dataframe(self, glpk_sol: Union[str, TextIO]) -> pd.DataFrame: Arguments --------- - glpk_sol: str + glpk_sol: Union[str, TextIO] Path to GLPK solution file. Can be created using the `--write` flag Returns @@ -385,7 +383,7 @@ def read_model(self, file_path: Union[str, TextIO]) -> pd.DataFrame: Arguments --------- - file_path: str + file_path: Union[str, TextIO] Path to GLPK model file. Can be created using the `--wglp` flag. Returns @@ -434,7 +432,7 @@ def read_solution( Arguments --------- - file_path: str + file_path: Union[str, TextIO] Path to GLPK solution file. Can be created using the `--write` flag Returns From 738d3d92ea294e26f96658e1474c7bbdab6891f6 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 15 Aug 2023 09:08:00 -0700 Subject: [PATCH 037/103] fixed typos --- docs/examples.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/examples.rst b/docs/examples.rst index c4f2aab8..69f9f211 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -307,7 +307,7 @@ Create the fuel codes and descriptions in the validation configuration file:: 'WND': Wind 'COA': Coal 'ELC': Electricity - indetifiers: + identifiers: '00': Primary Resource '01': Intermediate '02': End Use @@ -335,8 +335,8 @@ Use the defined codes to create a schema for the fuel codes:: - name: type valid: fuels position: (1, 3) - - name: indentifier - valid: indetifiers + - name: identifier + valid: identifiers position: (4, 5) 6. 
Create ``TECHNOLOGY`` Schema @@ -364,7 +364,7 @@ The final validation configuration file for this example will look like:: 'WND': Wind 'COA': Coal 'ELC': Electricity - indetifiers: + identifiers: '00': Primary Resource '01': Intermediate '02': End Use @@ -380,8 +380,8 @@ The final validation configuration file for this example will look like:: - name: type valid: fuels position: (1, 3) - - name: indentifier - valid: indetifiers + - name: identifier + valid: identifiers position: (4, 5) TECHNOLOGY: - name: technology_name From 773c64488c59b8c3c02c675543bbe7109b410d63 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 15 Aug 2023 09:35:11 -0700 Subject: [PATCH 038/103] update ReadGlpk --- src/otoole/results/results.py | 21 ++++------- tests/test_read_strategies.py | 68 +++++++++++++++-------------------- 2 files changed, 36 insertions(+), 53 deletions(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index 15ec93c6..ddc02fd4 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -5,7 +5,6 @@ import pandas as pd -from otoole.exceptions import OtooleError from otoole.input import ReadStrategy from otoole.preprocess.longify_data import check_datatypes from otoole.results.result_package import ResultsPackage @@ -538,18 +537,12 @@ def _merge_model_sol(self, sol: pd.DataFrame) -> pd.DataFrame: # assemble dataframe data = [] for lookup_id, lookup_values in vars_lookup.items(): - try: - data.append( - [ - model_lookup[lookup_id]["NAME"], - model_lookup[lookup_id]["INDEX"], - lookup_values["PRIM"], - ] - ) - except KeyError: - raise OtooleError( - resource=lookup_id, - message=f"No corresponding id for {lookup_id} in the GLPK model file", - ) + data.append( + [ + model_lookup[lookup_id]["NAME"], + model_lookup[lookup_id]["INDEX"], + lookup_values["PRIM"], + ] + ) return pd.DataFrame(data, columns=["Variable", "Index", "Value"]) diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index ddcd1823..2878034b 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -666,6 +666,25 @@ class TestReadGlpk: """ ) + expected_solution = pd.DataFrame( + [ + ["i", 1, "b", 3942.19479265207, 0], + ["i", 2, "b", 0, 0], + ["i", 3, "b", 0, 0], + ["i", 300, "b", 37.499, 0], + ["i", 301, "b", 31.7309999999999, 0], + ["j", 1, "b", 0, 0], + ["j", 2, "b", 0, 0], + ["j", 130, "l", 0, 0.282765294823514], + ["j", 131, "l", 0, 0.601075755990521], + ["j", 1025, "b", 0.0305438002923389, 0], + ["j", 1026, "b", 0.0422503416065477, 0], + ["j", 1027, "l", 0, 162679.693161095], + ["j", 1028, "l", 0, 81291.0524314291], + ], + columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], + ) + def test_read_model(self, user_config): model_data = self.model_data with StringIO(model_data) as file_buffer: @@ -707,51 +726,14 @@ def test_read_solution(self, user_config): } assert actual_status == expected_status - expected_data = pd.DataFrame( - [ - ["i", 1, "b", 3942.19479265207, 0], - ["i", 2, "b", 0, 0], - ["i", 3, "b", 0, 0], - ["i", 300, "b", 37.499, 0], - ["i", 301, "b", 31.7309999999999, 0], - ["j", 1, "b", 0, 0], - ["j", 2, "b", 0, 0], - ["j", 130, "l", 0, 0.282765294823514], - ["j", 131, "l", 0, 0.601075755990521], - ["j", 1025, "b", 0.0305438002923389, 0], - ["j", 1026, "b", 0.0422503416065477, 0], - ["j", 1027, "l", 0, 162679.693161095], - ["j", 1028, "l", 0, 81291.0524314291], - ], - columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], - ) - pd.testing.assert_frame_equal(actual_data, expected_data) + pd.testing.assert_frame_equal(actual_data, 
self.expected_solution) def test_merge_model_sol(self, user_config): model_data = self.model_data with StringIO(model_data) as file_buffer: reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) - sol_data = pd.DataFrame( - [ - ["i", 1, "b", 3942.19479265207, 0], - ["i", 2, "b", 0, 0], - ["i", 3, "b", 0, 0], - ["i", 300, "b", 37.499, 0], - ["i", 301, "b", 31.7309999999999, 0], - ["j", 1, "b", 0, 0], - ["j", 2, "b", 0, 0], - ["j", 130, "l", 0, 0.282765294823514], - ["j", 131, "l", 0, 0.601075755990521], - ["j", 1025, "b", 0.0305438002923389, 0], - ["j", 1026, "b", 0.0422503416065477, 0], - ["j", 1027, "l", 0, 162679.693161095], - ["j", 1028, "l", 0, 81291.0524314291], - ], - columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], - ) - - actual = reader._merge_model_sol(sol_data) + actual = reader._merge_model_sol(self.expected_solution) expected = pd.DataFrame( [ ["SalvageValueStorage", "SIMPLICITY,DAM,2014", 0], @@ -768,6 +750,14 @@ def test_merge_model_sol(self, user_config): pd.testing.assert_frame_equal(actual, expected) + def test_convert_to_dataframe(self, user_config): + model_data = self.model_data + sol_data = self.sol_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) + with StringIO(sol_data) as file_buffer: + reader._convert_to_dataframe(file_buffer) + def test_convert_to_dataframe_error(self, user_config): model_data = self.model_data with StringIO(model_data) as file_buffer: From fba31452440796b27db41650498644b0bdec35e9 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Thu, 17 Aug 2023 13:15:43 +0200 Subject: [PATCH 039/103] Add a read_results function to Python interface --- docs/convert.rst | 11 +++ docs/examples.rst | 4 +- docs/functionality.rst | 22 ++++-- src/otoole/__init__.py | 5 +- src/otoole/cli.py | 24 ++---- src/otoole/convert.py | 168 +++++++++++++++++++++++++++-------------- tests/test_cli.py | 4 +- tests/test_convert.py | 39 +++++++++- 8 files changed, 186 insertions(+), 91 deletions(-) diff --git a/docs/convert.rst b/docs/convert.rst index 46adc077..3c9093e7 100644 --- a/docs/convert.rst +++ b/docs/convert.rst @@ -31,6 +31,17 @@ Gurobi_ or CPLEX_ solution file:: See :func:`otoole.convert.convert_results` for more details +Reading solver results into a dict of Pandas DataFrames +------------------------------------------------------- + +The ``read_results`` function reads a CBC_, CLP_, +Gurobi_ or CPLEX_ solution file into memory:: + +>>> from otoole import read_results +>>> read_results('my_model.yaml', 'cbc', 'csv', 'my_model.sol', 'my_model_csvs', 'datafile', 'my_model.dat') + +See :func:`otoole.convert.read_results` for more details + Read in data from different Formats ----------------------------------- diff --git a/docs/examples.rst b/docs/examples.rst index 588beb48..19ffd624 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -54,7 +54,7 @@ Create a folder to hold the results:: Use ``otoole``'s ``result`` package to generate the results file:: - $ otoole results cbc csv simplicity.sol results config.yaml + $ otoole results cbc csv simplicity.sol results datafile simplicity.txt config.yaml Result Processing ----------------- @@ -104,7 +104,7 @@ Create a folder to hold the results:: Use ``otoole``'s ``result`` package to generate the result CSVs:: - $ otoole results cbc csv simplicity.sol results config.yaml + $ otoole results cbc csv simplicity.sol results datafile simplicity.txt config.yaml Template Setup -------------- diff --git a/docs/functionality.rst 
b/docs/functionality.rst index 7a027bf2..3841fd51 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -90,32 +90,38 @@ so as to speed up the model matrix generation and solution times. ~~~~~~~~~~~~~~~~~~ The ``results`` command creates a folder of CSV result files from a CBC_, CLP_, -Gurobi_ or CPLEX_ solution file:: +Gurobi_ or CPLEX_ solution file together with the input data:: $ otoole results --help - usage: otoole results [-h] [--input_datafile INPUT_DATAFILE] [--input_datapackage INPUT_DATAPACKAGE] [--write_defaults] {cbc,cplex,gurobi} {csv} from_path to_path config + usage: otoole results [-h] [--write_defaults] + {cbc,cplex,gurobi} {csv} from_path to_path {csv,datafile,excel} input_path config positional arguments: {cbc,cplex,gurobi} Result data format to convert from {csv} Result data format to convert to from_path Path to file or folder to convert from to_path Path to file or folder to convert to + {csv,datafile,excel} Input data format + input_path Path to input_data config Path to config YAML file optional arguments: - -h, --help show this help message and exit - --input_datafile INPUT_DATAFILE - Input GNUMathProg datafile required for OSeMOSYS short or fast results - --input_datapackage INPUT_DATAPACKAGE - Deprecated - --write_defaults Writes default values + -h, --help show this help message and exit + --write_defaults Writes default values .. versionadded:: v1.0.0 The ``config`` positional argument is now required +.. versionadded:: v1.1.0 + The ``input_data_format`` and ``input_path`` positional arguments are now required + .. deprecated:: v1.0.0 The ``--input_datapackage`` flag is no longer supported +.. deprecated:: v1.1.0 + The ``--input_datapackage``, ``--input_csvs`` and ``--input_datafile`` flags + have been replaced by new positional arguments ``input data format`` and ``input_path`` + .. WARNING:: If using CPLEX_, you will need to transform and sort the solution file before processing it with ``otoole``. 
Instructions on how to run the transformation diff --git a/src/otoole/__init__.py b/src/otoole/__init__.py index 323f4e78..0b4a959b 100644 --- a/src/otoole/__init__.py +++ b/src/otoole/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import sys -from otoole.convert import convert, convert_results, read, write +from otoole.convert import convert, convert_results, read, read_results, write if sys.version_info[:2] >= (3, 8): # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8` @@ -22,5 +22,6 @@ convert_results = convert_results read = read write = write +read_results = read_results -__all__ = ["convert" "convert_results", "read", "write"] +__all__ = ["convert" "convert_results", "read", "write", "read_results"] diff --git a/src/otoole/cli.py b/src/otoole/cli.py index a34a103a..1b3c9746 100644 --- a/src/otoole/cli.py +++ b/src/otoole/cli.py @@ -70,15 +70,15 @@ def validate_model(args): def _result_matrix(args): + """Covert results""" convert_results( args.config, args.from_format, args.to_format, args.from_path, args.to_path, - input_datapackage=args.input_datapackage, - input_csvs=args.input_csvs, - input_datafile=args.input_datafile, + args.input_format, + args.input_path, write_defaults=args.write_defaults, ) @@ -177,21 +177,13 @@ def get_parser(): "from_path", help="Path to file or folder to convert from" ) result_parser.add_argument("to_path", help="Path to file or folder to convert to") + result_parser.add_argument( - "--input_datafile", - help="Input GNUMathProg datafile required for OSeMOSYS short or fast results", - default=None, - ) - result_parser.add_argument( - "--input_csvs", - help="Input folder of CSVs required for OSeMOSYS short or fast results", - default=None, - ) - result_parser.add_argument( - "--input_datapackage", - help="Deprecated. Use --input_csvs instead", - default=None, + "input_format", + help="Input data format", + choices=sorted(["csv", "datafile", "excel"]), ) + result_parser.add_argument("input_path", help="Path to input_data") result_parser.add_argument("config", help="Path to config YAML file") result_parser.add_argument( "--write_defaults", diff --git a/src/otoole/convert.py b/src/otoole/convert.py index db094327..b43a484d 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -9,30 +9,72 @@ """ import logging import os -from typing import Dict, Tuple +from typing import Dict, Optional, Tuple, Union import pandas as pd from otoole.input import Context, ReadStrategy, WriteStrategy from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel -from otoole.results.results import ReadCbc, ReadCplex, ReadGurobi +from otoole.results.results import ReadCbc, ReadCplex, ReadGurobi, ReadResults from otoole.utils import _read_file, read_deprecated_datapackage, validate_config from otoole.write_strategies import WriteCsv, WriteDatafile, WriteExcel logger = logging.getLogger(__name__) +def read_results( + config: str, from_format: str, from_path: str, input_format: str, input_path: str +) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float]]: + """Read OSeMOSYS results from CBC, GLPK or Gurobi results files + + Arguments + --------- + config : str + Path to config file + from_format : str + Available options are 'datafile', 'csv', 'excel' and 'datapackage' [deprecated] + from_path : str + Path to source file (if datafile or excel) or folder (csv) + input_format: str + Format of input data. Available options are 'datafile', 'csv' and 'excel' + input_path: str + Path to input data input_format: str + Format of input data. 
Available options are 'datafile', 'csv' and 'excel' + input_path: str + Path to input data + + Returns + ------- + Tuple[dict[str, pd.DataFrame], dict[str, float]] + Dictionary of parameter and set data and dictionary of default values + """ + user_config = _get_user_config(config) + input_strategy = _get_read_strategy(user_config, input_format) + result_strategy = _get_read_result_strategy(user_config, from_format) + + if input_strategy: + input_data, _ = input_strategy.read(input_path) + else: + input_data = {} + + if result_strategy: + results, default_values = result_strategy.read(from_path, input_data=input_data) + return results, default_values + else: + msg = "Conversion from {} is not yet implemented".format(from_format) + raise NotImplementedError(msg) + + def convert_results( - config, - from_format, - to_format, - from_path, - to_path, - input_datapackage=None, - input_csvs=None, - input_datafile=None, + config: str, + from_format: str, + to_format: str, + from_path: str, + to_path: str, + input_format: str, + input_path: str, write_defaults=False, -): +) -> bool: """Post-process results from a CBC, CPLEX or Gurobi solution file into CSV format Arguments @@ -47,12 +89,10 @@ def convert_results( Path to cbc, cplex or gurobi solution file to_path : str Path to destination folder - input_datapackage : str - Path to folder containing datapackage.json - input_csvs : str - Path to folder containing CSVs - input_datafile : str - Path to datafile + input_format: str + Format of input data. Available options are 'datafile', 'csv' and 'excel' + input_path: str + Path to input data write_defaults : str Write default values to CSVs @@ -66,25 +106,11 @@ def convert_results( from_format, to_format ) - read_strategy = None - write_strategy = None - - if config: - _, ending = os.path.splitext(config) - with open(config, "r") as config_file: - user_config = _read_file(config_file, ending) - logger.info("Reading config from {}".format(config)) - logger.info("Validating config from {}".format(config)) - validate_config(user_config) + user_config = _get_user_config(config) # set read strategy - if from_format == "cbc": - read_strategy = ReadCbc(user_config=user_config) - elif from_format == "cplex": - read_strategy = ReadCplex(user_config=user_config) - elif from_format == "gurobi": - read_strategy = ReadGurobi(user_config=user_config) + read_strategy = _get_read_result_strategy(user_config, from_format) # set write strategy @@ -94,31 +120,50 @@ def convert_results( write_strategy = WriteCsv( user_config=user_config, write_defaults=write_defaults ) - - if input_datapackage: - logger.warning( - "Reading from datapackage is deprecated, trying to read from CSVs" - ) - input_csvs = read_deprecated_datapackage(input_datapackage) - logger.info("Successfully read folder of CSVs") - input_data, _ = ReadCsv(user_config=user_config).read(input_csvs) - elif input_datafile: - input_data, _ = ReadDatafile(user_config=user_config).read(input_datafile) - elif input_csvs: - input_data, _ = ReadCsv(user_config=user_config).read(input_csvs) else: - input_data = {} + raise NotImplementedError(msg) + + # read in input file + input_data, _ = read(config, input_format, input_path) if read_strategy and write_strategy: context = Context(read_strategy, write_strategy) context.convert(from_path, to_path, input_data=input_data) else: raise NotImplementedError(msg) - return False return True +def _get_read_result_strategy(user_config, from_format) -> Union[ReadResults, None]: + """Get ``ReadResults`` for gurobi, cbc and cplex 
formats + + Arguments + --------- + config : dict + User configuration describing parameters and sets + from_format : str + Available options are 'cbc', 'gurobi', and 'cplex' + + Returns + ------- + ReadStrategy or None + A ReadStrategy object. Returns None if from_format is not recognised + + """ + + if from_format == "cbc": + read_strategy: ReadResults = ReadCbc(user_config) + elif from_format == "gurobi": + read_strategy = ReadGurobi(user_config=user_config) + elif from_format == "cplex": + read_strategy = ReadCplex(user_config=user_config) + else: + return None + + return read_strategy + + def _get_user_config(config) -> dict: """Read in the configuration file @@ -273,7 +318,9 @@ def convert( ) if from_format == "datapackage": - logger.warning("Writing to datapackage is deprecated, writing to CSVs") + logger.warning( + "Reading from and writing to datapackage is deprecated, writing to CSVs" + ) from_path = read_deprecated_datapackage(from_path) to_path = os.path.join(os.path.dirname(to_path), "data") @@ -284,7 +331,7 @@ def convert( def read( - config, from_format, from_path, keep_whitespace=False + config: str, from_format: str, from_path: str, keep_whitespace: bool = False ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float]]: """Read OSeMOSYS data from datafile, csv or Excel formats @@ -315,7 +362,13 @@ def read( return read_strategy.read(from_path) -def write(config, to_format, to_path, inputs, default_values=None) -> bool: +def write( + config: str, + to_format: str, + to_path: str, + inputs, + default_values: Optional[Dict[str, float]] = None, +) -> bool: """Write OSeMOSYS data to datafile, csv or Excel formats Arguments @@ -334,13 +387,14 @@ def write(config, to_format, to_path, inputs, default_values=None) -> bool: """ user_config = _get_user_config(config) if default_values is None: - write_defaults = False + write_strategy = _get_write_strategy( + user_config, to_format, write_defaults=False + ) + write_strategy.write(inputs, to_path, {}) else: - write_defaults = True - - write_strategy = _get_write_strategy( - user_config, to_format, write_defaults=write_defaults - ) - write_strategy.write(inputs, to_path, default_values=default_values) + write_strategy = _get_write_strategy( + user_config, to_format, write_defaults=True + ) + write_strategy.write(inputs, to_path, default_values) return True diff --git a/tests/test_cli.py b/tests/test_cli.py index b7b6294f..17a8b40d 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -27,9 +27,9 @@ def test_convert_results(self): to_format, from_path, to_path, - config, - "--input_csvs", + "csv", super_simple_csvs, + config, ] actual = run(commands, capture_output=True) assert actual.returncode == 0, print(actual.stdout) diff --git a/tests/test_convert.py b/tests/test_convert.py index 2d87c901..a3287db9 100644 --- a/tests/test_convert.py +++ b/tests/test_convert.py @@ -7,7 +7,7 @@ import pandas as pd from pytest import raises -from otoole import convert, convert_results, read, write +from otoole import convert, convert_results, read, read_results, write class TestRead: @@ -125,6 +125,35 @@ def test_convert_excel_to_csv(self): assert actual[1] == "CO2\n" +class TestReadResults: + """Test the read_results function""" + + def test_read_results(self): + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + + input_path = os.path.join("tests", "fixtures", "super_simple", "csv") + input_format = "csv" + from_format = "cbc" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" + 
) + + actual, _ = read_results( + config, from_format, from_path, input_format, input_path + ) + + expected_data = [["BB", "gas_import", 2016, 2.342422]] + expected_columns = ["REGION", "TECHNOLOGY", "YEAR", "VALUE"] + index = ["REGION", "TECHNOLOGY", "YEAR"] + expected_data_frame = pd.DataFrame( + expected_data, columns=expected_columns + ).set_index(index) + + pd.testing.assert_frame_equal( + actual["AccumulatedNewCapacity"], expected_data_frame + ) + + class TestConvertResults: """Test the convert_results function""" @@ -141,7 +170,7 @@ def test_convert_results_cbc_csv(self): input_csvs = os.path.join("tests", "fixtures", "super_simple", "csv") result = convert_results( - config, from_format, to_format, from_path, to_path, input_csvs=input_csvs + config, from_format, to_format, from_path, to_path, "csv", input_csvs ) assert result is True @@ -172,7 +201,8 @@ def test_convert_results_cbc_csv_datafile(self): to_format, from_path, to_path, - input_datafile=input_datafile, + "datafile", + input_datafile, ) assert result is True @@ -201,5 +231,6 @@ def test_convert_results_cbc_csv_raises(self): to_format, from_path, to_path, - input_csvs="not_a_path", + "csv", + "not_a_path", ) From 1116d026483367185a8e174f9c761e52e3b7b62b Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 17 Aug 2023 22:54:48 -0700 Subject: [PATCH 040/103] add ReadGlpk to Python API --- src/otoole/cli.py | 1 + src/otoole/convert.py | 44 ++++++++++++++++++++++++----------- src/otoole/results/results.py | 13 ++++------- tests/test_convert.py | 14 +++++++++++ 4 files changed, 50 insertions(+), 22 deletions(-) diff --git a/src/otoole/cli.py b/src/otoole/cli.py index 7ce5c5d8..c0305f74 100644 --- a/src/otoole/cli.py +++ b/src/otoole/cli.py @@ -80,6 +80,7 @@ def _result_matrix(args): args.input_format, args.input_path, write_defaults=args.write_defaults, + glpk_model=args.glpk_model, ) diff --git a/src/otoole/convert.py b/src/otoole/convert.py index b43a484d..fa152c51 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -13,9 +13,10 @@ import pandas as pd +from otoole.exceptions import OtooleError from otoole.input import Context, ReadStrategy, WriteStrategy from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel -from otoole.results.results import ReadCbc, ReadCplex, ReadGurobi, ReadResults +from otoole.results.results import ReadCbc, ReadCplex, ReadGlpk, ReadGurobi, ReadResults from otoole.utils import _read_file, read_deprecated_datapackage, validate_config from otoole.write_strategies import WriteCsv, WriteDatafile, WriteExcel @@ -23,7 +24,12 @@ def read_results( - config: str, from_format: str, from_path: str, input_format: str, input_path: str + config: str, + from_format: str, + from_path: str, + input_format: str, + input_path: str, + glpk_model: str = None, ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float]]: """Read OSeMOSYS results from CBC, GLPK or Gurobi results files @@ -32,16 +38,15 @@ def read_results( config : str Path to config file from_format : str - Available options are 'datafile', 'csv', 'excel' and 'datapackage' [deprecated] + Available options are 'cbc', 'gurobi', 'cplex', and 'glpk' from_path : str Path to source file (if datafile or excel) or folder (csv) input_format: str Format of input data. Available options are 'datafile', 'csv' and 'excel' - input_path: str - Path to input data input_format: str - Format of input data. 
Available options are 'datafile', 'csv' and 'excel' input_path: str Path to input data + glpk_model : str + Path to *.glp model file Returns ------- @@ -50,7 +55,7 @@ def read_results( """ user_config = _get_user_config(config) input_strategy = _get_read_strategy(user_config, input_format) - result_strategy = _get_read_result_strategy(user_config, from_format) + result_strategy = _get_read_result_strategy(user_config, from_format, glpk_model) if input_strategy: input_data, _ = input_strategy.read(input_path) @@ -73,9 +78,10 @@ def convert_results( to_path: str, input_format: str, input_path: str, - write_defaults=False, + write_defaults: bool = False, + glpk_model: str = None, ) -> bool: - """Post-process results from a CBC, CPLEX or Gurobi solution file into CSV format + """Post-process results from a CBC, CPLEX, Gurobi, or GLPK solution file into CSV format Arguments --------- @@ -93,8 +99,10 @@ def convert_results( Format of input data. Available options are 'datafile', 'csv' and 'excel' input_path: str Path to input data - write_defaults : str + write_defaults : bool Write default values to CSVs + glpk_model : str + Path to *.glp model file Returns ------- @@ -110,7 +118,7 @@ def convert_results( # set read strategy - read_strategy = _get_read_result_strategy(user_config, from_format) + read_strategy = _get_read_result_strategy(user_config, from_format, glpk_model) # set write strategy @@ -135,15 +143,19 @@ def convert_results( return True -def _get_read_result_strategy(user_config, from_format) -> Union[ReadResults, None]: - """Get ``ReadResults`` for gurobi, cbc and cplex formats +def _get_read_result_strategy( + user_config, from_format, glpk_model=None +) -> Union[ReadResults, None]: + """Get ``ReadResults`` for gurobi, cbc, cplex, and glpk formats Arguments --------- config : dict User configuration describing parameters and sets from_format : str - Available options are 'cbc', 'gurobi', and 'cplex' + Available options are 'cbc', 'gurobi', 'cplex', and 'glpk' + glpk_model : str + Path to *.glp model file Returns ------- @@ -158,6 +170,10 @@ def _get_read_result_strategy(user_config, from_format) -> Union[ReadResults, No read_strategy = ReadGurobi(user_config=user_config) elif from_format == "cplex": read_strategy = ReadCplex(user_config=user_config) + elif from_format == "glpk": + if not glpk_model: + raise OtooleError(resource="Read GLPK", message="Provide glpk model file") + read_strategy = ReadGlpk(user_config=user_config, glpk_model=glpk_model) else: return None diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index ddc02fd4..11ce0656 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -333,17 +333,14 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: class ReadGlpk(ReadResultsCBC): """Reads a GLPK Solution file into memory - The user must provide both the solution file (results.sol) and the glpk - model file (model.lp) to generate the complete solution. - - glpsol --wglp model.lp -m osemosys.txt -d simplicity.txt --write results.sol + Arguments + --------- + user_config + glpk_model: Union[str, TextIO] + Path to GLPK model file. Can be created using the `--wglp` flag. """ def __init__(self, user_config: Dict[str, Dict], glpk_model: Union[str, TextIO]): - """ - glpk_model: Union[str, TextIO] - Path to GLPK model file. Can be created using the `--wglp` flag. 
- """ super().__init__(user_config) if isinstance(glpk_model, str): diff --git a/tests/test_convert.py b/tests/test_convert.py index a3287db9..8e94bccd 100644 --- a/tests/test_convert.py +++ b/tests/test_convert.py @@ -8,6 +8,7 @@ from pytest import raises from otoole import convert, convert_results, read, read_results, write +from otoole.exceptions import OtooleError class TestRead: @@ -234,3 +235,16 @@ def test_convert_results_cbc_csv_raises(self): "csv", "not_a_path", ) + + +class TestGetReadResultsStrategy: + def test_read_results_glpk_raises(self): + """Checks for .glp model file""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + input_path = "" + input_format = "csv" + from_format = "glpk" + from_path = "" + + with raises(OtooleError): + read_results(config, from_format, from_path, input_format, input_path) From ea63ed13acf12ba540e05d389cb2c2528e6570b3 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 17 Aug 2023 23:02:30 -0700 Subject: [PATCH 041/103] rename ReadResultsCbc to ReadWideResults --- src/otoole/results/results.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index 11ce0656..d5bacb68 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -75,7 +75,7 @@ def calculate_results( return results -class ReadResultsCBC(ReadResults): +class ReadWideResults(ReadResults): def get_results_from_file(self, filepath, input_data): cbc = self._convert_to_dataframe(filepath) available_results = self._convert_wide_to_long(cbc) @@ -266,7 +266,7 @@ def convert_df( return df -class ReadGurobi(ReadResultsCBC): +class ReadGurobi(ReadWideResults): """Read a Gurobi solution file into memory""" def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: @@ -290,7 +290,7 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: return df[["Variable", "Index", "Value"]].astype({"Value": float}) -class ReadCbc(ReadResultsCBC): +class ReadCbc(ReadWideResults): """Read a CBC solution file into memory Arguments @@ -330,7 +330,7 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: return df[["Variable", "Index", "Value"]].astype({"Value": float}) -class ReadGlpk(ReadResultsCBC): +class ReadGlpk(ReadWideResults): """Reads a GLPK Solution file into memory Arguments From 6b058d0cfbb8fcef8e9760a0fdc4dd92c0f8f528 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Fri, 18 Aug 2023 08:22:35 +0200 Subject: [PATCH 042/103] Resolves final issues raised in PR --- docs/functionality.rst | 5 +++-- src/otoole/input.py | 10 +--------- tests/test_input.py | 11 ----------- 3 files changed, 4 insertions(+), 22 deletions(-) diff --git a/docs/functionality.rst b/docs/functionality.rst index a4a6b8ed..671f7204 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -114,13 +114,14 @@ Gurobi_ or CPLEX_ solution file together with the input data:: .. versionadded:: v1.1.0 The ``input_data_format`` and ``input_path`` positional arguments are now required + supporting any supported format of input data for results processing. .. deprecated:: v1.0.0 The ``--input_datapackage`` flag is no longer supported .. 
deprecated:: v1.1.0 - The ``--input_datapackage``, ``--input_csvs`` and ``--input_datafile`` flags - have been replaced by new positional arguments ``input data format`` and ``input_path`` + The ``--input_datapackage`` and ``--input_datafile`` flags + have been replaced by new positional arguments ``input_data_format`` and ``input_path`` .. WARNING:: If using CPLEX_, you will need to transform and sort the solution file before diff --git a/src/otoole/input.py b/src/otoole/input.py index 21b0d3b6..0c97c586 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -293,10 +293,6 @@ def _expand_defaults( Dict[str, pd.DataFrame] Input data with expanded default values replacing missing entries - Raises - ------ - KeyError - If set definitions are not in input_data and input_data is not supplied """ sets = [x for x in self.user_config if self.user_config[x]["type"] == "set"] @@ -319,11 +315,7 @@ def _expand_defaults( # save set information for each parameter index_data = {} for index in data.index.names: - try: - index_data[index] = self.input_data[index]["VALUE"].to_list() - except KeyError as ex: - logger.info("Can not write default values. Supply input data") - raise KeyError(ex) + index_data[index] = self.input_data[index]["VALUE"].to_list() # set index if len(index_data) > 1: diff --git a/tests/test_input.py b/tests/test_input.py index 773a7c53..292e4309 100644 --- a/tests/test_input.py +++ b/tests/test_input.py @@ -301,17 +301,6 @@ def test_expand_result_defaults( ) assert_frame_equal(actual[result_data[1]], result_data[2]) - def test_expand_defaults_exception( - self, user_config, simple_default_values, result_data - ): - write_strategy = DummyWriteStrategy( - user_config=user_config, default_values=simple_default_values - ) - with raises(KeyError): - write_strategy._expand_defaults( - result_data[0], write_strategy.default_values - ) - class TestReadStrategy: From e02f034068f5770b299d12561c5b8e5ecb439197 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 17 Aug 2023 23:36:38 -0700 Subject: [PATCH 043/103] refactor ReadGlpk merge_model_sol() --- src/otoole/results/results.py | 34 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index d5bacb68..43205659 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -519,27 +519,17 @@ def _merge_model_sol(self, sol: pd.DataFrame) -> pd.DataFrame: """ model = self.model.copy() + model.index = model["ID"].str.cat(model["NUM"].astype(str)) + model = model.drop(columns=["ID", "NUM"]) - # create lookup ids using the id and num columns to coordinate merge - model["lookup"] = model["ID"].str.cat(model["NUM"].astype(str)) - model = model.set_index("lookup") - model_lookup = model.to_dict(orient="index") - - sol = sol.loc[sol["ID"] == "j"] # remove constraints and leave variables - vars = sol.copy() # setting with copy warning - vars["lookup"] = vars["ID"].str.cat(vars["NUM"].astype(str)) - vars = vars.set_index("lookup") - vars_lookup = vars.to_dict(orient="index") - - # assemble dataframe - data = [] - for lookup_id, lookup_values in vars_lookup.items(): - data.append( - [ - model_lookup[lookup_id]["NAME"], - model_lookup[lookup_id]["INDEX"], - lookup_values["PRIM"], - ] - ) + sol.index = sol["ID"].str.cat(sol["NUM"].astype(str)) + sol = sol.drop(columns=["ID", "NUM", "STATUS", "DUAL"]) + + df = model.join(sol) + df = ( + df[df.index.str.startswith("j")] + .reset_index(drop=True) + .rename(columns={"NAME": 
"Variable", "INDEX": "Index", "PRIM": "Value"}) + ) - return pd.DataFrame(data, columns=["Variable", "Index", "Value"]) + return df From 9a42fb60f66f87eb574f8b0770b88771bf2f1c7d Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 18 Aug 2023 01:30:04 -0700 Subject: [PATCH 044/103] minor doc fixes --- docs/examples.rst | 19 +++++++++++++------ src/otoole/convert.py | 6 +++--- src/otoole/results/results.py | 20 ++++++++++---------- 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/docs/examples.rst b/docs/examples.rst index 077da367..3c2f1885 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -189,21 +189,28 @@ Data Processing with GLPK Objective ~~~~~~~~~ -Build and solve a model using only GLPK and otoole +Build and solve a model using GLPK and otoole 1. Build the solve the model using GLPK ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use GLPK_ to build the model, save it as ``simplicity.lp``, solve the model, +Use GLPK_ to build the model, save the problem as ``simplicity.glp``, solve the model, and save the solution as ``simplicity.sol```:: - $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wglp simplicity.lp --write simplicity.sol + $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wglp simplicity.glp --write simplicity.sol 2. Use otoole to process the solution in CSVs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When processing solutions from GLPK, both the model file (``*.lp``) and solution -file (``*.sol``) must be passed:: +Use ``otoole``'s ``results`` command to transform the soltuion file into a folder of CSVs +under the directory ``results-glpk``. When processing solutions from GLPK, the model file (``*.glp``) +must also be passed:: - $ otoole results glpk csv simplicity.sol results config.yaml --glpk_model simplicity.lp --input_datafile simplicity.txt + $ otoole results glpk csv simplicity.sol results-glpk datafile simplicity.txt config.yaml --glpk_model simplicity.glp + +.. NOTE:: + By default, MathProg OSeMOSYS models will write out folder of CSV results to a ``results/`` + directory if solving via GLPK. However, for programatically accessing results, using ``otoole`` + to control the read/write location, and for supporting future implementations of OSeMOSYS, + using ``otoole`` can be benifical. Model Visualization diff --git a/src/otoole/convert.py b/src/otoole/convert.py index fa152c51..9a315886 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -46,7 +46,7 @@ def read_results( input_path: str Path to input data glpk_model : str - Path to *.glp model file + Path to ``*.glp`` model file Returns ------- @@ -102,7 +102,7 @@ def convert_results( write_defaults : bool Write default values to CSVs glpk_model : str - Path to *.glp model file + Path to ``*.glp`` model file Returns ------- @@ -155,7 +155,7 @@ def _get_read_result_strategy( from_format : str Available options are 'cbc', 'gurobi', 'cplex', and 'glpk' glpk_model : str - Path to *.glp model file + Path to ``*.glp`` model file Returns ------- diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index 43205659..ee688a77 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -452,11 +452,11 @@ def read_solution( ROW is the ordinal number of the row ST is one of: - b = inactive constraint; - l = inequality constraint active on its lower bound; - u = inequality constraint active on its upper bound; - f = active free (unounded) row; - s = active equality constraint. 
+ - b = inactive constraint; + - l = inequality constraint active on its lower bound; + - u = inequality constraint active on its upper bound; + - f = active free (unounded) row; + - s = active equality constraint. PRIM specifies the row primal value (float) DUAL specifies the row dual value (float) @@ -466,11 +466,11 @@ def read_solution( COL specifies the column ordinal number ST contains one of the following lower-case letters that specifies the column status in the basic solution: - b = basic variable - l = non-basic variable having its lower bound active - u = non-basic variable having its upper bound active - f = non-basic free (unbounded) variable - s = non-basic fixed variable. + - b = basic variable + - l = non-basic variable having its lower bound active + - u = non-basic variable having its upper bound active + - f = non-basic free (unbounded) variable + - s = non-basic fixed variable. PRIM field contains column primal value (float) DUAL field contains the column dual value (float) """ From c3d80e234cff717542563d6d83a38529c0f4b1a5 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 21 Aug 2023 01:30:19 -0700 Subject: [PATCH 045/103] read cplex to df logic --- src/otoole/results/results.py | 187 +++++++++++++++++++--------------- 1 file changed, 103 insertions(+), 84 deletions(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index ee688a77..75e89e89 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -6,7 +6,8 @@ import pandas as pd from otoole.input import ReadStrategy -from otoole.preprocess.longify_data import check_datatypes + +# from otoole.preprocess.longify_data import check_datatypes from otoole.results.result_package import ResultsPackage LOGGER = logging.getLogger(__name__) @@ -179,91 +180,109 @@ def rename_duplicate_column(index: List) -> List: return column -class ReadCplex(ReadResults): - """ """ - - def get_results_from_file( - self, filepath: Union[str, TextIO], input_data - ) -> Dict[str, pd.DataFrame]: - - if input_data: - years = input_data["YEAR"].values # type: List - start_year = int(years[0]) - end_year = int(years[-1]) - else: - raise RuntimeError("To process CPLEX results please provide the input file") - - if isinstance(filepath, str): - with open(filepath, "r") as sol_file: - data = self.extract_rows(sol_file, start_year, end_year) - elif isinstance(filepath, StringIO): - data = self.extract_rows(filepath, start_year, end_year) - else: - raise TypeError("Argument filepath type must be a string or an open file") - - results = {} - - for name in data.keys(): - results[name] = self.convert_df(data[name], name, start_year, end_year) +# class ReadCplex(ReadResults): +# """ """ + +# def get_results_from_file( +# self, filepath: Union[str, TextIO], input_data +# ) -> Dict[str, pd.DataFrame]: + +# if input_data: +# years = input_data["YEAR"].values # type: List +# start_year = int(years[0]) +# end_year = int(years[-1]) +# else: +# raise RuntimeError("To process CPLEX results please provide the input file") + +# if isinstance(filepath, str): +# with open(filepath, "r") as sol_file: +# data = self.extract_rows(sol_file, start_year, end_year) +# elif isinstance(filepath, StringIO): +# data = self.extract_rows(filepath, start_year, end_year) +# else: +# raise TypeError("Argument filepath type must be a string or an open file") + +# results = {} + +# for name in data.keys(): +# results[name] = self.convert_df(data[name], name, start_year, end_year) + +# return results + +# def extract_rows( +# self, sol_file: 
TextIO, start_year: int, end_year: int +# ) -> Dict[str, List[List[str]]]: +# """ """ +# data = {} # type: Dict[str, List[List[str]]] +# for linenum, line in enumerate(sol_file): +# line = line.replace("\n", "") +# try: +# row_as_list = line.split("\t") # type: List[str] +# name = row_as_list[0] # type: str + +# if name in data.keys(): +# data[name].append(row_as_list) +# else: +# data[name] = [row_as_list] +# except ValueError as ex: +# msg = "Error caused at line {}: {}. {}" +# raise ValueError(msg.format(linenum, line, ex)) +# return data + +# def extract_variable_dimensions_values(self, data: List) -> Tuple[str, Tuple, List]: +# """Extracts useful information from a line of a results file""" +# variable = data[0] +# try: +# number = len(self.results_config[variable]["indices"]) +# except KeyError as ex: +# print(data) +# raise KeyError(ex) +# dimensions = tuple(data[1:(number)]) +# values = data[(number):] +# return (variable, dimensions, values) + +# def convert_df( +# self, data: List[List[str]], variable: str, start_year: int, end_year: int +# ) -> pd.DataFrame: +# """Read the cplex lines into a pandas DataFrame""" +# index = self.results_config[variable]["indices"] +# columns = ["variable"] + index[:-1] + list(range(start_year, end_year + 1, 1)) +# df = pd.DataFrame(data=data, columns=columns) +# df, index = check_duplicate_index(df, columns, index) +# df = df.drop(columns="variable") + +# LOGGER.debug( +# f"Attempting to set index for {variable} with columns {index[:-1]}" +# ) +# try: +# df = df.set_index(index[:-1]) +# except NotImplementedError as ex: +# LOGGER.error(f"Error setting index for {df.head()}") +# raise NotImplementedError(ex) +# df = df.melt(var_name="YEAR", value_name="VALUE", ignore_index=False) +# df = df.reset_index() +# df = check_datatypes(df, self.user_config, variable) +# df = df.set_index(index) +# df = df[(df != 0).any(axis=1)] +# return df + + +class ReadCplex(ReadWideResults): + """Read a CPLEX solution file into memeory""" - return results + def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: + """Reads a Cplex solution file into a pandas DataFrame - def extract_rows( - self, sol_file: TextIO, start_year: int, end_year: int - ) -> Dict[str, List[List[str]]]: - """ """ - data = {} # type: Dict[str, List[List[str]]] - for linenum, line in enumerate(sol_file): - line = line.replace("\n", "") - try: - row_as_list = line.split("\t") # type: List[str] - name = row_as_list[0] # type: str - - if name in data.keys(): - data[name].append(row_as_list) - else: - data[name] = [row_as_list] - except ValueError as ex: - msg = "Error caused at line {}: {}. 
{}" - raise ValueError(msg.format(linenum, line, ex)) - return data - - def extract_variable_dimensions_values(self, data: List) -> Tuple[str, Tuple, List]: - """Extracts useful information from a line of a results file""" - variable = data[0] - try: - number = len(self.results_config[variable]["indices"]) - except KeyError as ex: - print(data) - raise KeyError(ex) - dimensions = tuple(data[1:(number)]) - values = data[(number):] - return (variable, dimensions, values) - - def convert_df( - self, data: List[List[str]], variable: str, start_year: int, end_year: int - ) -> pd.DataFrame: - """Read the cplex lines into a pandas DataFrame""" - index = self.results_config[variable]["indices"] - columns = ["variable"] + index[:-1] + list(range(start_year, end_year + 1, 1)) - df = pd.DataFrame(data=data, columns=columns) - df, index = check_duplicate_index(df, columns, index) - df = df.drop(columns="variable") - - LOGGER.debug( - f"Attempting to set index for {variable} with columns {index[:-1]}" - ) - try: - df = df.set_index(index[:-1]) - except NotImplementedError as ex: - LOGGER.error(f"Error setting index for {df.head()}") - raise NotImplementedError(ex) - df = df.melt(var_name="YEAR", value_name="VALUE", ignore_index=False) - df = df.reset_index() - df = check_datatypes(df, self.user_config, variable) - df = df.set_index(index) - df = df[(df != 0).any(axis=1)] - return df + Arguments + --------- + file_path : str + """ + df = pd.read_xml(file_path, xpath=".//variable", parser="etree") + df[["Variable", "Index"]] = df["name"].str.split("(", expand=True) + df["Index"] = df["Index"].str.replace(")", "", regex=False) + LOGGER.debug(df) + df = df[(df["value"] != 0)].reset_index().rename(columns={"value": "Value"}) + return df[["Variable", "Index", "Value"]].astype({"Value": float}) class ReadGurobi(ReadWideResults): From 81fbabac8cdaedac6f7ec72890d0221994cb9dc2 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 21 Aug 2023 02:10:37 -0700 Subject: [PATCH 046/103] remove old commented code --- src/otoole/results/results.py | 87 ----------------------------------- 1 file changed, 87 deletions(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index 75e89e89..f12ffc6e 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -180,93 +180,6 @@ def rename_duplicate_column(index: List) -> List: return column -# class ReadCplex(ReadResults): -# """ """ - -# def get_results_from_file( -# self, filepath: Union[str, TextIO], input_data -# ) -> Dict[str, pd.DataFrame]: - -# if input_data: -# years = input_data["YEAR"].values # type: List -# start_year = int(years[0]) -# end_year = int(years[-1]) -# else: -# raise RuntimeError("To process CPLEX results please provide the input file") - -# if isinstance(filepath, str): -# with open(filepath, "r") as sol_file: -# data = self.extract_rows(sol_file, start_year, end_year) -# elif isinstance(filepath, StringIO): -# data = self.extract_rows(filepath, start_year, end_year) -# else: -# raise TypeError("Argument filepath type must be a string or an open file") - -# results = {} - -# for name in data.keys(): -# results[name] = self.convert_df(data[name], name, start_year, end_year) - -# return results - -# def extract_rows( -# self, sol_file: TextIO, start_year: int, end_year: int -# ) -> Dict[str, List[List[str]]]: -# """ """ -# data = {} # type: Dict[str, List[List[str]]] -# for linenum, line in enumerate(sol_file): -# line = line.replace("\n", "") -# try: -# row_as_list = line.split("\t") # type: List[str] -# name = 
row_as_list[0] # type: str - -# if name in data.keys(): -# data[name].append(row_as_list) -# else: -# data[name] = [row_as_list] -# except ValueError as ex: -# msg = "Error caused at line {}: {}. {}" -# raise ValueError(msg.format(linenum, line, ex)) -# return data - -# def extract_variable_dimensions_values(self, data: List) -> Tuple[str, Tuple, List]: -# """Extracts useful information from a line of a results file""" -# variable = data[0] -# try: -# number = len(self.results_config[variable]["indices"]) -# except KeyError as ex: -# print(data) -# raise KeyError(ex) -# dimensions = tuple(data[1:(number)]) -# values = data[(number):] -# return (variable, dimensions, values) - -# def convert_df( -# self, data: List[List[str]], variable: str, start_year: int, end_year: int -# ) -> pd.DataFrame: -# """Read the cplex lines into a pandas DataFrame""" -# index = self.results_config[variable]["indices"] -# columns = ["variable"] + index[:-1] + list(range(start_year, end_year + 1, 1)) -# df = pd.DataFrame(data=data, columns=columns) -# df, index = check_duplicate_index(df, columns, index) -# df = df.drop(columns="variable") - -# LOGGER.debug( -# f"Attempting to set index for {variable} with columns {index[:-1]}" -# ) -# try: -# df = df.set_index(index[:-1]) -# except NotImplementedError as ex: -# LOGGER.error(f"Error setting index for {df.head()}") -# raise NotImplementedError(ex) -# df = df.melt(var_name="YEAR", value_name="VALUE", ignore_index=False) -# df = df.reset_index() -# df = check_datatypes(df, self.user_config, variable) -# df = df.set_index(index) -# df = df[(df != 0).any(axis=1)] -# return df - - class ReadCplex(ReadWideResults): """Read a CPLEX solution file into memeory""" From c413ae66afa67cae2e1adb2c755bb485cf0476cd Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 22 Aug 2023 10:56:51 -0700 Subject: [PATCH 047/103] updated ReadCplex tests --- src/otoole/results/results.py | 12 +- tests/test_read_strategies.py | 301 ++++++++++++---------------------- 2 files changed, 116 insertions(+), 197 deletions(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index f12ffc6e..63604154 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -188,7 +188,8 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: Arguments --------- - file_path : str + user_config : Dict[str, Dict] + file_path : Union[str, TextIO] """ df = pd.read_xml(file_path, xpath=".//variable", parser="etree") df[["Variable", "Index"]] = df["name"].str.split("(", expand=True) @@ -206,7 +207,8 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: Arguments --------- - file_path : str + user_config : Dict[str, Dict] + file_path : Union[str, TextIO] """ df = pd.read_csv( file_path, @@ -227,8 +229,8 @@ class ReadCbc(ReadWideResults): Arguments --------- - user_config - results_config + user_config : Dict[str, Dict] + results_config : Dict[str, Dict] """ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: @@ -267,7 +269,7 @@ class ReadGlpk(ReadWideResults): Arguments --------- - user_config + user_config : Dict[str, Dict] glpk_model: Union[str, TextIO] Path to GLPK model file. Can be created using the `--wglp` flag. 
""" diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index 5658dd68..243f2dfe 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -8,7 +8,8 @@ from pytest import mark, raises from otoole.exceptions import OtooleDeprecationError, OtooleError -from otoole.preprocess.longify_data import check_datatypes + +# from otoole.preprocess.longify_data import check_datatypes from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel, ReadMemory from otoole.results.results import ( ReadCbc, @@ -23,212 +24,128 @@ class TestReadCplex: - cplex_empty = ( - "AnnualFixedOperatingCost REGION AOBACKSTOP 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" - ) - cplex_short = "AnnualFixedOperatingCost REGION CDBACKSTOP 0.0 0.0 137958.8400384134 305945.38410619126 626159.9611543404 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0" - cplex_long = "RateOfActivity REGION S1D1 CGLFRCFURX 1 0.0 0.0 0.0 0.0 0.0 0.3284446367303371 0.3451714779880536 0.3366163200621617 0.3394945166233896 0.3137488154250392 0.28605725055560716 0.2572505015401749 0.06757558148965725 0.0558936625751148 0.04330608461292407 0.0" - - cplex_mid_empty = ( - pd.DataFrame( - data=[], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ) - .astype({"VALUE": float}) - .set_index(["REGION", "TECHNOLOGY", "YEAR"]) - ) - - cplex_mid_short = pd.DataFrame( - data=[ - ["REGION", "CDBACKSTOP", 2017, 137958.8400384134], - ["REGION", "CDBACKSTOP", 2018, 305945.38410619126], - ["REGION", "CDBACKSTOP", 2019, 626159.9611543404], - ], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - - cplex_mid_long = pd.DataFrame( - data=[ - ["REGION", "S1D1", "CGLFRCFURX", 1, 2020, 0.3284446367303371], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2021, 0.3451714779880536], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2022, 0.3366163200621617], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2023, 0.3394945166233896], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2024, 0.3137488154250392], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2025, 0.28605725055560716], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2026, 0.2572505015401749], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2027, 0.06757558148965725], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2028, 0.0558936625751148], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2029, 0.04330608461292407], - ], - columns=[ - "REGION", - "TIMESLICE", - "TECHNOLOGY", - "MODE_OF_OPERATION", - "YEAR", - "VALUE", - ], - ).set_index(["REGION", "TIMESLICE", "TECHNOLOGY", "MODE_OF_OPERATION", "YEAR"]) + cplex_data = """ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +""" - dataframe_short = { - "AnnualFixedOperatingCost": pd.DataFrame( - data=[ - ["REGION", "CDBACKSTOP", 2017, 137958.8400384134], - ["REGION", "CDBACKSTOP", 2018, 305945.3841061913], - ["REGION", "CDBACKSTOP", 2019, 626159.9611543404], - ], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - } - - dataframe_long = { - "RateOfActivity": pd.DataFrame( - data=[ - ["REGION", "S1D1", "CGLFRCFURX", 1, 2020, 0.3284446367303371], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2021, 0.3451714779880536], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2022, 0.3366163200621617], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2023, 0.3394945166233896], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2024, 0.3137488154250392], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2025, 0.28605725055560716], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2026, 0.2572505015401749], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2027, 0.06757558148965725], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2028, 0.0558936625751148], - ["REGION", "S1D1", "CGLFRCFURX", 1, 2029, 0.04330608461292407], - ], - columns=[ - "REGION", - "TIMESLICE", - "TECHNOLOGY", - "MODE_OF_OPERATION", - "YEAR", - "VALUE", + def test_convert_to_dataframe(self, user_config): + input_file = self.cplex_data + reader = ReadCplex(user_config) + with StringIO(input_file) as file_buffer: + actual = reader._convert_to_dataframe(file_buffer) + # print(actual) + expected = pd.DataFrame( + [ + ["NewCapacity", "SIMPLICITY,ETHPLANT,2015", 0.030000000000000027], + ["NewCapacity", "SIMPLICITY,ETHPLANT,2016", 0.030999999999999917], + ["RateOfActivity", "SIMPLICITY,ID,HYD1,1,2020", 0.25228800000000001], + ["RateOfActivity", "SIMPLICITY,ID,HYD1,1,2021", 0.25228800000000001], + ["RateOfActivity", "SIMPLICITY,ID,HYD1,1,2022", 0.25228800000000001], ], - ).set_index(["REGION", "TIMESLICE", "TECHNOLOGY", "MODE_OF_OPERATION", "YEAR"]) - } - - test_data = [ - (cplex_short, dataframe_short), - (cplex_long, dataframe_long), - ] - - @mark.parametrize("cplex_input,expected", test_data, ids=["short", "long"]) - def test_read_cplex_to_dataframe(self, cplex_input, expected, user_config): - cplex_reader = ReadCplex(user_config=user_config) - - input_data = { - "YEAR": pd.DataFrame(data=list(range(2015, 2031, 1)), columns=["VALUE"]), - "REGION": pd.DataFrame(data=["REGION"], columns=["VALUE"]), - "TECHNOLOGY": pd.DataFrame( - data=["CDBACKSTOP", "CGLFRCFURX"], columns=["VALUE"] - ), - "MODE_OF_OPERATION": pd.DataFrame(data=[1], columns=["VALUE"]), - "TIMESLICE": pd.DataFrame(data=["S1D1"], columns=["VALUE"]), - } - - with StringIO(cplex_input) as file_buffer: - actual, _ = cplex_reader.read(file_buffer, input_data=input_data) - for name, item in actual.items(): - pd.testing.assert_frame_equal(item, expected[name]) - - test_data_mid = [(cplex_short, cplex_mid_short), (cplex_long, cplex_mid_long)] - - def test_read_empty_cplex_to_dataframe(self, user_config): - cplex_input = self.cplex_empty - - cplex_reader = ReadCplex(user_config) + columns=["Variable", "Index", "Value"], + ).astype({"Variable": str, "Index": str, "Value": float}) - input_data = { - "YEAR": pd.DataFrame(data=list(range(2015, 2031, 1)), columns=["VALUE"]) - } + pd.testing.assert_frame_equal(actual, expected) - with StringIO(cplex_input) as file_buffer: - data, _ = cplex_reader.read(file_buffer, input_data=input_data) - assert "AnnualFixedOperatingCost" in data + def test_solution_to_dataframe(self, user_config): + input_file = self.cplex_data + reader = ReadCplex(user_config) + 
with StringIO(input_file) as file_buffer: + actual = reader.read(file_buffer) + # print(actual) expected = ( pd.DataFrame( - data=[], + [ + ["SIMPLICITY", "ETHPLANT", 2015, 0.030000000000000027], + ["SIMPLICITY", "ETHPLANT", 2016, 0.030999999999999917], + ], columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], ) - .astype({"REGION": str, "VALUE": float, "YEAR": int, "TECHNOLOGY": str}) + .astype({"REGION": str, "TECHNOLOGY": str, "YEAR": int, "VALUE": float}) .set_index(["REGION", "TECHNOLOGY", "YEAR"]) ) - actual = data["AnnualFixedOperatingCost"] - pd.testing.assert_frame_equal(actual, expected, check_index_type=False) - - test_data_to_cplex = [ - (cplex_empty, cplex_mid_empty), - (cplex_short, cplex_mid_short), - (cplex_long, cplex_mid_long), - ] - - @mark.parametrize( - "cplex_input,expected", test_data_to_cplex, ids=["empty", "short", "long"] - ) - def test_convert_cplex_to_df(self, cplex_input, expected, user_config): - - data = cplex_input.split("\t") - variable = data[0] - cplex_reader = ReadCplex(user_config=user_config) - actual = cplex_reader.convert_df([data], variable, 2015, 2030) - pd.testing.assert_frame_equal(actual, expected, check_index_type=False) - def test_convert_lines_to_df_empty(self, user_config): + pd.testing.assert_frame_equal(actual[0]["NewCapacity"], expected) - data = [ - [ - "AnnualFixedOperatingCost", - "REGION", - "AOBACKSTOP", - "0", - "0", - "0", - "0", - "0", - "0", - "0", - "0", - "0", - ] - ] - variable = "AnnualFixedOperatingCost" - cplex_reader = ReadCplex(user_config) - actual = cplex_reader.convert_df(data, variable, 2015, 2023) - pd.testing.assert_frame_equal( - actual, + expected = ( pd.DataFrame( - data=[], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + [ + ["SIMPLICITY", "ID", "HYD1", 1, 2020, 0.25228800000000001], + ["SIMPLICITY", "ID", "HYD1", 1, 2021, 0.25228800000000001], + ["SIMPLICITY", "ID", "HYD1", 1, 2022, 0.25228800000000001], + ], + columns=[ + "REGION", + "TIMESLICE", + "TECHNOLOGY", + "MODE_OF_OPERATION", + "YEAR", + "VALUE", + ], + ) + .astype( + { + "REGION": str, + "TIMESLICE": str, + "TECHNOLOGY": str, + "MODE_OF_OPERATION": int, + "YEAR": int, + "VALUE": float, + } + ) + .set_index( + ["REGION", "TIMESLICE", "TECHNOLOGY", "MODE_OF_OPERATION", "YEAR"] ) - .astype({"REGION": str, "TECHNOLOGY": str, "YEAR": int, "VALUE": float}) - .set_index(["REGION", "TECHNOLOGY", "YEAR"]), - check_index_type=False, ) - - def test_check_datatypes_with_empty(self): - - df = pd.DataFrame(data=[], columns=["REGION", "FUEL", "YEAR", "VALUE"]) - - parameter = "AccumulatedAnnualDemand" - - config_dict = { - "AccumulatedAnnualDemand": { - "indices": ["REGION", "FUEL", "YEAR"], - "type": "param", - "dtype": float, - "default": 0, - }, - "REGION": {"dtype": "str", "type": "set"}, - "FUEL": {"dtype": "str", "type": "set"}, - "YEAR": {"dtype": "int", "type": "set"}, - } - - actual = check_datatypes(df, config_dict, parameter) - - expected = pd.DataFrame( - data=[], columns=["REGION", "FUEL", "YEAR", "VALUE"] - ).astype({"REGION": str, "FUEL": str, "YEAR": int, "VALUE": float}) - - pd.testing.assert_frame_equal(actual, expected, check_index_type=False) + pd.testing.assert_frame_equal(actual[0]["RateOfActivity"], expected) class TestReadGurobi: From 28dcf28c0badb16b6e15f2717df95951a2598646 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 22 Aug 2023 13:16:14 -0700 Subject: [PATCH 048/103] added tests for longify data --- tests/test_read_strategies.py | 43 +++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git 
a/tests/test_read_strategies.py b/tests/test_read_strategies.py index 243f2dfe..a7a20d10 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -8,8 +8,7 @@ from pytest import mark, raises from otoole.exceptions import OtooleDeprecationError, OtooleError - -# from otoole.preprocess.longify_data import check_datatypes +from otoole.preprocess.longify_data import check_datatypes from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel, ReadMemory from otoole.results.results import ( ReadCbc, @@ -1164,3 +1163,43 @@ def test_whitespace_converter( reader = ReadCsv(user_config=user_config, keep_whitespace=keep_whitespace) actual = reader._whitespace_converter(indices) assert actual == expected + + +class TestLongifyData: + """Tests for the preprocess.longify_data module""" + + # example availability factor data + data_valid = pd.DataFrame( + [ + ["SIMPLICITY", "ETH", 2014, 1.0], + ["SIMPLICITY", "RAWSUG", 2014, 0.5], + ["SIMPLICITY", "ETH", 2015, 1.03], + ["SIMPLICITY", "RAWSUG", 2015, 0.51], + ["SIMPLICITY", "ETH", 2016, 1.061], + ["SIMPLICITY", "RAWSUG", 2016, 0.519], + ], + columns=["REGION", "FUEL", "YEAR", "VALUE"], + ) + + data_invalid = pd.DataFrame( + [ + ["SIMPLICITY", "ETH", "invalid", 1.0], + ["SIMPLICITY", "RAWSUG", 2014, 0.5], + ], + columns=["REGION", "FUEL", "YEAR", "VALUE"], + ) + + def test_check_datatypes_valid(self, user_config): + df = self.data_valid.astype( + {"REGION": str, "FUEL": str, "YEAR": int, "VALUE": float} + ) + actual = check_datatypes(df, user_config, "AvailabilityFactor") + expected = df.copy() + + pd.testing.assert_frame_equal(actual, expected) + + def test_check_datatypes_invalid(self, user_config): + df = self.data_invalid + + with raises(ValueError): + check_datatypes(df, user_config, "AvailabilityFactor") From 53871a850877822d13018be9d2b226d9072fd69d Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 22 Aug 2023 14:29:10 -0700 Subject: [PATCH 049/103] update cli example docs --- docs/examples.rst | 168 +++++++++++++++++++++------------------------- 1 file changed, 77 insertions(+), 91 deletions(-) diff --git a/docs/examples.rst b/docs/examples.rst index 3c2f1885..281a9831 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -6,7 +6,7 @@ Examples This page will present examples to show the full functionality of ``otoole``. It will walk through the ``convert``, ``results``, ``setup``, ``viz`` and ``validate`` -functionality in seperate simple use cases. +functionality in separate simple use cases. .. NOTE:: To follow these examples, clone the Simplicity_ repository and run all commands @@ -34,12 +34,12 @@ abbreviated instructions are shown below To install GLPK on **Linux**, run the command:: - sudo apt-get update - sudo apt-get install glpk glpk-utils + $ sudo apt-get update + $ sudo apt-get install glpk glpk-utils To install GLPK on **Mac**, run the command:: - brew install glpk + $ brew install glpk To install GLPK on **Windows**, follow the instructions on the `GLPK Website`_. Be sure to add GLPK to @@ -48,7 +48,7 @@ your environment variables after installation Alternatively, if you use Anaconda_ to manage your Python packages, you can install GLPK via the command:: - conda install -c conda-forge glpk + $ conda install -c conda-forge glpk 2. Test the GLPK install ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -58,6 +58,9 @@ Once installed, you should be able to call the ``glpsol`` command:: GLPSOL: GLPK LP/MIP Solver, v4.65 No input problem file specified; try glpsol --help +.. 
TIP:: + See the `GLPK Wiki`_ for more information on the ``glpsol`` command + 3. Install CBC ~~~~~~~~~~~~~~ @@ -67,11 +70,11 @@ instructions are shown below To install CBC on **Linux**, run the command:: - sudo apt-get install coinor-cbc coinor-libcbc-dev + $ sudo apt-get install coinor-cbc coinor-libcbc-dev To install CBC on **Mac**, run the command:: - brew install coin-or-tools/coinor/cbc + $ brew install coin-or-tools/coinor/cbc To install CBC on **Windows**, follow the install instruction on the CBC_ website. @@ -79,7 +82,7 @@ website. Alternatively, if you use Anaconda_ to manage your Python packages, you can install CBC via the command:: - conda install -c conda-forge coincbc + $ conda install -c conda-forge coincbc 4. Test the CBC install ~~~~~~~~~~~~~~~~~~~~~~~ @@ -96,122 +99,103 @@ Once installed, you should be able to directly call CBC:: You can exit the solver by typing ``quit`` -Data Conversion with CSVs -------------------------- +Input Data Conversion +--------------------- Objective ~~~~~~~~~ -Use a folder of CSV data to build and solve an OSeMOSYS model with CBC_. Generate -the full suite of OSeMOSYS results. - -1. ``otoole`` Convert -~~~~~~~~~~~~~~~~~~~~~ -We first want to convert the folder of Simplicity_ CSVs into -an OSeMOSYS datafile called ``simplicity.txt``:: - - $ otoole convert csv datafile data simplicity.txt config.yaml - -2. Build the Model -~~~~~~~~~~~~~~~~~~~ -Use GLPK_ to build the model and save it as ``simplicity.lp``:: +Convert input data between CSV, Excel, and GNU MathProg data formats. - $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wlp simplicity.lp --check +1. Clone ``Simplicity`` +~~~~~~~~~~~~~~~~~~~~~~~ +If not already done so, clone the Simplicity_ repository:: -.. TIP:: - See the `GLPK Wiki`_ for more information on the ``glpsol`` command + $ git clone https://github.com/OSeMOSYS/simplicity.git + $ cd simplicity -3. Solve the Model -~~~~~~~~~~~~~~~~~~ -Use CBC_ to solve the model and save the solution file as ``simplicity.sol``:: +.. NOTE:: + Further information on the ``config.yaml`` file is in the :ref:`template-setup` section - $ cbc simplicity.lp solve -solu simplicity.sol +2. Convert CSV data into MathProg data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert the folder of Simplicity_ CSVs (``data/``) into an OSeMOSYS datafile called ``simplicity.txt``:: -4. Generate the full set of results -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use ``otoole``'s ``result`` package to generate the results file:: + $ otoole convert csv datafile data simplicity.txt config.yaml - $ otoole results cbc csv simplicity.sol results datafile simplicity.txt config.yaml +3. Convert MathProg data into Excel Data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert the new Simplicity_ datafile (``simplicity.txt``) into Excel data called ``simplicity.xlsx``:: -5. View Results -~~~~~~~~~~~~~~~ -Results are now viewable in the files ``results/*.csv`` + $ otoole convert datafile excel simplicity.txt simplicity.xlsx config.yaml .. TIP:: - Before moving onto the next section, remove all the generated files:: + Excel workbooks are an easy way for humans to interface with OSeMOSYS data! + +4. Convert Excel Data into CSV data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert the new Simplicity_ excel data (``simplicity.xlsx``) into a folder of CSV data +called ``simplicity/``. 
Note that this data will be the exact same as the original CSV data folder (``data/``):: - $ rm simplicity.lp simplicity.sol simplicity.txt results/* + $ otoole convert excel csv simplicity.xlsx simplicity config.yaml -Data Conversion with Excel --------------------------- +Process Solutions from Different Solvers +---------------------------------------- Objective ~~~~~~~~~ -Use an excel worksheet to build and solve an OSeMOSYS model with CBC. +Process solutions from GLPK_, CBC_, Gurobi_, and CPLEX_. This example assumes +you have an existing GNU MathProg datafile called ``simplicity.txt`` (from the +previous example). -1. Create the Excel Workbook -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use the example CSV data to create an Excel Workbook using ``otoole convert``:: - - $ otoole convert csv excel data simplicity.xlsx config.yaml +1. Process a solution from GLPK +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use GLPK_ to build the model, save the problem as ``simplicity.glp``, solve the model, and +save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV results called ``results-glpk/``. +When processing solutions from GLPK, the model file (``*.glp``) must also be passed:: -Excel workbooks are an easy way for humans to interface with OSeMOSYS data! + $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wglp simplicity.glp --write simplicity.sol -2. Create the MathProg datafile -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Next, we want to convert the excel workbook (``simplicity.xlsx``) into -an OSeMOSYS datafile (``simplicity.txt``):: + $ otoole results glpk csv simplicity.sol results-glpk datafile simplicity.txt config.yaml --glpk_model simplicity.glp - $ otoole convert excel datafile simplicity.xlsx simplicity.txt config.yaml +.. NOTE:: + By default, MathProg OSeMOSYS models will write out folder of CSV results to a ``results/`` + directory if solving via GLPK. However, using ``otoole`` allows the user to programmatically access results + and control read/write locations -3. Build the Model -~~~~~~~~~~~~~~~~~~ -Use GLPK_ to build the model and save it as ``simplicity.lp``:: +2. Process a solution from CBC +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use GLPK_ to build the model and save the problem as ``simplicity.lp``. Use CBC_ to solve the model and +save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV results called ``results/`` from the solution file:: $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wlp simplicity.lp --check -4. Solve the Model -~~~~~~~~~~~~~~~~~~ -Use CBC_ to solve the model and save the solution file as ``simplicity.sol``:: - $ cbc simplicity.lp solve -solu simplicity.sol -5. Generate the selected results -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use ``otoole``'s ``result`` package to generate the result CSVs:: - - $ otoole results cbc csv simplicity.sol results datafile simplicity.txt config.yaml - -Data Processing with GLPK -------------------------- + $ otoole results cbc csv simplicity.sol results csv data config.yaml -Objective -~~~~~~~~~ +3. Process a solution from Gurobi +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use GLPK_ to build the model and save the problem as ``simplicity.lp``. Use Gurobi_ to solve the model and +save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV results called ``results/`` from the solution file:: -Build and solve a model using GLPK and otoole + $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wlp simplicity.lp --check -1. 
Build the solve the model using GLPK -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use GLPK_ to build the model, save the problem as ``simplicity.glp``, solve the model, -and save the solution as ``simplicity.sol```:: + $ gurobi_cl ResultFile=simplicity.sol simplicity.lp - $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wglp simplicity.glp --write simplicity.sol + $ otoole results gurobi csv simplicity.sol results csv data config.yaml -2. Use otoole to process the solution in CSVs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use ``otoole``'s ``results`` command to transform the soltuion file into a folder of CSVs -under the directory ``results-glpk``. When processing solutions from GLPK, the model file (``*.glp``) -must also be passed:: +4. Process a solution from CPLEX +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use GLPK_ to build the model and save the problem as ``simplicity.lp``. Use CPLEX_ to solve the model and +save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV results called ``results/`` from the solution file:: - $ otoole results glpk csv simplicity.sol results-glpk datafile simplicity.txt config.yaml --glpk_model simplicity.glp + $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wlp simplicity.lp --check -.. NOTE:: - By default, MathProg OSeMOSYS models will write out folder of CSV results to a ``results/`` - directory if solving via GLPK. However, for programatically accessing results, using ``otoole`` - to control the read/write location, and for supporting future implementations of OSeMOSYS, - using ``otoole`` can be benifical. + $ cplex -c "read simplicity.lp" "optimize" "write simplicity.sol" + $ otoole results cplex csv simplicity.sol results csv data config.yaml Model Visualization ------------------- @@ -238,6 +222,8 @@ displayed .. image:: _static/simplicity_res.png +.. _template-setup: + Template Setup -------------- @@ -284,13 +270,12 @@ horizon. For example, if the model horizon is from 2020 to 2050, the .. NOTE:: While this step in not technically required, by filling out the years in - CSV format, ``otoole`` will pivot all the Excel sheets on the years - during the conversion process. This will save significant formatting time! + CSV format ``otoole`` will pivot all the Excel sheets on these years. + This will save significant formatting time! 4. Convert the CSV Template Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To convert the template CSV data into Excel formatted data, run the following -``convert`` command:: +Convert the template CSV data into Excel formatted data:: $ otoole convert csv excel template_data template.xlsx template_config.yaml @@ -500,3 +485,4 @@ will also flag it as an isolated fuel. This means the fuel is unconnected from t .. _CBC: https://github.com/coin-or/Cbc .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer .. _Anaconda: https://www.anaconda.com/ +.. _Gurobi: https://www.gurobi.com/ From f6d9c35f393ad4521a63f48ed8b42781cfc13e39 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 22 Aug 2023 14:48:29 -0700 Subject: [PATCH 050/103] minor doc updates --- docs/functionality.rst | 23 ++++++++++++----------- src/otoole/convert.py | 2 +- src/otoole/results/results.py | 8 +++----- 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/docs/functionality.rst b/docs/functionality.rst index abb61bb6..b923f1b4 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -89,25 +89,26 @@ so as to speed up the model matrix generation and solution times. 
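Alongside the command line interface, the same functionality is exposed through the public Python API in ``otoole.convert`` (see the ``read_results`` and ``convert_results`` functions in the ``src/otoole/convert.py`` changes above). A minimal sketch, assuming a ``config.yaml`` file, a folder of CSV input data in ``data`` and a CBC solution file ``simplicity.sol`` as in the Simplicity examples::

    from otoole import convert_results, read_results

    # read a CBC solution into a dict of result DataFrames, using the
    # CSV input data to compute the full set of results
    results, default_values = read_results(
        "config.yaml", "cbc", "simplicity.sol", "csv", "data"
    )

    # or post-process the solution directly into a folder of result CSVs
    convert_results(
        "config.yaml", "cbc", "csv", "simplicity.sol", "results", "csv", "data"
    )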
``otoole results`` ~~~~~~~~~~~~~~~~~~ -The ``results`` command creates a folder of CSV result files from a CBC_, CLP_, +The ``results`` command creates a folder of CSV result files from a GLPK_, CBC_, CLP_, Gurobi_ or CPLEX_ solution file together with the input data:: $ otoole results --help - usage: otoole results [-h] [--write_defaults] + usage: otoole results [-h] [--glpk_model GLPK_MODEL] [--write_defaults] {cbc,cplex,gurobi} {csv} from_path to_path {csv,datafile,excel} input_path config positional arguments: - {cbc,cplex,glpk,gurobi} Result data format to convert from - {csv} Result data format to convert to - from_path Path to file or folder to convert from - to_path Path to file or folder to convert to - {csv,datafile,excel} Input data format - input_path Path to input_data - config Path to config YAML file + {cbc,cplex,glpk,gurobi} Result data format to convert from + {csv} Result data format to convert to + from_path Path to file or folder to convert from + to_path Path to file or folder to convert to + {csv,datafile,excel} Input data format + input_path Path to input_data + config Path to config YAML file optional arguments: - -h, --help show this help message and exit - --write_defaults Writes default values + -h, --help show this help message and exit + --glpk_model GLPK_MODEL GLPK model file required for processing GLPK results + --write_defaults Writes default values .. versionadded:: v1.0.0 The ``config`` positional argument is now required diff --git a/src/otoole/convert.py b/src/otoole/convert.py index 9a315886..3cecd343 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -31,7 +31,7 @@ def read_results( input_path: str, glpk_model: str = None, ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float]]: - """Read OSeMOSYS results from CBC, GLPK or Gurobi results files + """Read OSeMOSYS results from CBC, GLPK, Gurobi, or CPLEX results files Arguments --------- diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index 63604154..dfb7c426 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -6,8 +6,6 @@ import pandas as pd from otoole.input import ReadStrategy - -# from otoole.preprocess.longify_data import check_datatypes from otoole.results.result_package import ResultsPackage LOGGER = logging.getLogger(__name__) @@ -22,7 +20,7 @@ def read( Arguments --------- filepath : str, TextIO - A path name or file buffer pointing to the CBC solution file + A path name or file buffer pointing to the solution file input_data : dict, default=None dict of dataframes @@ -89,13 +87,13 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: def _convert_wide_to_long(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]: """Convert from wide to long format - Converts a pandas DataFrame containing all CBC results to reformatted + Converts a pandas DataFrame containing all wide format results to reformatted dictionary of pandas DataFrames in long format ready to write out Arguments --------- data : pandas.DataFrame - CBC results stored in a dataframe + results stored in a dataframe Example ------- From 65f5dd861b37a33271a4047c9bcf612fd3ec8958 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 29 Aug 2023 05:41:36 -0700 Subject: [PATCH 051/103] changelog update --- CHANGELOG.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 970c3ea5..a782b270 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,16 @@ Changelog ========= +Version 1.1.0 +============= +- 
Public Python API added to call otoole directly in Python files +- ReadCplex directly reads in CPLEX solution files. Drops the need to transform and sort solution files +- ReadGlpk class added to process GLPK solution files +- Update to Pydantic v2.0 +- ReadResultsCbc renamed to ReadWideResults +- Model validation instructions updated in documentation +- The ``--input_datafile`` argument is deprecated, and the user now must supply the input data to process results + Version 1.0.4 ============= - Fixed issue with pydantic v2.0.0 From c2592bcc8195c8bec608c156dba20a38f718afd9 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 29 Aug 2023 05:43:03 -0700 Subject: [PATCH 052/103] update changelog --- CHANGELOG.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a782b270..c74d3730 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,10 +6,10 @@ Version 1.1.0 ============= - Public Python API added to call otoole directly in Python files - ReadCplex directly reads in CPLEX solution files. Drops the need to transform and sort solution files -- ReadGlpk class added to process GLPK solution files +- ReadGlpk class added to process GLPK solution files - Update to Pydantic v2.0 - ReadResultsCbc renamed to ReadWideResults -- Model validation instructions updated in documentation +- Model validation instructions updated in documentation - The ``--input_datafile`` argument is deprecated, and the user now must supply the input data to process results Version 1.0.4 From 386a114935cf041811cf945c035e2e1c7124cd1b Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 29 Aug 2023 06:03:36 -0700 Subject: [PATCH 053/103] lock pandas <2.1 --- docs/requirements.txt | 2 +- setup.cfg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 5e98bc20..6b659fb8 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,7 +4,7 @@ flatten_dict Jinja2<3.1 networkx openpyxl -pandas>=1.1 +pandas>=1.1,<2.1 pydantic>=2 pydot pyyaml diff --git a/setup.cfg b/setup.cfg index 1de641c8..7fbcbb43 100644 --- a/setup.cfg +++ b/setup.cfg @@ -47,7 +47,7 @@ install_requires = xlrd pyyaml pydot - pandas>=1.1 + pandas>=1.1,<2.1 Amply>=0.1.6 networkx flatten_dict From 3692050c9336f036b9b4f40f3c71f6b7431296b3 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 29 Aug 2023 06:18:58 -0700 Subject: [PATCH 054/103] update changelog --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c74d3730..54b51389 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Version 1.1.0 - ReadResultsCbc renamed to ReadWideResults - Model validation instructions updated in documentation - The ``--input_datafile`` argument is deprecated, and the user now must supply the input data to process results +- Locks pandas to <2.1 Version 1.0.4 ============= From 0f8108c2e8164eb47da019fc7b31c59700de0542 Mon Sep 17 00:00:00 2001 From: Trevor Barnes <67297083+trevorb1@users.noreply.github.com> Date: Thu, 31 Aug 2023 15:33:29 -0700 Subject: [PATCH 055/103] Update convert.rst --- docs/convert.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/convert.rst b/docs/convert.rst index 3c9093e7..2333d976 100644 --- a/docs/convert.rst +++ b/docs/convert.rst @@ -27,7 +27,7 @@ The ``convert_results`` function creates a folder of CSV result files from a CBC Gurobi_ or CPLEX_ solution file:: >>> from otoole import convert_results ->>> convert_results('my_model.yaml', 'cbc', 
'csv', 'my_model.sol', 'my_model_csvs', input_datafile='my_model.dat') +>>> convert_results('my_model.yaml', 'cbc', 'csv', 'my_model.sol', 'my_model_csvs', 'datafile', 'my_model.dat') See :func:`otoole.convert.convert_results` for more details From d62b8f3252d793e81389690d43653c097d97161a Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 12 Sep 2023 03:56:32 -0700 Subject: [PATCH 056/103] removed cplex sorting warning --- docs/functionality.rst | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/functionality.rst b/docs/functionality.rst index b923f1b4..503fc899 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -124,12 +124,6 @@ Gurobi_ or CPLEX_ solution file together with the input data:: The ``--input_datapackage`` and ``--input_datafile`` flags have been replaced by new positional arguments ``input_data_format`` and ``input_path`` -.. WARNING:: - If using CPLEX_, you will need to transform and sort the solution file before - processing it with ``otoole``. Instructions on how to run the transformation - script are on the `OSeMOSYS Repository`_. After transformation, sort the file - with the command ``sort > ``. - Setup ----- The ``setup`` module in ``otoole`` allows you to generate template files to From 633880542fa86f3d2956df0f5328c9cd166461cd Mon Sep 17 00:00:00 2001 From: Will Usher Date: Fri, 22 Sep 2023 09:04:40 +0100 Subject: [PATCH 057/103] Added joss badge to readme --- README.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.rst b/README.rst index 43177917..bf6c6b35 100644 --- a/README.rst +++ b/README.rst @@ -12,6 +12,10 @@ otoole: OSeMOSYS tools for energy work .. image:: https://img.shields.io/badge/code%20style-black-000000.svg :target: https://github.com/psf/black +.. image:: https://joss.theoj.org/papers/e93a191ae795b171beff782a68fdc467/status.svg + :target: https://joss.theoj.org/papers/e93a191ae795b171beff782a68fdc467 + :alt: JOSS status + A Python toolkit to support use of OSeMOSYS Description From 022de26fad76957a4699f57c8a3c11531a7c452b Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 20 Oct 2023 20:28:19 -0700 Subject: [PATCH 058/103] change int to int64 --- src/otoole/input.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/otoole/input.py b/src/otoole/input.py index 0c97c586..fc33b402 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -116,7 +116,6 @@ def convert(self, input_filepath: str, output_filepath: str, **kwargs: Dict): class Strategy(ABC): """ - Arguments --------- user_config : dict, default=None @@ -139,9 +138,9 @@ def _add_dtypes(self, config: Dict): dtypes = {} for column in details["indices"] + ["VALUE"]: if column == "VALUE": - dtypes["VALUE"] = details["dtype"] + dtypes["VALUE"] = details["dtype"] if details["dtype"] != "int" else "int64" else: - dtypes[column] = config[column]["dtype"] + dtypes[column] = config[column]["dtype"] if config[column]["dtype"] != "int" else "int64" details["index_dtypes"] = dtypes return config @@ -482,6 +481,7 @@ def _check_index_dtypes( logger.debug(df.head()) # Drop empty rows try: + dtype = config["index_dtypes"] if config["index_dtypes"] != "int" else "int64" df = ( df.dropna(axis=0, how="all") .reset_index() @@ -492,7 +492,7 @@ def _check_index_dtypes( df = df.dropna(axis=0, how="all").reset_index() for index, dtype in config["index_dtypes"].items(): if dtype == "int": - df[index] = df[index].astype(float).astype(int) + df[index] = df[index].astype(float).astype("int64") else: df[index] = df[index].astype(dtype) df = 
df.set_index(config["indices"]) From d9a78eedf7c685c5677217f723c7754836aac52b Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 20 Oct 2023 21:13:16 -0700 Subject: [PATCH 059/103] update param and set int dtypes to int64 --- src/otoole/input.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/otoole/input.py b/src/otoole/input.py index fc33b402..2ed49398 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -142,6 +142,8 @@ def _add_dtypes(self, config: Dict): else: dtypes[column] = config[column]["dtype"] if config[column]["dtype"] != "int" else "int64" details["index_dtypes"] = dtypes + elif details["type"] == "set": + details["dtype"] = details["dtype"] if details["dtype"] != "int" else "int64" return config @property @@ -481,7 +483,6 @@ def _check_index_dtypes( logger.debug(df.head()) # Drop empty rows try: - dtype = config["index_dtypes"] if config["index_dtypes"] != "int" else "int64" df = ( df.dropna(axis=0, how="all") .reset_index() From 019a096c70e18813f50ed78dadb474c6b33be738 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 20 Oct 2023 21:25:35 -0700 Subject: [PATCH 060/103] fix int64 check --- src/otoole/input.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/otoole/input.py b/src/otoole/input.py index 2ed49398..3a9f03d3 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -492,7 +492,7 @@ def _check_index_dtypes( except ValueError: # ValueError: invalid literal for int() with base 10: df = df.dropna(axis=0, how="all").reset_index() for index, dtype in config["index_dtypes"].items(): - if dtype == "int": + if dtype == "int64": df[index] = df[index].astype(float).astype("int64") else: df[index] = df[index].astype(dtype) From 28907705c46d7c9a364b4d4c90037ef995577571 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 20 Oct 2023 21:39:46 -0700 Subject: [PATCH 061/103] linting fix --- src/otoole/input.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/otoole/input.py b/src/otoole/input.py index 3a9f03d3..b3c979c7 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -138,12 +138,20 @@ def _add_dtypes(self, config: Dict): dtypes = {} for column in details["indices"] + ["VALUE"]: if column == "VALUE": - dtypes["VALUE"] = details["dtype"] if details["dtype"] != "int" else "int64" + dtypes["VALUE"] = ( + details["dtype"] if details["dtype"] != "int" else "int64" + ) else: - dtypes[column] = config[column]["dtype"] if config[column]["dtype"] != "int" else "int64" + dtypes[column] = ( + config[column]["dtype"] + if config[column]["dtype"] != "int" + else "int64" + ) details["index_dtypes"] = dtypes elif details["type"] == "set": - details["dtype"] = details["dtype"] if details["dtype"] != "int" else "int64" + details["dtype"] = ( + details["dtype"] if details["dtype"] != "int" else "int64" + ) return config @property From 9b21c53090af72ce0d1c8b3841504bc3712b2ad1 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Fri, 20 Oct 2023 21:48:12 -0700 Subject: [PATCH 062/103] update int to int64 --- tests/test_read_strategies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index a7a20d10..7cd610c1 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -700,7 +700,7 @@ def test_index_dtypes_available(self, user_config): assert actual == { "REGION": "str", "FUEL": "str", - "YEAR": "int", + "YEAR": "int64", "VALUE": "float", } @@ -834,7 +834,7 @@ def test_read_config(self, 
user_config): "FUEL": "str", "REGION": "str", "VALUE": "float", - "YEAR": "int", + "YEAR": "int64", }, } assert actual["AccumulatedAnnualDemand"] == expected From c21fe2ae394b709be0c9b119ba519df8b016b51f Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Sat, 21 Oct 2023 12:39:47 -0700 Subject: [PATCH 063/103] fix int to int64 in results --- src/otoole/results/result_package.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/otoole/results/result_package.py b/src/otoole/results/result_package.py index 991b3fe2..255f6847 100644 --- a/src/otoole/results/result_package.py +++ b/src/otoole/results/result_package.py @@ -858,7 +858,7 @@ def discount_factor( if regions and years: discount_rate["YEAR"] = [years] discount_factor = discount_rate.explode("YEAR").reset_index(level="REGION") - discount_factor["YEAR"] = discount_factor["YEAR"].astype(int) + discount_factor["YEAR"] = discount_factor["YEAR"].astype("int64") discount_factor["NUM"] = discount_factor["YEAR"] - discount_factor["YEAR"].min() discount_factor["RATE"] = discount_factor["VALUE"] + 1 discount_factor["VALUE"] = ( From 2f3df1815a42e6973eb06f5a482355a45e69f13e Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Sat, 21 Oct 2023 17:06:33 -0700 Subject: [PATCH 064/103] specify lineterminator in writing datafiles --- src/otoole/write_strategies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/otoole/write_strategies.py b/src/otoole/write_strategies.py index 1d10ef5d..b1d32a9f 100644 --- a/src/otoole/write_strategies.py +++ b/src/otoole/write_strategies.py @@ -156,7 +156,7 @@ def _write_parameter( df = self._form_parameter(df, default) handle.write("param default {} : {} :=\n".format(default, parameter_name)) df.to_csv( - path_or_buf=handle, sep=" ", header=False, index=True, float_format="%g" + path_or_buf=handle, sep=" ", header=False, index=True, float_format="%g", lineterminator="\n" ) handle.write(";\n") @@ -171,7 +171,7 @@ def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO): """ handle.write("set {} :=\n".format(set_name)) df.to_csv( - path_or_buf=handle, sep=" ", header=False, index=False, float_format="%g" + path_or_buf=handle, sep=" ", header=False, index=False, float_format="%g", lineterminator="\n" ) handle.write(";\n") From d06a051e553f4ee016947fd13979ae2de5f03a6c Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Sat, 21 Oct 2023 17:09:22 -0700 Subject: [PATCH 065/103] fix linting --- src/otoole/write_strategies.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/otoole/write_strategies.py b/src/otoole/write_strategies.py index b1d32a9f..d4472f8b 100644 --- a/src/otoole/write_strategies.py +++ b/src/otoole/write_strategies.py @@ -156,7 +156,12 @@ def _write_parameter( df = self._form_parameter(df, default) handle.write("param default {} : {} :=\n".format(default, parameter_name)) df.to_csv( - path_or_buf=handle, sep=" ", header=False, index=True, float_format="%g", lineterminator="\n" + path_or_buf=handle, + sep=" ", + header=False, + index=True, + float_format="%g", + lineterminator="\n", ) handle.write(";\n") @@ -171,7 +176,12 @@ def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO): """ handle.write("set {} :=\n".format(set_name)) df.to_csv( - path_or_buf=handle, sep=" ", header=False, index=False, float_format="%g", lineterminator="\n" + path_or_buf=handle, + sep=" ", + header=False, + index=False, + float_format="%g", + lineterminator="\n", ) handle.write(";\n") From 6066f34570144885aa470d398509ef227a864223 Mon Sep 17 00:00:00 
2001 From: trevorb1 Date: Sat, 21 Oct 2023 23:32:23 -0700 Subject: [PATCH 066/103] fix int to int64 issue in read_strategies --- src/otoole/results/results.py | 4 ++-- tests/test_read_strategies.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index dfb7c426..ae45d737 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -347,7 +347,7 @@ def read_model(self, file_path: Union[str, TextIO]) -> pd.DataFrame: df["INDEX"] = df["INDEX"].map(lambda x: x.split("]")[0]) df = ( df[["ID", "NUM", "NAME", "INDEX"]] - .astype({"ID": str, "NUM": int, "NAME": str, "INDEX": str}) + .astype({"ID": str, "NUM": "int64", "NAME": str, "INDEX": str}) .reset_index(drop=True) ) @@ -425,7 +425,7 @@ def read_solution( data = ( data[["ID", "NUM", "STATUS", "PRIM", "DUAL"]] .astype( - {"ID": str, "NUM": int, "STATUS": str, "PRIM": float, "DUAL": float} + {"ID": str, "NUM": "int64", "STATUS": str, "PRIM": float, "DUAL": float} ) .reset_index(drop=True) ) diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index 7cd610c1..574fcee8 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -108,7 +108,7 @@ def test_solution_to_dataframe(self, user_config): ], columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], ) - .astype({"REGION": str, "TECHNOLOGY": str, "YEAR": int, "VALUE": float}) + .astype({"REGION": str, "TECHNOLOGY": str, "YEAR": "int64", "VALUE": float}) .set_index(["REGION", "TECHNOLOGY", "YEAR"]) ) @@ -135,8 +135,8 @@ def test_solution_to_dataframe(self, user_config): "REGION": str, "TIMESLICE": str, "TECHNOLOGY": str, - "MODE_OF_OPERATION": int, - "YEAR": int, + "MODE_OF_OPERATION": "int64", + "YEAR": "int64", "VALUE": float, } ) @@ -202,7 +202,7 @@ def test_solution_to_dataframe(self, user_config): ], columns=["REGION", "YEAR", "VALUE"], ) - .astype({"YEAR": int, "VALUE": float}) + .astype({"YEAR": "int64", "VALUE": float}) .set_index(["REGION", "YEAR"]) ) @@ -225,7 +225,7 @@ def test_solution_to_dataframe(self, user_config): "VALUE", ], ) - .astype({"YEAR": int, "VALUE": float, "MODE_OF_OPERATION": int}) + .astype({"YEAR": "int64", "VALUE": float, "MODE_OF_OPERATION": "int64"}) .set_index( ["REGION", "TIMESLICE", "TECHNOLOGY", "MODE_OF_OPERATION", "YEAR"] ) @@ -623,7 +623,7 @@ def test_read_model(self, user_config): ["j", 1028, "RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014"], ], columns=["ID", "NUM", "NAME", "INDEX"], - ) + ).astype({"ID": str, "NUM": "int64", "NAME": str, "INDEX": str}) pd.testing.assert_frame_equal(actual, expected) @@ -726,7 +726,7 @@ def test_remove_empty_lines(self, user_config): ], columns=["REGION", "FUEL", "YEAR", "VALUE"], ) - .astype({"REGION": str, "FUEL": str, "YEAR": int, "VALUE": float}) + .astype({"REGION": str, "FUEL": str, "YEAR": "int64", "VALUE": float}) .set_index(["REGION", "FUEL", "YEAR"]) } @@ -757,7 +757,7 @@ def test_change_types(self, user_config): ], columns=["REGION", "FUEL", "YEAR", "VALUE"], ) - .astype({"REGION": str, "FUEL": str, "YEAR": int, "VALUE": float}) + .astype({"REGION": str, "FUEL": str, "YEAR": "int64", "VALUE": float}) .set_index(["REGION", "FUEL", "YEAR"]) } From ad87fd291e7c91630f5b5b1d690d444af8d28ebc Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Sun, 22 Oct 2023 17:09:19 -0700 Subject: [PATCH 067/103] fix NamedTemporaryFile tests for Windows --- tests/test_cli.py | 170 +++++++++++++++++++++------------ tests/test_convert.py | 60 +++++++----- tests/test_utils.py | 23 
+++-- tests/test_write_strategies.py | 20 ++-- 4 files changed, 171 insertions(+), 102 deletions(-) diff --git a/tests/test_cli.py b/tests/test_cli.py index 17a8b40d..3e109c03 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -41,15 +41,20 @@ def test_version(self): result = run(["otoole", "--version"], capture_output=True) assert result.stdout.strip().decode() == str(__version__) + def test_help(self): + commands = ["otoole", "-v", "convert", "--help"] + expected = "usage: otoole convert [-h]" + actual = run(commands, capture_output=True) + assert expected in str(actual.stdout) + assert actual.returncode == 0, print(actual.stdout) + temp = mkdtemp() - temp_excel = NamedTemporaryFile(suffix=".xlsx") - temp_datafile = NamedTemporaryFile(suffix=".dat") simplicity = os.path.join("tests", "fixtures", "simplicity.txt") config_path = os.path.join("tests", "fixtures", "config.yaml") test_data = [ - (["otoole", "-v", "convert", "--help"], "usage: otoole convert [-h]"), ( + "excel", [ "otoole", "-v", @@ -57,12 +62,13 @@ def test_version(self): "datafile", "excel", simplicity, - temp_excel.name, + "convert_to_file_path", # replaced with NamedTemporaryFile config_path, ], "", ), ( + "datafile", [ "otoole", "-v", @@ -70,19 +76,34 @@ def test_version(self): "datafile", "datafile", simplicity, - temp_datafile.name, + "convert_to_file_path", # replaced with NamedTemporaryFile config_path, ], "", ), ] - @mark.parametrize("commands,expected", test_data, ids=["help", "excel", "datafile"]) - def test_convert_commands(self, commands, expected): - actual = run(commands, capture_output=True) - assert expected in str(actual.stdout) - print(" ".join(commands)) - assert actual.returncode == 0, print(actual.stdout) + @mark.parametrize( + "convert_to,commands,expected", test_data, ids=["excel", "datafile"] + ) + def test_convert_commands(self, convert_to, commands, expected): + if convert_to == "datafile": + temp = NamedTemporaryFile(suffix=".dat", delete=False, mode="w") + elif convert_to == "excel": + temp = NamedTemporaryFile(suffix=".xlsx", delete=False, mode="w") + else: + raise NotImplementedError + try: + commands_adjusted = [ + x if x != "convert_to_file_path" else temp.name for x in commands + ] + actual = run(commands_adjusted, capture_output=True) + assert expected in str(actual.stdout) + print(" ".join(commands_adjusted)) + assert actual.returncode == 0, print(actual.stdout) + finally: + temp.close() + os.unlink(temp.name) test_errors = [ ( @@ -98,59 +119,68 @@ def test_convert_error(self, commands, expected): def test_convert_datafile_datafile_no_user_config(self): simplicity = os.path.join("tests", "fixtures", "simplicity.txt") - temp_datafile = NamedTemporaryFile(suffix=".dat") - commands = [ - "otoole", - "convert", - "datafile", - "datafile", - simplicity, - temp_datafile.name, - ] - actual = run(commands, capture_output=True) - assert actual.returncode == 2 + temp_datafile = NamedTemporaryFile(suffix=".dat", delete=False, mode="w") + try: + commands = [ + "otoole", + "convert", + "datafile", + "datafile", + simplicity, + temp_datafile.name, + ] + actual = run(commands, capture_output=True) + assert actual.returncode == 2 + finally: + temp_datafile.close() + os.unlink(temp_datafile.name) def test_convert_datafile_datafile_with_user_config(self): simplicity = os.path.join("tests", "fixtures", "simplicity.txt") user_config = os.path.join("tests", "fixtures", "config.yaml") - temp_datafile = NamedTemporaryFile(suffix=".dat") - commands = [ - "otoole", - "-vvv", - "convert", - "datafile", - 
"datafile", - simplicity, - temp_datafile.name, - user_config, - ] - actual = run(commands, capture_output=True) - assert actual.returncode == 0 + temp_datafile = NamedTemporaryFile(suffix=".dat", delete=False, mode="w") + try: + commands = [ + "otoole", + "-vvv", + "convert", + "datafile", + "datafile", + simplicity, + temp_datafile.name, + user_config, + ] + actual = run(commands, capture_output=True) + assert actual.returncode == 0 + finally: + temp_datafile.close() + os.unlink(temp_datafile.name) def test_convert_datafile_datafile_with_default_flag(self): simplicity = os.path.join("tests", "fixtures", "simplicity.txt") user_config = os.path.join("tests", "fixtures", "config.yaml") - temp_datafile = NamedTemporaryFile(suffix=".dat") - commands = [ - "otoole", - "-vvv", - "convert", - "datafile", - "datafile", - simplicity, - temp_datafile.name, - user_config, - "--write_defaults", - ] - actual = run(commands, capture_output=True) - assert actual.returncode == 0 + temp_datafile = NamedTemporaryFile(suffix=".dat", delete=False, mode="w") + try: + commands = [ + "otoole", + "-vvv", + "convert", + "datafile", + "datafile", + simplicity, + temp_datafile.name, + user_config, + "--write_defaults", + ] + actual = run(commands, capture_output=True) + assert actual.returncode == 0 + finally: + temp_datafile.close() + os.unlink(temp_datafile.name) class TestSetup: - temp = mkdtemp() - temp_config = NamedTemporaryFile(suffix=".yaml") - test_data = [ ( [ @@ -158,27 +188,45 @@ class TestSetup: "-v", "setup", "config", - NamedTemporaryFile(suffix=".yaml").name, + NamedTemporaryFile( + suffix=".yaml" + ).name, # representes a new config file ], "", ), - (["otoole", "-v", "setup", "config", temp_config.name, "--overwrite"], ""), + (["otoole", "-v", "setup", "config", "temp_file", "--overwrite"], ""), ] @mark.parametrize( "commands,expected", test_data, ids=["setup", "setup_with_overwrite"] ) def test_setup_commands(self, commands, expected): - actual = run(commands, capture_output=True) - assert expected in str(actual.stdout) - print(" ".join(commands)) - assert actual.returncode == 0, print(actual.stdout) + temp_yaml = NamedTemporaryFile(suffix=".yaml", delete=False, mode="w+b") + try: + commands_adjusted = [ + x if x != "temp_file" else temp_yaml.name for x in commands + ] + actual = run(commands_adjusted, capture_output=True) + assert expected in str(actual.stdout) + print(" ".join(commands_adjusted)) + assert actual.returncode == 0, print(actual.stdout) + finally: + temp_yaml.close() + os.unlink(temp_yaml.name) test_errors = [ - (["otoole", "-v", "setup", "config", temp_config.name], "OtooleSetupError"), + (["otoole", "-v", "setup", "config", "temp_file"], "OtooleSetupError"), ] @mark.parametrize("commands,expected", test_errors, ids=["setup_fails"]) def test_setup_error(self, commands, expected): - actual = run(commands, capture_output=True) - assert expected in str(actual.stderr) + temp_yaml = NamedTemporaryFile(suffix=".yaml", delete=False, mode="w") + try: + commands_adjusted = [ + x if x != "temp_file" else temp_yaml.name for x in commands + ] + actual = run(commands_adjusted, capture_output=True) + assert expected in str(actual.stderr) + finally: + temp_yaml.close() + os.unlink(temp_yaml.name) diff --git a/tests/test_convert.py b/tests/test_convert.py index 8e94bccd..e4c99046 100644 --- a/tests/test_convert.py +++ b/tests/test_convert.py @@ -59,21 +59,32 @@ class TestWrite: def test_write_datafile(self): """Test writing data to a file""" data = {"REGION": pd.DataFrame({"VALUE": ["BB"]})} - 
temp = NamedTemporaryFile() - assert write( - os.path.join("tests", "fixtures", "config.yaml"), - "datafile", - temp.name, - data, - ) + temp = NamedTemporaryFile(delete=False, mode="w") + try: + assert write( + os.path.join("tests", "fixtures", "config.yaml"), + "datafile", + temp.name, + data, + ) + finally: + temp.close() + os.unlink(temp.name) def test_write_excel(self): """Test writing data to an Excel file""" data = {"REGION": pd.DataFrame({"VALUE": ["BB"]})} - temp = NamedTemporaryFile(suffix=".xlsx") - assert write( - os.path.join("tests", "fixtures", "config.yaml"), "excel", temp.name, data - ) + temp = NamedTemporaryFile(suffix=".xlsx", delete=False, mode="w") + try: + assert write( + os.path.join("tests", "fixtures", "config.yaml"), + "excel", + temp.name, + data, + ) + finally: + temp.close() + os.unlink(temp.name) def test_write_csv(self): """Test writing data to a CSV file""" @@ -92,21 +103,22 @@ class TestConvert: def test_convert_excel_to_datafile(self): """Test converting from Excel to datafile""" - user_config = os.path.join("tests", "fixtures", "config.yaml") - tmpfile = NamedTemporaryFile() from_path = os.path.join("tests", "fixtures", "combined_inputs.xlsx") - - convert(user_config, "excel", "datafile", from_path, tmpfile.name) - - tmpfile.seek(0) - actual = tmpfile.readlines() - tmpfile.close() - - assert actual[-1] == b"end;\n" - assert actual[0] == b"# Model file written by *otoole*\n" - assert actual[2] == b"09_ROK d_bld_2_coal_products 2017 20.8921\n" - assert actual[8996] == b"param default 1 : DepreciationMethod :=\n" + tmpfile = NamedTemporaryFile(delete=False, mode="w+b") + + try: + convert(user_config, "excel", "datafile", from_path, tmpfile.name) + tmpfile.seek(0) + actual = tmpfile.readlines() + + assert actual[-1] == b"end;\n" + assert actual[0] == b"# Model file written by *otoole*\n" + assert actual[2] == b"09_ROK d_bld_2_coal_products 2017 20.8921\n" + assert actual[8996] == b"param default 1 : DepreciationMethod :=\n" + finally: + tmpfile.close() + os.unlink(tmpfile.name) def test_convert_excel_to_csv(self): """Test converting from Excel to CSV""" diff --git a/tests/test_utils.py b/tests/test_utils.py index 8fac9aa3..1a50ccf8 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,3 +1,4 @@ +import os from tempfile import NamedTemporaryFile import pandas as pd @@ -77,16 +78,18 @@ def test_create_name_mappings_reversed(self, user_config): def test_excel_name_length_error(user_config_simple, request): user_config = request.getfixturevalue(user_config_simple) write_excel = WriteExcel(user_config=user_config) - temp_excel = NamedTemporaryFile(suffix=".xlsx") - handle = pd.ExcelWriter(temp_excel.name) - - with pytest.raises(OtooleExcelNameLengthError): - write_excel._write_parameter( - df=pd.DataFrame(), - parameter_name="ParameterNameLongerThanThirtyOneChars", - handle=pd.ExcelWriter(handle), - default=0, - ) + temp_excel = NamedTemporaryFile(suffix=".xlsx", delete=False, mode="r") + try: + with pytest.raises(OtooleExcelNameLengthError): + write_excel._write_parameter( + df=pd.DataFrame(), + parameter_name="ParameterNameLongerThanThirtyOneChars", + handle=pd.ExcelWriter(temp_excel.name), + default=0, + ) + finally: + temp_excel.close() + os.unlink(temp_excel.name) class TestYamlUniqueKeyReader: diff --git a/tests/test_write_strategies.py b/tests/test_write_strategies.py index af4d8b8e..18cf64ae 100644 --- a/tests/test_write_strategies.py +++ b/tests/test_write_strategies.py @@ -1,4 +1,5 @@ import io +import os from tempfile import 
NamedTemporaryFile import pandas as pd @@ -114,15 +115,20 @@ def test_form_no_pivot(self, user_config): def test_write_out_empty_dataframe(self, user_config): - temp_excel = NamedTemporaryFile(suffix=".xlsx") - handle = pd.ExcelWriter(temp_excel.name) - convert = WriteExcel(user_config) + temp_excel = NamedTemporaryFile(suffix=".xlsx", delete=False, mode="w") + try: + handle = pd.ExcelWriter(temp_excel.name) + convert = WriteExcel(user_config) - df = pd.DataFrame( - data=None, columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"] - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + df = pd.DataFrame( + data=None, columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"] + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - convert._write_parameter(df, "AvailabilityFactor", handle, default=0) + convert._write_parameter(df, "AvailabilityFactor", handle, default=0) + finally: + handle.close() + temp_excel.close() + os.unlink(temp_excel.name) class TestWriteDatafile: From f84d1f7694467b2e11ab5cd2b6887a14fe653121 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 23 Oct 2023 15:12:37 -0700 Subject: [PATCH 068/103] add Windows to CI --- .github/workflows/python.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 0bd4d0df..a2fd49a3 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -5,10 +5,11 @@ on: [push, pull_request] jobs: build: - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} strategy: matrix: python-version: ["3.9", "3.10", "3.11"] + os: [ubuntu-latest, windows-latest] steps: - uses: actions/checkout@v3 From 19c055a494b4c2369dada2f4f35aff831b7cd1b4 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 24 Oct 2023 11:25:24 -0700 Subject: [PATCH 069/103] update graphviz install instructions --- docs/examples.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/examples.rst b/docs/examples.rst index 281a9831..97c52554 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -215,6 +215,20 @@ Run the following command, where the RES will be saved as the file ``res.png``:: $ otoole viz res excel simplicity.xlsx res.png config.yaml +.. WARNING:: + If you encounter a ``graphviz`` dependency error, please install it in your + virtual environment through:: + + pip install graphviz # if using pip + conda install graphviz # if using conda + + Alternatively, you can install it on your system via the commands:: + + sudo apt install graphviz # if on Ubuntu + brew install graphviz # if on Mac + + Or from the graphviz_ website if on Windows. + 2. View the RES ~~~~~~~~~~~~~~~ Open the newly created file, ``res.png`` and the following image should be @@ -486,3 +500,4 @@ will also flag it as an isolated fuel. This means the fuel is unconnected from t .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer .. _Anaconda: https://www.anaconda.com/ .. _Gurobi: https://www.gurobi.com/ +.. 
_graphviz: https://www.graphviz.org/download/ From 6843b5e5fc4eabbd564d6c61f9c424088991298d Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 24 Oct 2023 11:44:22 -0700 Subject: [PATCH 070/103] added graphviz to requirements --- docs/requirements.txt | 1 + setup.cfg | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/requirements.txt b/docs/requirements.txt index 6b659fb8..0665d857 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,6 @@ amply>=0.1.4 docutils<0.18 +graphviz flatten_dict Jinja2<3.1 networkx diff --git a/setup.cfg b/setup.cfg index 7fbcbb43..3f94cc37 100644 --- a/setup.cfg +++ b/setup.cfg @@ -53,6 +53,7 @@ install_requires = flatten_dict openpyxl pydantic>=2 + graphviz [options.packages.find] where = src exclude = From 806a3f7498ebbc007fa86361b3ae5d319876027c Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 24 Oct 2023 11:59:47 -0700 Subject: [PATCH 071/103] add contributing guidelines link --- README.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index bf6c6b35..e482e173 100644 --- a/README.rst +++ b/README.rst @@ -53,5 +53,7 @@ Contributing New ideas and bugs `should be submitted `_ to the repository issue tracker. Please do contribute by discussing and developing these -ideas further. To contribute directly to the documentation of code development, please see -the contribution guidelines document. +ideas further. + +To contribute directly to the code and documentation development, please see +the `contribution guidelines `_. From f9eca92484a3bdb294100833996dc9eda0a76441 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 2 Nov 2023 18:01:41 -0700 Subject: [PATCH 072/103] update readme --- README.rst | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/README.rst b/README.rst index e482e173..5d0da2b4 100644 --- a/README.rst +++ b/README.rst @@ -2,28 +2,45 @@ otoole: OSeMOSYS tools for energy work ================================================== -.. image:: https://coveralls.io/repos/github/OSeMOSYS/otoole/badge.svg?branch=master&kill_cache=1 - :target: https://coveralls.io/github/OSeMOSYS/otoole?branch=master +.. image:: https://joss.theoj.org/papers/e93a191ae795b171beff782a68fdc467/status.svg + :target: https://joss.theoj.org/papers/e93a191ae795b171beff782a68fdc467 + :alt: JOSS status -.. image:: https://readthedocs.org/projects/otoole/badge/?version=latest - :target: https://otoole.readthedocs.io/en/latest/?badge=latest - :alt: Documentation Status +.. image:: https://img.shields.io/pypi/v/otoole.svg + :target: https://pypi.org/project/otoole/ + :alt: PyPI .. image:: https://img.shields.io/badge/code%20style-black-000000.svg :target: https://github.com/psf/black + :alt: Code Style -.. image:: https://joss.theoj.org/papers/e93a191ae795b171beff782a68fdc467/status.svg - :target: https://joss.theoj.org/papers/e93a191ae795b171beff782a68fdc467 - :alt: JOSS status +.. image:: https://img.shields.io/badge/python-3.9|3.10|3.11-blue.svg + :target: https://crate.io/packages/otoole/ + :alt: Python Version + +.. image:: https://img.shields.io/badge/License-MIT-yellow.svg + :target: https://opensource.org/licenses/MIT + :alt: License + + +.. image:: https://coveralls.io/repos/github/OSeMOSYS/otoole/badge.svg?branch=master&kill_cache=1 + :target: https://coveralls.io/github/OSeMOSYS/otoole?branch=master + :alt: Code Coverage + +.. 
image:: https://github.com/OSeMOSYS/otoole/actions/workflows/python.yaml/badge.svg?branch=master + :target: https://github.com/OSeMOSYS/otoole/actions/workflows/python.yaml + :alt: GitHub CI -A Python toolkit to support use of OSeMOSYS +.. image:: https://readthedocs.org/projects/otoole/badge/?version=latest + :target: https://otoole.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status Description =========== OSeMOSYS tools for energy work, or otoole, is a Python package -which provides a command-line interface for users of OSeMOSYS. The aim of the -package is to provide commonly used pre- and post-processing steps for OSeMOSYS. +to support the users of OSeMOSYS. The aim of the package is to provide commonly +used pre- and post-processing steps for OSeMOSYS. **otoole** aims to support different ways of storing input data and results, including csv files and Excel workbooks, as well as different implementations From f3eb56b61446affdee4e898a6487b26cf5d00cbc Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 2 Nov 2023 18:35:22 -0700 Subject: [PATCH 073/103] badge formatting --- README.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 5d0da2b4..df348434 100644 --- a/README.rst +++ b/README.rst @@ -14,14 +14,15 @@ otoole: OSeMOSYS tools for energy work :target: https://github.com/psf/black :alt: Code Style -.. image:: https://img.shields.io/badge/python-3.9|3.10|3.11-blue.svg +.. image:: https://img.shields.io/badge/python-3.9_|_3.10_|_3.11-blue.svg :target: https://crate.io/packages/otoole/ :alt: Python Version -.. image:: https://img.shields.io/badge/License-MIT-yellow.svg +.. image:: https://img.shields.io/badge/License-MIT-green.svg :target: https://opensource.org/licenses/MIT :alt: License +| .. image:: https://coveralls.io/repos/github/OSeMOSYS/otoole/badge.svg?branch=master&kill_cache=1 :target: https://coveralls.io/github/OSeMOSYS/otoole?branch=master From d7f668ed9411f1087b231e21c5947762b83b4479 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 2 Nov 2023 20:44:25 -0700 Subject: [PATCH 074/103] fix graphviz dependency --- docs/examples.rst | 17 ++++++++--------- docs/requirements.txt | 1 - setup.cfg | 1 - 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/docs/examples.rst b/docs/examples.rst index 97c52554..ba779362 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -216,20 +216,19 @@ Run the following command, where the RES will be saved as the file ``res.png``:: $ otoole viz res excel simplicity.xlsx res.png config.yaml .. WARNING:: - If you encounter a ``graphviz`` dependency error, please install it in your - virtual environment through:: - - pip install graphviz # if using pip - conda install graphviz # if using conda - - Alternatively, you can install it on your system via the commands:: + If you encounter a ``graphviz`` dependency error, install it on your system + from the graphviz_ website (if on Windows) or via the command:: sudo apt install graphviz # if on Ubuntu brew install graphviz # if on Mac - Or from the graphviz_ website if on Windows. + To check that ``graphviz`` installed correctly, run ``dot -V`` to check the + version:: + + ~$ dot -V + dot - graphviz version 2.43.0 (0) -2. View the RES +1. 
View the RES ~~~~~~~~~~~~~~~ Open the newly created file, ``res.png`` and the following image should be displayed diff --git a/docs/requirements.txt b/docs/requirements.txt index 0665d857..6b659fb8 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,6 +1,5 @@ amply>=0.1.4 docutils<0.18 -graphviz flatten_dict Jinja2<3.1 networkx diff --git a/setup.cfg b/setup.cfg index 3f94cc37..7fbcbb43 100644 --- a/setup.cfg +++ b/setup.cfg @@ -53,7 +53,6 @@ install_requires = flatten_dict openpyxl pydantic>=2 - graphviz [options.packages.find] where = src exclude = From 21afaef76988ad331083f1926196854f7dcd71c5 Mon Sep 17 00:00:00 2001 From: Trevor Barnes <67297083+trevorb1@users.noreply.github.com> Date: Tue, 7 Nov 2023 21:12:58 -0800 Subject: [PATCH 075/103] Fix validation example typo --- docs/examples.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/examples.rst b/docs/examples.rst index ba779362..aec99b71 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -324,8 +324,12 @@ The MathProg datafile describing this model can be found on the ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a configuration validation ``yaml`` file:: + # on UNIX $ touch validate.yaml + # on Windows + > type nul > validate.yaml + 3. Create ``FUEL`` Codes ~~~~~~~~~~~~~~~~~~~~~~~~ Create the fuel codes and descriptions in the validation configuration file:: From 18273bf278cae75a859f4d61991892d6c12ee7af Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 7 Dec 2023 14:57:37 -0800 Subject: [PATCH 076/103] update citations for joss --- .zenodo.json | 13 ++----------- CITATION.cff | 4 ++-- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 7361f012..d03af56e 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -2,24 +2,15 @@ "license": "MIT", "upload_type": "software", "creators": [ - { - "name": "Will Usher", - "affiliation": "KTH Royal Institute of Technology", - "orcid": "0000-0001-9367-1791" - }, { "name": "Trevor Barnes", "affiliation": "Simon Fraser University", "orcid": "0000-0003-2458-2968" }, { - "name": "Hauke Henke", + "name": "Will Usher", "affiliation": "KTH Royal Institute of Technology", - "orcid": "0000-0003-0098-8701" - }, - { - "name": "Christoph Muschner", - "orcid": "0000-0001-8144-5260" + "orcid": "0000-0001-9367-1791" } ], "access_right": "open" diff --git a/CITATION.cff b/CITATION.cff index 1acfae13..9c914d76 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -14,6 +14,6 @@ authors: given-names: Christoph orcid: https://orcid.org/0000-0001-8144-5260 title: "otoole: OSeMOSYS tools for energy work" -version: 1.0.0 -doi: 10.5281/zenodo.7677990 +version: 1.1.1 +doi: 10.5281/zenodo.10292217 date-released: 2023-02-26 From 79fea518e1fdb876bcb5259ba28798d7dbf77db0 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 7 Dec 2023 14:59:30 -0800 Subject: [PATCH 077/103] update changelog --- CHANGELOG.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 54b51389..8c0f8eb2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,17 @@ Changelog ========= +Version 1.1.2 +============= +- Update citation information for JOSS + +Version 1.1.1 +============= +- Fixes CPLEX result processing docs +- Added joss status badge to readme +- Fix Tests on Windows +- Update graphviz install instructions + Version 1.1.0 ============= - Public Python API added to call otoole directly in Python files From 33c63e36ecd66a19c729a55e5235cd5353c6b256 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 7 Dec 2023 15:09:47 -0800 
Subject: [PATCH 078/103] add zenodo title and description --- .zenodo.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index d03af56e..7f28eec9 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -1,4 +1,6 @@ { + "title": "otoole: OSeMOSYS Tools for Energy Work", + "description": "A Python package to provide commonly used pre- and post-processing steps when working with OSeMOSYS models", "license": "MIT", "upload_type": "software", "creators": [ From 0b962f5ae4edf4579517c607574bef8a3cfa0c80 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 7 Dec 2023 15:12:12 -0800 Subject: [PATCH 079/103] linting fix --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 8c0f8eb2..7a94d3bb 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,7 +4,7 @@ Changelog Version 1.1.2 ============= -- Update citation information for JOSS +- Update citation information for JOSS Version 1.1.1 ============= From cd16d6d3cc19b67646f478008a8dee90f4d65006 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 7 Dec 2023 15:50:29 -0800 Subject: [PATCH 080/103] CHANGELOG.rst --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7a94d3bb..edb7af3d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,7 +4,7 @@ Changelog Version 1.1.2 ============= -- Update citation information for JOSS +- Update zenodo metadata for JOSS Version 1.1.1 ============= From 1752755e9b808912e9828587ae2a8549d0fcd9ff Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 7 Dec 2023 16:51:42 -0800 Subject: [PATCH 081/103] update coveralls to use github action --- .github/workflows/python.yaml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index a2fd49a3..e7c6153e 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -28,8 +28,6 @@ jobs: run: | tox - name: Upload coverage data to converalls.io - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - pip install --upgrade coveralls - coveralls --service=github + uses: coverallsapp/github-action@v2 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} From fdd690f739b8131e337d148432c3b136b32f8195 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 7 Dec 2023 16:58:40 -0800 Subject: [PATCH 082/103] coveralls update --- .github/workflows/python.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index e7c6153e..39699521 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -31,3 +31,4 @@ jobs: uses: coverallsapp/github-action@v2 with: github-token: ${{ secrets.GITHUB_TOKEN }} + debug: True From 77e269d99d5e0b8e4b97d827623c513e00274a77 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 7 Dec 2023 20:55:37 -0800 Subject: [PATCH 083/103] revert coveralls action --- .github/workflows/python.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 39699521..a2fd49a3 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -28,7 +28,8 @@ jobs: run: | tox - name: Upload coverage data to converalls.io - uses: coverallsapp/github-action@v2 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - debug: True + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + pip install --upgrade coveralls + coveralls --service=github From 
d6f40c6fe4788e2dbd612dd0c925ea447253b9be Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 11 Dec 2023 14:04:18 -0800 Subject: [PATCH 084/103] update converage upload condition --- .github/workflows/python.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index a2fd49a3..0f23b3cc 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -30,6 +30,7 @@ jobs: - name: Upload coverage data to converalls.io env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + if: ${{ matrix.os }} == "ubuntu-latest" && ${{ matrix.python-version }} == "ubuntu-latest" run: | pip install --upgrade coveralls coveralls --service=github From 754fe814a552f86e2a191832b5945bbe083ec0c2 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 11 Dec 2023 14:14:13 -0800 Subject: [PATCH 085/103] fix action --- .github/workflows/python.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 0f23b3cc..292995c9 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -30,7 +30,7 @@ jobs: - name: Upload coverage data to converalls.io env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - if: ${{ matrix.os }} == "ubuntu-latest" && ${{ matrix.python-version }} == "ubuntu-latest" + if: ${{ matrix.os }} == "ubuntu-latest" && ${{ matrix.python-version }} == "3.11" run: | pip install --upgrade coveralls coveralls --service=github From 5db9938f61f05d869c80dccfafc71c8c2bd5978c Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 11 Dec 2023 14:18:20 -0800 Subject: [PATCH 086/103] update syntax in action --- .github/workflows/python.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 292995c9..03bd989d 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -30,7 +30,7 @@ jobs: - name: Upload coverage data to converalls.io env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - if: ${{ matrix.os }} == "ubuntu-latest" && ${{ matrix.python-version }} == "3.11" + if: ${{ matrix.os }} == 'ubuntu-latest' && ${{ matrix.python-version }} == '3.11' run: | pip install --upgrade coveralls coveralls --service=github From 4ad32db29119e52426001350ce4771f306f96a95 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 11 Dec 2023 14:25:28 -0800 Subject: [PATCH 087/103] switch to coveralls action --- .github/workflows/python.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 03bd989d..1f138d36 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -31,6 +31,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} if: ${{ matrix.os }} == 'ubuntu-latest' && ${{ matrix.python-version }} == '3.11' - run: | - pip install --upgrade coveralls - coveralls --service=github + uses: coverallsapp/github-action@v2 + # run: | + # pip install --upgrade coveralls + # coveralls --service=github From e5df4550ab8bea592cea0b544f737953736ad6bb Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 11 Dec 2023 15:24:35 -0800 Subject: [PATCH 088/103] please work --- .github/workflows/python.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 1f138d36..1d609712 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -30,8 +30,8 @@ jobs: - name: Upload coverage data to 
converalls.io env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - if: ${{ matrix.os }} == 'ubuntu-latest' && ${{ matrix.python-version }} == '3.11' uses: coverallsapp/github-action@v2 + if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' # run: | # pip install --upgrade coveralls # coveralls --service=github From 724b735050823b1fa48a01272563a61092da187d Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Mon, 11 Dec 2023 15:29:33 -0800 Subject: [PATCH 089/103] revert to not using action --- .github/workflows/python.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 1d609712..d891110c 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -30,8 +30,7 @@ jobs: - name: Upload coverage data to converalls.io env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - uses: coverallsapp/github-action@v2 if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' - # run: | - # pip install --upgrade coveralls - # coveralls --service=github + run: | + pip install --upgrade coveralls + coveralls --service=github From f9ffec2c66ecced0054452c77d29b41f3b98af97 Mon Sep 17 00:00:00 2001 From: Trevor Barnes <67297083+trevorb1@users.noreply.github.com> Date: Mon, 11 Dec 2023 20:56:41 -0800 Subject: [PATCH 090/103] Update .zenodo.json Remove description from Zenodo config file so release notes are automatically copied over --- .zenodo.json | 1 - 1 file changed, 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index 7f28eec9..7efe5208 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -1,6 +1,5 @@ { "title": "otoole: OSeMOSYS Tools for Energy Work", - "description": "A Python package to provide commonly used pre- and post-processing steps when working with OSeMOSYS models", "license": "MIT", "upload_type": "software", "creators": [ From dcd5a49188b43f63f08b3714c1195e352a5d972b Mon Sep 17 00:00:00 2001 From: Will Usher Date: Fri, 15 Dec 2023 15:01:09 +0100 Subject: [PATCH 091/103] Regression in tests caused by pandas fixed by pandas 2.1.4 --- setup.cfg | 2 +- src/otoole/input.py | 7 +++++-- tests/test_input.py | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/setup.cfg b/setup.cfg index 7fbcbb43..50272515 100644 --- a/setup.cfg +++ b/setup.cfg @@ -47,7 +47,7 @@ install_requires = xlrd pyyaml pydot - pandas>=1.1,<2.1 + pandas>=2.1.4 Amply>=0.1.6 networkx flatten_dict diff --git a/src/otoole/input.py b/src/otoole/input.py index b3c979c7..210647a2 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -341,8 +341,11 @@ def _expand_defaults( df_default["VALUE"] = default_values[name] # combine result and default value dataframe - df = pd.concat([data, df_default]) - df = df[~df.index.duplicated(keep="first")] + if not data.empty: + df = pd.concat([data, df_default]) + df = df[~df.index.duplicated(keep="first")] + else: + df = df_default df = df.sort_index() output[name] = df diff --git a/tests/test_input.py b/tests/test_input.py index 292e4309..e9a7b148 100644 --- a/tests/test_input.py +++ b/tests/test_input.py @@ -265,7 +265,7 @@ def result_data(region): input_data_single_index_empty(region), ] parameter_test_data_ids = [ - "multi_index_no_defaluts", + "multi_index_no_defaults", "multi_index", "multi_index_empty", "single_index", From 34166a3f162aad93d061ceeda0e063422fe4e3be Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Thu, 14 Dec 2023 12:40:04 -0800 Subject: [PATCH 092/103] docs req and changlog update --- CHANGELOG.rst | 4 ++++ docs/requirements.txt | 2 +- 
2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index edb7af3d..4ebb5bb9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,10 @@ Changelog ========= +(Development) Version 1.1.3 +=========================== +- Lock pandas to 2.1.4 or later + Version 1.1.2 ============= - Update zenodo metadata for JOSS diff --git a/docs/requirements.txt b/docs/requirements.txt index 6b659fb8..335ad615 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,7 +4,7 @@ flatten_dict Jinja2<3.1 networkx openpyxl -pandas>=1.1,<2.1 +pandas>=2.1.4 pydantic>=2 pydot pyyaml From 3a346de0c925541dfae2eef69593886a4a136118 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Fri, 15 Dec 2023 13:20:22 +0100 Subject: [PATCH 093/103] Modified citation file with template for JOSS article --- CITATION.cff | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CITATION.cff b/CITATION.cff index 9c914d76..ee08aebf 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -17,3 +17,21 @@ title: "otoole: OSeMOSYS tools for energy work" version: 1.1.1 doi: 10.5281/zenodo.10292217 date-released: 2023-02-26 +preferred-citation: + type: article + authors: + - family-names: Usher + given-names: Will + orcid: https://orcid.org/0000-0001-9367-1791 + - family-names: Barnes + given-names: Trevor + orcid: https://orcid.org/0000-0003-2458-2968 + doi: "0.5281/zenodo.10292217" + journal: "Journal of Open Source Software" + month: 5 + start: 1 # First page number + end: 15 # Last page number + title: "otoole: OSeMOSYS tools for energy work" + issue: 1 + volume: 4 + year: 2023 From cc90480cdbb5734d5655b764a85038b6951843c2 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Fri, 15 Dec 2023 13:25:55 +0100 Subject: [PATCH 094/103] Update coveralls workflow to use lcov --- .github/workflows/python.yaml | 4 +--- setup.cfg | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index d891110c..835a4e48 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -31,6 +31,4 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' - run: | - pip install --upgrade coveralls - coveralls --service=github + uses: coverallsapp/github-action@v2 diff --git a/setup.cfg b/setup.cfg index 7fbcbb43..f033876c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -87,7 +87,7 @@ console_scripts = # CAUTION: --cov flags may prohibit setting breakpoints while debugging. # Comment those flags to avoid this pytest issue. addopts = - --cov otoole --cov-report html + --cov otoole --cov-report lcov --verbose -s # --log-cli-level=10 From d9bccb55b5930d58a23fa1b12267b13299612c70 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Fri, 15 Dec 2023 13:35:18 +0100 Subject: [PATCH 095/103] Added note about coverage in contributing docs --- docs/contributing.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/contributing.rst b/docs/contributing.rst index 31d3c73a..96bdfb1d 100644 --- a/docs/contributing.rst +++ b/docs/contributing.rst @@ -196,6 +196,10 @@ Find more detailed information in `creating a PR`_. You might also want to open the PR as a draft first and mark it as ready for review after the feedbacks from the continuous integration (CI) system or any required fixes. +We track test coverage using coveralls_. You can check the coverage +of your PR by clicking on the "details" link in the "Coverage" section of +the pull request checks. 
Try to ensure that your pull requests always increase +test coverage. Troubleshooting --------------- @@ -281,6 +285,7 @@ on PyPI_, the following steps can be used to release a new version for .. |tox| replace:: ``tox`` +.. _coveralls: https://coveralls.io/github/OSeMOSYS/otoole .. _black: https://pypi.org/project/black/ .. _CommonMark: https://commonmark.org/ .. _contribution-guide.org: https://www.contribution-guide.org/ From 53fa0d485e5cfa3f8dcf0b01f7e2340edfb36832 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Fri, 22 Dec 2023 11:09:39 +0000 Subject: [PATCH 096/103] Use recommended citation file from JOSS --- CITATION.cff | 63 ++++++++++++++++++++++++++-------------------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index ee08aebf..0b4b23ae 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,37 +1,36 @@ -cff-version: 1.2.0 -message: "If you use this software, please cite it as below." +cff-version: "1.2.0" authors: - - family-names: Usher - given-names: Will - orcid: https://orcid.org/0000-0001-9367-1791 - - family-names: Barnes - given-names: Trevor - orcid: https://orcid.org/0000-0003-2458-2968 - - family-names: Henke - given-names: Hauke - orcid: https://orcid.org/0000-0003-0098-8701 - - family-names: Muschner - given-names: Christoph - orcid: https://orcid.org/0000-0001-8144-5260 -title: "otoole: OSeMOSYS tools for energy work" -version: 1.1.1 -doi: 10.5281/zenodo.10292217 -date-released: 2023-02-26 +- family-names: Barnes + given-names: Trevor + orcid: "https://orcid.org/0000-0003-2458-2968" +- family-names: Usher + given-names: Will + orcid: "https://orcid.org/0000-0001-9367-1791" +contact: +- family-names: Barnes + given-names: Trevor + orcid: "https://orcid.org/0000-0003-2458-2968" +doi: 10.5281/zenodo.10360538 +message: If you use this software, please cite our article in the + Journal of Open Source Software. 
preferred-citation: - type: article authors: - - family-names: Usher - given-names: Will - orcid: https://orcid.org/0000-0001-9367-1791 - family-names: Barnes given-names: Trevor - orcid: https://orcid.org/0000-0003-2458-2968 - doi: "0.5281/zenodo.10292217" - journal: "Journal of Open Source Software" - month: 5 - start: 1 # First page number - end: 15 # Last page number - title: "otoole: OSeMOSYS tools for energy work" - issue: 1 - volume: 4 - year: 2023 + orcid: "https://orcid.org/0000-0003-2458-2968" + - family-names: Usher + given-names: Will + orcid: "https://orcid.org/0000-0001-9367-1791" + date-published: 2023-12-20 + doi: 10.21105/joss.05511 + issn: 2475-9066 + issue: 92 + journal: Journal of Open Source Software + publisher: + name: Open Journals + start: 5511 + title: "otoole: OSeMOSYS Tools for Energy Work" + type: article + url: "https://joss.theoj.org/papers/10.21105/joss.05511" + volume: 8 +title: "otoole: OSeMOSYS Tools for Energy Work" From ccb9e35f6590ae74a28e1ec2f77c4624bcf3ec99 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Fri, 19 Jan 2024 15:32:21 +0100 Subject: [PATCH 097/103] Allow discount rate with/out technology index --- src/otoole/results/result_package.py | 54 ++++++++++++++++++--------- tests/results/test_results_package.py | 18 +++++++++ 2 files changed, 54 insertions(+), 18 deletions(-) diff --git a/src/otoole/results/result_package.py b/src/otoole/results/result_package.py index 255f6847..a393fec5 100644 --- a/src/otoole/results/result_package.py +++ b/src/otoole/results/result_package.py @@ -307,8 +307,11 @@ def capital_investment(self) -> pd.DataFrame: capital_cost = self["CapitalCost"] new_capacity = self["NewCapacity"] operational_life = self["OperationalLife"] - discount_rate = self["DiscountRate"] - discount_rate_idv = self["DiscountRateIdv"] + + if "DiscountRateIdv" in self.keys(): + discount_rate = self["DiscountRateIdv"] + else: + discount_rate = self["DiscountRate"] regions = self["REGION"]["VALUE"].to_list() technologies = self.get_unique_values_from_index( @@ -323,10 +326,9 @@ def capital_investment(self) -> pd.DataFrame: raise KeyError(self._msg("CapitalInvestment", str(ex))) crf = capital_recovery_factor( - regions, technologies, discount_rate_idv, operational_life + regions, technologies, discount_rate, operational_life ) pva = pv_annuity(regions, technologies, discount_rate, operational_life) - capital_investment = capital_cost.mul(new_capacity, fill_value=0.0) capital_investment = capital_investment.mul(crf, fill_value=0.0).mul( pva, fill_value=0.0 @@ -765,22 +767,38 @@ def capital_recovery_factor( param CapitalRecoveryFactor{r in REGION, t in TECHNOLOGY} := (1 - (1 + DiscountRateIdv[r,t])^(-1))/(1 - (1 + DiscountRateIdv[r,t])^(-(OperationalLife[r,t]))); """ - if regions and technologies: - index = pd.MultiIndex.from_product( - [regions, technologies], names=["REGION", "TECHNOLOGY"] - ) + + def calc_crf(df, operational_life): + rate = df["VALUE"] + 1 + numerator = 1 - rate.pow(-1) + denominator = 1 - rate.pow(-operational_life) + + return numerator / denominator + + if not regions and not technologies: + return pd.DataFrame( + data=[], + columns=["REGION", "TECHNOLOGY", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY"]) + + index = pd.MultiIndex.from_product( + [regions, technologies], names=["REGION", "TECHNOLOGY"] + ) + if "TECHNOLOGY" in discount_rate_idv.index.names: crf = discount_rate_idv.reindex(index) - crf["RATE"] = crf["VALUE"] + 1 - crf["NUMER"] = 1 - crf["RATE"].pow(-1) - crf["DENOM"] = 1 - 
crf["RATE"].pow(-operational_life["VALUE"]) - crf["VALUE"] = (crf["NUMER"] / crf["DENOM"]).round(6) - return crf.reset_index()[["REGION", "TECHNOLOGY", "VALUE"]].set_index( - ["REGION", "TECHNOLOGY"] - ) + crf["VALUE"] = calc_crf(crf, operational_life["VALUE"]) + else: - return pd.DataFrame([], columns=["REGION", "TECHNOLOGY", "VALUE"]).set_index( - ["REGION", "TECHNOLOGY"] - ) + values = discount_rate_idv["VALUE"].copy() + crf = discount_rate_idv.reindex(index) + # This is a hack to get around the fact that the discount rate is + # indexed by REGION and not REGION, TECHNOLOGY + crf[::1] = values + crf["VALUE"] = calc_crf(crf, operational_life["VALUE"]) + + return crf.reset_index()[["REGION", "TECHNOLOGY", "VALUE"]].set_index( + ["REGION", "TECHNOLOGY"] + ) def pv_annuity( diff --git a/tests/results/test_results_package.py b/tests/results/test_results_package.py index 10fc8725..33c784b9 100644 --- a/tests/results/test_results_package.py +++ b/tests/results/test_results_package.py @@ -651,6 +651,24 @@ def test_crf_null(self, discount_rate_idv, operational_life): assert_frame_equal(actual, expected) + def test_crf_no_tech_discount_rate(self, region, discount_rate, operational_life): + + technologies = ["GAS_EXTRACTION", "DUMMY"] + regions = region["VALUE"].to_list() + actual = capital_recovery_factor( + regions, technologies, discount_rate, operational_life + ) + + expected = pd.DataFrame( + data=[ + ["SIMPLICITY", "GAS_EXTRACTION", 0.5121951219512197], + ["SIMPLICITY", "DUMMY", 0.34972244250594786], + ], + columns=["REGION", "TECHNOLOGY", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY"]) + + assert_frame_equal(actual, expected) + class TestPvAnnuity: def test_pva(self, region, discount_rate, operational_life): From 49bfbaf44faacc12602cd613506e7dffafe02063 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 23 Jan 2024 12:33:22 -0800 Subject: [PATCH 098/103] update changelog --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4ebb5bb9..63a24aa9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Changelog (Development) Version 1.1.3 =========================== - Lock pandas to 2.1.4 or later +- Capital Investment result calculation fixed Version 1.1.2 ============= From 57accd1604fc053ff6a3963abc55a2c4cd65a7b2 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Tue, 23 Jan 2024 12:46:03 -0800 Subject: [PATCH 099/103] fix linting issue on changelog --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 63a24aa9..bdc2c670 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,7 +5,7 @@ Changelog (Development) Version 1.1.3 =========================== - Lock pandas to 2.1.4 or later -- Capital Investment result calculation fixed +- Capital Investment result calculation fixed Version 1.1.2 ============= From f877a3a47d4043a7ee1c03da445a48af02eb8589 Mon Sep 17 00:00:00 2001 From: Will Usher Date: Thu, 25 Jan 2024 12:56:38 +0100 Subject: [PATCH 100/103] Simplify assignment --- src/otoole/results/result_package.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/otoole/results/result_package.py b/src/otoole/results/result_package.py index a393fec5..a63de927 100644 --- a/src/otoole/results/result_package.py +++ b/src/otoole/results/result_package.py @@ -768,7 +768,7 @@ def capital_recovery_factor( (1 - (1 + DiscountRateIdv[r,t])^(-1))/(1 - (1 + DiscountRateIdv[r,t])^(-(OperationalLife[r,t]))); """ - def calc_crf(df, operational_life): + def calc_crf(df: 
pd.DataFrame, operational_life: pd.Series) -> pd.Series: rate = df["VALUE"] + 1 numerator = 1 - rate.pow(-1) denominator = 1 - rate.pow(-operational_life) @@ -793,7 +793,7 @@ def calc_crf(df, operational_life): crf = discount_rate_idv.reindex(index) # This is a hack to get around the fact that the discount rate is # indexed by REGION and not REGION, TECHNOLOGY - crf[::1] = values + crf[:] = values crf["VALUE"] = calc_crf(crf, operational_life["VALUE"]) return crf.reset_index()[["REGION", "TECHNOLOGY", "VALUE"]].set_index( From cfc0cccbb6123da37621e8eb63d5736f734ad368 Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Sun, 31 Mar 2024 10:08:11 -0700 Subject: [PATCH 101/103] upgrade to mypy 1.9.0 --- .pre-commit-config.yaml | 2 +- src/otoole/convert.py | 5 +++-- src/otoole/read_strategies.py | 2 +- src/otoole/utils.py | 4 ++-- src/otoole/validate.py | 4 ++-- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a270cf8d..edb7dc51 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -53,7 +53,7 @@ repos: ## You can add flake8 plugins via `additional_dependencies`: # additional_dependencies: [flake8-bugbear] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.961 # Use the sha / tag you want to point at + rev: v1.9.0 # Use the sha / tag you want to point at hooks: - id: mypy additional_dependencies: ['types-PyYAML'] diff --git a/src/otoole/convert.py b/src/otoole/convert.py index 3cecd343..4267ac79 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -7,6 +7,7 @@ >>> convert('config.yaml', 'excel', 'datafile', 'input.xlsx', 'output.dat') """ + import logging import os from typing import Dict, Optional, Tuple, Union @@ -29,7 +30,7 @@ def read_results( from_path: str, input_format: str, input_path: str, - glpk_model: str = None, + glpk_model: Optional[str] = None, ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float]]: """Read OSeMOSYS results from CBC, GLPK, Gurobi, or CPLEX results files @@ -79,7 +80,7 @@ def convert_results( input_format: str, input_path: str, write_defaults: bool = False, - glpk_model: str = None, + glpk_model: Optional[str] = None, ) -> bool: """Post-process results from a CBC, CPLEX, Gurobi, or GLPK solution file into CSV format diff --git a/src/otoole/read_strategies.py b/src/otoole/read_strategies.py index 7f5805c6..da362cea 100644 --- a/src/otoole/read_strategies.py +++ b/src/otoole/read_strategies.py @@ -33,7 +33,7 @@ def __init__( self._parameters = parameters def read( - self, filepath: Union[str, TextIO] = None, **kwargs + self, filepath: Union[str, TextIO, None] = None, **kwargs ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, Any]]: config = self.user_config diff --git a/src/otoole/utils.py b/src/otoole/utils.py index d5308ca4..fe989b7e 100644 --- a/src/otoole/utils.py +++ b/src/otoole/utils.py @@ -2,7 +2,7 @@ import logging import os from importlib.resources import files -from typing import Any, Dict, List, Union +from typing import Any, Dict, List, Optional, Union import pandas as pd from pydantic import ValidationError @@ -29,7 +29,7 @@ def _read_file(open_file, ending): return contents -def read_packaged_file(filename: str, module_name: str = None): +def read_packaged_file(filename: str, module_name: Optional[str] = None): _, ending = os.path.splitext(filename) diff --git a/src/otoole/validate.py b/src/otoole/validate.py index f889bf71..3c39257d 100644 --- a/src/otoole/validate.py +++ b/src/otoole/validate.py @@ -33,7 +33,7 @@ import logging import re from collections 
import defaultdict -from typing import Dict, List, Sequence +from typing import Dict, List, Optional, Sequence import networkx.algorithms.isolate as isolate import pandas as pd @@ -53,7 +53,7 @@ def check_for_duplicates(codes: Sequence) -> bool: return duplicate_values -def create_schema(config: Dict[str, Dict] = None) -> Dict: +def create_schema(config: Optional[Dict[str, Dict]] = None) -> Dict: """Populate the dict of schema with codes from the validation config Arguments From e0a612d78bcf2f584326bef81468cdef0cde7f1d Mon Sep 17 00:00:00 2001 From: AgnesBelt Date: Fri, 10 May 2024 11:53:45 +0200 Subject: [PATCH 102/103] added instructions to install graphviz, if needed --- docs/functionality.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/functionality.rst b/docs/functionality.rst index 503fc899..7e8acaa0 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -184,6 +184,9 @@ visualising the reference energy system through the ``vis res`` command is suppo including ``bmp``, ``jpg``, ``pdf``, ``png`` etc. The graphviz_ library used to layout the reference energy system will interpret the file ending. + Remember to check if you have already installed graphviz_ on your maching, before using this fuctionality. + To install graphviz_ use ``conda install graphviz`` command. + Validation ---------- The validation module in ``otoole`` checks technology and fuel names against a From 15e5dae0145056b12086690e689f5dbcd72fa2ea Mon Sep 17 00:00:00 2001 From: trevorb1 Date: Sat, 18 May 2024 13:31:17 -0700 Subject: [PATCH 103/103] update graphviz install instructions --- docs/examples.rst | 17 +++++++++++------ docs/functionality.rst | 10 ++++++---- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/docs/examples.rst b/docs/examples.rst index aec99b71..08a4aeb8 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -197,6 +197,8 @@ save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV re $ otoole results cplex csv simplicity.sol results csv data config.yaml +.. _model-visualization: + Model Visualization ------------------- @@ -217,18 +219,21 @@ Run the following command, where the RES will be saved as the file ``res.png``:: .. WARNING:: If you encounter a ``graphviz`` dependency error, install it on your system - from the graphviz_ website (if on Windows) or via the command:: + following instructions on the Graphviz_ website. If on Windows, + download the install package `from Graphviz `_. + If on Mac or Linux, or running conda, use one of the following commands:: - sudo apt install graphviz # if on Ubuntu - brew install graphviz # if on Mac + brew install graphviz # if on Mac + sudo apt install graphviz # if on Ubuntu + conda install graphviz # if using conda To check that ``graphviz`` installed correctly, run ``dot -V`` to check the version:: - ~$ dot -V + $ dot -V dot - graphviz version 2.43.0 (0) -1. View the RES +2. View the RES ~~~~~~~~~~~~~~~ Open the newly created file, ``res.png`` and the following image should be displayed @@ -503,4 +508,4 @@ will also flag it as an isolated fuel. This means the fuel is unconnected from t .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer .. _Anaconda: https://www.anaconda.com/ .. _Gurobi: https://www.gurobi.com/ -.. _graphviz: https://www.graphviz.org/download/ +.. 
_Graphviz: https://www.graphviz.org/download/ diff --git a/docs/functionality.rst b/docs/functionality.rst index 7e8acaa0..1502577c 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -181,11 +181,13 @@ visualising the reference energy system through the ``vis res`` command is suppo .. NOTE:: The ``resfile`` command should include a file ending used for images, - including ``bmp``, ``jpg``, ``pdf``, ``png`` etc. The graphviz_ library + including ``bmp``, ``jpg``, ``pdf``, ``png`` etc. The Graphviz_ library used to layout the reference energy system will interpret the file ending. - Remember to check if you have already installed graphviz_ on your maching, before using this fuctionality. - To install graphviz_ use ``conda install graphviz`` command. +.. WARNING:: + If you encounter a Graphviz_ dependencey error, please follow Graphviz_ + installation instructions described in the + :ref:`visualization examples `. Validation ---------- @@ -220,4 +222,4 @@ the rest of the model:: .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer .. _Gurobi: https://www.gurobi.com/ .. _`OSeMOSYS Repository`: https://github.com/OSeMOSYS/OSeMOSYS_GNU_MathProg/tree/master/scripts -.. _graphviz: https://graphviz.org/ +.. _Graphviz: https://graphviz.org/
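
Editor's note on the capital recovery factor change (patch 097 above): the sketch below is a minimal, standalone illustration of the formula quoted in the ``capital_recovery_factor`` docstring, applied to the case that patch adds support for, where ``DiscountRate`` is indexed by REGION only and has to be broadcast across every technology. It is not otoole's implementation, only the arithmetic it performs; the region name, the 5% discount rate and the operational lives of 2 and 3 years are illustrative assumptions, chosen so that the results line up with the expected values in ``test_crf_no_tech_discount_rate``::

    import pandas as pd

    regions = ["SIMPLICITY"]
    technologies = ["GAS_EXTRACTION", "DUMMY"]

    # DiscountRate indexed by REGION only (assumed 5% for illustration)
    discount_rate = pd.DataFrame(
        {"REGION": regions, "VALUE": [0.05]}
    ).set_index("REGION")

    # OperationalLife indexed by REGION and TECHNOLOGY (assumed 2 and 3 years)
    operational_life = pd.DataFrame(
        {
            "REGION": ["SIMPLICITY", "SIMPLICITY"],
            "TECHNOLOGY": ["GAS_EXTRACTION", "DUMMY"],
            "VALUE": [2, 3],
        }
    ).set_index(["REGION", "TECHNOLOGY"])

    # Broadcast the region-level discount rate over every (REGION, TECHNOLOGY) pair
    index = pd.MultiIndex.from_product(
        [regions, technologies], names=["REGION", "TECHNOLOGY"]
    )
    rate = discount_rate["VALUE"].reindex(index, level="REGION") + 1

    # CRF[r,t] = (1 - (1 + dr)^-1) / (1 - (1 + dr)^-OperationalLife[r,t])
    crf = (1 - rate.pow(-1)) / (1 - rate.pow(-operational_life["VALUE"]))
    print(crf)
    # GAS_EXTRACTION -> ~0.512195, DUMMY -> ~0.349722

With these assumed inputs the broadcast reproduces the per-technology values asserted in the new test, which is the behaviour patches 097 and 100 introduce for models that define a region-level ``DiscountRate`` without a ``DiscountRateIdv``.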