diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 00000000..e034a0c8 --- /dev/null +++ b/.github/workflows/python-publish.yml @@ -0,0 +1,40 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +name: Upload Python Package + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + deploy: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: '3.x' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install tox + - name: Build package + run: tox -e build + - name: Publish package + run: tox -e publish + env: + TWINE_USERNAME: ${{ vars.TWINE_USERNAME }} + TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} + TWINE_REPOSITORY: ${{ vars.TWINE_REPOSITORY }} diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 3a14dcd9..835a4e48 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -5,10 +5,11 @@ on: [push, pull_request] jobs: build: - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} strategy: matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11"] + os: [ubuntu-latest, windows-latest] steps: - uses: actions/checkout@v3 @@ -20,17 +21,14 @@ jobs: run: | python -m pip install --upgrade pip pip install tox - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - pip install -e . 
- name: Lint with flake8 run: | tox -e lint - - name: Test with pytest + - name: Run tests using tox run: | tox - name: Upload coverage data to coveralls.io env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - pip install --upgrade coveralls - coveralls --service=github + if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' + uses: coverallsapp/github-action@v2 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a270cf8d..edb7dc51 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -53,7 +53,7 @@ repos: ## You can add flake8 plugins via `additional_dependencies`: # additional_dependencies: [flake8-bugbear] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.961 # Use the sha / tag you want to point at + rev: v1.9.0 # Use the sha / tag you want to point at hooks: - id: mypy additional_dependencies: ['types-PyYAML'] diff --git a/.readthedocs.yml b/.readthedocs.yml index 21b08145..d7cd0bd5 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -8,16 +8,16 @@ version: 2 sphinx: configuration: docs/conf.py -# Build documentation with MkDocs -#mkdocs: -# configuration: mkdocs.yml - # Optionally build your docs in additional formats such as PDF formats: - pdf python: - version: 3.8 install: - requirements: docs/requirements.txt - {path: ., method: pip} + +build: + os: ubuntu-22.04 + tools: + python: "3.11" diff --git a/.zenodo.json b/.zenodo.json index 7361f012..7efe5208 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -1,25 +1,17 @@ { + "title": "otoole: OSeMOSYS Tools for Energy Work", "license": "MIT", "upload_type": "software", "creators": [ - { - "name": "Will Usher", - "affiliation": "KTH Royal Institute of Technology", - "orcid": "0000-0001-9367-1791" - }, { "name": "Trevor Barnes", "affiliation": "Simon Fraser University", "orcid": "0000-0003-2458-2968" }, { - "name": "Hauke Henke", + "name": "Will Usher", "affiliation": "KTH Royal Institute of Technology", - "orcid": "0000-0003-0098-8701" - }, - { - "name": "Christoph Muschner", - "orcid": "0000-0001-8144-5260" + "orcid": "0000-0001-9367-1791" } ], "access_right": "open" diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 497a5e39..bdc2c670 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,38 @@ Changelog ========= +(Development) Version 1.1.3 +=========================== +- Lock pandas to 2.1.4 or later +- Capital Investment result calculation fixed + +Version 1.1.2 +============= +- Update zenodo metadata for JOSS + +Version 1.1.1 +============= +- Fixes CPLEX result processing docs +- Added joss status badge to readme +- Fix Tests on Windows +- Update graphviz install instructions + +Version 1.1.0 +============= +- Public Python API added to call otoole directly in Python files +- ReadCplex directly reads in CPLEX solution files. Drops the need to transform and sort solution files +- ReadGlpk class added to process GLPK solution files +- Update to Pydantic v2.0 +- ReadResultsCbc renamed to ReadWideResults +- Model validation instructions updated in documentation +- The ``--input_datafile`` argument is deprecated, and the user now must supply the input data to process results +- Locks pandas to <2.1 + +Version 1.0.4 +============= +- Fixed issue with pydantic v2.0.0 +- Dropped support for Python 3.8. 
Otoole now requires Python 3.9 or later + Version 1.0.3 ============= - Improved error message for multiple names mismatches diff --git a/CITATION.cff b/CITATION.cff index 1acfae13..0b4b23ae 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,19 +1,36 @@ -cff-version: 1.2.0 -message: "If you use this software, please cite it as below." +cff-version: "1.2.0" authors: - - family-names: Usher - given-names: Will - orcid: https://orcid.org/0000-0001-9367-1791 +- family-names: Barnes + given-names: Trevor + orcid: "https://orcid.org/0000-0003-2458-2968" +- family-names: Usher + given-names: Will + orcid: "https://orcid.org/0000-0001-9367-1791" +contact: +- family-names: Barnes + given-names: Trevor + orcid: "https://orcid.org/0000-0003-2458-2968" +doi: 10.5281/zenodo.10360538 +message: If you use this software, please cite our article in the + Journal of Open Source Software. +preferred-citation: + authors: - family-names: Barnes given-names: Trevor - orcid: https://orcid.org/0000-0003-2458-2968 - - family-names: Henke - given-names: Hauke - orcid: https://orcid.org/0000-0003-0098-8701 - - family-names: Muschner - given-names: Christoph - orcid: https://orcid.org/0000-0001-8144-5260 -title: "otoole: OSeMOSYS tools for energy work" -version: 1.0.0 -doi: 10.5281/zenodo.7677990 -date-released: 2023-02-26 + orcid: "https://orcid.org/0000-0003-2458-2968" + - family-names: Usher + given-names: Will + orcid: "https://orcid.org/0000-0001-9367-1791" + date-published: 2023-12-20 + doi: 10.21105/joss.05511 + issn: 2475-9066 + issue: 92 + journal: Journal of Open Source Software + publisher: + name: Open Journals + start: 5511 + title: "otoole: OSeMOSYS Tools for Energy Work" + type: article + url: "https://joss.theoj.org/papers/10.21105/joss.05511" + volume: 8 +title: "otoole: OSeMOSYS Tools for Energy Work" diff --git a/README.rst b/README.rst index 43177917..df348434 100644 --- a/README.rst +++ b/README.rst @@ -2,24 +2,46 @@ otoole: OSeMOSYS tools for energy work ================================================== +.. image:: https://joss.theoj.org/papers/e93a191ae795b171beff782a68fdc467/status.svg + :target: https://joss.theoj.org/papers/e93a191ae795b171beff782a68fdc467 + :alt: JOSS status + +.. image:: https://img.shields.io/pypi/v/otoole.svg + :target: https://pypi.org/project/otoole/ + :alt: PyPI + +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black + :alt: Code Style + +.. image:: https://img.shields.io/badge/python-3.9_|_3.10_|_3.11-blue.svg + :target: https://pypi.org/project/otoole/ + :alt: Python Version + +.. image:: https://img.shields.io/badge/License-MIT-green.svg + :target: https://opensource.org/licenses/MIT + :alt: License + +| + .. image:: https://coveralls.io/repos/github/OSeMOSYS/otoole/badge.svg?branch=master&kill_cache=1 :target: https://coveralls.io/github/OSeMOSYS/otoole?branch=master + :alt: Code Coverage + +.. image:: https://github.com/OSeMOSYS/otoole/actions/workflows/python.yaml/badge.svg?branch=master + :target: https://github.com/OSeMOSYS/otoole/actions/workflows/python.yaml + :alt: GitHub CI .. image:: https://readthedocs.org/projects/otoole/badge/?version=latest :target: https://otoole.readthedocs.io/en/latest/?badge=latest :alt: Documentation Status -.. 
image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black -A Python toolkit to support use of OSeMOSYS - Description =========== OSeMOSYS tools for energy work, or otoole, is a Python package -which provides a command-line interface for users of OSeMOSYS. The aim of the -package is to provide commonly used pre- and post-processing steps for OSeMOSYS. +to support users of OSeMOSYS. The aim of the package is to provide commonly +used pre- and post-processing steps for OSeMOSYS. **otoole** aims to support different ways of storing input data and results, including csv files and Excel workbooks, as well as different implementations @@ -49,5 +71,7 @@ Contributing New ideas and bugs `should be submitted <https://github.com/OSeMOSYS/otoole/issues>`_ to the repository issue tracker. Please do contribute by discussing and developing these -ideas further. To contribute directly to the documentation of code development, please see -the contribution guidelines document. +ideas further. + +To contribute directly to the code and documentation development, please see +the `contribution guidelines <https://otoole.readthedocs.io/en/latest/contributing.html>`_. diff --git a/docs/_static/validation-data.txt b/docs/_static/validation-data.txt new file mode 100644 index 00000000..2f0f8132 --- /dev/null +++ b/docs/_static/validation-data.txt @@ -0,0 +1,202 @@ +# Model file written by *otoole* +param default 0 : AccumulatedAnnualDemand := +; +param default -1 : AnnualEmissionLimit := +; +param default 0 : AnnualExogenousEmission := +; +param default 1 : AvailabilityFactor := +; +param default 1 : CapacityFactor := +; +param default 0 : CapacityOfOneTechnologyUnit := +; +param default 1 : CapacityToActivityUnit := +R PWRWND 31.536 +R PWRCOA 31.536 +R TRNELC 31.536 +; +param default 0 : CapitalCost := +R PWRWND 2020 1500 +R PWRWND 2021 1500 +R PWRWND 2022 1500 +R PWRCOA 2020 5000 +R PWRCOA 2021 5000 +R PWRCOA 2022 5000 +; +param default 0 : CapitalCostStorage := +; +param default 0 : Conversionld := +; +param default 0 : Conversionlh := +; +param default 0 : Conversionls := +; +set DAILYTIMEBRACKET := +; +set DAYTYPE := +; +param default 0.00137 : DaySplit := +; +param default 7 : DaysInDayType := +; +param default 1 : DepreciationMethod := +; +param default 0.05 : DiscountRate := +; +param default 0.05 : DiscountRateStorage := +; +set EMISSION := +; +param default 0 : EmissionActivityRatio := +; +param default 0 : EmissionsPenalty := +; +set FUEL := +WND00 +COA00 +ELC01 +ELC02 +; +param default 0 : FixedCost := +; +param default 0 : InputActivityRatio := +R PWRWND WND00 1 2020 1 +R PWRWND WND00 1 2021 1 +R PWRWND WND00 1 2022 1 +R PWRCOA COA00 1 2020 1 +R PWRCOA COA00 1 2021 1 +R PWRCOA COA00 1 2022 1 +R TRNELC ELC01 1 2020 1 +R TRNELC ELC01 1 2021 1 +R TRNELC ELC01 1 2022 1 +; +set MODE_OF_OPERATION := +1 +; +param default 0 : MinStorageCharge := +; +param default -1 : ModelPeriodEmissionLimit := +; +param default 0 : ModelPeriodExogenousEmission := +; +param default 1 : OperationalLife := +R PWRWND 20 +R PWRCOA 30 +; +param default 0 : OperationalLifeStorage := +; +param default 0 : OutputActivityRatio := +R MINWND WND00 1 2020 1 +R MINWND WND00 1 2021 1 +R MINWND WND00 1 2022 1 +R MINCOA COA00 1 2020 1 +R MINCOA COA00 1 2021 1 +R MINCOA COA00 1 2022 1 +R PWRWND ELC01 1 2020 1 +R PWRWND ELC01 1 2021 1 +R PWRWND ELC01 1 2022 1 +R PWRCOA ELC01 1 2020 1 +R PWRCOA ELC01 1 2021 1 +R PWRCOA ELC01 1 2022 1 +R TRNELC ELC02 1 2020 1 +R TRNELC ELC02 1 2021 1 +R TRNELC ELC02 1 2022 1 +; +set REGION := +R +; +param default 0 : REMinProductionTarget := +; +param default 
0 : RETagFuel := +; +param default 0 : RETagTechnology := +; +param default 1 : ReserveMargin := +; +param default 0 : ReserveMarginTagFuel := +; +param default 0 : ReserveMarginTagTechnology := +; +param default 0 : ResidualCapacity := +R PWRCOA 2020 0.25 +R PWRCOA 2021 0.25 +R PWRCOA 2022 0.25 +; +param default 999 : ResidualStorageCapacity := +; +set SEASON := +; +set STORAGE := +; +param default 0 : SpecifiedAnnualDemand := +R ELC02 2020 10 +R ELC02 2021 15 +R ELC02 2022 20 +; +param default 0 : SpecifiedDemandProfile := +R ELC02 S 2020 0.5 +R ELC02 W 2020 0.5 +R ELC02 S 2021 0.5 +R ELC02 W 2021 0.5 +R ELC02 S 2022 0.5 +R ELC02 W 2022 0.5 +; +param default 0 : StorageLevelStart := +; +param default 0 : StorageMaxChargeRate := +; +param default 0 : StorageMaxDischargeRate := +; +set TECHNOLOGY := +MINWND +MINCOA +PWRWND +PWRCOA +TRNELC +; +set TIMESLICE := +S +W +; +param default 0 : TechnologyFromStorage := +; +param default 0 : TechnologyToStorage := +; +param default -1 : TotalAnnualMaxCapacity := +; +param default -1 : TotalAnnualMaxCapacityInvestment := +; +param default 0 : TotalAnnualMinCapacity := +; +param default 0 : TotalAnnualMinCapacityInvestment := +; +param default 0 : TotalTechnologyAnnualActivityLowerLimit := +; +param default -1 : TotalTechnologyAnnualActivityUpperLimit := +; +param default 0 : TotalTechnologyModelPeriodActivityLowerLimit := +; +param default -1 : TotalTechnologyModelPeriodActivityUpperLimit := +; +param default 0 : TradeRoute := +; +param default 0 : VariableCost := +R MINCOA 1 2020 5 +R MINCOA 1 2021 5 +R MINCOA 1 2022 5 +; +set YEAR := +2020 +2021 +2022 +; +param default 0 : YearSplit := +S 2020 0.5 +W 2020 0.5 +S 2021 0.5 +W 2021 0.5 +S 2022 0.5 +W 2022 0.5 +; +end; diff --git a/docs/conf.py b/docs/conf.py index c005bcc8..2a5fbc5c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -172,11 +172,11 @@ "path_to_docs": "docs", "use_repository_button": True, "use_edit_page_button": True, - "extra_navbar": - """ -

- Theme by the Executable Book Project
- Logo by looka.com
- """,
+ # "extra_navbar":
+ # """
+ # Theme by the Executable Book Project
+ # Logo by looka.com
+ # """, "icon_links": [], } diff --git a/docs/contributing.rst b/docs/contributing.rst index 31d3c73a..96bdfb1d 100644 --- a/docs/contributing.rst +++ b/docs/contributing.rst @@ -196,6 +196,10 @@ Find more detailed information in `creating a PR`_. You might also want to open the PR as a draft first and mark it as ready for review after the feedback from the continuous integration (CI) system or any required fixes. +We track test coverage using coveralls_. You can check the coverage +of your PR by clicking on the "details" link in the "Coverage" section of +the pull request checks. Try to ensure that your pull requests always increase +test coverage. Troubleshooting --------------- @@ -281,6 +285,7 @@ on PyPI_, the following steps can be used to release a new version for .. |tox| replace:: ``tox`` +.. _coveralls: https://coveralls.io/github/OSeMOSYS/otoole .. _black: https://pypi.org/project/black/ .. _CommonMark: https://commonmark.org/ .. _contribution-guide.org: https://www.contribution-guide.org/ diff --git a/docs/convert.rst b/docs/convert.rst new file mode 100644 index 00000000..2333d976 --- /dev/null +++ b/docs/convert.rst @@ -0,0 +1,70 @@ +.. _convert: + +========== +Python API +========== + +otoole also provides a Python API to access all the features available from the command line tool. + +Converting between formats +-------------------------- + +``otoole`` currently supports conversion between the following formats: + +- Excel +- A folder of CSV files +- GNU MathProg datafile + +>>> from otoole import convert +>>> convert('my_model.yaml', 'excel', 'csv', 'my_model.xlsx', 'my_model_csvs') + +See :py:func:`otoole.convert.convert` for more details. + +Converting solver results to a folder of CSV files +-------------------------------------------------- + +The ``convert_results`` function creates a folder of CSV result files from a CBC_, CLP_, +Gurobi_ or CPLEX_ solution file: + +>>> from otoole import convert_results +>>> convert_results('my_model.yaml', 'cbc', 'csv', 'my_model.sol', 'my_model_csvs', 'datafile', 'my_model.dat') + +See :func:`otoole.convert.convert_results` for more details. + +Reading solver results into a dict of Pandas DataFrames +------------------------------------------------------- + +The ``read_results`` function reads a CBC_, CLP_, +Gurobi_ or CPLEX_ solution file into memory: + +>>> from otoole import read_results
>>> results, default_values = read_results('my_model.yaml', 'cbc', 'my_model.sol', 'datafile', 'my_model.dat') + +See :func:`otoole.convert.read_results` for more details. + +Read in data from different formats +----------------------------------- + +You can use the :py:func:`otoole.convert.read` function to read data in from different formats to a Python object. +This allows you to then use all the features offered by Python to manipulate the data. + +>>> from otoole import read +>>> data, defaults = read('my_model.yaml', 'csv', 'my_model_csvs') # read from a folder of csv files +>>> data, defaults = read('my_model.yaml', 'excel', 'my_model.xlsx') # read from an Excel file +>>> data, defaults = read('my_model.yaml', 'datafile', 'my_model.dat') # read from a GNU MathProg datafile + +Write out data to different formats +----------------------------------- + +You can use the :py:func:`otoole.convert.write` function to write data out to different formats from a Python object. + +>>> from otoole import read, write +>>> data, defaults = read('my_model.yaml', 'csv', 'my_model_csvs') # read from a folder of csv files +>>> write('my_model.yaml', 'excel', 'my_model.xlsx', data, defaults) # write to an Excel file +>>> write('my_model.yaml', 'datafile', 'my_model.dat', data, defaults) # write to a GNU MathProg datafile
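+
+Because ``read`` returns ordinary pandas DataFrames, the data can also be
+edited in between reading and writing. A minimal sketch (``CapitalCost`` is
+an illustrative parameter name and the scaling factor is arbitrary):
+
+>>> from otoole import read, write
+>>> data, defaults = read('my_model.yaml', 'csv', 'my_model_csvs')
+>>> capex = data['CapitalCost']
+>>> capex['VALUE'] = capex['VALUE'] * 1.1  # scale all capital costs by 10%
+>>> write('my_model.yaml', 'csv', 'my_model_scaled', data, defaults)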
+ + +.. _CBC: https://github.com/coin-or/Cbc +.. _CLP: https://github.com/coin-or/Clp +.. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer +.. _Gurobi: https://www.gurobi.com/ diff --git a/docs/data.rst b/docs/data.rst index 4328c3f9..c6f68974 100644 --- a/docs/data.rst +++ b/docs/data.rst @@ -68,8 +68,8 @@ Sets are defined as follows:: .. NOTE:: It's convention in OSeMOSYS to capitalize set names -Parmaters Foramt -~~~~~~~~~~~~~~~~ +Parameters Format +~~~~~~~~~~~~~~~~~ Parameters are defined as follows. When referencing set indices use the full name, **not** the ``short_name``:: @@ -112,7 +112,7 @@ repository for a complete example. dtype: str type: set -2. Parmater definition of ``AccumulatedAnnualDemand``:: +2. Parameter definition of ``AccumulatedAnnualDemand``:: AccumulatedAnnualDemand: short_name: AccAnnualDemand diff --git a/docs/examples-validation.rst b/docs/examples-validation.rst new file mode 100644 index 00000000..ee6d87c1 --- /dev/null +++ b/docs/examples-validation.rst @@ -0,0 +1,213 @@ +:orphan: + +.. _examples-validation: + +----------------------- +Example Validation File +----------------------- + +This page holds the datafile used in the validation example. The file can +either be copy/pasted from below, or directly downloaded from :download:`here <_static/validation-data.txt>` :: + + # Model file written by *otoole* + param default 0 : AccumulatedAnnualDemand := + ; + param default -1 : AnnualEmissionLimit := + ; + param default 0 : AnnualExogenousEmission := + ; + param default 1 : AvailabilityFactor := + ; + param default 1 : CapacityFactor := + ; + param default 0 : CapacityOfOneTechnologyUnit := + ; + param default 1 : CapacityToActivityUnit := + R PWRWND 31.536 + R PWRCOA 31.536 + R TRNELC 31.536 + ; + param default 0 : CapitalCost := + R PWRWND 2020 1500 + R PWRWND 2021 1500 + R PWRWND 2022 1500 + R PWRCOA 2020 5000 + R PWRCOA 2021 5000 + R PWRCOA 2022 5000 + ; + param default 0 : CapitalCostStorage := + ; + param default 0 : Conversionld := + ; + param default 0 : Conversionlh := + ; + param default 0 : Conversionls := + ; + set DAILYTIMEBRACKET := + ; + set DAYTYPE := + ; + param default 0.00137 : DaySplit := + ; + param default 7 : DaysInDayType := + ; + param default 1 : DepreciationMethod := + ; + param default 0.05 : DiscountRate := + ; + param default 0.05 : DiscountRateStorage := + ; + set EMISSION := + ; + param default 0 : EmissionActivityRatio := + ; + param default 0 : EmissionsPenalty := + ; + set FUEL := + WND00 + COA00 + ELC01 + ELC02 + ; + param default 0 : FixedCost := + ; + param default 0 : InputActivityRatio := + R PWRWND WND00 1 2020 1 + R PWRWND WND00 1 2021 1 + R PWRWND WND00 1 2022 1 + R PWRCOA COA00 1 2020 1 + R PWRCOA COA00 1 2021 1 + R PWRCOA COA00 1 2022 1 + R TRNELC ELC01 1 2020 1 + R TRNELC ELC01 1 2021 1 + R TRNELC ELC01 1 2022 1 + ; + set MODE_OF_OPERATION := + 1 + ; + param default 0 : MinStorageCharge := + ; + param default -1 : ModelPeriodEmissionLimit := + ; + param default 0 : ModelPeriodExogenousEmission := + ; + param default 1 : OperationalLife := + R PWRWND 20 + R PWRCOA 30 + ; + param default 0 : OperationalLifeStorage := + ; + param default 0 : 
OutputActivityRatio := + R MINWND WND00 1 2020 1 + R MINWND WND00 1 2021 1 + R MINWND WND00 1 2022 1 + R MINCOA COA00 1 2020 1 + R MINCOA COA00 1 2021 1 + R MINCOA COA00 1 2022 1 + R PWRWND ELC01 1 2020 1 + R PWRWND ELC01 1 2021 1 + R PWRWND ELC01 1 2022 1 + R PWRCOA ELC01 1 2020 1 + R PWRCOA ELC01 1 2021 1 + R PWRCOA ELC01 1 2022 1 + R TRNELC ELC02 1 2020 1 + R TRNELC ELC02 1 2021 1 + R TRNELC ELC02 1 2022 1 + ; + set REGION := + R + ; + param default 0 : REMinProductionTarget := + ; + param default 0 : RETagFuel := + ; + param default 0 : RETagTechnology := + ; + param default 1 : ReserveMargin := + ; + param default 0 : ReserveMarginTagFuel := + ; + param default 0 : ReserveMarginTagTechnology := + ; + param default 0 : ResidualCapacity := + R PWRCOA 2020 0.25 + R PWRCOA 2021 0.25 + R PWRCOA 2022 0.25 + ; + param default 999 : ResidualStorageCapacity := + ; + set SEASON := + ; + set STORAGE := + ; + param default 0 : SpecifiedAnnualDemand := + R ELC02 2020 10 + R ELC02 2021 15 + R ELC02 2022 20 + ; + param default 0 : SpecifiedDemandProfile := + R ELC02 S 2020 0.5 + R ELC02 W 2020 0.5 + R ELC02 S 2021 0.5 + R ELC02 W 2021 0.5 + R ELC02 S 2022 0.5 + R ELC02 W 2022 0.5 + ; + param default 0 : StorageLevelStart := + ; + param default 0 : StorageMaxChargeRate := + ; + param default 0 : StorageMaxDischargeRate := + ; + set TECHNOLOGY := + MINWND + MINCOA + PWRWND + PWRCOA + TRNELC + ; + set TIMESLICE := + S + W + ; + param default 0 : TechnologyFromStorage := + ; + param default 0 : TechnologyToStorage := + ; + param default -1 : TotalAnnualMaxCapacity := + ; + param default -1 : TotalAnnualMaxCapacityInvestment := + ; + param default 0 : TotalAnnualMinCapacity := + ; + param default 0 : TotalAnnualMinCapacityInvestment := + ; + param default 0 : TotalTechnologyAnnualActivityLowerLimit := + ; + param default -1 : TotalTechnologyAnnualActivityUpperLimit := + ; + param default 0 : TotalTechnologyModelPeriodActivityLowerLimit := + ; + param default -1 : TotalTechnologyModelPeriodActivityUpperLimit := + ; + param default 0 : TradeRoute := + ; + param default 0 : VariableCost := + R MINCOA 1 2020 5 + R MINCOA 1 2021 5 + R MINCOA 1 2022 5 + ; + set YEAR := + 2020 + 2021 + 2022 + ; + param default 0 : YearSplit := + S 2020 0.5 + W 2020 0.5 + S 2021 0.5 + W 2021 0.5 + S 2022 0.5 + W 2022 0.5 + ; + end; diff --git a/docs/examples.rst b/docs/examples.rst index 9e192332..08a4aeb8 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -6,7 +6,7 @@ Examples This page will present examples to show the full functionality of ``otoole``. It will walk through the ``convert``, ``results``, ``setup``, ``viz`` and ``validate`` -functionality in seperate simple use cases. +functionality in separate simple use cases. .. NOTE:: To follow these examples, clone the Simplicity_ repository and run all commands @@ -15,97 +15,189 @@ functionality in seperate simple use cases. git clone https://github.com/OSeMOSYS/simplicity.git cd simplicity -.. CAUTION:: - While ``otoole`` does not require a solver, these examples - will use the free and open source solvers GLPK_ and CBC_. - Installation instructions are described in the `Solver Setup`_ section. - -Data Conversion with CSVs -------------------------- +Solver Setup +------------ Objective ~~~~~~~~~ -Use a folder of CSV data to build and solve an OSeMOSYS model with CBC_. Generate -the full suite of OSeMOSYS results. +Install GLPK_ (required) and CBC_ (optional) to use in the otoole examples. 
+While ``otoole`` does not require a solver, these examples will use the free +and open source solvers GLPK_ and CBC_. -1. ``otoole`` Convert -~~~~~~~~~~~~~~~~~~~~~ -We first want to convert the folder of Simplicity_ CSVs into -an OSeMOSYS datafile called ``simplicity.txt``:: +1. Install GLPK +~~~~~~~~~~~~~~~~ - $ otoole convert csv datafile data simplicity.txt config.yaml +GLPK_ is a free and open-source linear program solver. Full +install instructions can be found on the `GLPK Website`_; however, the +abbreviated instructions are shown below. -2. Build the Model -~~~~~~~~~~~~~~~~~~~ -Use GLPK_ to build the model and save it as ``simplicity.lp``:: +To install GLPK on **Linux**, run the commands:: - $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wlp simplicity.lp --check + $ sudo apt-get update + $ sudo apt-get install glpk glpk-utils + +To install GLPK on **Mac**, run the command:: + + $ brew install glpk + +To install GLPK on **Windows**, follow the instructions on the +`GLPK Website`_. Be sure to add GLPK to +your environment variables after installation. + +Alternatively, if you use Anaconda_ to manage +your Python packages, you can install GLPK via the command:: + + $ conda install -c conda-forge glpk + +2. Test the GLPK install +~~~~~~~~~~~~~~~~~~~~~~~~ +Once installed, you should be able to call the ``glpsol`` command:: + + $ glpsol + GLPSOL: GLPK LP/MIP Solver, v4.65 + No input problem file specified; try glpsol --help .. TIP:: See the `GLPK Wiki`_ for more information on the ``glpsol`` command -3. Solve the Model -~~~~~~~~~~~~~~~~~~ -Use CBC_ to solve the model and save the solution file as ``simplicity.sol``:: +3. Install CBC +~~~~~~~~~~~~~~ - $ cbc simplicity.lp solve -solu simplicity.sol +CBC_ is a free and open-source mixed integer linear programming solver. Full +install instructions can be found on the CBC_ website; however, the abbreviated +instructions are shown below. -4. Generate the full set of results -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use ``otoole``'s ``result`` package to generate the results file:: +To install CBC on **Linux**, run the command:: - $ otoole results cbc csv simplicity.sol results config.yaml + $ sudo apt-get install coinor-cbc coinor-libcbc-dev -5. View Results -~~~~~~~~~~~~~~~ -Results are now viewable in the files ``results/*.csv`` + +To install CBC on **Mac**, run the command:: + + $ brew install coin-or-tools/coinor/cbc + +To install CBC on **Windows**, follow the install instructions on the CBC_ +website. + +Alternatively, if you use Anaconda_ to manage +your Python packages, you can install CBC via the command:: + + $ conda install -c conda-forge coincbc + +4. Test the CBC install +~~~~~~~~~~~~~~~~~~~~~~~ +Once installed, you should be able to directly call CBC:: + + $ cbc + Welcome to the CBC MILP Solver + Version: 2.10.3 + Build Date: Mar 24 2020 + + CoinSolver takes input from arguments ( - switches to stdin) + Enter ? for list of commands or help + Coin: + +You can exit the solver by typing ``quit`` + +Input Data Conversion +--------------------- + +Objective +~~~~~~~~~ + +Convert input data between CSV, Excel, and GNU MathProg data formats. + +1. Clone ``Simplicity`` +~~~~~~~~~~~~~~~~~~~~~~~ +If you have not already done so, clone the Simplicity_ repository:: + + $ git clone https://github.com/OSeMOSYS/simplicity.git + $ cd simplicity + .. NOTE:: Further information on the ``config.yaml`` file is in the :ref:`template-setup` section +2. Convert CSV data into MathProg data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert the folder of Simplicity_ CSVs (``data/``) into an OSeMOSYS datafile called ``simplicity.txt``:: + + $ otoole convert csv datafile data simplicity.txt config.yaml + +3. Convert MathProg data into Excel Data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert the new Simplicity_ datafile (``simplicity.txt``) into Excel data called ``simplicity.xlsx``:: + + $ otoole convert datafile excel simplicity.txt simplicity.xlsx config.yaml .. TIP:: - Before moving onto the next section, remove all the generated files:: + Excel workbooks are an easy way for humans to interface with OSeMOSYS data! - $ rm simplicity.lp simplicity.sol simplicity.txt results/* +4. Convert Excel Data into CSV data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert the new Simplicity_ excel data (``simplicity.xlsx``) into a folder of CSV data +called ``simplicity/``. Note that this data will be identical to the original CSV data folder (``data/``):: + + $ otoole convert excel csv simplicity.xlsx simplicity config.yaml
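+
+.. TIP::
+    These conversions can also be scripted through the Python API described
+    in :ref:`convert`. A minimal sketch of the datafile to Excel step above,
+    using the same illustrative paths:
+
+    >>> from otoole import convert
+    >>> convert('config.yaml', 'datafile', 'excel', 'simplicity.txt', 'simplicity.xlsx')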
-Data Conversion with Excel -------------------------- +Process Solutions from Different Solvers +---------------------------------------- Objective ~~~~~~~~~ -Use an excel worksheet to build and solve an OSeMOSYS model with CBC. +Process solutions from GLPK_, CBC_, Gurobi_, and CPLEX_. This example assumes +you have an existing GNU MathProg datafile called ``simplicity.txt`` (from the +previous example). -1. Create the Excel Workbook -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use the example CSV data to create an Excel Workbook using ``otoole convert``:: +1. Process a solution from GLPK +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use GLPK_ to build the model, save the problem as ``simplicity.glp``, solve the model, and +save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV results called ``results-glpk/``. +When processing solutions from GLPK, the model file (``*.glp``) must also be passed:: - $ otoole convert csv excel data simplicity.xlsx config.yaml + $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wglp simplicity.glp --write simplicity.sol -Excel workbooks are an easy way for humans to interface with OSeMOSYS data! + $ otoole results glpk csv simplicity.sol results-glpk datafile simplicity.txt config.yaml --glpk_model simplicity.glp -2. Create the MathProg datafile -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Next, we want to convert the excel workbook (``simplicity.xlsx``) into -an OSeMOSYS datafile (``simplicity.txt``):: +.. NOTE:: + By default, MathProg OSeMOSYS models will write out a folder of CSV results to a ``results/`` + directory if solving via GLPK. However, using ``otoole`` allows the user to programmatically access results + and control read/write locations. - $ otoole convert excel datafile simplicity.xlsx simplicity.txt config.yaml +2. Process a solution from CBC +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use GLPK_ to build the model and save the problem as ``simplicity.lp``. Use CBC_ to solve the model and +save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV results called ``results/`` from the solution file:: -3. Build the Model -~~~~~~~~~~~~~~~~~~ -Use GLPK_ to build the model and save it as ``simplicity.lp``:: + $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wlp simplicity.lp --check + + $ cbc simplicity.lp solve -solu simplicity.sol + + $ otoole results cbc csv simplicity.sol results csv data config.yaml
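+
+The same post-processing is also available from the Python API (see
+:ref:`convert`). A minimal sketch mirroring the CBC command above:
+
+>>> from otoole import convert_results
+>>> convert_results('config.yaml', 'cbc', 'csv', 'simplicity.sol', 'results', 'csv', 'data')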
+ +3. Process a solution from Gurobi +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use GLPK_ to build the model and save the problem as ``simplicity.lp``. Use Gurobi_ to solve the model and +save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV results called ``results/`` from the solution file:: $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wlp simplicity.lp --check -4. Solve the Model -~~~~~~~~~~~~~~~~~~ -Use CBC_ to solve the model and save the solution file as ``simplicity.sol``:: + $ gurobi_cl ResultFile=simplicity.sol simplicity.lp - $ cbc simplicity.lp solve -solu simplicity.sol + $ otoole results gurobi csv simplicity.sol results csv data config.yaml -5. Generate the selected results +4. Process a solution from CPLEX ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use ``otoole``'s ``result`` package to generate the result CSVs:: +Use GLPK_ to build the model and save the problem as ``simplicity.lp``. Use CPLEX_ to solve the model and +save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV results called ``results/`` from the solution file:: - $ otoole results cbc csv simplicity.sol results config.yaml + $ glpsol -m OSeMOSYS.txt -d simplicity.txt --wlp simplicity.lp --check + + $ cplex -c "read simplicity.lp" "optimize" "write simplicity.sol" + + $ otoole results cplex csv simplicity.sol results csv data config.yaml + +.. _model-visualization: Model Visualization ------------------- @@ -125,6 +217,22 @@ Run the following command, where the RES will be saved as the file ``res.png``:: $ otoole viz res excel simplicity.xlsx res.png config.yaml +.. WARNING:: + If you encounter a ``graphviz`` dependency error, install it on your system + following instructions on the Graphviz_ website. If on Windows, + download the install package `from Graphviz <https://www.graphviz.org/download/>`_. + If on Mac or Linux, or running conda, use one of the following commands:: + + brew install graphviz # if on Mac + sudo apt install graphviz # if on Ubuntu + conda install graphviz # if using conda + + To check that ``graphviz`` installed correctly, run ``dot -V`` to check the + version:: + + $ dot -V + dot - graphviz version 2.43.0 (0) + 2. View the RES ~~~~~~~~~~~~~~~ Open the newly created file, ``res.png``, and the following image should be displayed .. image:: _static/simplicity_res.png
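+
+The RES can also be drawn from Python, since ``otoole viz res`` is a thin
+wrapper around ``create_res`` (see ``data2res`` in ``src/otoole/cli.py``).
+A minimal sketch, reusing the same illustrative paths:
+
+>>> from otoole import read
+>>> from otoole.visualise import create_res
+>>> data, defaults = read('config.yaml', 'excel', 'simplicity.xlsx')
+>>> create_res(data, 'res.png')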
+.. _template-setup: + Template Setup -------------- @@ -178,13 +288,12 @@ horizon. For example, if the model horizon is from 2020 to 2050, the .. NOTE:: While this step is not technically required, by filling out the years in - CSV format, ``otoole`` will pivot all the Excel sheets on the years - during the conversion process. This will save significant formatting time! + CSV format ``otoole`` will pivot all the Excel sheets on these years. + This will save significant formatting time! 4. Convert the CSV Template Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To convert the template CSV data into Excel formatted data, run the following -``convert`` command:: +Convert the template CSV data into Excel formatted data:: $ otoole convert csv excel template_data template.xlsx template_config.yaml @@ -200,7 +309,7 @@ Model Validation .. NOTE:: In this example, we will use a very simple model instead of the Simplicity_ demonstration model. This way the user does not need to be - familar with the naming convenations of the model. + familiar with the naming conventions of the model. Objective ~~~~~~~~~ @@ -211,13 +320,22 @@ codes are shown in bold face. .. image:: _static/validataion_model.png -1. Create the Validation File +1. Download the example datafile +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The MathProg datafile describing this model can be found on the +:ref:`examples-validation` page. Download the file and save it as ``data.txt``. + +2. Create the Validation File ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a validation configuration ``yaml`` file:: + # on UNIX $ touch validate.yaml -2. Create ``FUEL`` Codes + # on Windows + > type nul > validate.yaml + +3. Create ``FUEL`` Codes ~~~~~~~~~~~~~~~~~~~~~~~~ Create the fuel codes and descriptions in the validation configuration file:: codes: fuels: 'WND': Wind 'COA': Coal 'ELC': Electricity - indetifiers: - '00': Raw Resource + identifiers: + '00': Primary Resource '01': Intermediate '02': End Use -3. Create ``TECHNOLOGY`` Codes +4. Create ``TECHNOLOGY`` Codes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add the technology codes to the validation configuration file. Note that the powerplant types are the same codes as the fuels, so there is no need to redefine these codes:: techs: 'MIN': Mining 'PWR': Generator 'TRN': Transmission -4. Create ``FUEL`` Schema +5. Create ``FUEL`` Schema ~~~~~~~~~~~~~~~~~~~~~~~~~ Use the defined codes to create a schema for the fuel codes:: schema: FUEL: - name: fuel_name - items: - - name: fuels + items: + - name: type valid: fuels position: (1, 3) - - name: indetifiers - valid: indetifiers + - name: identifier + valid: identifiers position: (4, 5) -5. Create ``TECHNOLOGY`` Schema +6. Create ``TECHNOLOGY`` Schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use the defined codes to create a schema for the technology codes:: schema: TECHNOLOGY: - name: technology_name - items: - - name: techs + items: + - name: tech valid: techs position: (1, 3) - - name: fuels + - name: fuel valid: fuels position: (4, 6) -6. ``otoole`` validate -~~~~~~~~~~~~~~~~~~~~~~ -Use otoole to validate the input data (can be any of a ``datafile``, ``csv``, or ``excel``) -against the validation configuration file:: - - $ otoole validate datafile data.txt config.yaml --validate_config validate.yaml - -.. WARNING:: - Do not confuse the user configuation file (``config.yaml``) and the - validation configuation file (``validate.yaml``). Both configuartion files - are required for validation functionality. +7. Save changes +~~~~~~~~~~~~~~~ -The final validation configuration file in this example will look like:: +The final validation configuration file for this example will look like:: codes: fuels: 'WND': Wind 'COA': Coal 'ELC': Electricity - indetifiers: - '00': Raw Resource + identifiers: + '00': Primary Resource '01': Intermediate '02': End Use techs: 'MIN': Mining 'PWR': Generator 'TRN': Transmission schema: FUEL: - name: fuel_name - items: - - name: fuels + items: + - name: type valid: fuels position: (1, 3) - - name: indetifiers - valid: indetifiers + - name: identifier + valid: identifiers position: (4, 5) TECHNOLOGY: - name: technology_name - items: - - name: techs + items: + - name: tech valid: techs position: (1, 3) - - name: fuels + - name: fuel valid: fuels position: (4, 6) -Solver Setup ------------ +8. ``otoole validate`` +~~~~~~~~~~~~~~~~~~~~~~ +Use otoole to validate the input data (can be any of a ``datafile``, ``csv``, or ``excel``) +against the validation configuration file:: -Objective -~~~~~~~~~ + $ otoole validate datafile data.txt config.yaml --validate_config validate.yaml -Install GLPK_ and CBC_ to use in the otoole examples. 
+ ***Beginning validation*** -1. Install GLPK -~~~~~~~~~~~~~~~~ + Validating FUEL with fuel_name -GLPK_ is a free and open-source linear program solver. + ^(WND|COA|ELC)(00|01|02) + 4 valid names: + WND00, COA00, ELC01, ELC02 -To install it on **Linux**, run the command:: + Validating TECHNOLOGY with technology_name - sudo apt-get update - sudo apt-get install glpk glpk-utils + ^(MIN|PWR|TRN)(WND|COA|ELC) + 5 valid names: + MINWND, MINCOA, PWRWND, PWRCOA, TRNELC -To install it on **Mac**, run the command:: - brew install glpk + ***Checking graph structure*** -.. To install it on **Windows**, follow the install instruction on the GLPK_ -.. website, and/or follow the instructions_ from the OSeMOSYS community +.. WARNING:: + Do not confuse the user configuration file (``config.yaml``) and the + validation configuration file (``validate.yaml``). Both configuration files + are required for validation functionality. -2. Test the GLPK install -~~~~~~~~~~~~~~~~~~~~~~~~ -Once installed, you should be able to call the ``glpsol`` command:: +9. Use ``otoole validate`` to identify an issue +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +In the datafile create a new technology that does not follow the specified schema. +For example, add the value ``ELC03`` to the ``FUEL`` set:: - $ glpsol - GLPSOL: GLPK LP/MIP Solver, v4.65 - No input problem file specified; try glpsol --help + set FUEL := + WND00 + COA00 + ELC01 + ELC02 + ELC03 -3. Install CBC -~~~~~~~~~~~~~~ +Running ``otoole validate`` again will flag this improperly named value. Moreover it +will also flag it as an isolated fuel. This means the fuel is unconnected from the model:: -CBC_ is a free and open-source mixed integer linear programming solver. Full -install instructions can be found on the CBC_ website. However, the abbreviated -instructions are shown below + $ otoole validate datafile data.txt config.yaml --validate_config validate.yaml -To install it on **Linux**, run the command:: + ***Beginning validation*** - sudo apt-get install coinor-cbc coinor-libcbc-dev + Validating FUEL with fuel_name -To install it on **Mac**, run the command:: + ^(WND|COA|ELC)(00|01|02) + 1 invalid names: + ELC03 - brew install coin-or-tools/coinor/cbc + 4 valid names: + WND00, COA00, ELC01, ELC02 -.. To install it on **Windows**, follow the install instruction on the CBC_ -.. website by downloading a binary + Validating TECHNOLOGY with technology_name -4. Test the CBC install -~~~~~~~~~~~~~~~~~~~~~~~ -Once installed, you should be able to directly call CBC:: + ^(MIN|PWR|TRN)(WND|COA|ELC) + 5 valid names: + MINWND, MINCOA, PWRWND, PWRCOA, TRNELC - $ cbc - Welcome to the CBC MILP Solver - Version: 2.10.3 - Build Date: Mar 24 2020 - CoinSolver takes input from arguments ( - switches to stdin) - Enter ? for list of commands or help - Coin: + ***Checking graph structure*** + + 1 'fuel' nodes are isolated: + ELC03 -You can exit the solver by typing ``quit`` .. _Simplicity: https://github.com/OSeMOSYS/simplicity .. _GLPK: https://www.gnu.org/software/glpk/ .. _GLPK Wiki: https://en.wikibooks.org/wiki/GLPK/Using_GLPSOL +.. _GLPK Website: https://winglpk.sourceforge.net/ .. _CBC: https://github.com/coin-or/Cbc .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer -.. _instructions: http://www.osemosys.org/uploads/1/8/5/0/18504136/glpk_installation_guide_for_windows10_-_201702.pdf +.. _Anaconda: https://www.anaconda.com/ +.. _Gurobi: https://www.gurobi.com/ +.. 
_Graphviz: https://www.graphviz.org/download/ diff --git a/docs/functionality.rst b/docs/functionality.rst index 7a027bf2..1502577c 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -17,7 +17,7 @@ Gurobi_, is called results and post-processing. .. image:: _static/workflow.png .. NOTE:: - While ``otoole`` is targetted at OSeMOSYS users, the functionality can eaisly be extended + While ``otoole`` is targeted at OSeMOSYS users, the functionality can easily be extended to work with any workflow that involves the use of a MathProg file! Data Conversion @@ -38,7 +38,7 @@ conversion between the following formats: ``otoole convert`` ~~~~~~~~~~~~~~~~~~ -THe ``otoole convert``` command allows you to convert between various different +The ``otoole convert`` command allows you to convert between various different input formats:: $ otoole convert --help @@ -75,7 +75,7 @@ apparent. CBC_ is an alternative open-source solver which offers better performance than GLPK_ and can handle much larger models. However, CBC_ has no way of knowing how to write out the CSV files you were used to dealing with when using GLPK_. ``otoole`` to the rescue! -``otoole`` currently supports using CBC_, CPLEX_ or Gurobi_ with all three versions of +``otoole`` currently supports using GLPK_, CBC_, CPLEX_ or Gurobi_ with all versions of GNU MathProg OSeMOSYS - the long, short and fast versions. The long version includes all results as variables within the formulation, so the @@ -89,38 +89,40 @@ so as to speed up the model matrix generation and solution times. ``otoole results`` ~~~~~~~~~~~~~~~~~~ -The ``results`` command creates a folder of CSV result files from a CBC_, CLP_, -Gurobi_ or CPLEX_ solution file:: +The ``results`` command creates a folder of CSV result files from a GLPK_, CBC_, CLP_, +Gurobi_ or CPLEX_ solution file together with the input data:: $ otoole results --help - usage: otoole results [-h] [--input_datafile INPUT_DATAFILE] [--input_datapackage INPUT_DATAPACKAGE] [--write_defaults] {cbc,cplex,gurobi} {csv} from_path to_path config + usage: otoole results [-h] [--glpk_model GLPK_MODEL] [--write_defaults] + {cbc,cplex,glpk,gurobi} {csv} from_path to_path {csv,datafile,excel} input_path config positional arguments: - {cbc,cplex,gurobi} Result data format to convert from - {csv} Result data format to convert to - from_path Path to file or folder to convert from - to_path Path to file or folder to convert to - config Path to config YAML file + {cbc,cplex,glpk,gurobi} Result data format to convert from + {csv} Result data format to convert to + from_path Path to file or folder to convert from + to_path Path to file or folder to convert to + {csv,datafile,excel} Input data format + input_path Path to input_data + config Path to config YAML file optional arguments: - -h, --help show this help message and exit - --input_datafile INPUT_DATAFILE - Input GNUMathProg datafile required for OSeMOSYS short or fast results - --input_datapackage INPUT_DATAPACKAGE - Deprecated - --write_defaults Writes default values + -h, --help show this help message and exit + --glpk_model GLPK_MODEL GLPK model file required for processing GLPK results + --write_defaults Writes default values .. versionadded:: v1.0.0 The ``config`` positional argument is now required +.. versionadded:: v1.1.0 + The ``input_data_format`` and ``input_path`` positional arguments are now required, + supporting any format of input data for results processing. + .. deprecated:: v1.0.0 The ``--input_datapackage`` flag is no longer supported -.. WARNING:: - If using CPLEX_, you will need to transform and sort the solution file before - processing it with ``otoole``. Instructions on how to run the transformation - script are on the `OSeMOSYS Repository`_. After transformation, sort the file - with the command ``sort <transformed file> > <sorted file>``. +.. deprecated:: v1.1.0 + The ``--input_datapackage`` and ``--input_datafile`` flags + have been replaced by new positional arguments ``input_data_format`` and ``input_path``
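+
+For example, processing a CBC solution file with input data held as a folder
+of CSVs looks like this (paths follow the examples documentation)::
+
+    $ otoole results cbc csv simplicity.sol results csv data config.yaml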
Setup ----- @@ -179,9 +181,14 @@ visualising the reference energy system through the ``vis res`` command is suppo .. NOTE:: The ``resfile`` command should include a file ending used for images, - including ``bmp``, ``jpg``, ``pdf``, ``png`` etc. The graphviz_ library + including ``bmp``, ``jpg``, ``pdf``, ``png`` etc. The Graphviz_ library used to layout the reference energy system will interpret the file ending. +.. WARNING:: + If you encounter a Graphviz_ dependency error, please follow the Graphviz_ + installation instructions described in the + :ref:`visualization examples <model-visualization>`. + Validation ---------- The validation module in ``otoole`` checks technology and fuel names against a @@ -215,4 +222,4 @@ the rest of the model:: .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer .. _Gurobi: https://www.gurobi.com/ .. _`OSeMOSYS Repository`: https://github.com/OSeMOSYS/OSeMOSYS_GNU_MathProg/tree/master/scripts -.. _graphviz: https://graphviz.org/ +.. _Graphviz: https://graphviz.org/ diff --git a/docs/index.rst b/docs/index.rst index 568e5ed6..2cc3d0cd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -5,8 +5,8 @@ Welcome to the documentation of ``otoole``! =========================================== **otoole**, or **O**\ SeMOSYS **tool**\ s for **e**\ nergy work, is a Python package -which provides a command-line interface for users of OSeMOSYS. The aim of the package is -to provide commonly used pre- and post-processing steps when working with OSeMOSYS models. +which provides a command-line interface and Python API for users of OSeMOSYS. The aim of the +package is to provide commonly used pre- and post-processing steps when working with OSeMOSYS models. Specifically, ``otoole`` allows the user to convert between data formats, process solutions, and visualise the reference energy system. @@ -28,6 +28,7 @@ Contents Core Functionality Data Formats Examples + Python API Contributing License Authors diff --git a/docs/requirements.txt b/docs/requirements.txt index 4afe7af2..335ad615 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,13 @@ +amply>=0.1.4 docutils<0.18 +flatten_dict Jinja2<3.1 +networkx +openpyxl +pandas>=2.1.4 +pydantic>=2 +pydot +pyyaml # Requirements file for ReadTheDocs, check .readthedocs.yml. # To build the module reference correctly, make sure every external package # under `install_requires` in `setup.cfg` is also listed here! sphinx>=3.2.1 sphinx-book-theme urllib3<2 # sphinx_rtd_theme +# otoole dependencies from setup.cfg +xlrd diff --git a/setup.cfg b/setup.cfg index b410ffb8..8276af0e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -35,23 +35,24 @@ package_dir = =src # Require a min/specific Python version (comma-separated conditions) -python_requires = >=3.8 +python_requires = >=3.9 # Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0. # Version specifiers like >=2.2,<3.0 avoid problems due to API changes in # new major versions. This works if the required packages follow Semantic Versioning. 
# For more information, check out https://semver.org/. + +# If this list changes, update docs/requirements.txt as well. install_requires = xlrd pyyaml pydot - importlib_resources; python_version<'3.7' - pandas>=1.1 - amply>=0.1.4 + pandas>=2.1.4 + Amply>=0.1.6 networkx flatten_dict openpyxl - pydantic + pydantic>=2 [options.packages.find] where = src exclude = @@ -86,7 +87,7 @@ console_scripts = # CAUTION: --cov flags may prohibit setting breakpoints while debugging. # Comment those flags to avoid this pytest issue. addopts = - --cov otoole --cov-report html + --cov otoole --cov-report lcov --verbose -s # --log-cli-level=10 diff --git a/src/otoole/__init__.py b/src/otoole/__init__.py index 88e521e6..0b4a959b 100644 --- a/src/otoole/__init__.py +++ b/src/otoole/__init__.py @@ -1,10 +1,7 @@ # -*- coding: utf-8 -*- import sys -from otoole.input import Context -from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel, ReadMemory -from otoole.results.results import ReadCbc, ReadCplex, ReadGurobi -from otoole.write_strategies import WriteCsv, WriteDatafile, WriteExcel +from otoole.convert import convert, convert_results, read, read_results, write if sys.version_info[:2] >= (3, 8): # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8` @@ -21,17 +18,10 @@ finally: del version, PackageNotFoundError +convert = convert +convert_results = convert_results +read = read +write = write +read_results = read_results -__all__ = [ - "Context", - "ReadCbc", - "ReadCsv", - "ReadCplex", - "ReadDatafile", - "ReadExcel", - "ReadGurobi", - "ReadMemory", - "WriteCsv", - "WriteDatafile", - "WriteExcel", -] +__all__ = ["convert", "convert_results", "read", "write", "read_results"]
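With the package root re-exporting the public API, downstream code only needs the top-level import. A minimal sketch, mirroring the example in the module docstring of the new ``convert.py`` (file names are illustrative):

>>> from otoole import convert
>>> convert('config.yaml', 'excel', 'datafile', 'input.xlsx', 'output.dat')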
diff --git a/src/otoole/cli.py b/src/otoole/cli.py index 8d0ce16c..c0305f74 100644 --- a/src/otoole/cli.py +++ b/src/otoole/cli.py @@ -44,29 +44,13 @@ import shutil import sys -from otoole import ( - ReadCbc, - ReadCplex, - ReadCsv, - ReadDatafile, - ReadExcel, - ReadGurobi, - WriteCsv, - WriteDatafile, - WriteExcel, - __version__, -) +from otoole import __version__, convert, convert_results, read from otoole.exceptions import OtooleSetupError -from otoole.input import Context from otoole.preprocess.setup import get_config_setup_data, get_csv_setup_data -from otoole.utils import ( - _read_file, - read_deprecated_datapackage, - read_packaged_file, - validate_config, -) +from otoole.utils import read_packaged_file from otoole.validate import main as validate from otoole.visualise import create_res +from otoole.write_strategies import WriteCsv logger = logging.getLogger(__name__) @@ -76,26 +60,7 @@ def validate_model(args): data_file = args.data_file user_config = args.user_config - _, ending = os.path.splitext(user_config) - with open(user_config, "r") as user_config_file: - config = _read_file(user_config_file, ending) - validate_config(config) - - if data_format == "datafile": - read_strategy = ReadDatafile(user_config=config) - elif data_format == "datapackage": - logger.warning( - "Reading from datapackage is deprecated, trying to read from CSVs" - ) - data_file = read_deprecated_datapackage(data_file) - logger.info("Successfully read folder of CSVs") - read_strategy = ReadCsv(user_config=config) - elif data_format == "csv": - read_strategy = ReadCsv(user_config=config) - elif data_format == "excel": - read_strategy = ReadExcel(user_config=config) - - input_data, _ = read_strategy.read(data_file) + input_data, _ = read(user_config, data_format, data_file) if args.validate_config: validation_config = read_packaged_file(args.validate_config) @@ -104,70 +69,22 @@ validate(input_data) -def cplex2cbc(args): - ReadCplex()._convert_cplex_file( - args.cplex_file, - args.output_file, - args.start_year, - args.end_year, - args.output_format, - ) - - -def result_matrix(args): - """Post-process results from CBC solution file into CSV format""" - msg = "Conversion from {} to {} is not yet implemented".format( - args.from_format, args.to_format +def _result_matrix(args): + """Convert results""" + convert_results( + args.config, + args.from_format, + args.to_format, + args.from_path, + args.to_path, + args.input_format, + args.input_path, + write_defaults=args.write_defaults, + glpk_model=args.glpk_model, ) - read_strategy = None - write_strategy = None - - config = None - if args.config: - _, ending = os.path.splitext(args.config) - with open(args.config, "r") as config_file: - config = _read_file(config_file, ending) - logger.info("Reading config from {}".format(args.config)) - logger.info("Validating config from {}".format(args.config)) - validate_config(config) - - # set read strategy - - if args.from_format == "cbc": - read_strategy = ReadCbc(user_config=config) - elif args.from_format == "cplex": - read_strategy = ReadCplex(user_config=config) - elif args.from_format == "gurobi": - read_strategy = ReadGurobi(user_config=config) - - # set write strategy - write_defaults = True if args.write_defaults else False - - if args.to_format == "csv": - write_strategy = WriteCsv(user_config=config, write_defaults=write_defaults) - - if args.input_datapackage: - logger.warning( - "Reading from datapackage is deprecated, trying to read from CSVs" - ) - input_csvs = read_deprecated_datapackage(args.input_datapackage) - logger.info("Successfully read folder of CSVs") - input_data, _ = ReadCsv(user_config=config).read(input_csvs) - elif args.input_datafile: - input_data, _ = ReadDatafile(user_config=config).read(args.input_datafile) - else: - input_data = {} - - if read_strategy and write_strategy: - context = Context(read_strategy, write_strategy) - context.convert(args.from_path, args.to_path, input_data=input_data) - else: - raise NotImplementedError(msg) - - -def conversion_matrix(args): +def _conversion_matrix(args): """Convert from one format to another Implemented conversion functions:: @@ -179,99 +96,28 @@ datafile nn -- -- """ - - msg = "Conversion from {} to {} is not yet implemented".format( - args.from_format, args.to_format + convert( + args.config, + args.from_format, + args.to_format, + args.from_path, + args.to_path, + write_defaults=args.write_defaults, + keep_whitespace=args.keep_whitespace, ) - read_strategy = None - write_strategy = None - - from_path = args.from_path - to_path = args.to_path - - config = None - if args.config: - _, ending = os.path.splitext(args.config) - with open(args.config, "r") as config_file: - config = _read_file(config_file, ending) - logger.info("Reading config from {}".format(args.config)) - logger.info("Validating config from {}".format(args.config)) - validate_config(config) - - # set read strategy - - keep_whitespace = True if args.keep_whitespace else False - - if args.from_format == "datafile": - read_strategy = ReadDatafile(user_config=config) - elif args.from_format == "datapackage": - logger.warning( - "Reading from datapackage is deprecated, trying to read from CSVs" - ) - from_path = read_deprecated_datapackage(from_path) - logger.info("Successfully read folder of CSVs") - read_strategy = 
ReadCsv(user_config=config, keep_whitespace=keep_whitespace) - elif args.from_format == "csv": - read_strategy = ReadCsv(user_config=config, keep_whitespace=keep_whitespace) - elif args.from_format == "excel": - read_strategy = ReadExcel(user_config=config, keep_whitespace=keep_whitespace) - - input_data, _ = read_strategy.read(args.from_path) - - # set write strategy - - write_defaults = True if args.write_defaults else False - - if args.to_format == "datapackage": - logger.warning("Writing to datapackage is deprecated, writing to CSVs") - to_path = os.path.join(os.path.dirname(to_path), "data") - write_strategy = WriteCsv(user_config=config, write_defaults=write_defaults) - elif args.to_format == "excel": - write_strategy = WriteExcel( - user_config=config, write_defaults=write_defaults, input_data=input_data - ) - elif args.to_format == "datafile": - write_strategy = WriteDatafile( - user_config=config, write_defaults=write_defaults - ) - elif args.to_format == "csv": - write_strategy = WriteCsv(user_config=config, write_defaults=write_defaults) - - if read_strategy and write_strategy: - context = Context(read_strategy, write_strategy) - context.convert(from_path, to_path) - else: - raise NotImplementedError(msg) - def data2res(args): """Get input data and call res creation.""" data_format = args.data_format data_path = args.data_path + config = args.config + resfile = args.resfile - _, ending = os.path.splitext(args.config) - with open(args.config, "r") as config_file: - config = _read_file(config_file, ending) - validate_config(config) - - if data_format == "datafile": - read_strategy = ReadDatafile(user_config=config) - elif data_format == "datapackage": - logger.warning( - "Reading from datapackage is deprecated, trying to read from CSVs" - ) - data_path = read_deprecated_datapackage(data_path) - read_strategy = ReadCsv(user_config=config) - elif data_format == "csv": - read_strategy = ReadCsv(user_config=config) - elif data_format == "excel": - read_strategy = ReadExcel(user_config=config) + input_data, _ = read(config, data_format, data_path) - input_data, _ = read_strategy.read(data_path) - - create_res(input_data, args.resfile) + create_res(input_data, resfile) def setup(args): @@ -321,7 +167,7 @@ def get_parser(): result_parser.add_argument( "from_format", help="Result data format to convert from", - choices=sorted(["cbc", "cplex", "gurobi"]), + choices=sorted(["cbc", "cplex", "gurobi", "glpk"]), ) result_parser.add_argument( "to_format", @@ -332,24 +178,26 @@ def get_parser(): "from_path", help="Path to file or folder to convert from" ) result_parser.add_argument("to_path", help="Path to file or folder to convert to") + result_parser.add_argument( - "--input_datafile", - help="Input GNUMathProg datafile required for OSeMOSYS short or fast results", - default=None, + "input_format", + help="Input data format", + choices=sorted(["csv", "datafile", "excel"]), ) + result_parser.add_argument("input_path", help="Path to input_data") + result_parser.add_argument("config", help="Path to config YAML file") result_parser.add_argument( - "--input_datapackage", - help="Deprecated", + "--glpk_model", + help="GLPK model file required for processing GLPK results", default=None, ) - result_parser.add_argument("config", help="Path to config YAML file") result_parser.add_argument( "--write_defaults", help="Writes default values", default=False, action="store_true", ) - result_parser.set_defaults(func=result_matrix) + result_parser.set_defaults(func=_result_matrix) # Parser for conversion 
convert_parser = subparsers.add_parser( @@ -382,7 +230,7 @@ def get_parser(): default=False, action="store_true", ) - convert_parser.set_defaults(func=conversion_matrix) + convert_parser.set_defaults(func=_conversion_matrix) # Parser for validation valid_parser = subparsers.add_parser("validate", help="Validate an OSeMOSYS model") diff --git a/src/otoole/convert.py b/src/otoole/convert.py new file mode 100644 index 00000000..4267ac79 --- /dev/null +++ b/src/otoole/convert.py @@ -0,0 +1,417 @@ +"""This module implements the public API of the otoole package + +Use the otoole ``convert`` function to convert between different file formats. +Import the convert function from the otoole package:: + +>>> from otoole import convert +>>> convert('config.yaml', 'excel', 'datafile', 'input.xlsx', 'output.dat') + +""" + +import logging +import os +from typing import Dict, Optional, Tuple, Union + +import pandas as pd + +from otoole.exceptions import OtooleError +from otoole.input import Context, ReadStrategy, WriteStrategy +from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel +from otoole.results.results import ReadCbc, ReadCplex, ReadGlpk, ReadGurobi, ReadResults +from otoole.utils import _read_file, read_deprecated_datapackage, validate_config +from otoole.write_strategies import WriteCsv, WriteDatafile, WriteExcel + +logger = logging.getLogger(__name__) + + +def read_results( + config: str, + from_format: str, + from_path: str, + input_format: str, + input_path: str, + glpk_model: Optional[str] = None, +) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float]]: + """Read OSeMOSYS results from CBC, GLPK, Gurobi, or CPLEX results files + + Arguments + --------- + config : str + Path to config file + from_format : str + Available options are 'cbc', 'gurobi', 'cplex', and 'glpk' + from_path : str + Path to the solution file + input_format: str + Format of input data. Available options are 'datafile', 'csv' and 'excel' + input_path: str + Path to input data + glpk_model : str + Path to ``*.glp`` model file + + Returns + ------- + Tuple[dict[str, pd.DataFrame], dict[str, float]] + Dictionary of parameter and set data and dictionary of default values + """ + user_config = _get_user_config(config) + input_strategy = _get_read_strategy(user_config, input_format) + result_strategy = _get_read_result_strategy(user_config, from_format, glpk_model) + + if input_strategy: + input_data, _ = input_strategy.read(input_path) + else: + input_data = {} + + if result_strategy: + results, default_values = result_strategy.read(from_path, input_data=input_data) + return results, default_values + else: + msg = "Conversion from {} is not yet implemented".format(from_format) + raise NotImplementedError(msg) + + +def convert_results( + config: str, + from_format: str, + to_format: str, + from_path: str, + to_path: str, + input_format: str, + input_path: str, + write_defaults: bool = False, + glpk_model: Optional[str] = None, +) -> bool: + """Post-process results from a CBC, CPLEX, Gurobi, or GLPK solution file into CSV format + + Arguments + --------- + config : str + Path to config file + from_format : str + Available options are 'cbc', 'cplex', 'gurobi' and 'glpk' + to_format : str + Available options are 'csv' + from_path : str + Path to the cbc, cplex, gurobi or glpk solution file + to_path : str + Path to destination folder + input_format: str + Format of input data. Available options are 'datafile', 'csv' and 'excel' + input_path: str + Path to input data + write_defaults : bool + Write default values to CSVs + glpk_model : str + Path to ``*.glp`` model file + + Returns + ------- + bool + True if conversion was successful, False otherwise + + """ + msg = "Conversion from {} to {} is not yet implemented".format( + from_format, to_format + ) + + user_config = _get_user_config(config) + + # set read strategy + + read_strategy = _get_read_result_strategy(user_config, from_format, glpk_model) + + # set write strategy + + write_defaults = True if write_defaults else False + + if to_format == "csv": + write_strategy = WriteCsv( + user_config=user_config, write_defaults=write_defaults + ) + else: + raise NotImplementedError(msg) + + # read in input file + input_data, _ = read(config, input_format, input_path) + + if read_strategy and write_strategy: + context = Context(read_strategy, write_strategy) + context.convert(from_path, to_path, input_data=input_data) + else: + raise NotImplementedError(msg) + + return True + + +def _get_read_result_strategy( + user_config, from_format, glpk_model=None +) -> Union[ReadResults, None]: + """Get ``ReadResults`` for gurobi, cbc, cplex, and glpk formats + + Arguments + --------- + user_config : dict + User configuration describing parameters and sets + from_format : str + Available options are 'cbc', 'gurobi', 'cplex', and 'glpk' + glpk_model : str + Path to ``*.glp`` model file + + Returns + ------- + ReadResults or None + A ReadResults object. Returns None if from_format is not recognised + + """ + + if from_format == "cbc": + read_strategy: ReadResults = ReadCbc(user_config) + elif from_format == "gurobi": + read_strategy = ReadGurobi(user_config=user_config) + elif from_format == "cplex": + read_strategy = ReadCplex(user_config=user_config) + elif from_format == "glpk": + if not glpk_model: + raise OtooleError(resource="Read GLPK", message="Provide glpk model file") + read_strategy = ReadGlpk(user_config=user_config, glpk_model=glpk_model) + else: + return None + + return read_strategy + + +def _get_user_config(config) -> dict: + """Read in the configuration file + + Arguments + --------- + config : str + Path to config file + + Returns + ------- + dict + A dictionary containing the user configuration + """ + if config: + _, ending = os.path.splitext(config) + with open(config, "r") as config_file: + user_config = _read_file(config_file, ending) + logger.info("Reading config from {}".format(config)) + logger.info("Validating config from {}".format(config)) + validate_config(user_config) + return user_config + + +def _get_read_strategy(user_config, from_format, keep_whitespace=False) -> ReadStrategy: + """Get ``ReadStrategy`` for csv/datafile/excel format + + Arguments + --------- + user_config : dict + User configuration describing parameters and sets + from_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + keep_whitespace: bool, default: False + Keep whitespace in CSVs + + Returns + ------- + ReadStrategy + A ReadStrategy object. Raises ``NotImplementedError`` if from_format is not recognised + + """ + keep_whitespace = True if keep_whitespace else False + + if from_format == "datafile": + read_strategy: ReadStrategy = ReadDatafile(user_config=user_config) + elif from_format == "datapackage": + logger.warning( + "Reading from datapackage is deprecated, trying to read from CSVs" + ) + logger.info("Successfully read folder of CSVs") + read_strategy = ReadCsv( + user_config=user_config, keep_whitespace=keep_whitespace + ) # typing: ReadStrategy + elif from_format == "csv": + read_strategy = ReadCsv( + user_config=user_config, keep_whitespace=keep_whitespace + ) # typing: ReadStrategy + elif from_format == "excel": + read_strategy = ReadExcel( + user_config=user_config, keep_whitespace=keep_whitespace + ) # typing: ReadStrategy + else: + msg = f"Conversion from {from_format} is not supported" + raise NotImplementedError(msg) + + return read_strategy + + +def _get_write_strategy(user_config, to_format, write_defaults=False) -> WriteStrategy: + """Get ``WriteStrategy`` for csv/datafile/excel format + + Arguments + --------- + user_config : dict + User configuration describing parameters and sets + to_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + write_defaults: bool, default: False + Write default values to output format + + Returns + ------- + WriteStrategy + A WriteStrategy object. Raises ``NotImplementedError`` if to_format is not recognised + + """ + # set write strategy + write_defaults = True if write_defaults else False + + if to_format == "datapackage": + write_strategy: WriteStrategy = WriteCsv( + user_config=user_config, write_defaults=write_defaults + ) + elif to_format == "excel": + write_strategy = WriteExcel( + user_config=user_config, write_defaults=write_defaults + ) + elif to_format == "datafile": + write_strategy = WriteDatafile( + user_config=user_config, write_defaults=write_defaults + ) + elif to_format == "csv": + write_strategy = WriteCsv( + user_config=user_config, write_defaults=write_defaults + ) + else: + msg = f"Conversion to {to_format} is not supported" + raise NotImplementedError(msg) + + return write_strategy + + +def convert( + config, + from_format, + to_format, + from_path, + to_path, + write_defaults=False, + keep_whitespace=False, +) -> bool: + """Convert OSeMOSYS data from/to datafile, csv and Excel formats + + Arguments + --------- + config : str + Path to config file + from_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + to_format : str + Available options are 'datafile', 'datapackage', 'csv' and 'excel' + from_path : str + Path to source file (if datafile or excel) or folder (csv or datapackage) + to_path : str + Path to destination file (if datafile or excel) or folder (csv or datapackage) + write_defaults: bool, default: False + Write default values to CSVs + keep_whitespace: bool, default: False + Keep whitespace in CSVs + + Returns + ------- + bool + True if conversion was successful, False otherwise + """ + + user_config = _get_user_config(config) + read_strategy = _get_read_strategy( + user_config, from_format, keep_whitespace=keep_whitespace + ) + + write_strategy = _get_write_strategy( + user_config, to_format, write_defaults=write_defaults + ) + + if from_format == "datapackage": + logger.warning( + "Reading from and writing to datapackage is deprecated, writing to CSVs" + ) + from_path = read_deprecated_datapackage(from_path) + to_path = os.path.join(os.path.dirname(to_path), "data") + + context = Context(read_strategy, write_strategy) + context.convert(from_path, to_path) + + return True + + +def read( + config:
str, from_format: str, from_path: str, keep_whitespace: bool = False +) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float]]: + """Read OSeMOSYS data from datafile, csv or Excel formats + + Arguments + --------- + config : str + Path to config file + from_format : str + Available options are 'datafile', 'csv', 'excel' and 'datapackage' [deprecated] + from_path : str + Path to source file (if datafile or excel) or folder (csv) + keep_whitespace: bool, default: False + Keep whitespace in source files + + Returns + ------- + Tuple[dict[str, pd.DataFrame], dict[str, float]] + Dictionary of parameter and set data and dictionary of default values + """ + user_config = _get_user_config(config) + read_strategy = _get_read_strategy( + user_config, from_format, keep_whitespace=keep_whitespace + ) + + if from_format == "datapackage": + from_path = read_deprecated_datapackage(from_path) + + return read_strategy.read(from_path) + + +def write( + config: str, + to_format: str, + to_path: str, + inputs, + default_values: Optional[Dict[str, float]] = None, +) -> bool: + """Write OSeMOSYS data to datafile, csv or Excel formats + + Arguments + --------- + config : str + Path to config file + to_format : str + Available options are 'datafile', 'csv', 'excel' and 'datapackage' [deprecated] + to_path : str + Path to destination file (if datafile or excel) or folder (csv) + inputs : dict[str, pd.DataFrame] + Dictionary of pandas data frames to write + default_values: dict[str, float], default: None + Dictionary of default values to write to datafile + + """ + user_config = _get_user_config(config) + if default_values is None: + write_strategy = _get_write_strategy( + user_config, to_format, write_defaults=False + ) + write_strategy.write(inputs, to_path, {}) + else: + write_strategy = _get_write_strategy( + user_config, to_format, write_defaults=True + ) + write_strategy.write(inputs, to_path, default_values) + + return True diff --git a/src/otoole/input.py b/src/otoole/input.py index 4bb50921..210647a2 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -116,7 +116,6 @@ def convert(self, input_filepath: str, output_filepath: str, **kwargs: Dict): class Strategy(ABC): """ - Arguments --------- user_config : dict, default=None @@ -139,10 +138,20 @@ def _add_dtypes(self, config: Dict): dtypes = {} for column in details["indices"] + ["VALUE"]: if column == "VALUE": - dtypes["VALUE"] = details["dtype"] + dtypes["VALUE"] = ( + details["dtype"] if details["dtype"] != "int" else "int64" + ) else: - dtypes[column] = config[column]["dtype"] + dtypes[column] = ( + config[column]["dtype"] + if config[column]["dtype"] != "int" + else "int64" + ) details["index_dtypes"] = dtypes + elif details["type"] == "set": + details["dtype"] = ( + details["dtype"] if details["dtype"] != "int" else "int64" + ) return config @property @@ -174,9 +183,11 @@ class WriteStrategy(Strategy): Arguments --------- + user_config: dict, default=None filepath: str, default=None default_values: dict, default=None - user_config: dict, default=None + write_defaults: bool, default=False + input_data: dict, default=None """ @@ -245,13 +256,14 @@ def write( handle = self._header() logger.debug(default_values) + self.input_data = inputs if self.write_defaults: try: - inputs = self._expand_defaults(inputs, default_values, **kwargs) + self.input_data = self._expand_defaults(inputs, default_values) except KeyError as ex: logger.debug(ex) - for name, df in sorted(inputs.items()): + for name, df in sorted(self.input_data.items()): logger.debug("%s
has %s columns: %s", name, len(df.index.names), df.columns) try: @@ -276,37 +288,23 @@ def write( handle.close() def _expand_defaults( - self, - data_to_expand: Dict[str, pd.DataFrame], - default_values: Dict[str, float], - **kwargs, + self, data_to_expand: Dict[str, pd.DataFrame], default_values: Dict[str, float] ) -> Dict[str, pd.DataFrame]: """Populates default value entry rows in dataframes Parameters ---------- - input_data : Dict[str, pd.DataFrame], + data_to_expand : Dict[str, pd.DataFrame], default_values : Dict[str, float] Returns ------- - results : Dict[str, pd.DataFrame] - Updated available reults dictionary + Dict[str, pd.DataFrame] + Input data with expanded default values replacing missing entries - Raises - ------ - KeyError - If set defenitons are not in input_data and input_data is not supplied """ sets = [x for x in self.user_config if self.user_config[x]["type"] == "set"] - - # if expanding results, input data is needed for set defenitions - if "input_data" in kwargs: - model_data = kwargs["input_data"] - else: - model_data = data_to_expand - output = {} for name, data in data_to_expand.items(): logger.info(f"Writing defaults for {name}") @@ -318,7 +316,7 @@ def _expand_defaults( # TODO # Issue with how otoole handles trade route right now. - # The double defenition of REGION throws an error. + # The double definition of REGION throws an error. if name == "TradeRoute": output[name] = data continue @@ -326,11 +324,7 @@ def _expand_defaults( # save set information for each parameter index_data = {} for index in data.index.names: - try: - index_data[index] = model_data[index]["VALUE"].to_list() - except KeyError as ex: - logger.info("Can not write default values. Supply input data") - raise KeyError(ex) + index_data[index] = self.input_data[index]["VALUE"].to_list() # set index if len(index_data) > 1: @@ -347,8 +341,11 @@ def _expand_defaults( df_default["VALUE"] = default_values[name] # combine result and default value dataframe - df = pd.concat([data, df_default]) - df = df[~df.index.duplicated(keep="first")] + if not data.empty: + df = pd.concat([data, df_default]) + df = df[~df.index.duplicated(keep="first")] + else: + df = df_default df = df.sort_index() output[name] = df @@ -389,7 +386,10 @@ def _check_index( elif details["type"] == "set": self._check_set_index_names(name=name, df=df) - df = self._check_index_dtypes(name=name, config=details, df=df) + try: + df = self._check_index_dtypes(name=name, config=details, df=df) + except ValueError as ex: + raise ValueError(f"{name}: {ex}") input_data[name] = df @@ -454,7 +454,7 @@ def _check_set_index_names(name: str, df: pd.DataFrame) -> None: OtooleIndexError If actual indices do not match expected indices """ - if not df.columns == ["VALUE"]: + if not list(df.columns) == ["VALUE"]: raise OtooleIndexError( resource=name, config_indices=["VALUE"], @@ -503,8 +503,8 @@ def _check_index_dtypes( except ValueError: # ValueError: invalid literal for int() with base 10: df = df.dropna(axis=0, how="all").reset_index() for index, dtype in config["index_dtypes"].items(): - if dtype == "int": - df[index] = df[index].astype(float).astype(int) + if dtype == "int64": + df[index] = df[index].astype(float).astype("int64") else: df[index] = df[index].astype(dtype) df = df.set_index(config["indices"]) @@ -592,4 +592,16 @@ def _compare_read_to_expected( def read( self, filepath: Union[str, TextIO], **kwargs ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, Any]]: + """Reads in data from file + + Arguments + --------- + filepath: Union[str, 
TextIO] + + Returns + ------- + Tuple[Dict[str, pd.DataFrame], Dict[str, Any]] + tuple of input_data as a dictionary of pandas DataFrames and + dictionary of default values + """ raise NotImplementedError() diff --git a/src/otoole/preprocess/validate_config.py b/src/otoole/preprocess/validate_config.py index b6d8d17b..7903b0ab 100644 --- a/src/otoole/preprocess/validate_config.py +++ b/src/otoole/preprocess/validate_config.py @@ -3,7 +3,10 @@ import logging from typing import List, Optional, Union -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +# from pydantic import FieldValidationInfo + logger = logging.getLogger(__name__) @@ -11,39 +14,41 @@ class UserDefinedValue(BaseModel): """Represents any user defined value""" + model_config = ConfigDict(extra="forbid") + name: str type: str dtype: str - defined_sets: Optional[List[str]] - indices: Optional[List[str]] - default: Optional[Union[int, float]] - calculated: Optional[bool] - short_name: Optional[str] + defined_sets: Optional[List[str]] = None + indices: Optional[List[str]] = None + default: Optional[Union[int, float]] = None + calculated: Optional[bool] = None + short_name: Optional[str] = None - @validator("type") + @field_validator("type") @classmethod - def check_param_type(cls, value, values): + def check_param_type(cls, value, info): if value not in ["param", "result", "set"]: raise ValueError( - f"{values['name']} -> Type must be 'param', 'result', or 'set'" + f"{info.field_name} -> Type must be 'param', 'result', or 'set'" ) return value - @validator("name", "short_name") + @field_validator("name", "short_name") @classmethod # for linting purposes def check_name_for_spaces(cls, value): if " " in value: raise ValueError(f"{value} -> Name can not have spaces") return value - @validator("name", "short_name") + @field_validator("name", "short_name") @classmethod def check_name_for_numbers(cls, value): if any(char.isdigit() for char in value): raise ValueError(f"{value} -> Name can not have digits") return value - @validator("name", "short_name") + @field_validator("name", "short_name") @classmethod def check_name_for_special_chars(cls, value): # removed underscore from the recommeded special char list @@ -54,7 +59,7 @@ def check_name_for_special_chars(cls, value): ) return value - @root_validator(pre=True) + @model_validator(mode="before") @classmethod def check_name_length(cls, values): if len(values["name"]) > 31: @@ -69,32 +74,29 @@ def check_name_length(cls, values): ) return values - class Config: - extra = "forbid" - class UserDefinedSet(UserDefinedValue): """Represents a set""" - @validator("dtype") + @field_validator("dtype") @classmethod - def check_dtype(cls, value, values): + def check_dtype(cls, value, info): if value not in ["str", "int"]: - raise ValueError(f"{values['name']} -> Value must be a 'str' or 'int'") + raise ValueError(f"{info.field_name} -> Value must be a 'str' or 'int'") return value class UserDefinedParameter(UserDefinedValue): """Represents a parameter""" - @validator("dtype") + @field_validator("dtype") @classmethod - def check_dtype(cls, value, values): + def check_dtype(cls, value, info): if value not in ["float", "int"]: - raise ValueError(f"{values['name']} -> Value must be an 'int' or 'float'") + raise ValueError(f"{info.field_name} -> Value must be an 'int' or 'float'") return value - @root_validator(pre=True) + @model_validator(mode="before") @classmethod def check_required_inputs(cls, values): required = 
["default", "defined_sets", "indices"] @@ -104,38 +106,36 @@ def check_required_inputs(cls, values): ) return values - @root_validator(pre=True) - @classmethod - def check_index_in_set(cls, values): - if not all(i in values["defined_sets"] for i in values["indices"]): - raise ValueError(f"{values['name']} -> Index not in user supplied sets") - return values + @model_validator(mode="after") + def check_index_in_set(self): + if not all(i in self.defined_sets for i in self.indices): + raise ValueError(f"{self.name} -> Index not in user supplied sets") + return self - @root_validator(pre=True) - @classmethod - def check_dtype_default(cls, values): - dtype_input = values["dtype"] - dtype_default = type(values["default"]).__name__ + @model_validator(mode="after") + def check_dtype_default(self): + dtype_input = self.dtype + dtype_default = type(self.default).__name__ if dtype_input != dtype_default: # allow ints to be cast as floats if not ((dtype_default == "int") and (dtype_input == "float")): raise ValueError( - f"{values['name']} -> User dtype is {dtype_input} while default value dtype is {dtype_default}" + f"{self.name} -> User dtype is {dtype_input} while default value dtype is {dtype_default}" ) - return values + return self class UserDefinedResult(UserDefinedValue): """Represents a result""" - @validator("dtype") + @field_validator("dtype") @classmethod - def check_dtype(cls, value, values): + def check_dtype(cls, value, info): if value not in ["float", "int"]: - raise ValueError(f"{values['name']} -> Value must be an 'int' or 'float'") + raise ValueError(f"{info.field_name} -> Value must be an 'int' or 'float'") return value - @root_validator(pre=True) + @model_validator(mode="before") @classmethod def check_required_inputs(cls, values): required = ["default", "defined_sets", "indices"] @@ -145,7 +145,7 @@ def check_required_inputs(cls, values): ) return values - @root_validator(pre=True) + @model_validator(mode="before") @classmethod def check_deprecated_values(cls, values): deprecated = ["calculated", "Calculated"] @@ -156,22 +156,20 @@ def check_deprecated_values(cls, values): ) return values - @root_validator(pre=True) - @classmethod - def check_index_in_set(cls, values): - if not all(i in values["defined_sets"] for i in values["indices"]): - raise ValueError(f"{values['name']} -> Index not in user supplied sets") - return values + @model_validator(mode="after") + def check_index_in_set(self): + if not all(i in self.defined_sets for i in self.indices): + raise ValueError(f"{self.name} -> Index not in user supplied sets") + return self - @root_validator(pre=True) - @classmethod - def check_dtype_default(cls, values): - dtype_input = values["dtype"] - dtype_default = type(values["default"]).__name__ + @model_validator(mode="after") + def check_dtype_default(self): + dtype_input = self.dtype + dtype_default = type(self.default).__name__ if dtype_input != dtype_default: # allow ints to be cast as floats if not ((dtype_default == "int") and (dtype_input == "float")): raise ValueError( - f"{values['name']} -> User dtype is {dtype_input} while default value dtype is {dtype_default}" + f"{self.name} -> User dtype is {dtype_input} while default value dtype is {dtype_default}" ) - return values + return self diff --git a/src/otoole/read_strategies.py b/src/otoole/read_strategies.py index 2d680efb..da362cea 100644 --- a/src/otoole/read_strategies.py +++ b/src/otoole/read_strategies.py @@ -15,7 +15,16 @@ class ReadMemory(ReadStrategy): - """Read a dict of OSeMOSYS parameters from memory""" + 
"""Read a dict of OSeMOSYS parameters from memory + + Arguments + --------- + parameters : Dict[str, pd.DataFrame] + Dictionary of OSeMOSYS parameters + user_config : Dict[str, Dict] + User configuration + + """ def __init__( self, parameters: Dict[str, pd.DataFrame], user_config: Dict[str, Dict] @@ -24,7 +33,7 @@ def __init__( self._parameters = parameters def read( - self, filepath: Union[str, TextIO] = None, **kwargs + self, filepath: Union[str, TextIO, None] = None, **kwargs ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, Any]]: config = self.user_config @@ -58,6 +67,7 @@ def _convert_wide_2_narrow(self, df: pd.DataFrame, name: str): if "MODEOFOPERATION" in actual_headers: df = df.rename(columns={"MODEOFOPERATION": "MODE_OF_OPERATION"}) + actual_headers = list(df.columns) if actual_headers[-1] == "VALUE": logger.info( @@ -87,6 +97,11 @@ def _convert_wide_2_narrow(self, df: pd.DataFrame, name: str): except IndexError as ex: logger.debug(f"Could not reshape {name}") raise ex + except KeyError as ex: + logger.debug( + f"Actual headers: {actual_headers}\nConverted headers: {converted_headers}" + ) + raise ex all_headers = converted_headers + ["VALUE"] return narrow[all_headers].set_index(converted_headers) @@ -111,7 +126,15 @@ def _whitespace_converter(self, indices: List[str]) -> Dict[str, Any]: class ReadExcel(_ReadTabular): - """Read in an Excel spreadsheet in wide format to a dict of Pandas DataFrames""" + """Read in an Excel spreadsheet in wide format to a dict of Pandas DataFrames + + Arguments + --------- + user_config : Dict[str, Dict] + User configuration + keep_whitespace : bool + Whether to keep whitespace in the dataframes + """ def read( self, filepath: Union[str, TextIO], **kwargs @@ -157,7 +180,15 @@ def read( class ReadCsv(_ReadTabular): - """Read in a folder of CSV files""" + """Read in a folder of CSV files to a dict of Pandas DataFrames + + Arguments + --------- + user_config : Dict[str, Dict] + User configuration + keep_whitespace : bool + Whether to keep whitespace in the dataframes + """ def read( self, filepath, **kwargs @@ -166,9 +197,13 @@ def read( input_data = {} self._check_for_default_values_csv(filepath) - self._compare_read_to_expected( - names=[f.split(".csv")[0] for f in os.listdir(filepath)] - ) + names = [ + f.split(".csv")[0] + for f in os.listdir(filepath) + if f.split(".")[-1] == "csv" + ] + logger.debug(names) + self._compare_read_to_expected(names=names) default_values = self._read_default_values(self.user_config) @@ -272,18 +307,36 @@ def _check_for_default_values_csv(filepath: str) -> None: class ReadDatafile(ReadStrategy): + """Read in a datafile to a dict of Pandas DataFrames + + Arguments + --------- + user_config : Dict[str, Dict] + User configuration + keep_whitespace : bool + Whether to keep whitespace in the dataframes + + """ + def read( self, filepath, **kwargs ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, Any]]: config = self.user_config default_values = self._read_default_values(config) - amply_datafile = self.read_in_datafile(filepath, config) - inputs = self._convert_amply_to_dataframe(amply_datafile, config) - for config_type in ["param", "set"]: - inputs = self._get_missing_input_dataframes(inputs, config_type=config_type) - inputs = self._check_index(inputs) - return inputs, default_values + + # Check filepath exists + if os.path.exists(filepath): + amply_datafile = self.read_in_datafile(filepath, config) + inputs = self._convert_amply_to_dataframe(amply_datafile, config) + for config_type in ["param", "set"]: + inputs = 
self._get_missing_input_dataframes( + inputs, config_type=config_type + ) + inputs = self._check_index(inputs) + return inputs, default_values + else: + raise FileNotFoundError(f"File not found: {filepath}") def read_in_datafile(self, path_to_datafile: str, config: Dict) -> Amply: """Read in a datafile using the Amply parsing class @@ -322,6 +375,7 @@ def _load_parameter_definitions(self, config: dict) -> str: elif attributes["type"] == "set": elements += "set {};\n".format(name) + logger.debug("Amply Elements: %s", elements) return elements def _convert_amply_to_dataframe( diff --git a/src/otoole/results/result_package.py b/src/otoole/results/result_package.py index 991b3fe2..a63de927 100644 --- a/src/otoole/results/result_package.py +++ b/src/otoole/results/result_package.py @@ -307,8 +307,11 @@ def capital_investment(self) -> pd.DataFrame: capital_cost = self["CapitalCost"] new_capacity = self["NewCapacity"] operational_life = self["OperationalLife"] - discount_rate = self["DiscountRate"] - discount_rate_idv = self["DiscountRateIdv"] + + if "DiscountRateIdv" in self.keys(): + discount_rate = self["DiscountRateIdv"] + else: + discount_rate = self["DiscountRate"] regions = self["REGION"]["VALUE"].to_list() technologies = self.get_unique_values_from_index( @@ -323,10 +326,9 @@ def capital_investment(self) -> pd.DataFrame: raise KeyError(self._msg("CapitalInvestment", str(ex))) crf = capital_recovery_factor( - regions, technologies, discount_rate_idv, operational_life + regions, technologies, discount_rate, operational_life ) pva = pv_annuity(regions, technologies, discount_rate, operational_life) - capital_investment = capital_cost.mul(new_capacity, fill_value=0.0) capital_investment = capital_investment.mul(crf, fill_value=0.0).mul( pva, fill_value=0.0 @@ -765,22 +767,38 @@ def capital_recovery_factor( param CapitalRecoveryFactor{r in REGION, t in TECHNOLOGY} := (1 - (1 + DiscountRateIdv[r,t])^(-1))/(1 - (1 + DiscountRateIdv[r,t])^(-(OperationalLife[r,t]))); """ - if regions and technologies: - index = pd.MultiIndex.from_product( - [regions, technologies], names=["REGION", "TECHNOLOGY"] - ) + + def calc_crf(df: pd.DataFrame, operational_life: pd.Series) -> pd.Series: + rate = df["VALUE"] + 1 + numerator = 1 - rate.pow(-1) + denominator = 1 - rate.pow(-operational_life) + + return numerator / denominator + + if not regions and not technologies: + return pd.DataFrame( + data=[], + columns=["REGION", "TECHNOLOGY", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY"]) + + index = pd.MultiIndex.from_product( + [regions, technologies], names=["REGION", "TECHNOLOGY"] + ) + if "TECHNOLOGY" in discount_rate_idv.index.names: crf = discount_rate_idv.reindex(index) - crf["RATE"] = crf["VALUE"] + 1 - crf["NUMER"] = 1 - crf["RATE"].pow(-1) - crf["DENOM"] = 1 - crf["RATE"].pow(-operational_life["VALUE"]) - crf["VALUE"] = (crf["NUMER"] / crf["DENOM"]).round(6) - return crf.reset_index()[["REGION", "TECHNOLOGY", "VALUE"]].set_index( - ["REGION", "TECHNOLOGY"] - ) + crf["VALUE"] = calc_crf(crf, operational_life["VALUE"]) + else: - return pd.DataFrame([], columns=["REGION", "TECHNOLOGY", "VALUE"]).set_index( - ["REGION", "TECHNOLOGY"] - ) + values = discount_rate_idv["VALUE"].copy() + crf = discount_rate_idv.reindex(index) + # This is a hack to get around the fact that the discount rate is + # indexed by REGION and not REGION, TECHNOLOGY + crf[:] = values + crf["VALUE"] = calc_crf(crf, operational_life["VALUE"]) + + return crf.reset_index()[["REGION", "TECHNOLOGY", "VALUE"]].set_index( + ["REGION", 
"TECHNOLOGY"] + ) def pv_annuity( @@ -858,7 +876,7 @@ def discount_factor( if regions and years: discount_rate["YEAR"] = [years] discount_factor = discount_rate.explode("YEAR").reset_index(level="REGION") - discount_factor["YEAR"] = discount_factor["YEAR"].astype(int) + discount_factor["YEAR"] = discount_factor["YEAR"].astype("int64") discount_factor["NUM"] = discount_factor["YEAR"] - discount_factor["YEAR"].min() discount_factor["RATE"] = discount_factor["VALUE"] + 1 discount_factor["VALUE"] = ( diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py index e4757f9b..ae45d737 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -6,7 +6,6 @@ import pandas as pd from otoole.input import ReadStrategy -from otoole.preprocess.longify_data import check_datatypes from otoole.results.result_package import ResultsPackage LOGGER = logging.getLogger(__name__) @@ -21,7 +20,7 @@ def read( Arguments --------- filepath : str, TextIO - A path name or file buffer pointing to the CBC solution file + A path name or file buffer pointing to the solution file input_data : dict, default=None dict of dataframes @@ -75,7 +74,7 @@ def calculate_results( return results -class ReadResultsCBC(ReadResults): +class ReadWideResults(ReadResults): def get_results_from_file(self, filepath, input_data): cbc = self._convert_to_dataframe(filepath) available_results = self._convert_wide_to_long(cbc) @@ -88,13 +87,13 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: def _convert_wide_to_long(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]: """Convert from wide to long format - Converts a pandas DataFrame containing all CBC results to reformatted + Converts a pandas DataFrame containing all wide format results to reformatted dictionary of pandas DataFrames in long format ready to write out Arguments --------- data : pandas.DataFrame - CBC results stored in a dataframe + results stored in a dataframe Example ------- @@ -179,94 +178,26 @@ def rename_duplicate_column(index: List) -> List: return column -class ReadCplex(ReadResults): - """ """ +class ReadCplex(ReadWideResults): + """Read a CPLEX solution file into memeory""" - def get_results_from_file( - self, filepath: Union[str, TextIO], input_data - ) -> Dict[str, pd.DataFrame]: - - if input_data: - years = input_data["YEAR"].values # type: List - start_year = int(years[0]) - end_year = int(years[-1]) - else: - raise RuntimeError("To process CPLEX results please provide the input file") - - if isinstance(filepath, str): - with open(filepath, "r") as sol_file: - data = self.extract_rows(sol_file, start_year, end_year) - elif isinstance(filepath, StringIO): - data = self.extract_rows(filepath, start_year, end_year) - else: - raise TypeError("Argument filepath type must be a string or an open file") - - results = {} - - for name in data.keys(): - results[name] = self.convert_df(data[name], name, start_year, end_year) - - return results + def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: + """Reads a Cplex solution file into a pandas DataFrame - def extract_rows( - self, sol_file: TextIO, start_year: int, end_year: int - ) -> Dict[str, List[List[str]]]: - """ """ - data = {} # type: Dict[str, List[List[str]]] - for linenum, line in enumerate(sol_file): - line = line.replace("\n", "") - try: - row_as_list = line.split("\t") # type: List[str] - name = row_as_list[0] # type: str - - if name in data.keys(): - data[name].append(row_as_list) - else: - data[name] = 
[row_as_list] - except ValueError as ex: - msg = "Error caused at line {}: {}. {}" - raise ValueError(msg.format(linenum, line, ex)) - return data - - def extract_variable_dimensions_values(self, data: List) -> Tuple[str, Tuple, List]: - """Extracts useful information from a line of a results file""" - variable = data[0] - try: - number = len(self.results_config[variable]["indices"]) - except KeyError as ex: - print(data) - raise KeyError(ex) - dimensions = tuple(data[1:(number)]) - values = data[(number):] - return (variable, dimensions, values) - - def convert_df( - self, data: List[List[str]], variable: str, start_year: int, end_year: int - ) -> pd.DataFrame: - """Read the cplex lines into a pandas DataFrame""" - index = self.results_config[variable]["indices"] - columns = ["variable"] + index[:-1] + list(range(start_year, end_year + 1, 1)) - df = pd.DataFrame(data=data, columns=columns) - df, index = check_duplicate_index(df, columns, index) - df = df.drop(columns="variable") - - LOGGER.debug( - f"Attempting to set index for {variable} with columns {index[:-1]}" - ) - try: - df = df.set_index(index[:-1]) - except NotImplementedError as ex: - LOGGER.error(f"Error setting index for {df.head()}") - raise NotImplementedError(ex) - df = df.melt(var_name="YEAR", value_name="VALUE", ignore_index=False) - df = df.reset_index() - df = check_datatypes(df, self.user_config, variable) - df = df.set_index(index) - df = df[(df != 0).any(axis=1)] - return df + Arguments + --------- + user_config : Dict[str, Dict] + file_path : Union[str, TextIO] + """ + df = pd.read_xml(file_path, xpath=".//variable", parser="etree") + df[["Variable", "Index"]] = df["name"].str.split("(", expand=True) + df["Index"] = df["Index"].str.replace(")", "", regex=False) + LOGGER.debug(df) + df = df[(df["value"] != 0)].reset_index().rename(columns={"value": "Value"}) + return df[["Variable", "Index", "Value"]].astype({"Value": float}) -class ReadGurobi(ReadResultsCBC): +class ReadGurobi(ReadWideResults): """Read a Gurobi solution file into memory""" def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: @@ -274,7 +205,8 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: Arguments --------- - file_path : str + user_config : Dict[str, Dict] + file_path : Union[str, TextIO] """ df = pd.read_csv( file_path, @@ -290,13 +222,13 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: return df[["Variable", "Index", "Value"]].astype({"Value": float}) -class ReadCbc(ReadResultsCBC): +class ReadCbc(ReadWideResults): """Read a CBC solution file into memory Arguments --------- - user_config - results_config + user_config : Dict[str, Dict] + results_config : Dict[str, Dict] """ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: @@ -328,3 +260,208 @@ def _convert_to_dataframe(self, file_path: Union[str, TextIO]) -> pd.DataFrame: df["Index"] = df["Index"].str.replace(")", "", regex=False) df = df.drop(columns=["indexvalue"]) return df[["Variable", "Index", "Value"]].astype({"Value": float}) + + +class ReadGlpk(ReadWideResults): + """Reads a GLPK Solution file into memory + + Arguments + --------- + user_config : Dict[str, Dict] + glpk_model: Union[str, TextIO] + Path to GLPK model file. Can be created using the `--wglp` flag. 
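For orientation, a sketch of how ``ReadGlpk`` is driven end to end (file names illustrative): the model and solution files are written at solve time, e.g. ``glpsol -m osemosys.txt -d data.txt --wglp model.glp --write model.sol``, and then read back as follows:

    from otoole.results.results import ReadGlpk

    # user_config is the validated config dict and input_data the dict of
    # input DataFrames; both are assumed to have been loaded already
    reader = ReadGlpk(user_config=user_config, glpk_model="model.glp")
    results, default_values = reader.read("model.sol", input_data=input_data)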
+ """ + + def __init__(self, user_config: Dict[str, Dict], glpk_model: Union[str, TextIO]): + super().__init__(user_config) + + if isinstance(glpk_model, str): + with open(glpk_model, "r") as model_file: + self.model = self.read_model(model_file) + elif isinstance(glpk_model, StringIO): + self.model = self.read_model(glpk_model) + else: + raise TypeError("Argument filepath type must be a string or an open file") + + def _convert_to_dataframe(self, glpk_sol: Union[str, TextIO]) -> pd.DataFrame: + """Creates a wide formatted dataframe from GLPK solution + + Arguments + --------- + glpk_sol: Union[str, TextIO] + Path to GLPK solution file. Can be created using the `--write` flag + + Returns + ------- + pd.DataFrame + """ + + if isinstance(glpk_sol, str): + with open(glpk_sol, "r"): + _, sol = self.read_solution(glpk_sol) + elif isinstance(glpk_sol, StringIO): + _, sol = self.read_solution(glpk_sol) + else: + raise TypeError("Argument filepath type must be a string or an open file") + + return self._merge_model_sol(sol) + + def read_model(self, file_path: Union[str, TextIO]) -> pd.DataFrame: + """Reads in a GLPK Model File + + Arguments + --------- + file_path: Union[str, TextIO] + Path to GLPK model file. Can be created using the `--wglp` flag. + + Returns + ------- + pd.DataFrame + + ID NUM NAME INDEX + 0 i 1 CAa4_Constraint_Capacity "SIMPLICITY,ID,BACKSTOP1,2015" + 1 j 2 NewCapacity "SIMPLICITY,WINDPOWER,2039" + + Notes + ----- + + -> GENERAL LAYOUT OF SOLUTION FILE + + n p NAME # p = problem instance + n z NAME # z = objective function + n i ROW NAME # i = constraint name, ROW is the row ordinal number + n j COL NAME # j = variable name, COL is the column ordinal number + """ + + df = pd.read_csv( + file_path, + header=None, + sep=r"\s+", + index_col=0, + names=["ID", "NUM", "value", 4, 5], + ).drop(columns=[4, 5]) + + df = df[(df["ID"].isin(["i", "j"])) & (df["value"] != "cost")] + + df[["NAME", "INDEX"]] = df["value"].str.split("[", expand=True) + df["INDEX"] = df["INDEX"].map(lambda x: x.split("]")[0]) + df = ( + df[["ID", "NUM", "NAME", "INDEX"]] + .astype({"ID": str, "NUM": "int64", "NAME": str, "INDEX": str}) + .reset_index(drop=True) + ) + + return df + + def read_solution( + self, file_path: Union[str, TextIO] + ) -> Tuple[Dict[str, Union[str, float]], pd.DataFrame]: + """Reads a GLPK solution file + + Arguments + --------- + file_path: Union[str, TextIO] + Path to GLPK solution file. Can be created using the `--write` flag + + Returns + ------- + Tuple[Dict[str,Union[str, float]], pd.DataFrame] + Dict[str,Union[str, float]] -> Problem name, status, and objective value + pd.DataFrame -> Variables and constraints + + {"name":"osemosys", "status":"OPTIMAL", "objective":4497.31976} + + ID NUM STATUS PRIM DUAL + 0 i 1 b 5 0 + 1 j 2 l 0 2 + + Notes + ----- + + -> ROWS IN SOLUTION FILE + + i ROW ST PRIM DUAL + + ROW is the ordinal number of the row + ST is one of: + - b = inactive constraint; + - l = inequality constraint active on its lower bound; + - u = inequality constraint active on its upper bound; + - f = active free (unounded) row; + - s = active equality constraint. 
+ PRIM specifies the row primal value (float) + DUAL specifies the row dual value (float) + + -> COLUMNS IN SOLUTION FILE + + j COL ST PRIM DUAL + + COL specifies the column ordinal number + ST contains one of the following lower-case letters that specifies the column status in the basic solution: + - b = basic variable + - l = non-basic variable having its lower bound active + - u = non-basic variable having its upper bound active + - f = non-basic free (unbounded) variable + - s = non-basic fixed variable. + PRIM field contains column primal value (float) + DUAL field contains the column dual value (float) + """ + + df = pd.read_csv(file_path, header=None, sep=":") + + # get status information + status = {} + df_status = df.loc[:8].set_index(0) + status["name"] = df_status.loc["c Problem", 1].strip() + status["status"] = df_status.loc["c Status", 1].strip() + status["objective"] = float(df_status.loc["c Objective", 1].split()[2]) + + # get solution information + data = df.iloc[8:-1].copy() + data[["ID", "NUM", "STATUS", "PRIM", "DUAL"]] = data[0].str.split( + " ", expand=True + ) + + data = ( + data[["ID", "NUM", "STATUS", "PRIM", "DUAL"]] + .astype( + {"ID": str, "NUM": "int64", "STATUS": str, "PRIM": float, "DUAL": float} + ) + .reset_index(drop=True) + ) + + return status, data + + def _merge_model_sol(self, sol: pd.DataFrame) -> pd.DataFrame: + """Merges GLPK model and solution file into one dataframe + + Arguments + --------- + sol: pd.DataFrame + see output from ReadGlpk.read_solution(...) + + Returns + ------- + pd.DataFrame + + >>> pd.DataFrame(data=[ + ['TotalDiscountedCost', "SIMPLICITY,2015", 187.01576], + ['TotalDiscountedCost', "SIMPLICITY,2016", 183.30788]], + columns=['Variable', 'Index', 'Value']) + """ + + model = self.model.copy() + model.index = model["ID"].str.cat(model["NUM"].astype(str)) + model = model.drop(columns=["ID", "NUM"]) + + sol.index = sol["ID"].str.cat(sol["NUM"].astype(str)) + sol = sol.drop(columns=["ID", "NUM", "STATUS", "DUAL"]) + + df = model.join(sol) + df = ( + df[df.index.str.startswith("j")] + .reset_index(drop=True) + .rename(columns={"NAME": "Variable", "INDEX": "Index", "PRIM": "Value"}) + ) + + return df diff --git a/src/otoole/utils.py b/src/otoole/utils.py index 99fc7a0e..fe989b7e 100644 --- a/src/otoole/utils.py +++ b/src/otoole/utils.py @@ -1,7 +1,8 @@ import json import logging import os -from typing import Any, Dict, List, Union +from importlib.resources import files +from typing import Any, Dict, List, Optional, Union import pandas as pd from pydantic import ValidationError @@ -15,18 +16,12 @@ UserDefinedValue, ) -try: - import importlib.resources as resources -except ImportError: - # Try backported to PY<37 `importlib_resources`.
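On the Python versions otoole now supports, ``importlib.resources.files`` is always available, so the try/except fallback being removed here is no longer needed. A sketch of the replacement pattern (package and file names illustrative):

    from importlib.resources import files

    # open a data file shipped inside a package; this replaces the old
    # resources.open_text(module_name, filename) call
    with files("otoole").joinpath("config.yaml").open("r") as open_file:
        contents = open_file.read()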
- import importlib_resources as resources # type: ignore - logger = logging.getLogger(__name__) def _read_file(open_file, ending): if ending == ".yaml" or ending == ".yml": - contents = load(open_file, Loader=UniqueKeyLoader) # typing: Dict + contents = load(open_file, Loader=UniqueKeyLoader) # typing: Dict[str, Any] elif ending == ".json": contents = json.load(open_file) # typing: Dict else: @@ -34,7 +29,7 @@ def _read_file(open_file, ending): return contents -def read_packaged_file(filename: str, module_name: str = None): +def read_packaged_file(filename: str, module_name: Optional[str] = None): _, ending = os.path.splitext(filename) @@ -42,7 +37,7 @@ def read_packaged_file(filename: str, module_name: str = None): with open(filename, "r") as open_file: contents = _read_file(open_file, ending) else: - with resources.open_text(module_name, filename) as open_file: + with files(module_name).joinpath(filename).open("r") as open_file: contents = _read_file(open_file, ending) return contents diff --git a/src/otoole/validate.py b/src/otoole/validate.py index f889bf71..3c39257d 100644 --- a/src/otoole/validate.py +++ b/src/otoole/validate.py @@ -33,7 +33,7 @@ import logging import re from collections import defaultdict -from typing import Dict, List, Sequence +from typing import Dict, List, Optional, Sequence import networkx.algorithms.isolate as isolate import pandas as pd @@ -53,7 +53,7 @@ def check_for_duplicates(codes: Sequence) -> bool: return duplicate_values -def create_schema(config: Dict[str, Dict] = None) -> Dict: +def create_schema(config: Optional[Dict[str, Dict]] = None) -> Dict: """Populate the dict of schema with codes from the validation config Arguments diff --git a/src/otoole/write_strategies.py b/src/otoole/write_strategies.py index a2c18464..d4472f8b 100644 --- a/src/otoole/write_strategies.py +++ b/src/otoole/write_strategies.py @@ -59,6 +59,9 @@ def _form_parameter( def _form_parameter_template(self, parameter_name: str, **kwargs) -> pd.DataFrame: """Creates wide format excel template + Pivots the data to wide format using the data from the YEAR set as the columns. + This requires input data to be passed into this function. 
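As a sketch of the pivot described above (data illustrative, not otoole's exact implementation):

    import pandas as pd

    # long-format input data: one row per (REGION, TECHNOLOGY, YEAR) entry
    long = pd.DataFrame(
        {
            "REGION": ["BB", "BB"],
            "TECHNOLOGY": ["gas_plant", "gas_plant"],
            "YEAR": [2016, 2017],
            "VALUE": [1.0, 2.0],
        }
    )

    # wide-format template: the YEAR set supplies the columns
    wide = long.pivot(
        index=["REGION", "TECHNOLOGY"], columns="YEAR", values="VALUE"
    )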
+ Arguments --------- parameter_name: str @@ -153,7 +156,12 @@ def _write_parameter( df = self._form_parameter(df, default) handle.write("param default {} : {} :=\n".format(default, parameter_name)) df.to_csv( - path_or_buf=handle, sep=" ", header=False, index=True, float_format="%g" + path_or_buf=handle, + sep=" ", + header=False, + index=True, + float_format="%g", + lineterminator="\n", ) handle.write(";\n") @@ -168,7 +176,12 @@ def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO): """ handle.write("set {} :=\n".format(set_name)) df.to_csv( - path_or_buf=handle, sep=" ", header=False, index=False, float_format="%g" + path_or_buf=handle, + sep=" ", + header=False, + index=False, + float_format="%g", + lineterminator="\n", ) handle.write(";\n") diff --git a/tests/fixtures/super_simple/csv/AccumulatedAnnualDemand.csv b/tests/fixtures/super_simple/csv/AccumulatedAnnualDemand.csv new file mode 100644 index 00000000..326b28c6 --- /dev/null +++ b/tests/fixtures/super_simple/csv/AccumulatedAnnualDemand.csv @@ -0,0 +1 @@ +REGION,FUEL,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/AnnualEmissionLimit.csv b/tests/fixtures/super_simple/csv/AnnualEmissionLimit.csv new file mode 100644 index 00000000..1fa535a0 --- /dev/null +++ b/tests/fixtures/super_simple/csv/AnnualEmissionLimit.csv @@ -0,0 +1 @@ +REGION,EMISSION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/AnnualExogenousEmission.csv b/tests/fixtures/super_simple/csv/AnnualExogenousEmission.csv new file mode 100644 index 00000000..1fa535a0 --- /dev/null +++ b/tests/fixtures/super_simple/csv/AnnualExogenousEmission.csv @@ -0,0 +1 @@ +REGION,EMISSION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/AvailabilityFactor.csv b/tests/fixtures/super_simple/csv/AvailabilityFactor.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/AvailabilityFactor.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/CapacityFactor.csv b/tests/fixtures/super_simple/csv/CapacityFactor.csv new file mode 100644 index 00000000..ba3be6ef --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapacityFactor.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,TIMESLICE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/CapacityOfOneTechnologyUnit.csv b/tests/fixtures/super_simple/csv/CapacityOfOneTechnologyUnit.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapacityOfOneTechnologyUnit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/CapacityToActivityUnit.csv b/tests/fixtures/super_simple/csv/CapacityToActivityUnit.csv new file mode 100644 index 00000000..98b90656 --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapacityToActivityUnit.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,VALUE +BB,gas_plant,1.0 diff --git a/tests/fixtures/super_simple/csv/CapitalCost.csv b/tests/fixtures/super_simple/csv/CapitalCost.csv new file mode 100644 index 00000000..95879aba --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapitalCost.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,YEAR,VALUE +BB,gas_plant,2016,1.03456 diff --git a/tests/fixtures/super_simple/csv/CapitalCostStorage.csv b/tests/fixtures/super_simple/csv/CapitalCostStorage.csv new file mode 100644 index 00000000..a7bcbd7f --- /dev/null +++ b/tests/fixtures/super_simple/csv/CapitalCostStorage.csv @@ -0,0 +1 @@ +REGION,STORAGE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/Conversionld.csv 
b/tests/fixtures/super_simple/csv/Conversionld.csv new file mode 100644 index 00000000..360887ad --- /dev/null +++ b/tests/fixtures/super_simple/csv/Conversionld.csv @@ -0,0 +1 @@ +TIMESLICE,DAYTYPE,VALUE diff --git a/tests/fixtures/super_simple/csv/Conversionlh.csv b/tests/fixtures/super_simple/csv/Conversionlh.csv new file mode 100644 index 00000000..6fc0a297 --- /dev/null +++ b/tests/fixtures/super_simple/csv/Conversionlh.csv @@ -0,0 +1 @@ +TIMESLICE,DAILYTIMEBRACKET,VALUE diff --git a/tests/fixtures/super_simple/csv/Conversionls.csv b/tests/fixtures/super_simple/csv/Conversionls.csv new file mode 100644 index 00000000..47b6ebde --- /dev/null +++ b/tests/fixtures/super_simple/csv/Conversionls.csv @@ -0,0 +1 @@ +TIMESLICE,SEASON,VALUE diff --git a/tests/fixtures/super_simple/csv/DAILYTIMEBRACKET.csv b/tests/fixtures/super_simple/csv/DAILYTIMEBRACKET.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/DAILYTIMEBRACKET.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/DAYTYPE.csv b/tests/fixtures/super_simple/csv/DAYTYPE.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/DAYTYPE.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/DaySplit.csv b/tests/fixtures/super_simple/csv/DaySplit.csv new file mode 100644 index 00000000..83dab5c1 --- /dev/null +++ b/tests/fixtures/super_simple/csv/DaySplit.csv @@ -0,0 +1 @@ +DAILYTIMEBRACKET,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/DaysInDayType.csv b/tests/fixtures/super_simple/csv/DaysInDayType.csv new file mode 100644 index 00000000..7e5dd712 --- /dev/null +++ b/tests/fixtures/super_simple/csv/DaysInDayType.csv @@ -0,0 +1 @@ +SEASON,DAYTYPE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/DepreciationMethod.csv b/tests/fixtures/super_simple/csv/DepreciationMethod.csv new file mode 100644 index 00000000..8f1fa36c --- /dev/null +++ b/tests/fixtures/super_simple/csv/DepreciationMethod.csv @@ -0,0 +1 @@ +REGION,VALUE diff --git a/tests/fixtures/super_simple/csv/DiscountRate.csv b/tests/fixtures/super_simple/csv/DiscountRate.csv new file mode 100644 index 00000000..8f1fa36c --- /dev/null +++ b/tests/fixtures/super_simple/csv/DiscountRate.csv @@ -0,0 +1 @@ +REGION,VALUE diff --git a/tests/fixtures/super_simple/csv/DiscountRateIdv.csv b/tests/fixtures/super_simple/csv/DiscountRateIdv.csv new file mode 100644 index 00000000..1ca1a8e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/DiscountRateIdv.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,VALUE diff --git a/tests/fixtures/super_simple/csv/DiscountRateStorage.csv b/tests/fixtures/super_simple/csv/DiscountRateStorage.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/DiscountRateStorage.csv @@ -0,0 +1 @@ +REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/EMISSION.csv b/tests/fixtures/super_simple/csv/EMISSION.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/EMISSION.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/EmissionActivityRatio.csv b/tests/fixtures/super_simple/csv/EmissionActivityRatio.csv new file mode 100644 index 00000000..7c1c3ffc --- /dev/null +++ b/tests/fixtures/super_simple/csv/EmissionActivityRatio.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,EMISSION,MODE_OF_OPERATION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/EmissionsPenalty.csv b/tests/fixtures/super_simple/csv/EmissionsPenalty.csv new 
file mode 100644 index 00000000..1fa535a0 --- /dev/null +++ b/tests/fixtures/super_simple/csv/EmissionsPenalty.csv @@ -0,0 +1 @@ +REGION,EMISSION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/FUEL.csv b/tests/fixtures/super_simple/csv/FUEL.csv new file mode 100644 index 00000000..0173ebb5 --- /dev/null +++ b/tests/fixtures/super_simple/csv/FUEL.csv @@ -0,0 +1,3 @@ +VALUE +natural_gas +electricity diff --git a/tests/fixtures/super_simple/csv/FixedCost.csv b/tests/fixtures/super_simple/csv/FixedCost.csv new file mode 100644 index 00000000..eff99453 --- /dev/null +++ b/tests/fixtures/super_simple/csv/FixedCost.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,YEAR,VALUE +BB,gas_plant,2016,9.1101 diff --git a/tests/fixtures/super_simple/csv/InputActivityRatio.csv b/tests/fixtures/super_simple/csv/InputActivityRatio.csv new file mode 100644 index 00000000..cc36f0b5 --- /dev/null +++ b/tests/fixtures/super_simple/csv/InputActivityRatio.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,FUEL,MODE_OF_OPERATION,YEAR,VALUE +BB,gas_plant,natural_gas,1,2016,1.1101 diff --git a/tests/fixtures/super_simple/csv/MODE_OF_OPERATION.csv b/tests/fixtures/super_simple/csv/MODE_OF_OPERATION.csv new file mode 100644 index 00000000..69e52e5d --- /dev/null +++ b/tests/fixtures/super_simple/csv/MODE_OF_OPERATION.csv @@ -0,0 +1,2 @@ +VALUE +1 diff --git a/tests/fixtures/super_simple/csv/MinStorageCharge.csv b/tests/fixtures/super_simple/csv/MinStorageCharge.csv new file mode 100644 index 00000000..a7bcbd7f --- /dev/null +++ b/tests/fixtures/super_simple/csv/MinStorageCharge.csv @@ -0,0 +1 @@ +REGION,STORAGE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ModelPeriodEmissionLimit.csv b/tests/fixtures/super_simple/csv/ModelPeriodEmissionLimit.csv new file mode 100644 index 00000000..ccd4bcb3 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ModelPeriodEmissionLimit.csv @@ -0,0 +1 @@ +REGION,EMISSION,VALUE diff --git a/tests/fixtures/super_simple/csv/ModelPeriodExogenousEmission.csv b/tests/fixtures/super_simple/csv/ModelPeriodExogenousEmission.csv new file mode 100644 index 00000000..ccd4bcb3 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ModelPeriodExogenousEmission.csv @@ -0,0 +1 @@ +REGION,EMISSION,VALUE diff --git a/tests/fixtures/super_simple/csv/OperationalLife.csv b/tests/fixtures/super_simple/csv/OperationalLife.csv new file mode 100644 index 00000000..1ca1a8e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/OperationalLife.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,VALUE diff --git a/tests/fixtures/super_simple/csv/OperationalLifeStorage.csv b/tests/fixtures/super_simple/csv/OperationalLifeStorage.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/OperationalLifeStorage.csv @@ -0,0 +1 @@ +REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/OutputActivityRatio.csv b/tests/fixtures/super_simple/csv/OutputActivityRatio.csv new file mode 100644 index 00000000..37406935 --- /dev/null +++ b/tests/fixtures/super_simple/csv/OutputActivityRatio.csv @@ -0,0 +1,3 @@ +REGION,TECHNOLOGY,FUEL,MODE_OF_OPERATION,YEAR,VALUE +BB,gas_import,natural_gas,1,2016,1.0 +BB,gas_plant,electricity,1,2016,1.0 diff --git a/tests/fixtures/super_simple/csv/REGION.csv b/tests/fixtures/super_simple/csv/REGION.csv new file mode 100644 index 00000000..016ac8fc --- /dev/null +++ b/tests/fixtures/super_simple/csv/REGION.csv @@ -0,0 +1,2 @@ +VALUE +BB diff --git a/tests/fixtures/super_simple/csv/REMinProductionTarget.csv b/tests/fixtures/super_simple/csv/REMinProductionTarget.csv new 
file mode 100644 index 00000000..b55c2264 --- /dev/null +++ b/tests/fixtures/super_simple/csv/REMinProductionTarget.csv @@ -0,0 +1 @@ +REGION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/RETagFuel.csv b/tests/fixtures/super_simple/csv/RETagFuel.csv new file mode 100644 index 00000000..326b28c6 --- /dev/null +++ b/tests/fixtures/super_simple/csv/RETagFuel.csv @@ -0,0 +1 @@ +REGION,FUEL,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/RETagTechnology.csv b/tests/fixtures/super_simple/csv/RETagTechnology.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/RETagTechnology.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ReserveMargin.csv b/tests/fixtures/super_simple/csv/ReserveMargin.csv new file mode 100644 index 00000000..b55c2264 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ReserveMargin.csv @@ -0,0 +1 @@ +REGION,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ReserveMarginTagFuel.csv b/tests/fixtures/super_simple/csv/ReserveMarginTagFuel.csv new file mode 100644 index 00000000..326b28c6 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ReserveMarginTagFuel.csv @@ -0,0 +1 @@ +REGION,FUEL,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ReserveMarginTagTechnology.csv b/tests/fixtures/super_simple/csv/ReserveMarginTagTechnology.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/ReserveMarginTagTechnology.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/ResidualCapacity.csv b/tests/fixtures/super_simple/csv/ResidualCapacity.csv new file mode 100644 index 00000000..1b3716cf --- /dev/null +++ b/tests/fixtures/super_simple/csv/ResidualCapacity.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,YEAR,VALUE +BB,gas_plant,2016,3.1101 diff --git a/tests/fixtures/super_simple/csv/ResidualStorageCapacity.csv b/tests/fixtures/super_simple/csv/ResidualStorageCapacity.csv new file mode 100644 index 00000000..a7bcbd7f --- /dev/null +++ b/tests/fixtures/super_simple/csv/ResidualStorageCapacity.csv @@ -0,0 +1 @@ +REGION,STORAGE,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/SEASON.csv b/tests/fixtures/super_simple/csv/SEASON.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/SEASON.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/STORAGE.csv b/tests/fixtures/super_simple/csv/STORAGE.csv new file mode 100644 index 00000000..2dfe6a37 --- /dev/null +++ b/tests/fixtures/super_simple/csv/STORAGE.csv @@ -0,0 +1 @@ +VALUE diff --git a/tests/fixtures/super_simple/csv/SpecifiedAnnualDemand.csv b/tests/fixtures/super_simple/csv/SpecifiedAnnualDemand.csv new file mode 100644 index 00000000..b19cdc44 --- /dev/null +++ b/tests/fixtures/super_simple/csv/SpecifiedAnnualDemand.csv @@ -0,0 +1,2 @@ +REGION,FUEL,YEAR,VALUE +BB,electricity,2016,2.1101 diff --git a/tests/fixtures/super_simple/csv/SpecifiedDemandProfile.csv b/tests/fixtures/super_simple/csv/SpecifiedDemandProfile.csv new file mode 100644 index 00000000..dc17f3e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/SpecifiedDemandProfile.csv @@ -0,0 +1,2 @@ +REGION,FUEL,TIMESLICE,YEAR,VALUE +BB,electricity,x,2016,1.0 diff --git a/tests/fixtures/super_simple/csv/StorageLevelStart.csv b/tests/fixtures/super_simple/csv/StorageLevelStart.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/StorageLevelStart.csv @@ -0,0 +1 @@ 
+REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/StorageMaxChargeRate.csv b/tests/fixtures/super_simple/csv/StorageMaxChargeRate.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/StorageMaxChargeRate.csv @@ -0,0 +1 @@ +REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/StorageMaxDischargeRate.csv b/tests/fixtures/super_simple/csv/StorageMaxDischargeRate.csv new file mode 100644 index 00000000..2176c14c --- /dev/null +++ b/tests/fixtures/super_simple/csv/StorageMaxDischargeRate.csv @@ -0,0 +1 @@ +REGION,STORAGE,VALUE diff --git a/tests/fixtures/super_simple/csv/TECHNOLOGY.csv b/tests/fixtures/super_simple/csv/TECHNOLOGY.csv new file mode 100644 index 00000000..f563cf92 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TECHNOLOGY.csv @@ -0,0 +1,3 @@ +VALUE +gas_import +gas_plant diff --git a/tests/fixtures/super_simple/csv/TIMESLICE.csv b/tests/fixtures/super_simple/csv/TIMESLICE.csv new file mode 100644 index 00000000..9480ca01 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TIMESLICE.csv @@ -0,0 +1,2 @@ +VALUE +x diff --git a/tests/fixtures/super_simple/csv/TechnologyFromStorage.csv b/tests/fixtures/super_simple/csv/TechnologyFromStorage.csv new file mode 100644 index 00000000..384c871b --- /dev/null +++ b/tests/fixtures/super_simple/csv/TechnologyFromStorage.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,STORAGE,MODE_OF_OPERATION,VALUE diff --git a/tests/fixtures/super_simple/csv/TechnologyToStorage.csv b/tests/fixtures/super_simple/csv/TechnologyToStorage.csv new file mode 100644 index 00000000..384c871b --- /dev/null +++ b/tests/fixtures/super_simple/csv/TechnologyToStorage.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,STORAGE,MODE_OF_OPERATION,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacity.csv b/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacity.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacity.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacityInvestment.csv b/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacityInvestment.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalAnnualMaxCapacityInvestment.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalAnnualMinCapacity.csv b/tests/fixtures/super_simple/csv/TotalAnnualMinCapacity.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalAnnualMinCapacity.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalAnnualMinCapacityInvestment.csv b/tests/fixtures/super_simple/csv/TotalAnnualMinCapacityInvestment.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalAnnualMinCapacityInvestment.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityLowerLimit.csv b/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityLowerLimit.csv new file mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityLowerLimit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityUpperLimit.csv b/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityUpperLimit.csv new file 
mode 100644 index 00000000..6a91e609 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalTechnologyAnnualActivityUpperLimit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityLowerLimit.csv b/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityLowerLimit.csv new file mode 100644 index 00000000..1ca1a8e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityLowerLimit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,VALUE diff --git a/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityUpperLimit.csv b/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityUpperLimit.csv new file mode 100644 index 00000000..1ca1a8e9 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TotalTechnologyModelPeriodActivityUpperLimit.csv @@ -0,0 +1 @@ +REGION,TECHNOLOGY,VALUE diff --git a/tests/fixtures/super_simple/csv/TradeRoute.csv b/tests/fixtures/super_simple/csv/TradeRoute.csv new file mode 100644 index 00000000..11316319 --- /dev/null +++ b/tests/fixtures/super_simple/csv/TradeRoute.csv @@ -0,0 +1 @@ +REGION,_REGION,FUEL,YEAR,VALUE diff --git a/tests/fixtures/super_simple/csv/VariableCost.csv b/tests/fixtures/super_simple/csv/VariableCost.csv new file mode 100644 index 00000000..6948a628 --- /dev/null +++ b/tests/fixtures/super_simple/csv/VariableCost.csv @@ -0,0 +1,2 @@ +REGION,TECHNOLOGY,MODE_OF_OPERATION,YEAR,VALUE +BB,gas_plant,1,2016,9.1202 diff --git a/tests/fixtures/super_simple/csv/YEAR.csv b/tests/fixtures/super_simple/csv/YEAR.csv new file mode 100644 index 00000000..55c26cd2 --- /dev/null +++ b/tests/fixtures/super_simple/csv/YEAR.csv @@ -0,0 +1,2 @@ +VALUE +2016 diff --git a/tests/fixtures/super_simple/csv/YearSplit.csv b/tests/fixtures/super_simple/csv/YearSplit.csv new file mode 100644 index 00000000..9656554c --- /dev/null +++ b/tests/fixtures/super_simple/csv/YearSplit.csv @@ -0,0 +1,2 @@ +TIMESLICE,YEAR,VALUE +x,2016,1.0 diff --git a/tests/fixtures/super_simple/csv/_REGION.csv b/tests/fixtures/super_simple/csv/_REGION.csv new file mode 100644 index 00000000..016ac8fc --- /dev/null +++ b/tests/fixtures/super_simple/csv/_REGION.csv @@ -0,0 +1,2 @@ +VALUE +BB diff --git a/tests/fixtures/super_simple/super_simple.txt b/tests/fixtures/super_simple/super_simple.txt new file mode 100644 index 00000000..6bde7831 --- /dev/null +++ b/tests/fixtures/super_simple/super_simple.txt @@ -0,0 +1,153 @@ +# Model file written by *otoole* +param default 0 : AccumulatedAnnualDemand := +; +param default -1 : AnnualEmissionLimit := +; +param default 0 : AnnualExogenousEmission := +; +param default 1 : AvailabilityFactor := +; +param default 1 : CapacityFactor := +; +param default 0 : CapacityOfOneTechnologyUnit := +; +param default 1 : CapacityToActivityUnit := +; +param default 0 : CapitalCost := +BB gas_plant 2016 1.03456 +; +param default 0 : CapitalCostStorage := +; +param default 0 : Conversionld := +; +param default 0 : Conversionlh := +; +param default 0 : Conversionls := +; +set DAILYTIMEBRACKET := +; +set DAYTYPE := +; +param default 0.00137 : DaySplit := +; +param default 7 : DaysInDayType := +; +param default 1 : DepreciationMethod := +; +param default 0.05 : DiscountRate := +; +param default 0.05 : DiscountRateIdv := +; +param default 0.05 : DiscountRateStorage := +; +set EMISSION := +; +param default 0 : EmissionActivityRatio := +; +param default 0 : EmissionsPenalty := +; +set FUEL := +natural_gas +electricity +; +param default 0 : FixedCost := +BB gas_plant 
2016 9.1101 +; +param default 0 : InputActivityRatio := +BB gas_plant natural_gas 1 2016 1.1101 +; +set MODE_OF_OPERATION := +1 +; +param default 0 : MinStorageCharge := +; +param default -1 : ModelPeriodEmissionLimit := +; +param default 0 : ModelPeriodExogenousEmission := +; +param default 1 : OperationalLife := +; +param default 0 : OperationalLifeStorage := +; +param default 0 : OutputActivityRatio := +BB gas_import natural_gas 1 2016 1 +BB gas_plant electricity 1 2016 1 +; +set REGION := +BB +; +param default 0 : REMinProductionTarget := +; +param default 0 : RETagFuel := +; +param default 0 : RETagTechnology := +; +param default 1 : ReserveMargin := +; +param default 0 : ReserveMarginTagFuel := +; +param default 0 : ReserveMarginTagTechnology := +; +param default 0 : ResidualCapacity := +BB gas_plant 2016 3.1101 +; +param default 999 : ResidualStorageCapacity := +; +set SEASON := +; +set STORAGE := +; +param default 0 : SpecifiedAnnualDemand := +BB electricity 2016 2.1101 +; +param default 0 : SpecifiedDemandProfile := +BB electricity x 2016 1 +; +param default 0 : StorageLevelStart := +; +param default 0 : StorageMaxChargeRate := +; +param default 0 : StorageMaxDischargeRate := +; +set TECHNOLOGY := +gas_import +gas_plant +; +set TIMESLICE := +x +; +param default 0 : TechnologyFromStorage := +; +param default 0 : TechnologyToStorage := +; +param default -1 : TotalAnnualMaxCapacity := +; +param default -1 : TotalAnnualMaxCapacityInvestment := +; +param default 0 : TotalAnnualMinCapacity := +; +param default 0 : TotalAnnualMinCapacityInvestment := +; +param default 0 : TotalTechnologyAnnualActivityLowerLimit := +; +param default -1 : TotalTechnologyAnnualActivityUpperLimit := +; +param default 0 : TotalTechnologyModelPeriodActivityLowerLimit := +; +param default -1 : TotalTechnologyModelPeriodActivityUpperLimit := +; +param default 0 : TradeRoute := +; +param default 0 : VariableCost := +BB gas_plant 1 2016 9.1202 +; +set YEAR := +2016 +; +param default 0 : YearSplit := +x 2016 1 +; +set _REGION := +BB +; +end; diff --git a/tests/fixtures/super_simple/super_simple.yaml b/tests/fixtures/super_simple/super_simple.yaml new file mode 100644 index 00000000..6fcf713e --- /dev/null +++ b/tests/fixtures/super_simple/super_simple.yaml @@ -0,0 +1,520 @@ +AccumulatedAnnualDemand: + indices: [REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +AnnualEmissionLimit: + indices: [REGION,EMISSION,YEAR] + type: param + dtype: float + default: -1 +AnnualExogenousEmission: + indices: [REGION,EMISSION,YEAR] + type: param + dtype: float + default: 0 +AvailabilityFactor: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 1 +CapacityFactor: + indices: [REGION,TECHNOLOGY,TIMESLICE,YEAR] + type: param + dtype: float + default: 1 +CapacityOfOneTechnologyUnit: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +CapacityToActivityUnit: + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: 1 +CapitalCost: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +CapitalCostStorage: + indices: [REGION,STORAGE,YEAR] + type: param + dtype: float + default: 0 +Conversionld: + indices: [TIMESLICE,DAYTYPE] + type: param + dtype: float + default: 0 +Conversionlh: + indices: [TIMESLICE,DAILYTIMEBRACKET] + type: param + dtype: float + default: 0 +Conversionls: + indices: [TIMESLICE,SEASON] + type: param + dtype: float + default: 0 +DAILYTIMEBRACKET: + dtype: int + type: set +DaysInDayType: + indices: [SEASON,DAYTYPE,YEAR] + 
type: param + dtype: float + default: 7 +DaySplit: + indices: [DAILYTIMEBRACKET,YEAR] + type: param + dtype: float + default: 0.00137 +DAYTYPE: + dtype: int + type: set +DepreciationMethod: + indices: [REGION] + type: param + dtype: float + default: 1 +DiscountRate: + indices: [REGION] + type: param + dtype: float + default: 0.05 +DiscountRateIdv: + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: 0.05 +DiscountRateStorage: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0.05 +EMISSION: + dtype: str + type: set +EmissionActivityRatio: + indices: [REGION,TECHNOLOGY,EMISSION,MODE_OF_OPERATION,YEAR] + type: param + dtype: float + default: 0 +EmissionsPenalty: + indices: [REGION,EMISSION,YEAR] + type: param + dtype: float + default: 0 +FixedCost: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +FUEL: + dtype: str + type: set +InputActivityRatio: + indices: [REGION,TECHNOLOGY,FUEL,MODE_OF_OPERATION,YEAR] + type: param + dtype: float + default: 0 +MinStorageCharge: + indices: [REGION,STORAGE,YEAR] + type: param + dtype: float + default: 0 +MODE_OF_OPERATION: + dtype: int + type: set +ModelPeriodEmissionLimit: + indices: [REGION,EMISSION] + type: param + dtype: float + default: -1 +ModelPeriodExogenousEmission: + indices: [REGION,EMISSION] + type: param + dtype: float + default: 0 +OperationalLife: + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: 1 +OperationalLifeStorage: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0 +OutputActivityRatio: + indices: [REGION,TECHNOLOGY,FUEL,MODE_OF_OPERATION,YEAR] + type: param + dtype: float + default: 0 +REGION: + dtype: str + type: set +_REGION: + dtype: str + type: set +REMinProductionTarget: + indices: [REGION,YEAR] + type: param + dtype: float + default: 0 +ReserveMargin: + indices: [REGION,YEAR] + type: param + dtype: float + default: 1 +ReserveMarginTagFuel: + indices: [REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +ReserveMarginTagTechnology: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +ResidualCapacity: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +ResidualStorageCapacity: + indices: [REGION,STORAGE,YEAR] + type: param + dtype: float + default: 999 +RETagFuel: + indices: [REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +RETagTechnology: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +SEASON: + dtype: int + type: set +SpecifiedAnnualDemand: + indices: [REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +SpecifiedDemandProfile: + indices: [REGION,FUEL,TIMESLICE,YEAR] + type: param + dtype: float + default: 0 +STORAGE: + dtype: str + type: set +StorageLevelStart: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0 +StorageMaxChargeRate: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0 +StorageMaxDischargeRate: + indices: [REGION,STORAGE] + type: param + dtype: float + default: 0 +TECHNOLOGY: + dtype: str + type: set +TechnologyFromStorage: + indices: [REGION,TECHNOLOGY,STORAGE,MODE_OF_OPERATION] + type: param + dtype: float + default: 0 +TechnologyToStorage: + indices: [REGION,TECHNOLOGY,STORAGE,MODE_OF_OPERATION] + type: param + dtype: float + default: 0 +TIMESLICE: + dtype: str + type: set +TotalAnnualMaxCapacity: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: -1 +TotalAnnualMaxCapacityInvestment: + short_name: TotalAnnualMaxCapacityInvestmen 
+ indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: -1 +TotalAnnualMinCapacity: + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +TotalAnnualMinCapacityInvestment: + short_name: TotalAnnualMinCapacityInvestmen + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +TotalTechnologyAnnualActivityLowerLimit: + short_name: TotalTechnologyAnnualActivityLo + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: 0 +TotalTechnologyAnnualActivityUpperLimit: + short_name: TotalTechnologyAnnualActivityUp + indices: [REGION,TECHNOLOGY,YEAR] + type: param + dtype: float + default: -1 +TotalTechnologyModelPeriodActivityLowerLimit: + short_name: TotalTechnologyModelPeriodActLo + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: 0 +TotalTechnologyModelPeriodActivityUpperLimit: + short_name: TotalTechnologyModelPeriodActUp + indices: [REGION,TECHNOLOGY] + type: param + dtype: float + default: -1 +TradeRoute: + indices: [REGION,_REGION,FUEL,YEAR] + type: param + dtype: float + default: 0 +VariableCost: + indices: [REGION,TECHNOLOGY,MODE_OF_OPERATION,YEAR] + type: param + dtype: float + default: 0 +YEAR: + dtype: int + type: set +YearSplit: + indices: [TIMESLICE,YEAR] + type: param + dtype: float + default: 0 +AnnualEmissions: + indices: [REGION,EMISSION,YEAR] + type: result + dtype: float + default: 0 + calculated: True +AccumulatedNewCapacity: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +AnnualFixedOperatingCost: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +AnnualTechnologyEmission: + indices: [REGION, TECHNOLOGY, EMISSION, YEAR] + type: result + dtype: float + default: 0 + calculated: True +AnnualTechnologyEmissionByMode: + indices: [REGION, TECHNOLOGY, EMISSION, MODE_OF_OPERATION, YEAR] + type: result + dtype: float + default: 0 + calculated: True +AnnualVariableOperatingCost: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +CapitalInvestment: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +Demand: + indices: [REGION, TIMESLICE, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +DiscountedSalvageValue: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +DiscountedTechnologyEmissionsPenalty: + short_name: DiscountedTechEmissionsPenalty + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +NewCapacity: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +NewStorageCapacity: + indices: [REGION, STORAGE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +NumberOfNewTechnologyUnits: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +ProductionByTechnology: + indices: [REGION, TIMESLICE, TECHNOLOGY, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +ProductionByTechnologyAnnual: + indices: [REGION, TECHNOLOGY, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +RateOfActivity: + indices: [REGION, TIMESLICE, TECHNOLOGY, MODE_OF_OPERATION, YEAR] + type: result + dtype: float + default: 0 + calculated: False +RateOfProductionByTechnology: + indices: [REGION, TIMESLICE, TECHNOLOGY, FUEL, YEAR] + type: 
result + dtype: float + default: 0 + calculated: True +RateOfProductionByTechnologyByMode: + short_name: RateOfProductionByTechByMode + indices: [REGION, TIMESLICE, TECHNOLOGY, MODE_OF_OPERATION, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +RateOfUseByTechnology: + indices: [REGION, TIMESLICE, TECHNOLOGY, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +RateOfUseByTechnologyByMode: + indices: [REGION, TIMESLICE, TECHNOLOGY, MODE_OF_OPERATION, FUEL, YEAR] + type: result + dtype: float + default: 0 + calculated: True +SalvageValue: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: False +SalvageValueStorage: + indices: [REGION, STORAGE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +StorageLevelDayTypeFinish: + indices: [REGION, STORAGE, SEASON, DAYTYPE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +StorageLevelDayTypeStart: + indices: [REGION, STORAGE, SEASON, DAYTYPE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +StorageLevelSeasonStart: + indices: [REGION, STORAGE, SEASON, YEAR] + type: result + dtype: float + default: 0 + calculated: False +StorageLevelYearStart: + indices: [REGION, STORAGE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +StorageLevelYearFinish: + indices: [REGION, STORAGE, YEAR] + type: result + dtype: float + default: 0 + calculated: False +TotalAnnualTechnologyActivityByMode: + short_name: TotalAnnualTechActivityByMode + indices: [REGION, TECHNOLOGY, MODE_OF_OPERATION, YEAR] + type: result + dtype: float + default: 0 + calculated: True +TotalCapacityAnnual: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +TotalDiscountedCost: + indices: [REGION,YEAR] + type: result + dtype: float + default: 0 + calculated: True +TotalTechnologyAnnualActivity: + indices: [REGION, TECHNOLOGY, YEAR] + type: result + dtype: float + default: 0 + calculated: True +TotalTechnologyModelPeriodActivity: + short_name: TotalTechModelPeriodActivity + indices: [REGION, TECHNOLOGY] + type: result + dtype: float + default: 0 + calculated: True +Trade: + indices: [REGION,TIMESLICE,FUEL,YEAR] + type: result + dtype: float + default: 0 + calculated: False +UseByTechnology: + indices: [REGION,TIMESLICE,TECHNOLOGY,FUEL,YEAR] + type: result + dtype: float + default: 0 + calculated: False diff --git a/tests/fixtures/super_simple/super_simple_gnu.lp b/tests/fixtures/super_simple/super_simple_gnu.lp new file mode 100644 index 00000000..ba9e2917 --- /dev/null +++ b/tests/fixtures/super_simple/super_simple_gnu.lp @@ -0,0 +1,222 @@ +\* Problem: OSeMOSYS *\ + +Minimize + cost: + TotalDiscountedCost(BB,2016) + +Subject To + EQ_SpecifiedDemand(BB,x,electricity,2016): + - RateOfDemand(BB,x,electricity,2016) = -2.1101 + CAa1_TotalNewCapacity(BB,gas_import,2016): + - NewCapacity(BB,gas_import,2016) + + AccumulatedNewCapacity(BB,gas_import,2016) = -0 + CAa1_TotalNewCapacity(BB,gas_plant,2016): + - NewCapacity(BB,gas_plant,2016) + + AccumulatedNewCapacity(BB,gas_plant,2016) = -0 + CAa2_TotalAnnualCapacity(BB,gas_import,2016): + + AccumulatedNewCapacity(BB,gas_import,2016) + - TotalCapacityAnnual(BB,gas_import,2016) = -0 + CAa2_TotalAnnualCapacity(BB,gas_plant,2016): + + AccumulatedNewCapacity(BB,gas_plant,2016) + - TotalCapacityAnnual(BB,gas_plant,2016) = -3.1101 + CAa3_TotalActivityOfEachTechnology(BB,gas_import,x,2016): + + RateOfActivity(BB,x,gas_import,1,2016) + - 
RateOfTotalActivity(BB,gas_import,x,2016) = -0 + CAa3_TotalActivityOfEachTechnology(BB,gas_plant,x,2016): + + RateOfActivity(BB,x,gas_plant,1,2016) + - RateOfTotalActivity(BB,gas_plant,x,2016) = -0 + CAa4_Constraint_Capacity(BB,x,gas_import,2016): + - TotalCapacityAnnual(BB,gas_import,2016) + + RateOfTotalActivity(BB,gas_import,x,2016) <= -0 + CAa4_Constraint_Capacity(BB,x,gas_plant,2016): + - TotalCapacityAnnual(BB,gas_plant,2016) + + RateOfTotalActivity(BB,gas_plant,x,2016) <= -0 + EBa1_RateOfFuelProduction1(BB,x,natural_gas,gas_import,1,2016): + + RateOfActivity(BB,x,gas_import,1,2016) + - RateOfProductionByTechnologyByMode(BB,x,gas_import,1,natural_gas,2016) + = -0 + EBa1_RateOfFuelProduction1(BB,x,electricity,gas_plant,1,2016): + + RateOfActivity(BB,x,gas_plant,1,2016) + - RateOfProductionByTechnologyByMode(BB,x,gas_plant,1,electricity,2016) + = -0 + EBa2_RateOfFuelProduction2(BB,x,natural_gas,gas_import,2016): + + RateOfProductionByTechnologyByMode(BB,x,gas_import,1,natural_gas,2016) + - RateOfProductionByTechnology(BB,x,gas_import,natural_gas,2016) = -0 + EBa2_RateOfFuelProduction2(BB,x,electricity,gas_plant,2016): + + RateOfProductionByTechnologyByMode(BB,x,gas_plant,1,electricity,2016) + - RateOfProductionByTechnology(BB,x,gas_plant,electricity,2016) = -0 + EBa3_RateOfFuelProduction3(BB,x,natural_gas,2016): + + RateOfProductionByTechnology(BB,x,gas_import,natural_gas,2016) + - RateOfProduction(BB,x,natural_gas,2016) = -0 + EBa3_RateOfFuelProduction3(BB,x,electricity,2016): + + RateOfProductionByTechnology(BB,x,gas_plant,electricity,2016) + - RateOfProduction(BB,x,electricity,2016) = -0 + EBa4_RateOfFuelUse1(BB,x,natural_gas,gas_plant,1,2016): + + 1.1101 RateOfActivity(BB,x,gas_plant,1,2016) + - RateOfUseByTechnologyByMode(BB,x,gas_plant,1,natural_gas,2016) = -0 + EBa5_RateOfFuelUse2(BB,x,natural_gas,gas_plant,2016): + + RateOfUseByTechnologyByMode(BB,x,gas_plant,1,natural_gas,2016) + - RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) = -0 + EBa6_RateOfFuelUse3(BB,x,natural_gas,2016): + + RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) + + RateOfUseByTechnology(BB,x,gas_import,natural_gas,2016) + - RateOfUse(BB,x,natural_gas,2016) = -0 + EBa7_EnergyBalanceEachTS1(BB,x,natural_gas,2016): + + RateOfProduction(BB,x,natural_gas,2016) + - Production(BB,x,natural_gas,2016) = -0 + EBa7_EnergyBalanceEachTS1(BB,x,electricity,2016): + + RateOfProduction(BB,x,electricity,2016) + - Production(BB,x,electricity,2016) = -0 + EBa8_EnergyBalanceEachTS2(BB,x,natural_gas,2016): + + RateOfUse(BB,x,natural_gas,2016) - Use(BB,x,natural_gas,2016) = -0 + EBa9_EnergyBalanceEachTS3(BB,x,electricity,2016): + + RateOfDemand(BB,x,electricity,2016) - Demand(BB,x,electricity,2016) + = -0 + EBa11_EnergyBalanceEachTS5(BB,x,natural_gas,2016): + - Demand(BB,x,natural_gas,2016) + Production(BB,x,natural_gas,2016) + - Use(BB,x,natural_gas,2016) >= -0 + EBa11_EnergyBalanceEachTS5(BB,x,electricity,2016): + - Demand(BB,x,electricity,2016) + Production(BB,x,electricity,2016) + - Use(BB,x,electricity,2016) >= -0 + EBb1_EnergyBalanceEachYear1(BB,natural_gas,2016): + + Production(BB,x,natural_gas,2016) + - ProductionAnnual(BB,natural_gas,2016) = -0 + EBb1_EnergyBalanceEachYear1(BB,electricity,2016): + + Production(BB,x,electricity,2016) + - ProductionAnnual(BB,electricity,2016) = -0 + EBb2_EnergyBalanceEachYear2(BB,natural_gas,2016): + + Use(BB,x,natural_gas,2016) - UseAnnual(BB,natural_gas,2016) = -0 + EBb2_EnergyBalanceEachYear2(BB,electricity,2016): + + Use(BB,x,electricity,2016) - 
UseAnnual(BB,electricity,2016) = -0 + EBb4_EnergyBalanceEachYear4(BB,natural_gas,2016): + + ProductionAnnual(BB,natural_gas,2016) + - UseAnnual(BB,natural_gas,2016) >= -0 + EBb4_EnergyBalanceEachYear4(BB,electricity,2016): + + ProductionAnnual(BB,electricity,2016) + - UseAnnual(BB,electricity,2016) >= -0 + Acc1_FuelProductionByTechnology(BB,x,gas_import,natural_gas,2016): + + RateOfProductionByTechnology(BB,x,gas_import,natural_gas,2016) + - ProductionByTechnology(BB,x,gas_import,natural_gas,2016) = -0 + Acc1_FuelProductionByTechnology(BB,x,gas_plant,electricity,2016): + + RateOfProductionByTechnology(BB,x,gas_plant,electricity,2016) + - ProductionByTechnology(BB,x,gas_plant,electricity,2016) = -0 + Acc2_FuelUseByTechnology(BB,x,gas_plant,natural_gas,2016): + + RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) + - UseByTechnology(BB,x,gas_plant,natural_gas,2016) = -0 + Acc3_AverageAnnualRateOfActivity(BB,gas_import,1,2016): + + RateOfActivity(BB,x,gas_import,1,2016) + - TotalAnnualTechnologyActivityByMode(BB,gas_import,1,2016) = -0 + Acc3_AverageAnnualRateOfActivity(BB,gas_plant,1,2016): + + RateOfActivity(BB,x,gas_plant,1,2016) + - TotalAnnualTechnologyActivityByMode(BB,gas_plant,1,2016) = -0 + Acc4_ModelPeriodCostByRegion(BB): + TotalDiscountedCost(BB,2016) + - ModelPeriodCostByRegion(BB) = -0 + CC1_UndiscountedCapitalInvestment(BB,gas_import,2016): + + 1e-05 NewCapacity(BB,gas_import,2016) + - CapitalInvestment(BB,gas_import,2016) = -0 + CC1_UndiscountedCapitalInvestment(BB,gas_plant,2016): + + 1.03456 NewCapacity(BB,gas_plant,2016) + - CapitalInvestment(BB,gas_plant,2016) = -0 + CC2_DiscountingCapitalInvestment(BB,gas_import,2016): + + CapitalInvestment(BB,gas_import,2016) + - DiscountedCapitalInvestment(BB,gas_import,2016) = -0 + CC2_DiscountingCapitalInvestment(BB,gas_plant,2016): + + CapitalInvestment(BB,gas_plant,2016) + - DiscountedCapitalInvestment(BB,gas_plant,2016) = -0 + SV3_SalvageValueAtEndOfPeriod3(BB,gas_import,2016): + + SalvageValue(BB,gas_import,2016) = -0 + SV3_SalvageValueAtEndOfPeriod3(BB,gas_plant,2016): + + SalvageValue(BB,gas_plant,2016) = -0 + SV4_SalvageValueDiscountedToStartYear(BB,gas_import,2016): + - 0.952380952380952 SalvageValue(BB,gas_import,2016) + + DiscountedSalvageValue(BB,gas_import,2016) = -0 + SV4_SalvageValueDiscountedToStartYear(BB,gas_plant,2016): + - 0.952380952380952 SalvageValue(BB,gas_plant,2016) + + DiscountedSalvageValue(BB,gas_plant,2016) = -0 + OC1_OperatingCostsVariable(BB,gas_plant,x,2016): + + 9.1202 TotalAnnualTechnologyActivityByMode(BB,gas_plant,1,2016) + - AnnualVariableOperatingCost(BB,gas_plant,2016) = -0 + OC2_OperatingCostsFixedAnnual(BB,gas_import,2016): + - AnnualFixedOperatingCost(BB,gas_import,2016) = -0 + OC2_OperatingCostsFixedAnnual(BB,gas_plant,2016): + + 9.1101 TotalCapacityAnnual(BB,gas_plant,2016) + - AnnualFixedOperatingCost(BB,gas_plant,2016) = -0 + OC3_OperatingCostsTotalAnnual(BB,gas_import,2016): + - OperatingCost(BB,gas_import,2016) + + AnnualVariableOperatingCost(BB,gas_import,2016) + + AnnualFixedOperatingCost(BB,gas_import,2016) = -0 + OC3_OperatingCostsTotalAnnual(BB,gas_plant,2016): + - OperatingCost(BB,gas_plant,2016) + + AnnualVariableOperatingCost(BB,gas_plant,2016) + + AnnualFixedOperatingCost(BB,gas_plant,2016) = -0 + OC4_DiscountedOperatingCostsTotalAnnual(BB,gas_import,2016): + + 0.975900072948533 OperatingCost(BB,gas_import,2016) + - DiscountedOperatingCost(BB,gas_import,2016) = -0 + OC4_DiscountedOperatingCostsTotalAnnual(BB,gas_plant,2016): + + 0.975900072948533 
OperatingCost(BB,gas_plant,2016) + - DiscountedOperatingCost(BB,gas_plant,2016) = -0 + TDC1_TotalDiscountedCostByTechnology(BB,gas_import,2016): + + DiscountedCapitalInvestment(BB,gas_import,2016) + - DiscountedSalvageValue(BB,gas_import,2016) + + DiscountedOperatingCost(BB,gas_import,2016) + - TotalDiscountedCostByTechnology(BB,gas_import,2016) + + DiscountedTechnologyEmissionsPenalty(BB,gas_import,2016) = -0 + TDC1_TotalDiscountedCostByTechnology(BB,gas_plant,2016): + + DiscountedCapitalInvestment(BB,gas_plant,2016) + - DiscountedSalvageValue(BB,gas_plant,2016) + + DiscountedOperatingCost(BB,gas_plant,2016) + - TotalDiscountedCostByTechnology(BB,gas_plant,2016) + + DiscountedTechnologyEmissionsPenalty(BB,gas_plant,2016) = -0 + TDC2_TotalDiscountedCost(BB,2016): + + TotalDiscountedCostByTechnology(BB,gas_import,2016) + + TotalDiscountedCostByTechnology(BB,gas_plant,2016) + - TotalDiscountedCost(BB,2016) = -0 + AAC1_TotalAnnualTechnologyActivity(BB,gas_import,2016): + + RateOfTotalActivity(BB,gas_import,x,2016) + - TotalTechnologyAnnualActivity(BB,gas_import,2016) = -0 + AAC1_TotalAnnualTechnologyActivity(BB,gas_plant,2016): + + RateOfTotalActivity(BB,gas_plant,x,2016) + - TotalTechnologyAnnualActivity(BB,gas_plant,2016) = -0 + TAC1_TotalModelHorizonTechnologyActivity(BB,gas_import): + + TotalTechnologyAnnualActivity(BB,gas_import,2016) + - TotalTechnologyModelPeriodActivity(BB,gas_import) = -0 + TAC1_TotalModelHorizonTechnologyActivity(BB,gas_plant): + + TotalTechnologyAnnualActivity(BB,gas_plant,2016) + - TotalTechnologyModelPeriodActivity(BB,gas_plant) = -0 + RM1_ReserveMargin_TechnologiesIncluded_In_Activity_Units(BB,x,2016): + - TotalCapacityInReserveMargin(BB,2016) = -0 + RM2_ReserveMargin_FuelsIncluded(BB,x,2016): + - DemandNeedingReserveMargin(BB,x,2016) = -0 + RM3_ReserveMargin_Constraint(BB,x,2016): + - TotalCapacityInReserveMargin(BB,2016) + + DemandNeedingReserveMargin(BB,x,2016) <= -0 + RE1_FuelProductionByTechnologyAnnual(BB,gas_import,natural_gas,2016): + + ProductionByTechnology(BB,x,gas_import,natural_gas,2016) + - ProductionByTechnologyAnnual(BB,gas_import,natural_gas,2016) = -0 + RE1_FuelProductionByTechnologyAnnual(BB,gas_plant,electricity,2016): + + ProductionByTechnology(BB,x,gas_plant,electricity,2016) + - ProductionByTechnologyAnnual(BB,gas_plant,electricity,2016) = -0 + RE2_TechIncluded(BB,2016): - TotalREProductionAnnual(BB,2016) = -0 + RE3_FuelIncluded(BB,2016): + - RETotalProductionOfTargetFuelAnnual(BB,2016) = -0 + RE4_EnergyConstraint(BB,2016): - TotalREProductionAnnual(BB,2016) <= -0 + RE5_FuelUseByTechnologyAnnual(BB,gas_plant,natural_gas,2016): + + RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) + - UseByTechnologyAnnual(BB,gas_plant,natural_gas,2016) = -0 + E4_EmissionsPenaltyByTechnology(BB,gas_import,2016): + - AnnualTechnologyEmissionsPenalty(BB,gas_import,2016) = -0 + E4_EmissionsPenaltyByTechnology(BB,gas_plant,2016): + - AnnualTechnologyEmissionsPenalty(BB,gas_plant,2016) = -0 + E5_DiscountedEmissionsPenaltyByTechnology(BB,gas_import,2016): + + 0.975900072948533 AnnualTechnologyEmissionsPenalty(BB,gas_import,2016) + - DiscountedTechnologyEmissionsPenalty(BB,gas_import,2016) = -0 + E5_DiscountedEmissionsPenaltyByTechnology(BB,gas_plant,2016): + + 0.975900072948533 AnnualTechnologyEmissionsPenalty(BB,gas_plant,2016) + - DiscountedTechnologyEmissionsPenalty(BB,gas_plant,2016) = -0 + +Bounds + TotalTechnologyModelPeriodActivity(BB,gas_import) free + TotalTechnologyModelPeriodActivity(BB,gas_plant) free + TotalREProductionAnnual(BB,2016) free + 
RETotalProductionOfTargetFuelAnnual(BB,2016) free + +End diff --git a/tests/fixtures/super_simple/super_simple_gnu.sol b/tests/fixtures/super_simple/super_simple_gnu.sol new file mode 100644 index 00000000..f87af1b1 --- /dev/null +++ b/tests/fixtures/super_simple/super_simple_gnu.sol @@ -0,0 +1,48 @@ +Optimal - objective value 46.43125659 + 0 TotalDiscountedCost(BB,2016) 46.431257 0 + 1 RateOfDemand(BB,x,electricity,2016) 2.1101 0 + 2 NewCapacity(BB,gas_import,2016) 2.342422 0 + 3 AccumulatedNewCapacity(BB,gas_import,2016) 2.342422 0 + 6 TotalCapacityAnnual(BB,gas_import,2016) 2.342422 0 + 7 TotalCapacityAnnual(BB,gas_plant,2016) 3.1101 0 + 8 RateOfActivity(BB,x,gas_import,1,2016) 2.342422 0 + 9 RateOfTotalActivity(BB,gas_import,x,2016) 2.342422 0 + 10 RateOfActivity(BB,x,gas_plant,1,2016) 2.1101 0 + 11 RateOfTotalActivity(BB,gas_plant,x,2016) 2.1101 0 + 12 RateOfProductionByTechnologyByMode(BB,x,gas_import,1,natural_gas,2016) 2.342422 0 + 13 RateOfProductionByTechnologyByMode(BB,x,gas_plant,1,electricity,2016) 2.1101 0 + 14 RateOfProductionByTechnology(BB,x,gas_import,natural_gas,2016) 2.342422 0 + 15 RateOfProductionByTechnology(BB,x,gas_plant,electricity,2016) 2.1101 0 + 16 RateOfProduction(BB,x,natural_gas,2016) 2.342422 0 + 17 RateOfProduction(BB,x,electricity,2016) 2.1101 0 + 18 RateOfUseByTechnologyByMode(BB,x,gas_plant,1,natural_gas,2016) 2.342422 0 + 19 RateOfUseByTechnology(BB,x,gas_plant,natural_gas,2016) 2.342422 0 + 21 RateOfUse(BB,x,natural_gas,2016) 2.342422 0 + 22 Production(BB,x,natural_gas,2016) 2.342422 0 + 23 Production(BB,x,electricity,2016) 2.1101 0 + 24 Use(BB,x,natural_gas,2016) 2.342422 0 + 25 Demand(BB,x,electricity,2016) 2.1101 0 + 28 ProductionAnnual(BB,natural_gas,2016) 2.342422 0 + 29 ProductionAnnual(BB,electricity,2016) 2.1101 0 + 30 UseAnnual(BB,natural_gas,2016) 2.342422 0 + 32 ProductionByTechnology(BB,x,gas_import,natural_gas,2016) 2.342422 0 + 33 ProductionByTechnology(BB,x,gas_plant,electricity,2016) 2.1101 0 + 34 UseByTechnology(BB,x,gas_plant,natural_gas,2016) 2.342422 0 + 35 TotalAnnualTechnologyActivityByMode(BB,gas_import,1,2016) 2.342422 0 + 36 TotalAnnualTechnologyActivityByMode(BB,gas_plant,1,2016) 2.1101 0 + 37 ModelPeriodCostByRegion(BB) 46.431257 0 + 38 CapitalInvestment(BB,gas_import,2016) 2.342422e-05 0 + 40 DiscountedCapitalInvestment(BB,gas_import,2016) 2.342422e-05 0 + 46 AnnualVariableOperatingCost(BB,gas_plant,2016) 19.244534 0 + 48 AnnualFixedOperatingCost(BB,gas_plant,2016) 28.333322 0 + 51 OperatingCost(BB,gas_plant,2016) 47.577856 0 + 53 DiscountedOperatingCost(BB,gas_plant,2016) 46.431233 1.110223e-16 + 54 TotalDiscountedCostByTechnology(BB,gas_import,2016) 2.342422e-05 0 + 56 TotalDiscountedCostByTechnology(BB,gas_plant,2016) 46.431233 0 + 58 TotalTechnologyAnnualActivity(BB,gas_import,2016) 2.342422 0 + 59 TotalTechnologyAnnualActivity(BB,gas_plant,2016) 2.1101 0 + 60 TotalTechnologyModelPeriodActivity(BB,gas_import) 2.342422 0 + 61 TotalTechnologyModelPeriodActivity(BB,gas_plant) 2.1101 0 + 64 ProductionByTechnologyAnnual(BB,gas_import,natural_gas,2016) 2.342422 0 + 65 ProductionByTechnologyAnnual(BB,gas_plant,electricity,2016) 2.1101 0 + 68 UseByTechnologyAnnual(BB,gas_plant,natural_gas,2016) 2.342422 0 diff --git a/tests/results/test_results_package.py b/tests/results/test_results_package.py index 10fc8725..33c784b9 100644 --- a/tests/results/test_results_package.py +++ b/tests/results/test_results_package.py @@ -651,6 +651,24 @@ def test_crf_null(self, discount_rate_idv, operational_life): assert_frame_equal(actual, 
expected) + def test_crf_no_tech_discount_rate(self, region, discount_rate, operational_life): + + technologies = ["GAS_EXTRACTION", "DUMMY"] + regions = region["VALUE"].to_list() + actual = capital_recovery_factor( + regions, technologies, discount_rate, operational_life + ) + + expected = pd.DataFrame( + data=[ + ["SIMPLICITY", "GAS_EXTRACTION", 0.5121951219512197], + ["SIMPLICITY", "DUMMY", 0.34972244250594786], + ], + columns=["REGION", "TECHNOLOGY", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY"]) + + assert_frame_equal(actual, expected) + class TestPvAnnuity: def test_pva(self, region, discount_rate, operational_life): diff --git a/tests/test_cli.py b/tests/test_cli.py index 779af14e..3e109c03 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -7,20 +7,54 @@ from otoole import __version__ +class TestResults: + """Test the conversion of results via the command line interface""" + + def test_convert_results(self): + """Test converting CBC solution file to folder of CSVs""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + super_simple_csvs = os.path.join("tests", "fixtures", "super_simple", "csv") + from_format = "cbc" + to_format = "csv" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" + ) + to_path = mkdtemp() + commands = [ + "otoole", + "results", + from_format, + to_format, + from_path, + to_path, + "csv", + super_simple_csvs, + config, + ] + actual = run(commands, capture_output=True) + assert actual.returncode == 0, print(actual.stdout) + assert os.path.exists(os.path.join(to_path, "NewCapacity.csv")) + + class TestConvert: def test_version(self): result = run(["otoole", "--version"], capture_output=True) assert result.stdout.strip().decode() == str(__version__) + def test_help(self): + commands = ["otoole", "-v", "convert", "--help"] + expected = "usage: otoole convert [-h]" + actual = run(commands, capture_output=True) + assert expected in str(actual.stdout) + assert actual.returncode == 0, print(actual.stdout) + temp = mkdtemp() - temp_excel = NamedTemporaryFile(suffix=".xlsx") - temp_datafile = NamedTemporaryFile(suffix=".dat") simplicity = os.path.join("tests", "fixtures", "simplicity.txt") config_path = os.path.join("tests", "fixtures", "config.yaml") test_data = [ - (["otoole", "-v", "convert", "--help"], "usage: otoole convert [-h]"), ( + "excel", [ "otoole", "-v", @@ -28,12 +62,13 @@ def test_version(self): "datafile", "excel", simplicity, - temp_excel.name, + "convert_to_file_path", # replaced with NamedTemporaryFile config_path, ], "", ), ( + "datafile", [ "otoole", "-v", @@ -41,19 +76,34 @@ def test_version(self): "datafile", "datafile", simplicity, - temp_datafile.name, + "convert_to_file_path", # replaced with NamedTemporaryFile config_path, ], "", ), ] - @mark.parametrize("commands,expected", test_data, ids=["help", "excel", "datafile"]) - def test_convert_commands(self, commands, expected): - actual = run(commands, capture_output=True) - assert expected in str(actual.stdout) - print(" ".join(commands)) - assert actual.returncode == 0, print(actual.stdout) + @mark.parametrize( + "convert_to,commands,expected", test_data, ids=["excel", "datafile"] + ) + def test_convert_commands(self, convert_to, commands, expected): + if convert_to == "datafile": + temp = NamedTemporaryFile(suffix=".dat", delete=False, mode="w") + elif convert_to == "excel": + temp = NamedTemporaryFile(suffix=".xlsx", delete=False, mode="w") + else: + raise NotImplementedError + try: + commands_adjusted = [ + x if x 
!= "convert_to_file_path" else temp.name for x in commands + ] + actual = run(commands_adjusted, capture_output=True) + assert expected in str(actual.stdout) + print(" ".join(commands_adjusted)) + assert actual.returncode == 0, print(actual.stdout) + finally: + temp.close() + os.unlink(temp.name) test_errors = [ ( @@ -69,59 +119,68 @@ def test_convert_error(self, commands, expected): def test_convert_datafile_datafile_no_user_config(self): simplicity = os.path.join("tests", "fixtures", "simplicity.txt") - temp_datafile = NamedTemporaryFile(suffix=".dat") - commands = [ - "otoole", - "convert", - "datafile", - "datafile", - simplicity, - temp_datafile.name, - ] - actual = run(commands, capture_output=True) - assert actual.returncode == 2 + temp_datafile = NamedTemporaryFile(suffix=".dat", delete=False, mode="w") + try: + commands = [ + "otoole", + "convert", + "datafile", + "datafile", + simplicity, + temp_datafile.name, + ] + actual = run(commands, capture_output=True) + assert actual.returncode == 2 + finally: + temp_datafile.close() + os.unlink(temp_datafile.name) def test_convert_datafile_datafile_with_user_config(self): simplicity = os.path.join("tests", "fixtures", "simplicity.txt") user_config = os.path.join("tests", "fixtures", "config.yaml") - temp_datafile = NamedTemporaryFile(suffix=".dat") - commands = [ - "otoole", - "-vvv", - "convert", - "datafile", - "datafile", - simplicity, - temp_datafile.name, - user_config, - ] - actual = run(commands, capture_output=True) - assert actual.returncode == 0 + temp_datafile = NamedTemporaryFile(suffix=".dat", delete=False, mode="w") + try: + commands = [ + "otoole", + "-vvv", + "convert", + "datafile", + "datafile", + simplicity, + temp_datafile.name, + user_config, + ] + actual = run(commands, capture_output=True) + assert actual.returncode == 0 + finally: + temp_datafile.close() + os.unlink(temp_datafile.name) def test_convert_datafile_datafile_with_default_flag(self): simplicity = os.path.join("tests", "fixtures", "simplicity.txt") user_config = os.path.join("tests", "fixtures", "config.yaml") - temp_datafile = NamedTemporaryFile(suffix=".dat") - commands = [ - "otoole", - "-vvv", - "convert", - "datafile", - "datafile", - simplicity, - temp_datafile.name, - user_config, - "--write_defaults", - ] - actual = run(commands, capture_output=True) - assert actual.returncode == 0 + temp_datafile = NamedTemporaryFile(suffix=".dat", delete=False, mode="w") + try: + commands = [ + "otoole", + "-vvv", + "convert", + "datafile", + "datafile", + simplicity, + temp_datafile.name, + user_config, + "--write_defaults", + ] + actual = run(commands, capture_output=True) + assert actual.returncode == 0 + finally: + temp_datafile.close() + os.unlink(temp_datafile.name) class TestSetup: - temp = mkdtemp() - temp_config = NamedTemporaryFile(suffix=".yaml") - test_data = [ ( [ @@ -129,27 +188,45 @@ class TestSetup: "-v", "setup", "config", - NamedTemporaryFile(suffix=".yaml").name, + NamedTemporaryFile( + suffix=".yaml" + ).name, # representes a new config file ], "", ), - (["otoole", "-v", "setup", "config", temp_config.name, "--overwrite"], ""), + (["otoole", "-v", "setup", "config", "temp_file", "--overwrite"], ""), ] @mark.parametrize( "commands,expected", test_data, ids=["setup", "setup_with_overwrite"] ) def test_setup_commands(self, commands, expected): - actual = run(commands, capture_output=True) - assert expected in str(actual.stdout) - print(" ".join(commands)) - assert actual.returncode == 0, print(actual.stdout) + temp_yaml = 
NamedTemporaryFile(suffix=".yaml", delete=False, mode="w+b") + try: + commands_adjusted = [ + x if x != "temp_file" else temp_yaml.name for x in commands + ] + actual = run(commands_adjusted, capture_output=True) + assert expected in str(actual.stdout) + print(" ".join(commands_adjusted)) + assert actual.returncode == 0, print(actual.stdout) + finally: + temp_yaml.close() + os.unlink(temp_yaml.name) test_errors = [ - (["otoole", "-v", "setup", "config", temp_config.name], "OtooleSetupError"), + (["otoole", "-v", "setup", "config", "temp_file"], "OtooleSetupError"), ] @mark.parametrize("commands,expected", test_errors, ids=["setup_fails"]) def test_setup_error(self, commands, expected): - actual = run(commands, capture_output=True) - assert expected in str(actual.stderr) + temp_yaml = NamedTemporaryFile(suffix=".yaml", delete=False, mode="w") + try: + commands_adjusted = [ + x if x != "temp_file" else temp_yaml.name for x in commands + ] + actual = run(commands_adjusted, capture_output=True) + assert expected in str(actual.stderr) + finally: + temp_yaml.close() + os.unlink(temp_yaml.name) diff --git a/tests/test_convert.py b/tests/test_convert.py index 39b3ed69..e4c99046 100644 --- a/tests/test_convert.py +++ b/tests/test_convert.py @@ -1,66 +1,133 @@ -""" - -read_strategy = None -write_strategy = None - -if args.from_format == "datafile": - read_strategy = ReadDatafile() -elif args.from_format == "csv": - read_strategy = ReadCsv() -elif args.from_format == "excel": - read_strategy = ReadExcel() - -if args.to_format == "excel": - write_strategy = WriteExcel() -elif args.to_format == "datafile": - write_strategy = WriteDatafile() -elif args.to_format == "csv": - write_strategy = WriteCsv() - -if read_strategy and write_strategy: - context = Context(read_strategy, write_strategy) - context.convert(args.from_path, args.to_path) -else: - raise NotImplementedError(msg) +"""This module tests the public API of the otoole package """ import os from tempfile import NamedTemporaryFile, TemporaryDirectory -from otoole import Context, ReadExcel, WriteCsv, WriteDatafile +import pandas as pd +from pytest import raises +from otoole import convert, convert_results, read, read_results, write +from otoole.exceptions import OtooleError -class TestConvert: - def test_convert_excel_to_datafile(self, user_config): - read_strategy = ReadExcel(user_config) - write_strategy = WriteDatafile(user_config) - context = Context(read_strategy, write_strategy) +class TestRead: + """Tests the public api for reading data""" - tmpfile = NamedTemporaryFile() - from_path = os.path.join("tests", "fixtures", "combined_inputs.xlsx") + def test_read_datafile(self): + """Test reading data from a file""" + data, defaults = read( + os.path.join("tests", "fixtures", "config.yaml"), + "datafile", + os.path.join("tests", "fixtures", "simplicity.txt"), + ) + + assert isinstance(data, dict) + assert isinstance(defaults, dict) + + def test_read_excel(self): + """Test reading data from an Excel file""" + data, defaults = read( + os.path.join("tests", "fixtures", "config.yaml"), + "excel", + os.path.join("tests", "fixtures", "combined_inputs.xlsx"), + ) - context.convert(from_path, tmpfile.name) + assert isinstance(data, dict) + assert isinstance(defaults, dict) - tmpfile.seek(0) - actual = tmpfile.readlines() - tmpfile.close() + def test_read_csv(self): + """Test reading data from a CSV file""" + data, defaults = read( + os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml"), + "csv", + os.path.join("tests", "fixtures", 
"super_simple", "csv"), + ) - assert actual[-1] == b"end;\n" - assert actual[0] == b"# Model file written by *otoole*\n" - assert actual[2] == b"09_ROK d_bld_2_coal_products 2017 20.8921\n" - assert actual[8996] == b"param default 1 : DepreciationMethod :=\n" + assert isinstance(data, dict) + assert "REGION" in data + pd.testing.assert_frame_equal(data["REGION"], pd.DataFrame({"VALUE": ["BB"]})) + assert "TECHNOLOGY" in data + assert "MODE_OF_OPERATION" in data + assert "YEAR" in data + assert isinstance(defaults, dict) - def test_convert_excel_to_csv(self, user_config): - read_strategy = ReadExcel(user_config) - write_strategy = WriteCsv(user_config) - context = Context(read_strategy, write_strategy) +class TestWrite: + """Tests the public api for writing data""" + + def test_write_datafile(self): + """Test writing data to a file""" + data = {"REGION": pd.DataFrame({"VALUE": ["BB"]})} + temp = NamedTemporaryFile(delete=False, mode="w") + try: + assert write( + os.path.join("tests", "fixtures", "config.yaml"), + "datafile", + temp.name, + data, + ) + finally: + temp.close() + os.unlink(temp.name) + + def test_write_excel(self): + """Test writing data to an Excel file""" + data = {"REGION": pd.DataFrame({"VALUE": ["BB"]})} + temp = NamedTemporaryFile(suffix=".xlsx", delete=False, mode="w") + try: + assert write( + os.path.join("tests", "fixtures", "config.yaml"), + "excel", + temp.name, + data, + ) + finally: + temp.close() + os.unlink(temp.name) + + def test_write_csv(self): + """Test writing data to a CSV file""" + data = {"REGION": pd.DataFrame({"VALUE": ["BB"]})} + temp = TemporaryDirectory() + assert write( + os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml"), + "csv", + temp.name, + data, + ) + + +class TestConvert: + """Test the convert function""" + + def test_convert_excel_to_datafile(self): + """Test converting from Excel to datafile""" + user_config = os.path.join("tests", "fixtures", "config.yaml") + from_path = os.path.join("tests", "fixtures", "combined_inputs.xlsx") + tmpfile = NamedTemporaryFile(delete=False, mode="w+b") + + try: + convert(user_config, "excel", "datafile", from_path, tmpfile.name) + tmpfile.seek(0) + actual = tmpfile.readlines() + + assert actual[-1] == b"end;\n" + assert actual[0] == b"# Model file written by *otoole*\n" + assert actual[2] == b"09_ROK d_bld_2_coal_products 2017 20.8921\n" + assert actual[8996] == b"param default 1 : DepreciationMethod :=\n" + finally: + tmpfile.close() + os.unlink(tmpfile.name) + + def test_convert_excel_to_csv(self): + """Test converting from Excel to CSV""" tmpfile = TemporaryDirectory() + user_config = os.path.join("tests", "fixtures", "config.yaml") from_path = os.path.join("tests", "fixtures", "combined_inputs.xlsx") - context.convert(from_path, tmpfile.name) + convert(user_config, "excel", "csv", from_path, tmpfile.name) with open(os.path.join(tmpfile.name, "EMISSION.csv")) as csv_file: csv_file.seek(0) @@ -69,3 +136,127 @@ def test_convert_excel_to_csv(self, user_config): assert actual[-1] == "NOX\n" assert actual[0] == "VALUE\n" assert actual[1] == "CO2\n" + + +class TestReadResults: + """Test the read_results function""" + + def test_read_results(self): + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + + input_path = os.path.join("tests", "fixtures", "super_simple", "csv") + input_format = "csv" + from_format = "cbc" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" + ) + + actual, _ = read_results( + config, from_format, 
from_path, input_format, input_path ) + + expected_data = [["BB", "gas_import", 2016, 2.342422]] + expected_columns = ["REGION", "TECHNOLOGY", "YEAR", "VALUE"] + index = ["REGION", "TECHNOLOGY", "YEAR"] + expected_data_frame = pd.DataFrame( + expected_data, columns=expected_columns ).set_index(index) + + pd.testing.assert_frame_equal( + actual["AccumulatedNewCapacity"], expected_data_frame ) + + +class TestConvertResults: + """Test the convert_results function""" + + def test_convert_results_cbc_csv(self): + """Test converting CBC solution file to folder of CSVs""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + from_format = "cbc" + to_format = "csv" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" + ) + tmpfile = TemporaryDirectory() + to_path = tmpfile.name + input_csvs = os.path.join("tests", "fixtures", "super_simple", "csv") + + result = convert_results( + config, from_format, to_format, from_path, to_path, "csv", input_csvs + ) + assert result is True + + with open(os.path.join(tmpfile.name, "NewCapacity.csv")) as csv_file: + csv_file.seek(0) + actual = csv_file.readlines() + + assert actual[0] == "REGION,TECHNOLOGY,YEAR,VALUE\n" + assert actual[-1] == "BB,gas_import,2016,2.342422\n" + + def test_convert_results_cbc_csv_datafile(self): + """Test converting CBC solution file to folder of CSVs, using a datafile as the input data""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + from_format = "cbc" + to_format = "csv" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" ) + tmpfile = TemporaryDirectory() + to_path = tmpfile.name + input_datafile = os.path.join( + "tests", "fixtures", "super_simple", "super_simple.txt" + ) + + result = convert_results( + config, + from_format, + to_format, + from_path, + to_path, + "datafile", + input_datafile, + ) + assert result is True + + with open(os.path.join(tmpfile.name, "NewCapacity.csv")) as csv_file: + csv_file.seek(0) + actual = csv_file.readlines() + + assert actual[0] == "REGION,TECHNOLOGY,YEAR,VALUE\n" + assert actual[-1] == "BB,gas_import,2016,2.342422\n" + + def test_convert_results_cbc_csv_raises(self): + """Test that converting a CBC solution file raises when the input data path is invalid""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + from_format = "cbc" + to_format = "csv" + from_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple_gnu.sol" ) + tmpfile = TemporaryDirectory() + to_path = tmpfile.name + + with raises(FileNotFoundError): + convert_results( + config, + from_format, + to_format, + from_path, + to_path, + "csv", + "not_a_path", + ) + + +class TestGetReadResultsStrategy: + def test_read_results_glpk_raises(self): + """Checks that reading GLPK results without a .glp model file raises an error""" + config = os.path.join("tests", "fixtures", "super_simple", "super_simple.yaml") + input_path = "" + input_format = "csv" + from_format = "glpk" + from_path = "" + + with raises(OtooleError): + read_results(config, from_format, from_path, input_format, input_path) diff --git a/tests/test_input.py b/tests/test_input.py index 37fd4729..e9a7b148 100644 --- a/tests/test_input.py +++ b/tests/test_input.py @@ -265,7 +265,7 @@ def result_data(region): input_data_single_index_empty(region), ] parameter_test_data_ids = [ - "multi_index_no_defaluts", + "multi_index_no_defaults", "multi_index", "multi_index_empty", "single_index", @@ -277,12 +277,13 @@ def result_data(region): parameter_test_data, ids=parameter_test_data_ids, ) - def
test_expand_parmaters_defaults( + def test_expand_parameters_defaults( self, user_config, simple_default_values, input_data, parameter, expected ): write_strategy = DummyWriteStrategy( user_config=user_config, default_values=simple_default_values ) + write_strategy.input_data = input_data actual = write_strategy._expand_defaults( input_data, write_strategy.default_values ) @@ -294,22 +295,12 @@ def test_expand_result_defaults( write_strategy = DummyWriteStrategy( user_config=user_config, default_values=simple_default_values ) + write_strategy.input_data = simple_input_data actual = write_strategy._expand_defaults( - result_data[0], write_strategy.default_values, input_data=simple_input_data + result_data[0], write_strategy.default_values ) assert_frame_equal(actual[result_data[1]], result_data[2]) - def test_expand_defaults_exception( - self, user_config, simple_default_values, result_data - ): - write_strategy = DummyWriteStrategy( - user_config=user_config, default_values=simple_default_values - ) - with raises(KeyError): - write_strategy._expand_defaults( - result_data[0], write_strategy.default_values - ) - class TestReadStrategy: diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index b962a157..574fcee8 100644 --- a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -7,12 +7,13 @@ from amply import Amply from pytest import mark, raises -from otoole import ReadCsv, ReadDatafile, ReadExcel, ReadMemory from otoole.exceptions import OtooleDeprecationError, OtooleError from otoole.preprocess.longify_data import check_datatypes +from otoole.read_strategies import ReadCsv, ReadDatafile, ReadExcel, ReadMemory from otoole.results.results import ( ReadCbc, ReadCplex, + ReadGlpk, ReadGurobi, check_for_duplicates, identify_duplicate, @@ -22,212 +23,128 @@ class TestReadCplex: - cplex_empty = ( - "AnnualFixedOperatingCost REGION AOBACKSTOP 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" - ) - cplex_short = "AnnualFixedOperatingCost REGION CDBACKSTOP 0.0 0.0 137958.8400384134 305945.38410619126 626159.9611543404 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0" - cplex_long = "RateOfActivity REGION S1D1 CGLFRCFURX 1 0.0 0.0 0.0 0.0 0.0 0.3284446367303371 0.3451714779880536 0.3366163200621617 0.3394945166233896 0.3137488154250392 0.28605725055560716 0.2572505015401749 0.06757558148965725 0.0558936625751148 0.04330608461292407 0.0" - - cplex_mid_empty = ( - pd.DataFrame( - data=[], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ) - .astype({"VALUE": float}) - .set_index(["REGION", "TECHNOLOGY", "YEAR"]) - ) - - cplex_mid_short = pd.DataFrame( - data=[ - ["REGION", "CDBACKSTOP", 2017, 137958.8400384134], - ["REGION", "CDBACKSTOP", 2018, 305945.38410619126], - ["REGION", "CDBACKSTOP", 2019, 626159.9611543404], - ], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + cplex_data = """ + +
+ <variables>
+  <variable name="NewCapacity(SIMPLICITY,ETHPLANT,2015)" index="0" value="0.030000000000000027"/>
+  <variable name="NewCapacity(SIMPLICITY,ETHPLANT,2016)" index="1" value="0.030999999999999917"/>
+  <variable name="RateOfActivity(SIMPLICITY,ID,HYD1,1,2020)" index="2" value="0.25228800000000001"/>
+  <variable name="RateOfActivity(SIMPLICITY,ID,HYD1,1,2021)" index="3" value="0.25228800000000001"/>
+  <variable name="RateOfActivity(SIMPLICITY,ID,HYD1,1,2022)" index="4" value="0.25228800000000001"/>
+ </variables>
+</CPLEXSolution>
+"""
-    cplex_mid_long = pd.DataFrame(
-        data=[
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2020, 0.3284446367303371],
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2021, 0.3451714779880536],
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2022, 0.3366163200621617],
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2023, 0.3394945166233896],
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2024, 0.3137488154250392],
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2025, 0.28605725055560716],
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2026, 0.2572505015401749],
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2027, 0.06757558148965725],
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2028, 0.0558936625751148],
-            ["REGION", "S1D1", "CGLFRCFURX", 1, 2029, 0.04330608461292407],
-        ],
-        columns=[
-            "REGION",
-            "TIMESLICE",
-            "TECHNOLOGY",
-            "MODE_OF_OPERATION",
-            "YEAR",
-            "VALUE",
-        ],
-    ).set_index(["REGION", "TIMESLICE", "TECHNOLOGY", "MODE_OF_OPERATION", "YEAR"])
-
-    dataframe_short = {
-        "AnnualFixedOperatingCost": pd.DataFrame(
-            data=[
-                ["REGION", "CDBACKSTOP", 2017, 137958.8400384134],
-                ["REGION", "CDBACKSTOP", 2018, 305945.3841061913],
-                ["REGION", "CDBACKSTOP", 2019, 626159.9611543404],
-            ],
-            columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"],
-        ).set_index(["REGION", "TECHNOLOGY", "YEAR"])
-    }
-
-    dataframe_long = {
-        "RateOfActivity": pd.DataFrame(
-            data=[
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2020, 0.3284446367303371],
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2021, 0.3451714779880536],
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2022, 0.3366163200621617],
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2023, 0.3394945166233896],
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2024, 0.3137488154250392],
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2025, 0.28605725055560716],
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2026, 0.2572505015401749],
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2027, 0.06757558148965725],
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2028, 0.0558936625751148],
-                ["REGION", "S1D1", "CGLFRCFURX", 1, 2029, 0.04330608461292407],
-            ],
-            columns=[
-                "REGION",
-                "TIMESLICE",
-                "TECHNOLOGY",
-                "MODE_OF_OPERATION",
-                "YEAR",
-                "VALUE",
+    def test_convert_to_dataframe(self, user_config):
+        input_file = self.cplex_data
+        reader = ReadCplex(user_config)
+        with StringIO(input_file) as file_buffer:
+            actual = reader._convert_to_dataframe(file_buffer)
+        expected = pd.DataFrame(
+            [
+                ["NewCapacity", "SIMPLICITY,ETHPLANT,2015", 0.030000000000000027],
+                ["NewCapacity", "SIMPLICITY,ETHPLANT,2016", 0.030999999999999917],
+                ["RateOfActivity", "SIMPLICITY,ID,HYD1,1,2020", 0.25228800000000001],
+                ["RateOfActivity", "SIMPLICITY,ID,HYD1,1,2021", 0.25228800000000001],
+                ["RateOfActivity", "SIMPLICITY,ID,HYD1,1,2022", 0.25228800000000001],
+            ],
-            ],
-        ).set_index(["REGION", "TIMESLICE", "TECHNOLOGY", "MODE_OF_OPERATION", "YEAR"])
-    }
-
-    test_data = [
-        (cplex_short, dataframe_short),
-        (cplex_long, dataframe_long),
-    ]
-
-    @mark.parametrize("cplex_input,expected", test_data, ids=["short", "long"])
-    def test_read_cplex_to_dataframe(self, cplex_input, expected, user_config):
-        cplex_reader = ReadCplex(user_config=user_config)
-
-        input_data = {
-            "YEAR": pd.DataFrame(data=list(range(2015, 2031, 1)), columns=["VALUE"]),
-            "REGION": pd.DataFrame(data=["REGION"], columns=["VALUE"]),
-            "TECHNOLOGY": pd.DataFrame(
-                data=["CDBACKSTOP", "CGLFRCFURX"], columns=["VALUE"]
-            ),
-            "MODE_OF_OPERATION": pd.DataFrame(data=[1], columns=["VALUE"]),
-            "TIMESLICE": pd.DataFrame(data=["S1D1"], columns=["VALUE"]),
-        }
-
-        with StringIO(cplex_input) as file_buffer:
-            actual, _ = 
cplex_reader.read(file_buffer, input_data=input_data)
-        for name, item in actual.items():
-            pd.testing.assert_frame_equal(item, expected[name])
-
-    test_data_mid = [(cplex_short, cplex_mid_short), (cplex_long, cplex_mid_long)]
-
-    def test_read_empty_cplex_to_dataframe(self, user_config):
-        cplex_input = self.cplex_empty
-
-        cplex_reader = ReadCplex(user_config)
+            columns=["Variable", "Index", "Value"],
+        ).astype({"Variable": str, "Index": str, "Value": float})
-        input_data = {
-            "YEAR": pd.DataFrame(data=list(range(2015, 2031, 1)), columns=["VALUE"])
-        }
+        pd.testing.assert_frame_equal(actual, expected)
-        with StringIO(cplex_input) as file_buffer:
-            data, _ = cplex_reader.read(file_buffer, input_data=input_data)
-        assert "AnnualFixedOperatingCost" in data
+    def test_solution_to_dataframe(self, user_config):
+        input_file = self.cplex_data
+        reader = ReadCplex(user_config)
+        with StringIO(input_file) as file_buffer:
+            actual = reader.read(file_buffer)
         expected = (
             pd.DataFrame(
-                data=[],
+                [
+                    ["SIMPLICITY", "ETHPLANT", 2015, 0.030000000000000027],
+                    ["SIMPLICITY", "ETHPLANT", 2016, 0.030999999999999917],
+                ],
                 columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"],
             )
-            .astype({"REGION": str, "VALUE": float, "YEAR": int, "TECHNOLOGY": str})
+            .astype({"REGION": str, "TECHNOLOGY": str, "YEAR": "int64", "VALUE": float})
             .set_index(["REGION", "TECHNOLOGY", "YEAR"])
         )
-        actual = data["AnnualFixedOperatingCost"]
-        pd.testing.assert_frame_equal(actual, expected, check_index_type=False)
 
-    test_data_to_cplex = [
-        (cplex_empty, cplex_mid_empty),
-        (cplex_short, cplex_mid_short),
-        (cplex_long, cplex_mid_long),
-    ]
+        pd.testing.assert_frame_equal(actual[0]["NewCapacity"], expected)
 
-    @mark.parametrize(
-        "cplex_input,expected", test_data_to_cplex, ids=["empty", "short", "long"]
-    )
-    def test_convert_cplex_to_df(self, cplex_input, expected, user_config):
-
-        data = cplex_input.split("\t")
-        variable = data[0]
-        cplex_reader = ReadCplex(user_config=user_config)
-        actual = cplex_reader.convert_df([data], variable, 2015, 2030)
-        pd.testing.assert_frame_equal(actual, expected, check_index_type=False)
-
-    def test_convert_lines_to_df_empty(self, user_config):
-
-        data = [
-            [
-                "AnnualFixedOperatingCost",
-                "REGION",
-                "AOBACKSTOP",
-                "0",
-                "0",
-                "0",
-                "0",
-                "0",
-                "0",
-                "0",
-                "0",
-                "0",
-            ]
-        ]
-        variable = "AnnualFixedOperatingCost"
-        cplex_reader = ReadCplex(user_config)
-        actual = cplex_reader.convert_df(data, variable, 2015, 2023)
-        pd.testing.assert_frame_equal(
-            actual,
+        expected = (
             pd.DataFrame(
-                data=[],
-                columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"],
+                [
+                    ["SIMPLICITY", "ID", "HYD1", 1, 2020, 0.25228800000000001],
+                    ["SIMPLICITY", "ID", "HYD1", 1, 2021, 0.25228800000000001],
+                    ["SIMPLICITY", "ID", "HYD1", 1, 2022, 0.25228800000000001],
+                ],
+                columns=[
+                    "REGION",
+                    "TIMESLICE",
+                    "TECHNOLOGY",
+                    "MODE_OF_OPERATION",
+                    "YEAR",
+                    "VALUE",
+                ],
+            )
+            .astype(
+                {
+                    "REGION": str,
+                    "TIMESLICE": str,
+                    "TECHNOLOGY": str,
+                    "MODE_OF_OPERATION": "int64",
+                    "YEAR": "int64",
+                    "VALUE": float,
+                }
+            )
+            .set_index(
+                ["REGION", "TIMESLICE", "TECHNOLOGY", "MODE_OF_OPERATION", "YEAR"]
             )
-            .astype({"REGION": str, "TECHNOLOGY": str, "YEAR": int, "VALUE": float})
-            .set_index(["REGION", "TECHNOLOGY", "YEAR"]),
-            check_index_type=False,
         )
-
-    def test_check_datatypes_with_empty(self):
-
-        df = pd.DataFrame(data=[], columns=["REGION", "FUEL", "YEAR", "VALUE"])
-
-        parameter = "AccumulatedAnnualDemand"
-
-        config_dict = {
-            "AccumulatedAnnualDemand": {
-                "indices": ["REGION", 
"FUEL", "YEAR"], - "type": "param", - "dtype": float, - "default": 0, - }, - "REGION": {"dtype": "str", "type": "set"}, - "FUEL": {"dtype": "str", "type": "set"}, - "YEAR": {"dtype": "int", "type": "set"}, - } - - actual = check_datatypes(df, config_dict, parameter) - - expected = pd.DataFrame( - data=[], columns=["REGION", "FUEL", "YEAR", "VALUE"] - ).astype({"REGION": str, "FUEL": str, "YEAR": int, "VALUE": float}) - - pd.testing.assert_frame_equal(actual, expected, check_index_type=False) + pd.testing.assert_frame_equal(actual[0]["RateOfActivity"], expected) class TestReadGurobi: @@ -285,7 +202,7 @@ def test_solution_to_dataframe(self, user_config): ], columns=["REGION", "YEAR", "VALUE"], ) - .astype({"YEAR": int, "VALUE": float}) + .astype({"YEAR": "int64", "VALUE": float}) .set_index(["REGION", "YEAR"]) ) @@ -308,7 +225,7 @@ def test_solution_to_dataframe(self, user_config): "VALUE", ], ) - .astype({"YEAR": int, "VALUE": float, "MODE_OF_OPERATION": int}) + .astype({"YEAR": "int64", "VALUE": float, "MODE_OF_OPERATION": "int64"}) .set_index( ["REGION", "TIMESLICE", "TECHNOLOGY", "MODE_OF_OPERATION", "YEAR"] ) @@ -611,6 +528,167 @@ def test_manage_infeasible_variables(self, user_config): pd.testing.assert_frame_equal(actual, expected) +class TestReadGlpk: + """Use fixtures instead of StringIO due to the use of context managers in the logic""" + + model_data = dedent( + """p lp min 12665 9450 82606 +n p osemosys_fast +n z cost +i 1 f +n i 1 cost +i 2 u -0 +n i 2 CAa4_Constraint_Capacity[SIMPLICITY,ID,BACKSTOP1,2014] +i 3 u -0 +n i 3 CAa4_Constraint_Capacity[SIMPLICITY,ID,BACKSTOP1,2015] +i 300 u 147.115 +n i 300 CAa4_Constraint_Capacity[SIMPLICITY,ID,LNDFORCOV,2015] +i 301 u 144.231 +n i 301 CAa4_Constraint_Capacity[SIMPLICITY,ID,LNDFORCOV,2016] +n j 1 SalvageValueStorage[SIMPLICITY,DAM,2014] +n j 2 SalvageValueStorage[SIMPLICITY,DAM,2015] +n j 130 StorageLevelSeasonStart[SIMPLICITY,DAM,2,2035] +n j 131 StorageLevelSeasonStart[SIMPLICITY,DAM,2,2036] +n j 1025 NewCapacity[SIMPLICITY,WINDPOWER,2039] +n j 1026 NewCapacity[SIMPLICITY,WINDPOWER,2040] +n j 1027 RateOfActivity[SIMPLICITY,ID,BACKSTOP1,1,2014] +n j 1028 RateOfActivity[SIMPLICITY,IN,BACKSTOP1,1,2014] +""" + ) + + sol_data = dedent( + """c Problem: osemosys_fast +c Rows: 12665 +c Columns: 9450 +c Non-zeros: 82606 +c Status: OPTIMAL +c Objective: cost = 4497.31967 (MINimum) +c +s bas 12665 9450 f f 4497.31967015205 +i 1 b 3942.19479265207 0 +i 2 b 0 0 +i 3 b 0 0 +i 300 b 37.499 0 +i 301 b 31.7309999999999 0 +j 1 b 0 0 +j 2 b 0 0 +j 130 l 0 0.282765294823514 +j 131 l 0 0.601075755990521 +j 1025 b 0.0305438002923389 0 +j 1026 b 0.0422503416065477 0 +j 1027 l 0 162679.693161095 +j 1028 l 0 81291.0524314291 +e o f +""" + ) + + expected_solution = pd.DataFrame( + [ + ["i", 1, "b", 3942.19479265207, 0], + ["i", 2, "b", 0, 0], + ["i", 3, "b", 0, 0], + ["i", 300, "b", 37.499, 0], + ["i", 301, "b", 31.7309999999999, 0], + ["j", 1, "b", 0, 0], + ["j", 2, "b", 0, 0], + ["j", 130, "l", 0, 0.282765294823514], + ["j", 131, "l", 0, 0.601075755990521], + ["j", 1025, "b", 0.0305438002923389, 0], + ["j", 1026, "b", 0.0422503416065477, 0], + ["j", 1027, "l", 0, 162679.693161095], + ["j", 1028, "l", 0, 81291.0524314291], + ], + columns=["ID", "NUM", "STATUS", "PRIM", "DUAL"], + ) + + def test_read_model(self, user_config): + model_data = self.model_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) + actual = reader.model + + expected = pd.DataFrame( + [ + ["i", 2, 
"CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2014"], + ["i", 3, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,BACKSTOP1,2015"], + ["i", 300, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,LNDFORCOV,2015"], + ["i", 301, "CAa4_Constraint_Capacity", "SIMPLICITY,ID,LNDFORCOV,2016"], + ["j", 1, "SalvageValueStorage", "SIMPLICITY,DAM,2014"], + ["j", 2, "SalvageValueStorage", "SIMPLICITY,DAM,2015"], + ["j", 130, "StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2035"], + ["j", 131, "StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2036"], + ["j", 1025, "NewCapacity", "SIMPLICITY,WINDPOWER,2039"], + ["j", 1026, "NewCapacity", "SIMPLICITY,WINDPOWER,2040"], + ["j", 1027, "RateOfActivity", "SIMPLICITY,ID,BACKSTOP1,1,2014"], + ["j", 1028, "RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014"], + ], + columns=["ID", "NUM", "NAME", "INDEX"], + ).astype({"ID": str, "NUM": "int64", "NAME": str, "INDEX": str}) + + pd.testing.assert_frame_equal(actual, expected) + + def test_read_solution(self, user_config): + model_data = self.model_data + sol_data = self.sol_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) + with StringIO(sol_data) as file_buffer: + actual_status, actual_data = reader.read_solution(file_buffer) + + expected_status = { + "name": "osemosys_fast", + "status": "OPTIMAL", + "objective": 4497.31967, + } + assert actual_status == expected_status + + pd.testing.assert_frame_equal(actual_data, self.expected_solution) + + def test_merge_model_sol(self, user_config): + model_data = self.model_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) + + actual = reader._merge_model_sol(self.expected_solution) + expected = pd.DataFrame( + [ + ["SalvageValueStorage", "SIMPLICITY,DAM,2014", 0], + ["SalvageValueStorage", "SIMPLICITY,DAM,2015", 0], + ["StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2035", 0], + ["StorageLevelSeasonStart", "SIMPLICITY,DAM,2,2036", 0], + ["NewCapacity", "SIMPLICITY,WINDPOWER,2039", 0.0305438002923389], + ["NewCapacity", "SIMPLICITY,WINDPOWER,2040", 0.0422503416065477], + ["RateOfActivity", "SIMPLICITY,ID,BACKSTOP1,1,2014", 0], + ["RateOfActivity", "SIMPLICITY,IN,BACKSTOP1,1,2014", 0], + ], + columns=["Variable", "Index", "Value"], + ) + + pd.testing.assert_frame_equal(actual, expected) + + def test_convert_to_dataframe(self, user_config): + model_data = self.model_data + sol_data = self.sol_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) + with StringIO(sol_data) as file_buffer: + reader._convert_to_dataframe(file_buffer) + + def test_convert_to_dataframe_error(self, user_config): + model_data = self.model_data + with StringIO(model_data) as file_buffer: + reader = ReadGlpk(user_config=user_config, glpk_model=file_buffer) + + sol = pd.DataFrame() + + with raises(TypeError): + reader._convert_to_dataframe(sol) + + def test_read_model_error(self, user_config): + with raises(TypeError): + ReadGlpk(user_config) + + class TestCleanOnRead: """Tests that a data is cleaned and indexed upon reading""" @@ -622,7 +700,7 @@ def test_index_dtypes_available(self, user_config): assert actual == { "REGION": "str", "FUEL": "str", - "YEAR": "int", + "YEAR": "int64", "VALUE": "float", } @@ -648,7 +726,7 @@ def test_remove_empty_lines(self, user_config): ], columns=["REGION", "FUEL", "YEAR", "VALUE"], ) - .astype({"REGION": str, "FUEL": str, "YEAR": int, "VALUE": float}) + .astype({"REGION": str, "FUEL": str, 
"YEAR": "int64", "VALUE": float}) .set_index(["REGION", "FUEL", "YEAR"]) } @@ -679,7 +757,7 @@ def test_change_types(self, user_config): ], columns=["REGION", "FUEL", "YEAR", "VALUE"], ) - .astype({"REGION": str, "FUEL": str, "YEAR": int, "VALUE": float}) + .astype({"REGION": str, "FUEL": str, "YEAR": "int64", "VALUE": float}) .set_index(["REGION", "FUEL", "YEAR"]) } @@ -756,7 +834,7 @@ def test_read_config(self, user_config): "FUEL": "str", "REGION": "str", "VALUE": "float", - "YEAR": "int", + "YEAR": "int64", }, } assert actual["AccumulatedAnnualDemand"] == expected @@ -1085,3 +1163,43 @@ def test_whitespace_converter( reader = ReadCsv(user_config=user_config, keep_whitespace=keep_whitespace) actual = reader._whitespace_converter(indices) assert actual == expected + + +class TestLongifyData: + """Tests for the preprocess.longify_data module""" + + # example availability factor data + data_valid = pd.DataFrame( + [ + ["SIMPLICITY", "ETH", 2014, 1.0], + ["SIMPLICITY", "RAWSUG", 2014, 0.5], + ["SIMPLICITY", "ETH", 2015, 1.03], + ["SIMPLICITY", "RAWSUG", 2015, 0.51], + ["SIMPLICITY", "ETH", 2016, 1.061], + ["SIMPLICITY", "RAWSUG", 2016, 0.519], + ], + columns=["REGION", "FUEL", "YEAR", "VALUE"], + ) + + data_invalid = pd.DataFrame( + [ + ["SIMPLICITY", "ETH", "invalid", 1.0], + ["SIMPLICITY", "RAWSUG", 2014, 0.5], + ], + columns=["REGION", "FUEL", "YEAR", "VALUE"], + ) + + def test_check_datatypes_valid(self, user_config): + df = self.data_valid.astype( + {"REGION": str, "FUEL": str, "YEAR": int, "VALUE": float} + ) + actual = check_datatypes(df, user_config, "AvailabilityFactor") + expected = df.copy() + + pd.testing.assert_frame_equal(actual, expected) + + def test_check_datatypes_invalid(self, user_config): + df = self.data_invalid + + with raises(ValueError): + check_datatypes(df, user_config, "AvailabilityFactor") diff --git a/tests/test_utils.py b/tests/test_utils.py index 8fac9aa3..1a50ccf8 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,3 +1,4 @@ +import os from tempfile import NamedTemporaryFile import pandas as pd @@ -77,16 +78,18 @@ def test_create_name_mappings_reversed(self, user_config): def test_excel_name_length_error(user_config_simple, request): user_config = request.getfixturevalue(user_config_simple) write_excel = WriteExcel(user_config=user_config) - temp_excel = NamedTemporaryFile(suffix=".xlsx") - handle = pd.ExcelWriter(temp_excel.name) - - with pytest.raises(OtooleExcelNameLengthError): - write_excel._write_parameter( - df=pd.DataFrame(), - parameter_name="ParameterNameLongerThanThirtyOneChars", - handle=pd.ExcelWriter(handle), - default=0, - ) + temp_excel = NamedTemporaryFile(suffix=".xlsx", delete=False, mode="r") + try: + with pytest.raises(OtooleExcelNameLengthError): + write_excel._write_parameter( + df=pd.DataFrame(), + parameter_name="ParameterNameLongerThanThirtyOneChars", + handle=pd.ExcelWriter(temp_excel.name), + default=0, + ) + finally: + temp_excel.close() + os.unlink(temp_excel.name) class TestYamlUniqueKeyReader: diff --git a/tests/test_write_strategies.py b/tests/test_write_strategies.py index af4d8b8e..18cf64ae 100644 --- a/tests/test_write_strategies.py +++ b/tests/test_write_strategies.py @@ -1,4 +1,5 @@ import io +import os from tempfile import NamedTemporaryFile import pandas as pd @@ -114,15 +115,20 @@ def test_form_no_pivot(self, user_config): def test_write_out_empty_dataframe(self, user_config): - temp_excel = NamedTemporaryFile(suffix=".xlsx") - handle = pd.ExcelWriter(temp_excel.name) - convert = WriteExcel(user_config) + 
temp_excel = NamedTemporaryFile(suffix=".xlsx", delete=False, mode="w") + try: + handle = pd.ExcelWriter(temp_excel.name) + convert = WriteExcel(user_config) - df = pd.DataFrame( - data=None, columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"] - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + df = pd.DataFrame( + data=None, columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"] + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - convert._write_parameter(df, "AvailabilityFactor", handle, default=0) + convert._write_parameter(df, "AvailabilityFactor", handle, default=0) + finally: + handle.close() + temp_excel.close() + os.unlink(temp_excel.name) class TestWriteDatafile: