From 39d76114b230979c425162352479965f6acbfad0 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Fri, 2 Aug 2024 16:44:12 -0600 Subject: [PATCH 01/44] sample code to add custom table functionaility --- reoptjl/views.py | 200 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 200 insertions(+) diff --git a/reoptjl/views.py b/reoptjl/views.py index d513e7642..6797a2922 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1139,6 +1139,206 @@ def easiur_costs(request): log.error(debug_msg) return JsonResponse({"Error": "Unexpected Error. Please check your input parameters and contact reopt@nrel.gov if problems persist."}, status=500) +######################################## +####### Custom Tables and Reports ###### +######################################## +import xlsxwriter +import pandas as pd +import requests +import json +from collections import defaultdict +import re +import uuid + +#### Helper Functions +def get_with_suffix(df, key, suffix, default_val=0): + """Fetch value from dataframe with an optional retriaval of _bau suffix.""" + if not key.endswith("_bau"): + key = f"{key}{suffix}" + return df.get(key, default_val) + +def flatten_dict(d, parent_key='', sep='.'): + """Flatten nested dictionary.""" + items = [] + for k, v in d.items(): + new_key = f"{parent_key}{sep}{k}" if parent_key else k + if isinstance(v, dict): + items.extend(flatten_dict(v, new_key, sep=sep).items()) + else: + items.append((new_key, v)) + return dict(items) + +def clean_data_dict(data_dict): + """Clean data dictionary by removing default values.""" + for key, value_array in data_dict.items(): + new_value_array = [ + "" if v in [0, float("nan"), "NaN", "0", "0.0", "$0.0", -0, "-0", "-0.0", "-$0.0", None] else v + for v in value_array + ] + data_dict[key] = new_value_array + return data_dict + +def sum_vectors(data): + """Sum numerical vectors within a nested data structure.""" + if isinstance(data, dict): + return {key: sum_vectors(value) for key, value in data.items()} + elif isinstance(data, list): + if all(isinstance(item, (int, float)) for item in data): + return sum(data) + else: + return [sum_vectors(item) for item in data] + else: + return data + +#### Core Functions +def generate_data_dict(config, df_gen, suffix): + """Generate data dictionary based on configuration and dataframe.""" + data_dict = defaultdict(list) + for var_key, col_name in config: + if callable(var_key): + val = var_key(df_gen) + else: + val = get_with_suffix(df_gen, var_key, suffix, "-") + data_dict[col_name].append(val) + return data_dict + +def get_REopt_data(data_f, scenario_name, config): + """Fetch and format data for a specific REopt scenario.""" + scenario_name_str = str(scenario_name) + suffix = "_bau" if re.search(r"(?i)\bBAU\b", scenario_name_str) else "" + + df_gen = flatten_dict(data_f) + data_dict = generate_data_dict(config, df_gen, suffix) + data_dict["Scenario"] = [scenario_name_str] + + col_order = ["Scenario"] + [col_name for _, col_name in config] + df_res = pd.DataFrame(data_dict) + df_res = df_res[col_order] + + return df_res + +def get_bau_values(mock_scenarios, config): + """Retrieve BAU values for comparison.""" + bau_values = {col_name: None for _, col_name in config} + for scenario in mock_scenarios: + df_gen = flatten_dict(scenario["outputs"]) + for var_key, col_name in config: + try: + key = var_key.__code__.co_consts[1] + except IndexError: + print(f"Warning: Could not find constant in lambda for {col_name}. 
Skipping...") + continue + + key_bau = f"{key}_bau" + if key_bau in df_gen: + value = df_gen[key_bau] + if bau_values[col_name] is None: + bau_values[col_name] = value + elif bau_values[col_name] != value: + raise ValueError(f"Inconsistent BAU values for {col_name}. This should only be used for portfolio cases with the same Site, ElectricLoad, and ElectricTariff for energy consumption and energy costs.") + return bau_values + +# def get_scenario_results(run_uuid): +# """Retrieve scenario results from an external API.""" +# results_url = f"{root_url}/job/{run_uuid}/results/?api_key={API_KEY}" +# response = requests.get(results_url, verify=False) +# response.raise_for_status() +# result_data = response.json() + +# processed_data = sum_vectors(result_data) #vectors are summed into a single value + +# # ## outputs json with the simplified REopt results where vectors are summed into a single value +# # with open(f"{run_uuid}.json", "w") as json_file: +# # json.dump(processed_data, json_file, indent=4) + +# return processed_data + + +def process_scenarios(scenarios, reopt_data_config): + """Process multiple scenarios and generate a combined dataframe.""" + config = reopt_data_config + bau_values = get_bau_values(scenarios, config) + combined_df = pd.DataFrame() + for scenario in scenarios: + run_uuid = scenario['run_uuid'] + df_result = get_REopt_data(scenario["outputs"], run_uuid, config) + df_result = df_result.set_index('Scenario').T + df_result.columns = [run_uuid] + combined_df = df_result if combined_df.empty else combined_df.join(df_result, how='outer') + + bau_data = {key: [value] for key, value in bau_values.items()} + bau_data["Scenario"] = ["BAU"] + df_bau = pd.DataFrame(bau_data) + + combined_df = pd.concat([df_bau, combined_df.T]).reset_index(drop=True) + combined_df = clean_data_dict(combined_df.to_dict(orient="list")) + combined_df = pd.DataFrame(combined_df) + combined_df = combined_df[["Scenario"] + [col for col in combined_df.columns if col != "Scenario"]] + + return combined_df + +# def summary_by_runuuids(run_uuids): +# """Fetch summary for multiple run UUIDs.""" +# if not run_uuids: +# return {'Error': 'Must provide one or more run_uuids'} + +# for r_uuid in run_uuids: +# if not isinstance(r_uuid, str): +# return {'Error': f'Provided run_uuids type error, must be string. {r_uuid}'} + +# try: +# uuid.UUID(r_uuid) +# except ValueError as e: +# return {"Error": str(e)} + +# try: +# scenarios = [get_scenario_results(run_uuid) for run_uuid in run_uuids] +# return {'scenarios': scenarios} +# except Exception as e: +# return {"Error": str(e)} + +def summary_by_runuuids(request): + """ + Fetch summary for multiple run UUIDs. + """ + try: + # Parse the request body + body = json.loads(request.body) + run_uuids = body.get('run_uuids', []) + + if not run_uuids: + return JsonResponse({'Error': 'Must provide one or more run_uuids'}, status=400) + + for r_uuid in run_uuids: + if not isinstance(r_uuid, str): + return JsonResponse({'Error': f'Provided run_uuids type error, must be string. 
{r_uuid}'}, status=400) + try: + uuid.UUID(r_uuid) + except ValueError as e: + return JsonResponse({"Error": str(e)}, status=400) + + scenarios = [] + for run_uuid in run_uuids: + # Call the existing results function + response = results(request, run_uuid) + if response.status_code == 200: + scenarios.append(json.loads(response.content)) + else: + return JsonResponse({"Error": f"Error fetching results for run_uuid {run_uuid}: {response.content}"}, status=response.status_code) + + return JsonResponse({'scenarios': scenarios}, status=200) + + except ValueError as e: + return JsonResponse({"Error": str(e)}, status=400) + + except KeyError as e: + return JsonResponse({"Error. Missing": str(e)}, status=400) + + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + debug_msg = "exc_type: {}; exc_value: {}; exc_traceback: {}".format(exc_type, exc_value, tb.format_tb(exc_traceback)) + log.debug(debug_msg) + return JsonResponse({"Error": "Unexpected error. Check log for more."}, status=500) # def fuel_emissions_rates(request): # try: From ed0f545334aa2f1acaf69dd5d7eb3b2d90e53536 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Thu, 8 Aug 2024 10:18:16 -0600 Subject: [PATCH 02/44] added custom table functions --- reoptjl/custom_table_helpers.py | 51 +++ reoptjl/testing_custom_table.ipynb | 127 ++++++ reoptjl/views.py | 655 +++++++++++++++++------------ 3 files changed, 568 insertions(+), 265 deletions(-) create mode 100644 reoptjl/custom_table_helpers.py create mode 100644 reoptjl/testing_custom_table.ipynb diff --git a/reoptjl/custom_table_helpers.py b/reoptjl/custom_table_helpers.py new file mode 100644 index 000000000..6038a8f72 --- /dev/null +++ b/reoptjl/custom_table_helpers.py @@ -0,0 +1,51 @@ +# custom table helpers.py + +import pandas as pd +from collections import defaultdict + +def get_with_suffix(df, key, suffix, default_val=0): + """Fetch value from dataframe with an optional retriaval of _bau suffix.""" + if not key.endswith("_bau"): + key = f"{key}{suffix}" + return df.get(key, default_val) + +def flatten_dict(d, parent_key='', sep='.'): + """Flatten nested dictionary.""" + items = [] + for k, v in d.items(): + new_key = f"{parent_key}{sep}{k}" if parent_key else k + if isinstance(v, dict): + items.extend(flatten_dict(v, new_key, sep=sep).items()) + else: + items.append((new_key, v)) + return dict(items) + +def clean_data_dict(data_dict): + """Clean data dictionary by removing default values.""" + for key, value_array in data_dict.items(): + new_value_array = [ + "" if v in [0, float("nan"), "NaN", "0", "0.0", "$0.0", -0, "-0", "-0.0", "-$0.0", None] else v + for v in value_array + ] + data_dict[key] = new_value_array + return data_dict + +def sum_vectors(data): + """Sum numerical vectors within a nested data structure.""" + if isinstance(data, dict): + return {key: sum_vectors(value) for key, value in data.items()} + elif isinstance(data, list): + if all(isinstance(item, (int, float)) for item in data): + return sum(data) + else: + return [sum_vectors(item) for item in data] + else: + return data + +def colnum_string(n): + """Convert a column number to an Excel-style column string.""" + string = "" + while n > 0: + n, remainder = divmod(n - 1, 26) + string = chr(65 + remainder) + string + return string diff --git a/reoptjl/testing_custom_table.ipynb b/reoptjl/testing_custom_table.ipynb new file mode 100644 index 000000000..dd3526a1f --- /dev/null +++ b/reoptjl/testing_custom_table.ipynb @@ -0,0 +1,127 @@ +{ + 
"cells": [ + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'reo'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[3], line 12\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mjson\u001b[39;00m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdjango\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mhttp\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m HttpRequest, HttpResponse, JsonResponse\n\u001b[0;32m---> 12\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mviews\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;241m*\u001b[39m\n\u001b[1;32m 14\u001b[0m API_KEY \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgYV8t7d6c5naotp67meIJyJRi6DksKv0VfPSQzEa\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;66;03m# Replace with your API key\u001b[39;00m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;66;03m# Define the API key and URL\u001b[39;00m\n", + "File \u001b[0;32m~/REopt_API/reoptjl/views.py:8\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mre\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdjango\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mhttp\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m JsonResponse, HttpResponse\n\u001b[0;32m----> 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mreo\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mexceptions\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m UnexpectedError\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mreoptjl\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodels\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Settings, PVInputs, ElectricStorageInputs, WindInputs, GeneratorInputs, ElectricLoadInputs,\\\n\u001b[1;32m 10\u001b[0m ElectricTariffInputs, ElectricUtilityInputs, SpaceHeatingLoadInputs, PVOutputs, ElectricStorageOutputs,\\\n\u001b[1;32m 11\u001b[0m WindOutputs, ExistingBoilerInputs, GeneratorOutputs, ElectricTariffOutputs, ElectricUtilityOutputs, \\\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 16\u001b[0m FinancialInputs, FinancialOutputs, UserUnlinkedRuns, BoilerInputs, BoilerOutputs, SteamTurbineInputs, \\\n\u001b[1;32m 17\u001b[0m SteamTurbineOutputs, GHPInputs, GHPOutputs, ProcessHeatLoadInputs\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m\n", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'reo'" + ] + } + ], + "source": [ + "# test_script.py\n", + "\n", + "import pandas as pd\n", + "import json\n", + "import requests\n", + "import os\n", + "import uuid\n", + "from collections import defaultdict\n", + "import re\n", + "import io\n", + "from django.http import HttpRequest, HttpResponse, JsonResponse\n", + "from views import create_comparison_table\n", + "\n", + "API_KEY = \"gYV8t7d6c5naotp67meIJyJRi6DksKv0VfPSQzEa\" # Replace with your API key\n", + "root_url = \"https://developer.nrel.gov/api/reopt/stable\"\n", + "\n", + "# Disable warnings\n", + "import urllib3\n", + "urllib3.disable_warnings()\n", + "\n", + "def 
test_create_comparison_table(run_uuids):\n", + " # Create a mock request object\n", + " request = HttpRequest()\n", + " request.method = 'GET'\n", + " \n", + " # Mock API key and root URL for the API requests\n", + " request.META['API_KEY'] = API_KEY\n", + " request.META['root_url'] = root_url\n", + "\n", + " # Mock user UUID\n", + " user_uuid = str(uuid.uuid4())\n", + "\n", + " # Replace the actual fetch_raw_data method to use the provided run UUIDs\n", + " def fetch_raw_data(request, run_uuid):\n", + " results_url = f\"{root_url}/job/{run_uuid}/results/?api_key={API_KEY}\"\n", + " response = requests.get(results_url, verify=False)\n", + " response.raise_for_status()\n", + " result_data = response.json()\n", + " processed_data = sum_vectors(result_data) # Summing vectors into a single value\n", + " return processed_data\n", + "\n", + " # Replace the actual get_raw_data method to use the provided run UUIDs\n", + " def get_raw_data(api_metas, request):\n", + " full_summary_dict = {\"scenarios\": []}\n", + " for m in api_metas:\n", + " scenario_data = {\n", + " \"run_uuid\": str(m['run_uuid']),\n", + " \"status\": m['status'],\n", + " \"created\": str(m['created']),\n", + " \"full_data\": fetch_raw_data(request, m['run_uuid'])\n", + " }\n", + " full_summary_dict[\"scenarios\"].append(scenario_data)\n", + " return full_summary_dict\n", + "\n", + " # Mock API meta data\n", + " api_metas = [{\"run_uuid\": run_uuid, \"status\": \"completed\", \"created\": \"2024-08-08\"} for run_uuid in run_uuids]\n", + "\n", + " # Call the create_comparison_table function with the mock request and user UUID\n", + " response = create_comparison_table(request, user_uuid)\n", + "\n", + " # Check the response type and print the appropriate result\n", + " if isinstance(response, HttpResponse):\n", + " if response.status_code == 200:\n", + " # Save the response content to an Excel file\n", + " with open(\"comparison_table.xlsx\", \"wb\") as f:\n", + " f.write(response.content)\n", + " print(\"Comparison table saved to 'comparison_table.xlsx'.\")\n", + " else:\n", + " print(f\"Error: {response.status_code} - {response.content.decode()}\")\n", + " elif isinstance(response, JsonResponse):\n", + " print(json.dumps(response.json(), indent=4))\n", + " else:\n", + " print(\"Unexpected response type.\")\n", + "\n", + "# Define the runrun_uuid_1 = \"4043a50f-52b9-482a-90dd-8f7ea417182a\"\n", + "run_uuid_2 = \"3ccb973a-e9ed-405e-bb41-4fcb0f4bb9a5\"\n", + "run_uuids = [run_uuid_1, run_uuid_2] UUIDs\n", + "\n", + "\n", + "# Run the test\n", + "test_create_comparison_table(run_uuids)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "base", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/reoptjl/views.py b/reoptjl/views.py index 6797a2922..6ba13337d 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -4,7 +4,7 @@ import sys import traceback as tb import re -from django.http import JsonResponse +from django.http import JsonResponse, HttpResponse from reo.exceptions import UnexpectedError from reoptjl.models import Settings, PVInputs, ElectricStorageInputs, WindInputs, GeneratorInputs, ElectricLoadInputs,\ ElectricTariffInputs, ElectricUtilityInputs, SpaceHeatingLoadInputs, PVOutputs, 
ElectricStorageOutputs,\ @@ -20,6 +20,10 @@ import numpy as np import json import logging +from reoptjl.custom_table_helpers import * +import xlsxwriter +import io + log = logging.getLogger(__name__) def make_error_resp(msg): @@ -694,115 +698,6 @@ def summary(request, user_uuid): err.save_to_db() return JsonResponse({"Error": err.message}, status=404) -def summary_by_chunk(request, user_uuid, chunk): - - # Dictionary to store all results. Primary key = run_uuid and secondary key = data values from each uuid - summary_dict = dict() - - try: - uuid.UUID(user_uuid) # raises ValueError if not valid uuid - - except ValueError as e: - if e.args[0] == "badly formed hexadecimal UUID string": - return JsonResponse({"Error": str(e.message)}, status=404) - else: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='summary', user_uuid=user_uuid) - err.save_to_db() - return JsonResponse({"Error": str(err.message)}, status=404) - - try: - try: - # chunk size is an optional URL parameter which defines the number of chunks into which to - # divide all user summary results. It must be a positive integer. - default_chunk_size = 30 - chunk_size = int(request.GET.get('chunk_size') or default_chunk_size) - if chunk_size != float(request.GET.get('chunk_size') or default_chunk_size): - return JsonResponse({"Error": "Chunk size must be an integer."}, status=400) - except: - return JsonResponse({"Error": "Chunk size must be a positive integer."}, status=400) - - try: - # chunk is the 1-indexed indice of the chunks for which to return results. - # chunk is a mandatory input from URL, different from chunk_size. - # It must be a positive integer. - chunk = int(chunk) - if chunk < 1: - response = JsonResponse({"Error": "Chunks are 1-indexed, please provide a chunk index greater than or equal to 1"} - , content_type='application/json', status=400) - return response - except: - return JsonResponse({"Error": "Chunk number must be a 1-indexed integer."}, status=400) - - # Create Querysets: Select all objects associate with a user_uuid, Order by `created` column - scenarios = APIMeta.objects.filter(user_uuid=user_uuid).only( - 'run_uuid', - 'status', - 'created' - ).order_by("-created") - - unlinked_run_uuids = [i.run_uuid for i in UserUnlinkedRuns.objects.filter(user_uuid=user_uuid)] - api_metas = [s for s in scenarios if s.run_uuid not in unlinked_run_uuids] - - total_scenarios = len(api_metas) - if total_scenarios == 0: - response = JsonResponse({"Error": "No scenarios found for user '{}'".format(user_uuid)}, content_type='application/json', status=404) - return response - - # Determine total number of chunks from current query of user results based on the chunk size - total_chunks = total_scenarios/float(chunk_size) - # If the last chunk is only patially full, i.e. 
there is a remainder, then add 1 so when it - # is converted to an integer the result will reflect the true total number of chunks - if total_chunks%1 > 0: - total_chunks = total_chunks + 1 - # Make sure total chunks is an integer - total_chunks = int(total_chunks) - - # Catch cases where user queries for a chunk that is more than the total chunks for the user - if chunk > total_chunks: - response = JsonResponse({"Error": "Chunk index {} is greater than the total number of chunks ({}) at a chunk size of {}".format( - chunk, total_chunks, chunk_size)}, content_type='application/json', status=400) - return response - - # Filter scenarios to the chunk - start_idx = max((chunk-1) * chunk_size, 0) - end_idx = min(chunk * chunk_size, total_scenarios) - api_metas_by_chunk = api_metas[start_idx: end_idx] - - summary_dict = queryset_for_summary(api_metas_by_chunk, summary_dict) - response = JsonResponse(create_summary_dict(user_uuid,summary_dict), status=200, safe=False) - return response - - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='summary', user_uuid=user_uuid) - err.save_to_db() - return JsonResponse({"Error": err.message}, status=404) - -# Take summary_dict and convert it to the desired format for response. Also add any missing key/val pairs -def create_summary_dict(user_uuid:str,summary_dict:dict): - - # if these keys are missing from a `scenario` we add 0s for them, all Floats. - optional_keys = ["npv_us_dollars", "net_capital_costs", "year_one_savings_us_dollars", "pv_kw", "wind_kw", "gen_kw", "batt_kw", "batt_kwh"] - - # Create eventual response dictionary - return_dict = dict() - return_dict['user_uuid'] = user_uuid - scenario_summaries = [] - for k in summary_dict.keys(): - - d = summary_dict[k] - - # for opt_key in optional_keys: - # if opt_key not in d.keys(): - # d[opt_key] = 0.0 - - scenario_summaries.append(d) - - return_dict['scenarios'] = scenario_summaries - - return return_dict - # Query all django models for all run_uuids found for given user_uuid def queryset_for_summary(api_metas,summary_dict:dict): @@ -980,6 +875,116 @@ def queryset_for_summary(api_metas,summary_dict:dict): return summary_dict +def summary_by_chunk(request, user_uuid, chunk): + + # Dictionary to store all results. Primary key = run_uuid and secondary key = data values from each uuid + summary_dict = dict() + + try: + uuid.UUID(user_uuid) # raises ValueError if not valid uuid + + except ValueError as e: + if e.args[0] == "badly formed hexadecimal UUID string": + return JsonResponse({"Error": str(e.message)}, status=404) + else: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='summary', user_uuid=user_uuid) + err.save_to_db() + return JsonResponse({"Error": str(err.message)}, status=404) + + try: + try: + # chunk size is an optional URL parameter which defines the number of chunks into which to + # divide all user summary results. It must be a positive integer. + default_chunk_size = 30 + chunk_size = int(request.GET.get('chunk_size') or default_chunk_size) + if chunk_size != float(request.GET.get('chunk_size') or default_chunk_size): + return JsonResponse({"Error": "Chunk size must be an integer."}, status=400) + except: + return JsonResponse({"Error": "Chunk size must be a positive integer."}, status=400) + + try: + # chunk is the 1-indexed indice of the chunks for which to return results. 
+ # chunk is a mandatory input from URL, different from chunk_size. + # It must be a positive integer. + chunk = int(chunk) + if chunk < 1: + response = JsonResponse({"Error": "Chunks are 1-indexed, please provide a chunk index greater than or equal to 1"} + , content_type='application/json', status=400) + return response + except: + return JsonResponse({"Error": "Chunk number must be a 1-indexed integer."}, status=400) + + # Create Querysets: Select all objects associate with a user_uuid, Order by `created` column + scenarios = APIMeta.objects.filter(user_uuid=user_uuid).only( + 'run_uuid', + 'status', + 'created' + ).order_by("-created") + + unlinked_run_uuids = [i.run_uuid for i in UserUnlinkedRuns.objects.filter(user_uuid=user_uuid)] + api_metas = [s for s in scenarios if s.run_uuid not in unlinked_run_uuids] + + total_scenarios = len(api_metas) + if total_scenarios == 0: + response = JsonResponse({"Error": "No scenarios found for user '{}'".format(user_uuid)}, content_type='application/json', status=404) + return response + + # Determine total number of chunks from current query of user results based on the chunk size + total_chunks = total_scenarios/float(chunk_size) + # If the last chunk is only patially full, i.e. there is a remainder, then add 1 so when it + # is converted to an integer the result will reflect the true total number of chunks + if total_chunks%1 > 0: + total_chunks = total_chunks + 1 + # Make sure total chunks is an integer + total_chunks = int(total_chunks) + + # Catch cases where user queries for a chunk that is more than the total chunks for the user + if chunk > total_chunks: + response = JsonResponse({"Error": "Chunk index {} is greater than the total number of chunks ({}) at a chunk size of {}".format( + chunk, total_chunks, chunk_size)}, content_type='application/json', status=400) + return response + + # Filter scenarios to the chunk + start_idx = max((chunk-1) * chunk_size, 0) + end_idx = min(chunk * chunk_size, total_scenarios) + api_metas_by_chunk = api_metas[start_idx: end_idx] + + summary_dict = queryset_for_summary(api_metas_by_chunk, summary_dict) + response = JsonResponse(create_summary_dict(user_uuid,summary_dict), status=200, safe=False) + return response + + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='summary', user_uuid=user_uuid) + err.save_to_db() + return JsonResponse({"Error": err.message}, status=404) + +# Take summary_dict and convert it to the desired format for response. Also add any missing key/val pairs +def create_summary_dict(user_uuid:str,summary_dict:dict): + + # if these keys are missing from a `scenario` we add 0s for them, all Floats. + optional_keys = ["npv_us_dollars", "net_capital_costs", "year_one_savings_us_dollars", "pv_kw", "wind_kw", "gen_kw", "batt_kw", "batt_kwh"] + + # Create eventual response dictionary + return_dict = dict() + return_dict['user_uuid'] = user_uuid + scenario_summaries = [] + for k in summary_dict.keys(): + + d = summary_dict[k] + + # for opt_key in optional_keys: + # if opt_key not in d.keys(): + # d[opt_key] = 0.0 + + scenario_summaries.append(d) + + return_dict['scenarios'] = scenario_summaries + + return return_dict + + # Unlink a user_uuid from a run_uuid. def unlink(request, user_uuid, run_uuid): @@ -1139,60 +1144,51 @@ def easiur_costs(request): log.error(debug_msg) return JsonResponse({"Error": "Unexpected Error. 
Please check your input parameters and contact reopt@nrel.gov if problems persist."}, status=500) -######################################## -####### Custom Tables and Reports ###### -######################################## -import xlsxwriter -import pandas as pd -import requests -import json -from collections import defaultdict -import re -import uuid +# def fuel_emissions_rates(request): +# try: -#### Helper Functions -def get_with_suffix(df, key, suffix, default_val=0): - """Fetch value from dataframe with an optional retriaval of _bau suffix.""" - if not key.endswith("_bau"): - key = f"{key}{suffix}" - return df.get(key, default_val) - -def flatten_dict(d, parent_key='', sep='.'): - """Flatten nested dictionary.""" - items = [] - for k, v in d.items(): - new_key = f"{parent_key}{sep}{k}" if parent_key else k - if isinstance(v, dict): - items.extend(flatten_dict(v, new_key, sep=sep).items()) - else: - items.append((new_key, v)) - return dict(items) - -def clean_data_dict(data_dict): - """Clean data dictionary by removing default values.""" - for key, value_array in data_dict.items(): - new_value_array = [ - "" if v in [0, float("nan"), "NaN", "0", "0.0", "$0.0", -0, "-0", "-0.0", "-$0.0", None] else v - for v in value_array - ] - data_dict[key] = new_value_array - return data_dict +# try: +# response = JsonResponse({ +# 'CO2': { +# 'generator_lb_per_gal': ValidateNestedInput.fuel_conversion_lb_CO2_per_gal, +# 'lb_per_mmbtu': ValidateNestedInput.fuel_conversion_lb_CO2_per_mmbtu +# }, +# 'NOx': { +# 'generator_lb_per_gal': ValidateNestedInput.fuel_conversion_lb_NOx_per_gal, +# 'lb_per_mmbtu': ValidateNestedInput.fuel_conversion_lb_NOx_per_mmbtu +# }, +# 'SO2': { +# 'generator_lb_per_gal': ValidateNestedInput.fuel_conversion_lb_SO2_per_gal, +# 'lb_per_mmbtu': ValidateNestedInput.fuel_conversion_lb_SO2_per_mmbtu +# }, +# 'PM25': { +# 'generator_lb_per_gal': ValidateNestedInput.fuel_conversion_lb_PM25_per_gal, +# 'lb_per_mmbtu': ValidateNestedInput.fuel_conversion_lb_PM25_per_mmbtu +# } +# }) +# return response +# except AttributeError as e: +# return JsonResponse({"Error": str(e.args[0])}, status=500) -def sum_vectors(data): - """Sum numerical vectors within a nested data structure.""" - if isinstance(data, dict): - return {key: sum_vectors(value) for key, value in data.items()} - elif isinstance(data, list): - if all(isinstance(item, (int, float)) for item in data): - return sum(data) - else: - return [sum_vectors(item) for item in data] - else: - return data +# except KeyError as e: +# return JsonResponse({"No parameters required."}, status=500) + +# except ValueError as e: +# return JsonResponse({"Error": str(e.args[0])}, status=500) + +# except Exception: + +# exc_type, exc_value, exc_traceback = sys.exc_info() +# debug_msg = "exc_type: {}; exc_value: {}; exc_traceback: {}".format(exc_type, exc_value.args[0], +# tb.format_tb(exc_traceback)) +# log.error(debug_msg) +# return JsonResponse({"Error": "Unexpected Error. 
Please check your input parameters and contact reopt@nrel.gov if problems persist."}, status=500) + +############################################################### +################ Custom Table ################################# +############################################################### -#### Core Functions def generate_data_dict(config, df_gen, suffix): - """Generate data dictionary based on configuration and dataframe.""" data_dict = defaultdict(list) for var_key, col_name in config: if callable(var_key): @@ -1203,7 +1199,6 @@ def generate_data_dict(config, df_gen, suffix): return data_dict def get_REopt_data(data_f, scenario_name, config): - """Fetch and format data for a specific REopt scenario.""" scenario_name_str = str(scenario_name) suffix = "_bau" if re.search(r"(?i)\bBAU\b", scenario_name_str) else "" @@ -1218,15 +1213,13 @@ def get_REopt_data(data_f, scenario_name, config): return df_res def get_bau_values(mock_scenarios, config): - """Retrieve BAU values for comparison.""" bau_values = {col_name: None for _, col_name in config} for scenario in mock_scenarios: - df_gen = flatten_dict(scenario["outputs"]) + df_gen = flatten_dict(scenario) for var_key, col_name in config: try: key = var_key.__code__.co_consts[1] except IndexError: - print(f"Warning: Could not find constant in lambda for {col_name}. Skipping...") continue key_bau = f"{key}_bau" @@ -1238,30 +1231,34 @@ def get_bau_values(mock_scenarios, config): raise ValueError(f"Inconsistent BAU values for {col_name}. This should only be used for portfolio cases with the same Site, ElectricLoad, and ElectricTariff for energy consumption and energy costs.") return bau_values -# def get_scenario_results(run_uuid): -# """Retrieve scenario results from an external API.""" -# results_url = f"{root_url}/job/{run_uuid}/results/?api_key={API_KEY}" -# response = requests.get(results_url, verify=False) -# response.raise_for_status() -# result_data = response.json() - -# processed_data = sum_vectors(result_data) #vectors are summed into a single value - -# # ## outputs json with the simplified REopt results where vectors are summed into a single value -# # with open(f"{run_uuid}.json", "w") as json_file: -# # json.dump(processed_data, json_file, indent=4) - -# return processed_data +def fetch_raw_data(request, run_uuid): + response = results(request, run_uuid) + if response.status_code == 200: + result_data = json.loads(response.content) + processed_data = sum_vectors(result_data) # Summing vectors into a single value + return processed_data + else: + return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} +def access_raw_data(api_metas, request): + full_summary_dict = {"scenarios": []} + for m in api_metas: + scenario_data = { + "run_uuid": str(m.run_uuid), + "status": m.status, + "created": str(m.created), + "full_data": fetch_raw_data(request, m.run_uuid) + } + full_summary_dict["scenarios"].append(scenario_data) + return full_summary_dict def process_scenarios(scenarios, reopt_data_config): - """Process multiple scenarios and generate a combined dataframe.""" config = reopt_data_config bau_values = get_bau_values(scenarios, config) combined_df = pd.DataFrame() for scenario in scenarios: run_uuid = scenario['run_uuid'] - df_result = get_REopt_data(scenario["outputs"], run_uuid, config) + df_result = get_REopt_data(scenario['full_data'], run_uuid, config) df_result = df_result.set_index('Scenario').T df_result.columns = [run_uuid] combined_df = df_result if combined_df.empty else combined_df.join(df_result, how='outer') @@ 
-1277,104 +1274,232 @@ def process_scenarios(scenarios, reopt_data_config): return combined_df -# def summary_by_runuuids(run_uuids): -# """Fetch summary for multiple run UUIDs.""" -# if not run_uuids: -# return {'Error': 'Must provide one or more run_uuids'} +def create_custom_table_excel(df, custom_table, calculations, output): + # Create a new Excel file and add a worksheet + workbook = xlsxwriter.Workbook(output, {'in_memory': True}) + worksheet = workbook.add_worksheet('ITA Report Template') -# for r_uuid in run_uuids: -# if not isinstance(r_uuid, str): -# return {'Error': f'Provided run_uuids type error, must be string. {r_uuid}'} - -# try: -# uuid.UUID(r_uuid) -# except ValueError as e: -# return {"Error": str(e)} + # Define formats + data_format = workbook.add_format({'align': 'center', 'valign': 'center', 'border': 1}) + formula_format = workbook.add_format({'bg_color': '#C1EE86', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'red'}) + scenario_header_format = workbook.add_format({'bold': True, 'bg_color': '#0079C2', 'border': 1, 'align': 'center', 'font_color': 'white'}) + variable_name_format = workbook.add_format({'bold': True, 'bg_color': '#DEE2E5', 'border': 1, 'align': 'left'}) + + worksheet.write(1, len(df.columns) + 2, "Values in red are formulas. Do not input anything.", formula_format) + + column_width = 35 + for col_num in range(len(df.columns) + 3): + worksheet.set_column(col_num, col_num, column_width) -# try: -# scenarios = [get_scenario_results(run_uuid) for run_uuid in run_uuids] -# return {'scenarios': scenarios} -# except Exception as e: -# return {"Error": str(e)} + worksheet.write('A1', 'Scenario', scenario_header_format) + for col_num, header in enumerate(df.columns): + worksheet.write(0, col_num + 1, header, scenario_header_format) + + for row_num, variable in enumerate(df.index): + worksheet.write(row_num + 1, 0, variable, variable_name_format) -def summary_by_runuuids(request): - """ - Fetch summary for multiple run UUIDs. - """ - try: - # Parse the request body - body = json.loads(request.body) - run_uuids = body.get('run_uuids', []) + for row_num, row_data in enumerate(df.itertuples(index=False)): + for col_num, value in enumerate(row_data): + worksheet.write(row_num + 1, col_num + 1, "" if pd.isnull(value) or value == '-' else value, data_format) - if not run_uuids: - return JsonResponse({'Error': 'Must provide one or more run_uuids'}, status=400) + headers = {header: idx for idx, header in enumerate(df.index)} - for r_uuid in run_uuids: - if not isinstance(r_uuid, str): - return JsonResponse({'Error': f'Provided run_uuids type error, must be string. 
{r_uuid}'}, status=400) - try: - uuid.UUID(r_uuid) - except ValueError as e: - return JsonResponse({"Error": str(e)}, status=400) - - scenarios = [] - for run_uuid in run_uuids: - # Call the existing results function - response = results(request, run_uuid) - if response.status_code == 200: - scenarios.append(json.loads(response.content)) + bau_cells = { + 'grid_value': f'{colnum_string(2)}{headers["Grid Purchased Electricity (kWh)"] + 2}' if "Grid Purchased Electricity (kWh)" in headers else None, + 'net_cost_value': f'{colnum_string(2)}{headers["Net Electricity Cost ($)"] + 2}' if "Net Electricity Cost ($)" in headers else None, + 'ng_reduction_value': f'{colnum_string(2)}{headers["Total Fuel (MMBtu)"] + 2}' if "Total Fuel (MMBtu)" in headers else None, + 'util_cost_value': f'{colnum_string(2)}{headers["Total Utility Cost ($)"] + 2}' if "Total Utility Cost ($)" in headers else None, + 'co2_reduction_value': f'{colnum_string(2)}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None + } + + missing_entries = [] + for col in range(2, len(df.columns) + 2): + col_letter = colnum_string(col) + for calc in calculations: + if calc["name"] in headers: + row_idx = headers[calc["name"]] + formula = calc["formula"](col_letter, bau_cells, headers) + if formula: + worksheet.write_formula(row_idx + 1, col-1, formula, formula_format) + else: + missing_entries.append(calc["name"]) else: - return JsonResponse({"Error": f"Error fetching results for run_uuid {run_uuid}: {response.content}"}, status=response.status_code) + missing_entries.append(calc["name"]) + + if missing_entries: + print(f"Missing entries in the input table: {', '.join(set(missing_entries))}. Please update the configuration if necessary.") + + workbook.close() - return JsonResponse({'scenarios': scenarios}, status=200) +def create_comparison_table(request, user_uuid): + def fetch_data_for_comparison(api_metas): + return access_raw_data(api_metas, request) + # Validate that user UUID is valid. + try: + uuid.UUID(user_uuid) except ValueError as e: - return JsonResponse({"Error": str(e)}, status=400) + return JsonResponse({"Error": str(e)}, status=404) - except KeyError as e: - return JsonResponse({"Error. Missing": str(e)}, status=400) + try: + api_metas = APIMeta.objects.filter(user_uuid=user_uuid).only( + 'user_uuid', + 'status', + 'created' + ).order_by("-created") - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - debug_msg = "exc_type: {}; exc_value: {}; exc_traceback: {}".format(exc_type, exc_value, tb.format_tb(exc_traceback)) - log.debug(debug_msg) - return JsonResponse({"Error": "Unexpected error. 
Check log for more."}, status=500) -# def fuel_emissions_rates(request): -# try: + if api_metas.exists(): + scenarios = fetch_data_for_comparison(api_metas) + if 'scenarios' not in scenarios: + return JsonResponse({"Error": scenarios['error']}, content_type='application/json', status=404) + + final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) + final_df.iloc[1:, 0] = [meta.run_uuid for meta in api_metas] -# try: -# response = JsonResponse({ -# 'CO2': { -# 'generator_lb_per_gal': ValidateNestedInput.fuel_conversion_lb_CO2_per_gal, -# 'lb_per_mmbtu': ValidateNestedInput.fuel_conversion_lb_CO2_per_mmbtu -# }, -# 'NOx': { -# 'generator_lb_per_gal': ValidateNestedInput.fuel_conversion_lb_NOx_per_gal, -# 'lb_per_mmbtu': ValidateNestedInput.fuel_conversion_lb_NOx_per_mmbtu -# }, -# 'SO2': { -# 'generator_lb_per_gal': ValidateNestedInput.fuel_conversion_lb_SO2_per_gal, -# 'lb_per_mmbtu': ValidateNestedInput.fuel_conversion_lb_SO2_per_mmbtu -# }, -# 'PM25': { -# 'generator_lb_per_gal': ValidateNestedInput.fuel_conversion_lb_PM25_per_gal, -# 'lb_per_mmbtu': ValidateNestedInput.fuel_conversion_lb_PM25_per_mmbtu -# } -# }) -# return response -# except AttributeError as e: -# return JsonResponse({"Error": str(e.args[0])}, status=500) + final_df_transpose = final_df.transpose() + final_df_transpose.columns = final_df_transpose.iloc[0] + final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) -# except KeyError as e: -# return JsonResponse({"No parameters required."}, status=500) + output = io.BytesIO() + create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) + output.seek(0) -# except ValueError as e: -# return JsonResponse({"Error": str(e.args[0])}, status=500) + filename = "comparison_table.xlsx" + response = HttpResponse( + output, + content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + ) + response['Content-Disposition'] = f'attachment; filename={filename}' -# except Exception: + return response -# exc_type, exc_value, exc_traceback = sys.exc_info() -# debug_msg = "exc_type: {}; exc_value: {}; exc_traceback: {}".format(exc_type, exc_value.args[0], -# tb.format_tb(exc_traceback)) -# log.error(debug_msg) -# return JsonResponse({"Error": "Unexpected Error. 
Please check your input parameters and contact reopt@nrel.gov if problems persist."}, status=500) + else: + return JsonResponse({"Error": f"No scenarios found for user '{user_uuid}'"}, content_type='application/json', status=404) + + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='comparison', user_uuid=user_uuid) + err.save_to_db() + return JsonResponse({"Error": str(err)}, status=404) + +# Configuration +# Set up table needed along with REopt dictionaries to grab data +ita_custom_table = [ + (lambda df: get_with_suffix(df, "outputs.PV.size_kw", ""), "PV Size (kW)"), + (lambda df: get_with_suffix(df, "outputs.Wind.size_kw", ""), "Wind Size (kW)"), + (lambda df: get_with_suffix(df, "outputs.CHP.size_kw", ""), "CHP Size (kW)"), + (lambda df: get_with_suffix(df, "outputs.PV.annual_energy_produced_kwh", ""), "PV Total Electricity Produced (kWh)"), + (lambda df: get_with_suffix(df, "outputs.PV.electric_to_grid_series_kw", ""), "PV Exported to Grid (kWh)"), + (lambda df: get_with_suffix(df, "outputs.PV.electric_to_load_series_kw", ""), "PV Serving Load (kWh)"), + (lambda df: get_with_suffix(df, "outputs.Wind.annual_energy_produced_kwh", ""), "Wind Total Electricity Produced (kWh)"), + (lambda df: get_with_suffix(df, "outputs.Wind.electric_to_grid_series_kw", ""), "Wind Exported to Grid (kWh)"), + (lambda df: get_with_suffix(df, "outputs.Wind.electric_to_load_series_kw", ""), "Wind Serving Load (kWh)"), + (lambda df: get_with_suffix(df, "outputs.CHP.annual_electric_production_kwh", ""), "CHP Total Electricity Produced (kWh)"), + (lambda df: get_with_suffix(df, "outputs.CHP.electric_to_grid_series_kw", ""), "CHP Exported to Grid (kWh)"), + (lambda df: get_with_suffix(df, "outputs.CHP.electric_to_load_series_kw", ""), "CHP Serving Load (kWh)"), + (lambda df: get_with_suffix(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour", ""), "CHP Serving Thermal Load (MMBtu)"), + (lambda df: get_with_suffix(df, "outputs.ElectricUtility.annual_energy_supplied_kwh", ""), "Grid Purchased Electricity (kWh)"), + (lambda df: get_with_suffix(df, "outputs.ElectricUtility.electric_to_load_series_kw", ""), "Total Site Electricity Use (kWh)"), + (lambda df: get_with_suffix(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf", ""), "Net Purchased Electricity Reduction (%)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax", ""), "Electricity Energy Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax", ""), "Electricity Demand Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax", ""), "Utility Fixed Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_bill_before_tax", ""), "Purchased Electricity Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax", ""), "Electricity Export Benefit ($)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax", ""), "Net Electricity Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau", ""), "Electricity Cost Savings ($/year)"), + (lambda df: get_with_suffix(df, "outputs.Boiler.fuel_used_mmbtu", ""), "Boiler Fuel (MMBtu)"), + (lambda df: get_with_suffix(df, "outputs.CHP.annual_fuel_consumption_mmbtu", ""), "CHP Fuel (MMBtu)"), + (lambda df: get_with_suffix(df, 
"outputs.ElectricUtility.total_energy_supplied_kwh", ""), "Total Fuel (MMBtu)"), + (lambda df: get_with_suffix(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau", ""), "Natural Gas Reduction (%)"), + (lambda df: get_with_suffix(df, "outputs.Boiler.annual_thermal_production_mmbtu", ""), "Boiler Thermal Production (MMBtu)"), + (lambda df: get_with_suffix(df, "outputs.CHP.annual_thermal_production_mmbtu", ""), "CHP Thermal Production (MMBtu)"), + (lambda df: get_with_suffix(df, "outputs.CHP.annual_thermal_production_mmbtu", ""), "Total Thermal Production (MMBtu)"), + (lambda df: get_with_suffix(df, "outputs.Site.heating_system_fuel_cost_us_dollars", ""), "Heating System Fuel Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.CHP.year_one_fuel_cost_before_tax", ""), "CHP Fuel Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.Site.total_fuel_cost_us_dollars", ""), "Total Fuel (NG) Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.Site.total_utility_cost_us_dollars", ""), "Total Utility Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.om_and_replacement_present_cost_after_tax", ""), "O&M Cost Increase ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.simple_payback_years", ""), "Payback Period (years)"), + (lambda df: get_with_suffix(df, "outputs.Financial.lifecycle_capital_costs", ""), "Gross Capital Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_us_dollars", ""), "Federal Tax Incentive (30%)"), + (lambda df: get_with_suffix(df, "outputs.Financial.iac_grant_us_dollars", ""), "IAC Grant ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_value_us_dollars", ""), "Incentive Value ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.net_capital_cost_us_dollars", ""), "Net Capital Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.annual_cost_savings_us_dollars", ""), "Annual Cost Savings ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.simple_payback_years", ""), "Simple Payback (years)"), + (lambda df: get_with_suffix(df, "outputs.Site.annual_emissions_tonnes_CO2", ""), "CO2 Emissions (tonnes)"), + (lambda df: get_with_suffix(df, "outputs.Site.lifecycle_emissions_tonnes_CO2", ""), "CO2 Reduction (tonnes)"), + (lambda df: get_with_suffix(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau", ""), "CO2 (%) savings "), + (lambda df: get_with_suffix(df, "outputs.Financial.npv", ""), "NPV"), + (lambda df: get_with_suffix(df, "inputs.PV.federal_itc_fraction", ""), "PV Federal Tax Incentive (%)"), + (lambda df: get_with_suffix(df, "inputs.ElectricStorage.total_itc_fraction", ""), "Storage Federal Tax Incentive (%)") +] + +# Configuration for calculations +calculations = [ + { + "name": "Total Site Electricity Use (kWh)", + "formula": lambda col, bau, headers: f'={col}{headers["PV Serving Load (kWh)"] + 2}+{col}{headers["Wind Serving Load (kWh)"] + 2}+{col}{headers["CHP Serving Load (kWh)"] + 2}+{col}{headers["Grid Purchased Electricity (kWh)"] + 2}' + }, + { + "name": "Net Purchased Electricity Reduction (%)", + "formula": lambda col, bau, headers: f'=({bau["grid_value"]}-{col}{headers["Grid Purchased Electricity (kWh)"] + 2})/{bau["grid_value"]}' + }, + { + "name": "Purchased Electricity Cost ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Electricity Energy Cost ($)"] + 2}+{col}{headers["Electricity Demand Cost ($)"] + 2}+{col}{headers["Utility Fixed Cost ($)"] + 2}' + }, + { + "name": "Net Electricity Cost ($)", + "formula": lambda col, bau, 
headers: f'={col}{headers["Purchased Electricity Cost ($)"] + 2}-{col}{headers["Electricity Export Benefit ($)"] + 2}' + }, + { + "name": "Electricity Cost Savings ($/year)", + "formula": lambda col, bau, headers: f'={bau["net_cost_value"]}-{col}{headers["Net Electricity Cost ($)"] + 2}' + }, + { + "name": "Total Fuel (MMBtu)", + "formula": lambda col, bau, headers: f'={col}{headers["Boiler Fuel (MMBtu)"] + 2}+{col}{headers["CHP Fuel (MMBtu)"] + 2}' + }, + { + "name": "Natural Gas Reduction (%)", + "formula": lambda col, bau, headers: f'=({bau["ng_reduction_value"]}-{col}{headers["Total Fuel (MMBtu)"] + 2})/{bau["ng_reduction_value"]}' + }, + { + "name": "Total Thermal Production (MMBtu)", + "formula": lambda col, bau, headers: f'={col}{headers["Boiler Thermal Production (MMBtu)"] + 2}+{col}{headers["CHP Thermal Production (MMBtu)"] + 2}' + }, + { + "name": "Total Fuel (NG) Cost ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Heating System Fuel Cost ($)"] + 2}+{col}{headers["CHP Fuel Cost ($)"] + 2}' + }, + { + "name": "Total Utility Cost ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Net Electricity Cost ($)"] + 2}+{col}{headers["Total Fuel (NG) Cost ($)"] + 2}' + }, + { + "name": "Incentive Value ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Federal Tax Incentive (30%)"] + 2}*{col}{headers["Gross Capital Cost ($)"] + 2}+{col}{headers["IAC Grant ($)"] + 2}' + }, + { + "name": "Net Capital Cost ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Gross Capital Cost ($)"] + 2}-{col}{headers["Incentive Value ($)"] + 2}' + }, + { + "name": "Annual Cost Savings ($)", + "formula": lambda col, bau, headers: f'={bau["util_cost_value"]}-{col}{headers["Total Utility Cost ($)"] + 2}+{col}{headers["O&M Cost Increase ($)"] + 2}' + }, + { + "name": "Simple Payback (years)", + "formula": lambda col, bau, headers: f'={col}{headers["Net Capital Cost ($)"] + 2}/{col}{headers["Annual Cost Savings ($)"] + 2}' + }, + { + "name": "CO2 Reduction (tonnes)", + "formula": lambda col, bau, headers: f'={bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2}' + }, + { + "name": "CO2 (%) savings ", + "formula": lambda col, bau, headers: f'=({bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2})/{bau["co2_reduction_value"]}' + } +] From 191629feb4343f3f8973d38252727a8ddcf608b3 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Thu, 8 Aug 2024 10:30:45 -0600 Subject: [PATCH 03/44] added test --- reoptjl/testing_custom_table.ipynb | 65 +++++----------------- reoptjl/views.py | 87 +++++++++++++----------------- 2 files changed, 50 insertions(+), 102 deletions(-) diff --git a/reoptjl/testing_custom_table.ipynb b/reoptjl/testing_custom_table.ipynb index dd3526a1f..d8064eb0b 100644 --- a/reoptjl/testing_custom_table.ipynb +++ b/reoptjl/testing_custom_table.ipynb @@ -2,19 +2,19 @@ "cells": [ { "cell_type": "code", - "execution_count": 3, + "execution_count": 7, "metadata": {}, "outputs": [ { "ename": "ModuleNotFoundError", - "evalue": "No module named 'reo'", + "evalue": "No module named 'reoptjl'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[3], line 12\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mjson\u001b[39;00m\n\u001b[1;32m 11\u001b[0m 
\u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdjango\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mhttp\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m HttpRequest, HttpResponse, JsonResponse\n\u001b[0;32m---> 12\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mviews\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;241m*\u001b[39m\n\u001b[1;32m 14\u001b[0m API_KEY \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgYV8t7d6c5naotp67meIJyJRi6DksKv0VfPSQzEa\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;66;03m# Replace with your API key\u001b[39;00m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;66;03m# Define the API key and URL\u001b[39;00m\n", - "File \u001b[0;32m~/REopt_API/reoptjl/views.py:8\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mre\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdjango\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mhttp\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m JsonResponse, HttpResponse\n\u001b[0;32m----> 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mreo\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mexceptions\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m UnexpectedError\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mreoptjl\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodels\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Settings, PVInputs, ElectricStorageInputs, WindInputs, GeneratorInputs, ElectricLoadInputs,\\\n\u001b[1;32m 10\u001b[0m ElectricTariffInputs, ElectricUtilityInputs, SpaceHeatingLoadInputs, PVOutputs, ElectricStorageOutputs,\\\n\u001b[1;32m 11\u001b[0m WindOutputs, ExistingBoilerInputs, GeneratorOutputs, ElectricTariffOutputs, ElectricUtilityOutputs, \\\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 16\u001b[0m FinancialInputs, FinancialOutputs, UserUnlinkedRuns, BoilerInputs, BoilerOutputs, SteamTurbineInputs, \\\n\u001b[1;32m 17\u001b[0m SteamTurbineOutputs, GHPInputs, GHPOutputs, ProcessHeatLoadInputs\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m\n", - "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'reo'" + "Cell \u001b[0;32mIn[7], line 9\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01muuid\u001b[39;00m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdjango\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mhttp\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m HttpRequest, HttpResponse, JsonResponse\n\u001b[0;32m----> 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mviews\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;241m*\u001b[39m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;66;03m# Disable warnings\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01murllib3\u001b[39;00m\n", + "File \u001b[0;32m~/REopt_API/reoptjl/views.py:9\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdjango\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mhttp\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m JsonResponse, HttpResponse\n\u001b[1;32m 8\u001b[0m \u001b[38;5;66;03m# from reo.exceptions import UnexpectedError\u001b[39;00m\n\u001b[0;32m----> 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m 
\u001b[38;5;21;01mreoptjl\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodels\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Settings, PVInputs, ElectricStorageInputs, WindInputs, GeneratorInputs, ElectricLoadInputs,\\\n\u001b[1;32m 10\u001b[0m ElectricTariffInputs, ElectricUtilityInputs, SpaceHeatingLoadInputs, PVOutputs, ElectricStorageOutputs,\\\n\u001b[1;32m 11\u001b[0m WindOutputs, ExistingBoilerInputs, GeneratorOutputs, ElectricTariffOutputs, ElectricUtilityOutputs, \\\n\u001b[1;32m 12\u001b[0m ElectricLoadOutputs, ExistingBoilerOutputs, DomesticHotWaterLoadInputs, SiteInputs, SiteOutputs, APIMeta, \\\n\u001b[1;32m 13\u001b[0m UserProvidedMeta, CHPInputs, CHPOutputs, CoolingLoadInputs, ExistingChillerInputs, ExistingChillerOutputs,\\\n\u001b[1;32m 14\u001b[0m CoolingLoadOutputs, HeatingLoadOutputs, REoptjlMessageOutputs, HotThermalStorageInputs, HotThermalStorageOutputs,\\\n\u001b[1;32m 15\u001b[0m ColdThermalStorageInputs, ColdThermalStorageOutputs, AbsorptionChillerInputs, AbsorptionChillerOutputs,\\\n\u001b[1;32m 16\u001b[0m FinancialInputs, FinancialOutputs, UserUnlinkedRuns, BoilerInputs, BoilerOutputs, SteamTurbineInputs, \\\n\u001b[1;32m 17\u001b[0m SteamTurbineOutputs, GHPInputs, GHPOutputs, ProcessHeatLoadInputs\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mrequests\u001b[39;00m\n", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'reoptjl'" ] } ], @@ -24,16 +24,10 @@ "import pandas as pd\n", "import json\n", "import requests\n", - "import os\n", - "import uuid\n", - "from collections import defaultdict\n", - "import re\n", "import io\n", + "import uuid\n", "from django.http import HttpRequest, HttpResponse, JsonResponse\n", - "from views import create_comparison_table\n", - "\n", - "API_KEY = \"gYV8t7d6c5naotp67meIJyJRi6DksKv0VfPSQzEa\" # Replace with your API key\n", - "root_url = \"https://developer.nrel.gov/api/reopt/stable\"\n", + "from views import *\n", "\n", "# Disable warnings\n", "import urllib3\n", @@ -42,42 +36,11 @@ "def test_create_comparison_table(run_uuids):\n", " # Create a mock request object\n", " request = HttpRequest()\n", - " request.method = 'GET'\n", + " request.method = 'POST'\n", + " request.body = json.dumps({\"run_uuids\": run_uuids})\n", " \n", - " # Mock API key and root URL for the API requests\n", - " request.META['API_KEY'] = API_KEY\n", - " request.META['root_url'] = root_url\n", - "\n", - " # Mock user UUID\n", - " user_uuid = str(uuid.uuid4())\n", - "\n", - " # Replace the actual fetch_raw_data method to use the provided run UUIDs\n", - " def fetch_raw_data(request, run_uuid):\n", - " results_url = f\"{root_url}/job/{run_uuid}/results/?api_key={API_KEY}\"\n", - " response = requests.get(results_url, verify=False)\n", - " response.raise_for_status()\n", - " result_data = response.json()\n", - " processed_data = sum_vectors(result_data) # Summing vectors into a single value\n", - " return processed_data\n", - "\n", - " # Replace the actual get_raw_data method to use the provided run UUIDs\n", - " def get_raw_data(api_metas, request):\n", - " full_summary_dict = {\"scenarios\": []}\n", - " for m in api_metas:\n", - " scenario_data = {\n", - " \"run_uuid\": str(m['run_uuid']),\n", - " \"status\": m['status'],\n", - " \"created\": str(m['created']),\n", - " \"full_data\": fetch_raw_data(request, m['run_uuid'])\n", - " }\n", - " 
full_summary_dict[\"scenarios\"].append(scenario_data)\n", - " return full_summary_dict\n", - "\n", - " # Mock API meta data\n", - " api_metas = [{\"run_uuid\": run_uuid, \"status\": \"completed\", \"created\": \"2024-08-08\"} for run_uuid in run_uuids]\n", - "\n", - " # Call the create_comparison_table function with the mock request and user UUID\n", - " response = create_comparison_table(request, user_uuid)\n", + " # Call the create_comparison_table function with the mock request\n", + " response = create_comparison_table(request)\n", "\n", " # Check the response type and print the appropriate result\n", " if isinstance(response, HttpResponse):\n", @@ -93,10 +56,10 @@ " else:\n", " print(\"Unexpected response type.\")\n", "\n", - "# Define the runrun_uuid_1 = \"4043a50f-52b9-482a-90dd-8f7ea417182a\"\n", + "# Define the run UUIDs\n", + "run_uuid_1 = \"4043a50f-52b9-482a-90dd-8f7ea417182a\"\n", "run_uuid_2 = \"3ccb973a-e9ed-405e-bb41-4fcb0f4bb9a5\"\n", - "run_uuids = [run_uuid_1, run_uuid_2] UUIDs\n", - "\n", + "run_uuids = [run_uuid_1, run_uuid_2]\n", "\n", "# Run the test\n", "test_create_comparison_table(run_uuids)\n" diff --git a/reoptjl/views.py b/reoptjl/views.py index 6ba13337d..997900aeb 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -20,7 +20,7 @@ import numpy as np import json import logging -from reoptjl.custom_table_helpers import * +from reoptjl.custom_table_helpers import get_with_suffix, flatten_dict, clean_data_dict, sum_vectors, colnum_string import xlsxwriter import io @@ -1240,14 +1240,12 @@ def fetch_raw_data(request, run_uuid): else: return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} -def access_raw_data(api_metas, request): +def access_raw_data(run_uuids, request): full_summary_dict = {"scenarios": []} - for m in api_metas: + for run_uuid in run_uuids: scenario_data = { - "run_uuid": str(m.run_uuid), - "status": m.status, - "created": str(m.created), - "full_data": fetch_raw_data(request, m.run_uuid) + "run_uuid": str(run_uuid), + "full_data": fetch_raw_data(request, run_uuid) } full_summary_dict["scenarios"].append(scenario_data) return full_summary_dict @@ -1275,11 +1273,9 @@ def process_scenarios(scenarios, reopt_data_config): return combined_df def create_custom_table_excel(df, custom_table, calculations, output): - # Create a new Excel file and add a worksheet workbook = xlsxwriter.Workbook(output, {'in_memory': True}) worksheet = workbook.add_worksheet('ITA Report Template') - # Define formats data_format = workbook.add_format({'align': 'center', 'valign': 'center', 'border': 1}) formula_format = workbook.add_format({'bg_color': '#C1EE86', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'red'}) scenario_header_format = workbook.add_format({'bold': True, 'bg_color': '#0079C2', 'border': 1, 'align': 'center', 'font_color': 'white'}) @@ -1331,56 +1327,45 @@ def create_custom_table_excel(df, custom_table, calculations, output): workbook.close() -def create_comparison_table(request, user_uuid): - def fetch_data_for_comparison(api_metas): - return access_raw_data(api_metas, request) - - # Validate that user UUID is valid. 
- try: - uuid.UUID(user_uuid) - except ValueError as e: - return JsonResponse({"Error": str(e)}, status=404) - - try: - api_metas = APIMeta.objects.filter(user_uuid=user_uuid).only( - 'user_uuid', - 'status', - 'created' - ).order_by("-created") +def create_comparison_table(request): + run_uuids = json.loads(request.body).get('run_uuids', []) + if not run_uuids: + return JsonResponse({'Error': 'Must provide one or more run_uuids'}, status=400) - if api_metas.exists(): - scenarios = fetch_data_for_comparison(api_metas) - if 'scenarios' not in scenarios: - return JsonResponse({"Error": scenarios['error']}, content_type='application/json', status=404) + # Validate run UUIDs + for r_uuid in run_uuids: + if not isinstance(r_uuid, str): + return JsonResponse({'Error': 'Provided run_uuids type error, must be string. ' + str(r_uuid)}, status=400) - final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) - final_df.iloc[1:, 0] = [meta.run_uuid for meta in api_metas] + try: + uuid.UUID(r_uuid) # raises ValueError if not valid UUID + except ValueError as e: + return JsonResponse({"Error": str(e)}, status=404) - final_df_transpose = final_df.transpose() - final_df_transpose.columns = final_df_transpose.iloc[0] - final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + scenarios = access_raw_data(run_uuids, request) + if 'scenarios' not in scenarios: + return JsonResponse({"Error": "Failed to fetch scenarios"}, content_type='application/json', status=404) - output = io.BytesIO() - create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) - output.seek(0) + final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) + final_df.iloc[1:, 0] = run_uuids - filename = "comparison_table.xlsx" - response = HttpResponse( - output, - content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' - ) - response['Content-Disposition'] = f'attachment; filename={filename}' + final_df_transpose = final_df.transpose() + final_df_transpose.columns = final_df_transpose.iloc[0] + final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - return response + output = io.BytesIO() + + create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) + output.seek(0) - else: - return JsonResponse({"Error": f"No scenarios found for user '{user_uuid}'"}, content_type='application/json', status=404) + filename = "comparison_table.xlsx" + response = HttpResponse( + output, + content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + ) + response['Content-Disposition'] = f'attachment; filename={filename}' - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='comparison', user_uuid=user_uuid) - err.save_to_db() - return JsonResponse({"Error": str(err)}, status=404) + return response # Configuration # Set up table needed along with REopt dictionaries to grab data From 7ec1a58ce0f145e11f8b4c5d61ec1e5e32f08a84 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Thu, 8 Aug 2024 10:45:07 -0600 Subject: [PATCH 04/44] added helpers --- reoptjl/custom_table_helpers.py | 4 -- reoptjl/testing_custom_table.ipynb | 68 +----------------------------- reoptjl/views.py | 2 + 3 files changed, 3 insertions(+), 71 deletions(-) diff --git a/reoptjl/custom_table_helpers.py b/reoptjl/custom_table_helpers.py index 6038a8f72..233a518b1 100644 --- 
a/reoptjl/custom_table_helpers.py +++ b/reoptjl/custom_table_helpers.py @@ -1,8 +1,4 @@ # custom table helpers.py - -import pandas as pd -from collections import defaultdict - def get_with_suffix(df, key, suffix, default_val=0): """Fetch value from dataframe with an optional retriaval of _bau suffix.""" if not key.endswith("_bau"): diff --git a/reoptjl/testing_custom_table.ipynb b/reoptjl/testing_custom_table.ipynb index d8064eb0b..b7d2a9dd1 100644 --- a/reoptjl/testing_custom_table.ipynb +++ b/reoptjl/testing_custom_table.ipynb @@ -1,71 +1,5 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "ename": "ModuleNotFoundError", - "evalue": "No module named 'reoptjl'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[7], line 9\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01muuid\u001b[39;00m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdjango\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mhttp\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m HttpRequest, HttpResponse, JsonResponse\n\u001b[0;32m----> 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mviews\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;241m*\u001b[39m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;66;03m# Disable warnings\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01murllib3\u001b[39;00m\n", - "File \u001b[0;32m~/REopt_API/reoptjl/views.py:9\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdjango\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mhttp\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m JsonResponse, HttpResponse\n\u001b[1;32m 8\u001b[0m \u001b[38;5;66;03m# from reo.exceptions import UnexpectedError\u001b[39;00m\n\u001b[0;32m----> 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mreoptjl\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodels\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Settings, PVInputs, ElectricStorageInputs, WindInputs, GeneratorInputs, ElectricLoadInputs,\\\n\u001b[1;32m 10\u001b[0m ElectricTariffInputs, ElectricUtilityInputs, SpaceHeatingLoadInputs, PVOutputs, ElectricStorageOutputs,\\\n\u001b[1;32m 11\u001b[0m WindOutputs, ExistingBoilerInputs, GeneratorOutputs, ElectricTariffOutputs, ElectricUtilityOutputs, \\\n\u001b[1;32m 12\u001b[0m ElectricLoadOutputs, ExistingBoilerOutputs, DomesticHotWaterLoadInputs, SiteInputs, SiteOutputs, APIMeta, \\\n\u001b[1;32m 13\u001b[0m UserProvidedMeta, CHPInputs, CHPOutputs, CoolingLoadInputs, ExistingChillerInputs, ExistingChillerOutputs,\\\n\u001b[1;32m 14\u001b[0m CoolingLoadOutputs, HeatingLoadOutputs, REoptjlMessageOutputs, HotThermalStorageInputs, HotThermalStorageOutputs,\\\n\u001b[1;32m 15\u001b[0m ColdThermalStorageInputs, ColdThermalStorageOutputs, AbsorptionChillerInputs, AbsorptionChillerOutputs,\\\n\u001b[1;32m 16\u001b[0m FinancialInputs, FinancialOutputs, UserUnlinkedRuns, BoilerInputs, BoilerOutputs, SteamTurbineInputs, \\\n\u001b[1;32m 17\u001b[0m SteamTurbineOutputs, GHPInputs, GHPOutputs, ProcessHeatLoadInputs\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m\n\u001b[1;32m 19\u001b[0m 
\u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mrequests\u001b[39;00m\n", - "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'reoptjl'" - ] - } - ], - "source": [ - "# test_script.py\n", - "\n", - "import pandas as pd\n", - "import json\n", - "import requests\n", - "import io\n", - "import uuid\n", - "from django.http import HttpRequest, HttpResponse, JsonResponse\n", - "from views import *\n", - "\n", - "# Disable warnings\n", - "import urllib3\n", - "urllib3.disable_warnings()\n", - "\n", - "def test_create_comparison_table(run_uuids):\n", - " # Create a mock request object\n", - " request = HttpRequest()\n", - " request.method = 'POST'\n", - " request.body = json.dumps({\"run_uuids\": run_uuids})\n", - " \n", - " # Call the create_comparison_table function with the mock request\n", - " response = create_comparison_table(request)\n", - "\n", - " # Check the response type and print the appropriate result\n", - " if isinstance(response, HttpResponse):\n", - " if response.status_code == 200:\n", - " # Save the response content to an Excel file\n", - " with open(\"comparison_table.xlsx\", \"wb\") as f:\n", - " f.write(response.content)\n", - " print(\"Comparison table saved to 'comparison_table.xlsx'.\")\n", - " else:\n", - " print(f\"Error: {response.status_code} - {response.content.decode()}\")\n", - " elif isinstance(response, JsonResponse):\n", - " print(json.dumps(response.json(), indent=4))\n", - " else:\n", - " print(\"Unexpected response type.\")\n", - "\n", - "# Define the run UUIDs\n", - "run_uuid_1 = \"4043a50f-52b9-482a-90dd-8f7ea417182a\"\n", - "run_uuid_2 = \"3ccb973a-e9ed-405e-bb41-4fcb0f4bb9a5\"\n", - "run_uuids = [run_uuid_1, run_uuid_2]\n", - "\n", - "# Run the test\n", - "test_create_comparison_table(run_uuids)\n" - ] - } - ], + "cells": [], "metadata": { "kernelspec": { "display_name": "base", diff --git a/reoptjl/views.py b/reoptjl/views.py index 997900aeb..add4fb774 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -18,10 +18,12 @@ import os import requests import numpy as np +import pandas as pd import json import logging from reoptjl.custom_table_helpers import get_with_suffix, flatten_dict, clean_data_dict, sum_vectors, colnum_string import xlsxwriter +from collections import defaultdict import io log = logging.getLogger(__name__) From 3e5e02fa95eda3ad8432f2ecd9e537aefd93e913 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Thu, 8 Aug 2024 10:57:54 -0600 Subject: [PATCH 05/44] added functions tasks in url --- reoptjl/urls.py | 1 + reoptjl/views.py | 75 +++++++++++++++++++++++++++++------------------- 2 files changed, 46 insertions(+), 30 deletions(-) diff --git a/reoptjl/urls.py b/reoptjl/urls.py index 7f8077a91..37260969c 100644 --- a/reoptjl/urls.py +++ b/reoptjl/urls.py @@ -22,4 +22,5 @@ re_path(r'^invalid_urdb/?$', reoviews.invalid_urdb), re_path(r'^schedule_stats/?$', reoviews.schedule_stats), re_path(r'^get_existing_chiller_default_cop/?$', views.get_existing_chiller_default_cop), + re_path(r'^create_comparison_table/?$', views.create_comparison_table) ] diff --git a/reoptjl/views.py b/reoptjl/views.py index add4fb774..20b41b98b 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1237,7 +1237,7 @@ def fetch_raw_data(request, run_uuid): response = results(request, run_uuid) if response.status_code == 200: result_data = json.loads(response.content) - processed_data = sum_vectors(result_data) # Summing vectors into a single value + processed_data = sum_vectors(result_data) return 
processed_data else: return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} @@ -1330,45 +1330,60 @@ def create_custom_table_excel(df, custom_table, calculations, output): workbook.close() def create_comparison_table(request): - run_uuids = json.loads(request.body).get('run_uuids', []) - if not run_uuids: + # Parse run_uuids from request body + try: + run_uuids = json.loads(request.body)['run_uuids'] + except (json.JSONDecodeError, KeyError): + return JsonResponse({'Error': 'Invalid JSON format or missing run_uuids'}, status=400) + + if len(run_uuids) == 0: return JsonResponse({'Error': 'Must provide one or more run_uuids'}, status=400) # Validate run UUIDs for r_uuid in run_uuids: if not isinstance(r_uuid, str): - return JsonResponse({'Error': 'Provided run_uuids type error, must be string. ' + str(r_uuid)}, status=400) - + return JsonResponse({'Error': f'Provided run_uuid {r_uuid} must be a string'}, status=400) try: - uuid.UUID(r_uuid) # raises ValueError if not valid UUID - except ValueError as e: - return JsonResponse({"Error": str(e)}, status=404) + uuid.UUID(r_uuid) # raises ValueError if not a valid UUID + except ValueError: + return JsonResponse({'Error': f'Invalid UUID format: {r_uuid}'}, status=400) - scenarios = access_raw_data(run_uuids, request) - if 'scenarios' not in scenarios: - return JsonResponse({"Error": "Failed to fetch scenarios"}, content_type='application/json', status=404) - - final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) - final_df.iloc[1:, 0] = run_uuids + try: + # Access raw data + scenarios = access_raw_data(run_uuids, request) + if 'scenarios' not in scenarios: + return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) + + # Process scenarios + final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) + final_df.iloc[1:, 0] = run_uuids + + # Transpose and format DataFrame + final_df_transpose = final_df.transpose() + final_df_transpose.columns = final_df_transpose.iloc[0] + final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + + # Create Excel file + output = io.BytesIO() + create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) + output.seek(0) + + # Set up the HTTP response + filename = "comparison_table.xlsx" + response = HttpResponse( + output, + content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + ) + response['Content-Disposition'] = f'attachment; filename={filename}' - final_df_transpose = final_df.transpose() - final_df_transpose.columns = final_df_transpose.iloc[0] - final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + return response - output = io.BytesIO() + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_comparison_table', run_uuids=run_uuids) + err.save_to_db() + return JsonResponse({"Error": str(err.message)}, status=500) - create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) - output.seek(0) - - filename = "comparison_table.xlsx" - response = HttpResponse( - output, - content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' - ) - response['Content-Disposition'] = f'attachment; filename={filename}' - - return response - # Configuration # Set up table needed along with REopt dictionaries to grab data ita_custom_table = [ From 585b75b22bfaa305bab7c9a2e4092001daef6848 Mon Sep 17 00:00:00 2001 
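A quick way to exercise the new create_comparison_table endpoint wired up in PATCH 05, now that the testing notebook has been slimmed down, is a short requests script. The sketch below is illustrative only and not part of any patch in this series: the base URL and URL prefix, the output filename, and the use of a local dev server are assumptions; the JSON body simply mirrors the {"run_uuids": [...]} payload the view parses, and the run UUIDs are the two sample ids used elsewhere in this PR.

    # Hypothetical client for the create_comparison_table endpoint; not part of this patch.
    import json
    import requests

    BASE_URL = "http://localhost:8000"  # assumption: local dev server; adjust for the deployed prefix
    run_uuids = ["4043a50f-52b9-482a-90dd-8f7ea417182a",
                 "3ccb973a-e9ed-405e-bb41-4fcb0f4bb9a5"]

    resp = requests.post(f"{BASE_URL}/create_comparison_table/",
                         data=json.dumps({"run_uuids": run_uuids}),
                         headers={"Content-Type": "application/json"})

    if resp.ok and resp.headers.get("Content-Type", "").startswith("application/vnd.openxmlformats"):
        # The view streams back an in-memory xlsx workbook as an attachment.
        with open("comparison_table.xlsx", "wb") as f:
            f.write(resp.content)
    else:
        # Errors come back as JSON (missing run_uuids, invalid UUIDs, fetch failures, etc.).
        print(resp.status_code, resp.text)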
From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Thu, 8 Aug 2024 11:00:20 -0600 Subject: [PATCH 06/44] updated config to include run_uuids --- reoptjl/views.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 20b41b98b..3b4d271ec 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1233,25 +1233,25 @@ def get_bau_values(mock_scenarios, config): raise ValueError(f"Inconsistent BAU values for {col_name}. This should only be used for portfolio cases with the same Site, ElectricLoad, and ElectricTariff for energy consumption and energy costs.") return bau_values -def fetch_raw_data(request, run_uuid): - response = results(request, run_uuid) - if response.status_code == 200: - result_data = json.loads(response.content) - processed_data = sum_vectors(result_data) - return processed_data - else: - return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} - def access_raw_data(run_uuids, request): full_summary_dict = {"scenarios": []} for run_uuid in run_uuids: scenario_data = { "run_uuid": str(run_uuid), - "full_data": fetch_raw_data(request, run_uuid) + "full_data": process_raw_data(request, run_uuid) } full_summary_dict["scenarios"].append(scenario_data) return full_summary_dict +def process_raw_data(request, run_uuid): + response = results(request, run_uuid) + if response.status_code == 200: + result_data = json.loads(response.content) + processed_data = sum_vectors(result_data) + return processed_data + else: + return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} + def process_scenarios(scenarios, reopt_data_config): config = reopt_data_config bau_values = get_bau_values(scenarios, config) @@ -1276,7 +1276,7 @@ def process_scenarios(scenarios, reopt_data_config): def create_custom_table_excel(df, custom_table, calculations, output): workbook = xlsxwriter.Workbook(output, {'in_memory': True}) - worksheet = workbook.add_worksheet('ITA Report Template') + worksheet = workbook.add_worksheet('Custom Table') data_format = workbook.add_format({'align': 'center', 'valign': 'center', 'border': 1}) formula_format = workbook.add_format({'bg_color': '#C1EE86', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'red'}) From 95b5bf7b573e8ca44e38147ce353cbab618e5fc6 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Fri, 16 Aug 2024 16:13:48 -0600 Subject: [PATCH 07/44] updated docker files --- docker-compose.yml | 2 -- julia_src/Dockerfile | 10 ++++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index a3ebd222f..2dead0986 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,3 @@ -version: "2.1" - services: redis: diff --git a/julia_src/Dockerfile b/julia_src/Dockerfile index 60bb2034c..c626110c5 100644 --- a/julia_src/Dockerfile +++ b/julia_src/Dockerfile @@ -5,14 +5,20 @@ ARG NREL_ROOT_CERT_URL_ROOT="" RUN set -x && if [ -n "$NREL_ROOT_CERT_URL_ROOT" ]; then curl -fsSLk -o /usr/local/share/ca-certificates/nrel_root.crt "${NREL_ROOT_CERT_URL_ROOT}/nrel_root.pem" && curl -fsSLk -o /usr/local/share/ca-certificates/nrel_xca1.crt "${NREL_ROOT_CERT_URL_ROOT}/nrel_xca1.pem" && update-ca-certificates; fi ENV REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt +# # Optionally disable SSL verification for Git operations +ENV JULIA_SSL_NO_VERIFY_HOSTS="github.com" + # Install Julia packages -ENV JULIA_NUM_THREADS=2 ENV 
XPRESS_JL_SKIP_LIB_CHECK=True +# Set the working directory and copy files WORKDIR /opt/julia_src COPY . . + +# Install Julia packages RUN julia --project=/opt/julia_src -e 'import Pkg; Pkg.instantiate();' RUN julia --project=/opt/julia_src precompile.jl -EXPOSE 8081 + +EXPOSE 8081 CMD ["bash"] From 430d81d51ff0ac321c20b5941a090836cdd3baf6 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Mon, 19 Aug 2024 08:09:29 -0600 Subject: [PATCH 08/44] added custom table structure --- reoptjl/testing_custom_table.ipynb | 24 --------- reoptjl/urls.py | 15 +++++- reoptjl/views.py | 79 +++++++++++++++++++++++++----- requirements.txt | 2 + 4 files changed, 83 insertions(+), 37 deletions(-) delete mode 100644 reoptjl/testing_custom_table.ipynb diff --git a/reoptjl/testing_custom_table.ipynb b/reoptjl/testing_custom_table.ipynb deleted file mode 100644 index b7d2a9dd1..000000000 --- a/reoptjl/testing_custom_table.ipynb +++ /dev/null @@ -1,24 +0,0 @@ -{ - "cells": [], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/reoptjl/urls.py b/reoptjl/urls.py index 37260969c..5ca0eae32 100644 --- a/reoptjl/urls.py +++ b/reoptjl/urls.py @@ -2,6 +2,19 @@ from . import views from reo import views as reoviews from django.urls import re_path +from django.urls import register_converter, re_path + +class UUIDListConverter: + regex = r'([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})(;([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}))*' + + def to_python(self, value): + return value.split(';') + + def to_url(self, value): + return ';'.join(value) + +# Register the custom converter +register_converter(UUIDListConverter, 'uuidlist') urlpatterns = [ re_path(r'^job/(?P[0-9a-f-]+)/results/?$', views.results), @@ -22,5 +35,5 @@ re_path(r'^invalid_urdb/?$', reoviews.invalid_urdb), re_path(r'^schedule_stats/?$', reoviews.schedule_stats), re_path(r'^get_existing_chiller_default_cop/?$', views.get_existing_chiller_default_cop), - re_path(r'^create_comparison_table/?$', views.create_comparison_table) + re_path(r'^job/comparison_table/(?Puuidlist)/?$', views.create_custom_comparison_table), ] diff --git a/reoptjl/views.py b/reoptjl/views.py index 3b4d271ec..3d0e4a446 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1329,20 +1329,75 @@ def create_custom_table_excel(df, custom_table, calculations, output): workbook.close() -def create_comparison_table(request): - # Parse run_uuids from request body - try: - run_uuids = json.loads(request.body)['run_uuids'] - except (json.JSONDecodeError, KeyError): - return JsonResponse({'Error': 'Invalid JSON format or missing run_uuids'}, status=400) +# def create_custom_comparison_table(request, run_uuids): +# # Validate and parse run UUIDs from request body +# try: +# run_uuids = json.loads(request.body)['run_uuids'] +# except (json.JSONDecodeError, KeyError): +# return JsonResponse({'Error': 'Invalid JSON format or missing run_uuids'}, status=400) + +# if len(run_uuids) == 0: +# return JsonResponse({'Error': 'Must provide one or more run_uuids'}, status=400) + +# # Validate each run_uuid +# for r_uuid in run_uuids: +# if not isinstance(r_uuid, str): 
+# return JsonResponse({'Error': f'Provided run_uuid {r_uuid} must be a string'}, status=400) +# try: +# uuid.UUID(r_uuid) # raises ValueError if not a valid UUID +# except ValueError: +# return JsonResponse({'Error': f'Invalid UUID format: {r_uuid}'}, status=400) + +# try: +# # Create Querysets: Select all objects associated with the provided run_uuids +# api_metas = APIMeta.objects.filter(run_uuid__in=run_uuids).only( +# 'run_uuid', 'status', 'created' +# ).order_by("-created") + +# if api_metas.exists(): +# # Access raw data for each run_uuid +# scenarios = access_raw_data(run_uuids, request) +# if 'scenarios' not in scenarios: +# return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) + +# # Process scenarios +# final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) +# final_df.iloc[1:, 0] = run_uuids + +# # Transpose and format DataFrame +# final_df_transpose = final_df.transpose() +# final_df_transpose.columns = final_df_transpose.iloc[0] +# final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + +# # Create Excel file in memory +# output = io.BytesIO() +# create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) +# output.seek(0) + +# # Set up the HTTP response +# filename = "comparison_table.xlsx" +# response = HttpResponse( +# output, +# content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' +# ) +# response['Content-Disposition'] = f'attachment; filename={filename}' + +# return response +# else: +# return JsonResponse({"Error": "No scenarios found for the provided run UUIDs."}, content_type='application/json', status=404) + +# except Exception as e: +# exc_type, exc_value, exc_traceback = sys.exc_info() +# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table', run_uuids=run_uuids) +# err.save_to_db() +# return JsonResponse({"Error": str(err.message)}, status=500) - if len(run_uuids) == 0: - return JsonResponse({'Error': 'Must provide one or more run_uuids'}, status=400) +def create_custom_comparison_table(request, run_uuids): + # Split the comma-separated run_uuids into a list + run_ids_str = ';'.join(run_uuids) # Validate run UUIDs for r_uuid in run_uuids: - if not isinstance(r_uuid, str): - return JsonResponse({'Error': f'Provided run_uuid {r_uuid} must be a string'}, status=400) try: uuid.UUID(r_uuid) # raises ValueError if not a valid UUID except ValueError: @@ -1380,10 +1435,10 @@ def create_comparison_table(request): except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_comparison_table', run_uuids=run_uuids) + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table', run_uuids=run_uuids) err.save_to_db() return JsonResponse({"Error": str(err.message)}, status=500) - + # Configuration # Set up table needed along with REopt dictionaries to grab data ita_custom_table = [ diff --git a/requirements.txt b/requirements.txt index 348306a51..1dec41c86 100644 --- a/requirements.txt +++ b/requirements.txt @@ -115,3 +115,5 @@ wrapt==1.13.3 xlrd==2.0.1 yarg==0.1.9 zipp==3.7.0 +xlsxwriter==3.1.9 + From 6cc929e82a45f078ccecec2744bd5bd91d937936 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Mon, 19 Aug 2024 10:24:45 -0600 Subject: [PATCH 09/44] updated code to retrieve results from url using django --- reoptjl/urls.py | 3 +- 
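The UUIDListConverter registered in urls.py in PATCH 08 is what lets a single path segment carry several run UUIDs: to_python splits the semicolon-delimited segment into a list before the view is called, and to_url joins it back when reversing a URL. A standalone round-trip of that logic, again using the two sample run UUIDs from this PR, would look roughly like the sketch below (no Django required; purely illustrative).

    # Stand-alone sketch of the semicolon-delimited UUID handling behind the
    # job/comparison_table/<run_uuids>/ route; mirrors UUIDListConverter.
    import uuid

    def to_python(value):
        return value.split(';')

    def to_url(value):
        return ';'.join(value)

    segment = "4043a50f-52b9-482a-90dd-8f7ea417182a;3ccb973a-e9ed-405e-bb41-4fcb0f4bb9a5"
    run_uuids = to_python(segment)
    for r in run_uuids:
        uuid.UUID(r)                    # raises ValueError on malformed ids, as the view checks
    assert to_url(run_uuids) == segment  # round-trips cleanly

A GET request to job/comparison_table/ followed by that semicolon-joined segment (the exact prefix depends on how reoptjl.urls is mounted in the deployment) would then return the generated workbook as an attachment.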
reoptjl/views.py | 143 ++++++++++++++++++++++------------------------- requirements.txt | 2 +- 3 files changed, 70 insertions(+), 78 deletions(-) diff --git a/reoptjl/urls.py b/reoptjl/urls.py index 5ca0eae32..f9fdb8294 100644 --- a/reoptjl/urls.py +++ b/reoptjl/urls.py @@ -1,7 +1,6 @@ # REopt®, Copyright (c) Alliance for Sustainable Energy, LLC. See also https://github.com/NREL/REopt_API/blob/master/LICENSE. from . import views from reo import views as reoviews -from django.urls import re_path from django.urls import register_converter, re_path class UUIDListConverter: @@ -35,5 +34,5 @@ def to_url(self, value): re_path(r'^invalid_urdb/?$', reoviews.invalid_urdb), re_path(r'^schedule_stats/?$', reoviews.schedule_stats), re_path(r'^get_existing_chiller_default_cop/?$', views.get_existing_chiller_default_cop), - re_path(r'^job/comparison_table/(?Puuidlist)/?$', views.create_custom_comparison_table), + re_path(r'^job/comparison_table/(?P[0-9a-f\-;]+)/$', views.create_custom_comparison_table), ] diff --git a/reoptjl/views.py b/reoptjl/views.py index 3d0e4a446..7b7f16324 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1187,7 +1187,7 @@ def easiur_costs(request): # return JsonResponse({"Error": "Unexpected Error. Please check your input parameters and contact reopt@nrel.gov if problems persist."}, status=500) ############################################################### -################ Custom Table ################################# +################ START Custom Table ########################### ############################################################### def generate_data_dict(config, df_gen, suffix): @@ -1330,37 +1330,21 @@ def create_custom_table_excel(df, custom_table, calculations, output): workbook.close() # def create_custom_comparison_table(request, run_uuids): -# # Validate and parse run UUIDs from request body -# try: -# run_uuids = json.loads(request.body)['run_uuids'] -# except (json.JSONDecodeError, KeyError): -# return JsonResponse({'Error': 'Invalid JSON format or missing run_uuids'}, status=400) - -# if len(run_uuids) == 0: -# return JsonResponse({'Error': 'Must provide one or more run_uuids'}, status=400) - -# # Validate each run_uuid -# for r_uuid in run_uuids: -# if not isinstance(r_uuid, str): -# return JsonResponse({'Error': f'Provided run_uuid {r_uuid} must be a string'}, status=400) +# if request.method == 'GET': +# print("Handling GET request for comparison table") # Debug print +# # Ensure run_uuids is a list of valid UUIDs # try: -# uuid.UUID(r_uuid) # raises ValueError if not a valid UUID -# except ValueError: -# return JsonResponse({'Error': f'Invalid UUID format: {r_uuid}'}, status=400) +# run_uuids = [uuid.UUID(r_uuid) for r_uuid in run_uuids] +# except ValueError as e: +# return JsonResponse({"Error": f"Invalid UUID format: {str(e)}"}, status=400) -# try: -# # Create Querysets: Select all objects associated with the provided run_uuids -# api_metas = APIMeta.objects.filter(run_uuid__in=run_uuids).only( -# 'run_uuid', 'status', 'created' -# ).order_by("-created") - -# if api_metas.exists(): -# # Access raw data for each run_uuid +# try: +# # Access raw data using the UUIDs # scenarios = access_raw_data(run_uuids, request) # if 'scenarios' not in scenarios: # return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) -# # Process scenarios +# # Process the scenarios and generate the comparison table # final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) # final_df.iloc[1:, 0] = run_uuids 
@@ -1369,7 +1353,7 @@ def create_custom_table_excel(df, custom_table, calculations, output): # final_df_transpose.columns = final_df_transpose.iloc[0] # final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) -# # Create Excel file in memory +# # Create Excel file # output = io.BytesIO() # create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) # output.seek(0) @@ -1383,61 +1367,66 @@ def create_custom_table_excel(df, custom_table, calculations, output): # response['Content-Disposition'] = f'attachment; filename={filename}' # return response -# else: -# return JsonResponse({"Error": "No scenarios found for the provided run UUIDs."}, content_type='application/json', status=404) -# except Exception as e: -# exc_type, exc_value, exc_traceback = sys.exc_info() -# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table', run_uuids=run_uuids) -# err.save_to_db() -# return JsonResponse({"Error": str(err.message)}, status=500) - +# except Exception as e: +# exc_type, exc_value, exc_traceback = sys.exc_info() +# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table', run_uuids=run_uuids) +# err.save_to_db() +# return JsonResponse({"Error": str(err.message)}, status=500) + +# return JsonResponse({"Error": "Method not allowed"}, status=405) + def create_custom_comparison_table(request, run_uuids): - # Split the comma-separated run_uuids into a list - run_ids_str = ';'.join(run_uuids) + if request.method == 'GET': + print(f"Handling GET request with run_uuids: {run_uuids}") + + # Convert the string of UUIDs back into a list + run_uuids = run_uuids.split(';') + + # Validate that all run UUIDs are valid + for r_uuid in run_uuids: + try: + uuid.UUID(r_uuid) # raises ValueError if not a valid UUID + except ValueError as e: + return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) - # Validate run UUIDs - for r_uuid in run_uuids: try: - uuid.UUID(r_uuid) # raises ValueError if not a valid UUID - except ValueError: - return JsonResponse({'Error': f'Invalid UUID format: {r_uuid}'}, status=400) + # Access raw data using the list of UUIDs + scenarios = access_raw_data(run_uuids, request) + if 'scenarios' not in scenarios: + return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) + + # Process scenarios + final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) + final_df.iloc[1:, 0] = run_uuids + + # Transpose and format DataFrame + final_df_transpose = final_df.transpose() + final_df_transpose.columns = final_df_transpose.iloc[0] + final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + + # Create Excel file + output = io.BytesIO() + create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) + output.seek(0) + + # Set up the HTTP response + filename = "comparison_table.xlsx" + response = HttpResponse( + output, + content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + ) + response['Content-Disposition'] = f'attachment; filename={filename}' - try: - # Access raw data - scenarios = access_raw_data(run_uuids, request) - if 'scenarios' not in scenarios: - return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) - - # Process scenarios - final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) - final_df.iloc[1:, 0] = run_uuids - - # Transpose and format DataFrame - 
final_df_transpose = final_df.transpose() - final_df_transpose.columns = final_df_transpose.iloc[0] - final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - - # Create Excel file - output = io.BytesIO() - create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) - output.seek(0) - - # Set up the HTTP response - filename = "comparison_table.xlsx" - response = HttpResponse( - output, - content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' - ) - response['Content-Disposition'] = f'attachment; filename={filename}' + return response - return response + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table', run_uuids=run_uuids) + err.save_to_db() + return JsonResponse({"Error": str(err.message)}, status=500) - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table', run_uuids=run_uuids) - err.save_to_db() - return JsonResponse({"Error": str(err.message)}, status=500) + return JsonResponse({"Error": "Method not allowed"}, status=405) # Configuration # Set up table needed along with REopt dictionaries to grab data @@ -1560,3 +1549,7 @@ def create_custom_comparison_table(request, run_uuids): "formula": lambda col, bau, headers: f'=({bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2})/{bau["co2_reduction_value"]}' } ] + +############################################################### +################ END Custom Table ############################# +############################################################### \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 1dec41c86..e6b1af133 100644 --- a/requirements.txt +++ b/requirements.txt @@ -112,8 +112,8 @@ urllib3==1.26.8 vine==5.0.0 wcwidth==0.2.5 wrapt==1.13.3 +xlsxwriter==3.1.9 xlrd==2.0.1 yarg==0.1.9 zipp==3.7.0 -xlsxwriter==3.1.9 From 772d359026001e71fe5ffd780953536c08927df1 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Mon, 19 Aug 2024 12:56:43 -0600 Subject: [PATCH 10/44] updated error catching for inconsistent scenario comparisons --- reoptjl/views.py | 182 +++++++++++++++++++++++++++++++---------------- 1 file changed, 122 insertions(+), 60 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 7b7f16324..f80a13fb3 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1214,10 +1214,62 @@ def get_REopt_data(data_f, scenario_name, config): return df_res +# def get_bau_values(mock_scenarios, config): +# bau_values = {col_name: None for _, col_name in config} +# for scenario in mock_scenarios: +# df_gen = flatten_dict(scenario) +# for var_key, col_name in config: +# try: +# key = var_key.__code__.co_consts[1] +# except IndexError: +# continue + +# key_bau = f"{key}_bau" +# if key_bau in df_gen: +# value = df_gen[key_bau] +# if bau_values[col_name] is None: +# bau_values[col_name] = value +# elif bau_values[col_name] != value: +# raise ValueError(f"Inconsistent BAU values for {col_name}. 
This should only be used for portfolio cases with the same Site, ElectricLoad, and ElectricTariff for energy consumption and energy costs.") +# return bau_values + + def get_bau_values(mock_scenarios, config): + # Initialize bau_values for the config keys bau_values = {col_name: None for _, col_name in config} - for scenario in mock_scenarios: + + # Fields that should be consistent across all scenarios + consistent_fields = { + "full_data.inputs.Site.latitude": None, + "full_data.inputs.Site.longitude": None, + "full_data.inputs.ElectricLoad.doe_reference_name": None, + "full_data.inputs.ElectricTariff.urdb_label": None + } + + # Iterate through all scenarios and flatten them + for scenario_index, scenario in enumerate(mock_scenarios): df_gen = flatten_dict(scenario) + + # On the first pass, store the reference values + if scenario_index == 0: + for key in consistent_fields: + consistent_fields[key] = df_gen.get(key) + + # On subsequent passes, compare against the reference values + else: + for key, reference_value in consistent_fields.items(): + current_value = df_gen.get(key) + # Compare with the reference value + if current_value != reference_value: + raise ValueError( + f"Inconsistent scenario input values found across scenarios. " + f"Scenario {scenario_index + 1} has {current_value} " + f"while reference scenario has {reference_value}. " + "This should only be used for portfolio cases with the same Site, " + "ElectricLoad, and ElectricTariff for energy consumption and energy costs." + ) + + # Process the scenario with existing logic for BAU values for var_key, col_name in config: try: key = var_key.__code__.co_consts[1] @@ -1231,8 +1283,10 @@ def get_bau_values(mock_scenarios, config): bau_values[col_name] = value elif bau_values[col_name] != value: raise ValueError(f"Inconsistent BAU values for {col_name}. This should only be used for portfolio cases with the same Site, ElectricLoad, and ElectricTariff for energy consumption and energy costs.") + return bau_values + def access_raw_data(run_uuids, request): full_summary_dict = {"scenarios": []} for run_uuid in run_uuids: @@ -1278,30 +1332,37 @@ def create_custom_table_excel(df, custom_table, calculations, output): workbook = xlsxwriter.Workbook(output, {'in_memory': True}) worksheet = workbook.add_worksheet('Custom Table') + # Formats data_format = workbook.add_format({'align': 'center', 'valign': 'center', 'border': 1}) - formula_format = workbook.add_format({'bg_color': '#C1EE86', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'red'}) + formula_format = workbook.add_format({'bg_color': '#FECF86', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'red'}) + error_format = workbook.add_format({'bg_color': '#FFC7CE', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'black'}) # For missing data scenario_header_format = workbook.add_format({'bold': True, 'bg_color': '#0079C2', 'border': 1, 'align': 'center', 'font_color': 'white'}) variable_name_format = workbook.add_format({'bold': True, 'bg_color': '#DEE2E5', 'border': 1, 'align': 'left'}) + # Add warning note worksheet.write(1, len(df.columns) + 2, "Values in red are formulas. 
Do not input anything.", formula_format) column_width = 35 for col_num in range(len(df.columns) + 3): worksheet.set_column(col_num, col_num, column_width) - + + # Write headers worksheet.write('A1', 'Scenario', scenario_header_format) for col_num, header in enumerate(df.columns): worksheet.write(0, col_num + 1, header, scenario_header_format) + # Write variable names for row_num, variable in enumerate(df.index): worksheet.write(row_num + 1, 0, variable, variable_name_format) + # Write data values for row_num, row_data in enumerate(df.itertuples(index=False)): for col_num, value in enumerate(row_data): worksheet.write(row_num + 1, col_num + 1, "" if pd.isnull(value) or value == '-' else value, data_format) headers = {header: idx for idx, header in enumerate(df.index)} + # Define BAU cells bau_cells = { 'grid_value': f'{colnum_string(2)}{headers["Grid Purchased Electricity (kWh)"] + 2}' if "Grid Purchased Electricity (kWh)" in headers else None, 'net_cost_value': f'{colnum_string(2)}{headers["Net Electricity Cost ($)"] + 2}' if "Net Electricity Cost ($)" in headers else None, @@ -1310,18 +1371,45 @@ def create_custom_table_excel(df, custom_table, calculations, output): 'co2_reduction_value': f'{colnum_string(2)}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None } + relevant_columns = [col_name for _, col_name in custom_table] + relevant_calculations = [calc for calc in calculations if calc["name"] in relevant_columns] + + logged_messages = set() # Set to track unique error messages missing_entries = [] + for col in range(2, len(df.columns) + 2): col_letter = colnum_string(col) - for calc in calculations: - if calc["name"] in headers: - row_idx = headers[calc["name"]] - formula = calc["formula"](col_letter, bau_cells, headers) - if formula: - worksheet.write_formula(row_idx + 1, col-1, formula, formula_format) + for calc in relevant_calculations: + try: + # Check if all required keys are present in headers or bau_cells + if all(key in headers or key in bau_cells for key in calc["formula"].__code__.co_names): + row_idx = headers.get(calc["name"]) + if row_idx is not None: + formula = calc["formula"](col_letter, bau_cells, headers) + worksheet.write_formula(row_idx + 1, col-1, formula, formula_format) + else: + missing_entries.append(calc["name"]) else: - missing_entries.append(calc["name"]) - else: + # Identify missing keys and set the cell to "MISSING DATA" + missing_keys = [key for key in calc["formula"].__code__.co_names if key not in headers and key not in bau_cells] + if missing_keys: + row_idx = headers.get(calc["name"]) + if row_idx is not None: + worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) + message = f"Cannot calculate '{calc['name']}' because the required fields are missing: {', '.join(missing_keys)} in the Custom Table configuration. Update the Custom Table to include {missing_keys}. Writing 'MISSING DATA' instead." + if message not in logged_messages: + print(message) + logged_messages.add(message) + missing_entries.append(calc["name"]) + except KeyError as e: + missing_field = str(e) + message = f"Cannot calculate '{calc['name']}' because the field '{missing_field}' is missing in the Custom Table configuration. Update the Custom Table to include {missing_field} . Writing 'MISSING DATA' instead." 
+ if message not in logged_messages: + print(message) + logged_messages.add(message) + row_idx = headers.get(calc["name"]) + if row_idx is not None: + worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) missing_entries.append(calc["name"]) if missing_entries: @@ -1329,53 +1417,6 @@ def create_custom_table_excel(df, custom_table, calculations, output): workbook.close() -# def create_custom_comparison_table(request, run_uuids): -# if request.method == 'GET': -# print("Handling GET request for comparison table") # Debug print -# # Ensure run_uuids is a list of valid UUIDs -# try: -# run_uuids = [uuid.UUID(r_uuid) for r_uuid in run_uuids] -# except ValueError as e: -# return JsonResponse({"Error": f"Invalid UUID format: {str(e)}"}, status=400) - -# try: -# # Access raw data using the UUIDs -# scenarios = access_raw_data(run_uuids, request) -# if 'scenarios' not in scenarios: -# return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) - -# # Process the scenarios and generate the comparison table -# final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) -# final_df.iloc[1:, 0] = run_uuids - -# # Transpose and format DataFrame -# final_df_transpose = final_df.transpose() -# final_df_transpose.columns = final_df_transpose.iloc[0] -# final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - -# # Create Excel file -# output = io.BytesIO() -# create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) -# output.seek(0) - -# # Set up the HTTP response -# filename = "comparison_table.xlsx" -# response = HttpResponse( -# output, -# content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' -# ) -# response['Content-Disposition'] = f'attachment; filename={filename}' - -# return response - -# except Exception as e: -# exc_type, exc_value, exc_traceback = sys.exc_info() -# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table', run_uuids=run_uuids) -# err.save_to_db() -# return JsonResponse({"Error": str(err.message)}, status=500) - -# return JsonResponse({"Error": "Method not allowed"}, status=405) - def create_custom_comparison_table(request, run_uuids): if request.method == 'GET': print(f"Handling GET request with run_uuids: {run_uuids}") @@ -1383,6 +1424,8 @@ def create_custom_comparison_table(request, run_uuids): # Convert the string of UUIDs back into a list run_uuids = run_uuids.split(';') + #### Selected Table + target_custom_table = ita_custom_table # Validate that all run UUIDs are valid for r_uuid in run_uuids: try: @@ -1397,7 +1440,7 @@ def create_custom_comparison_table(request, run_uuids): return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) # Process scenarios - final_df = process_scenarios(scenarios['scenarios'], ita_custom_table) + final_df = process_scenarios(scenarios['scenarios'], target_custom_table) final_df.iloc[1:, 0] = run_uuids # Transpose and format DataFrame @@ -1407,7 +1450,7 @@ def create_custom_comparison_table(request, run_uuids): # Create Excel file output = io.BytesIO() - create_custom_table_excel(final_df_transpose, ita_custom_table, calculations, output) + create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) output.seek(0) # Set up the HTTP response @@ -1430,6 +1473,25 @@ def create_custom_comparison_table(request, run_uuids): # Configuration # Set up table needed along with REopt dictionaries to 
grab data + +other_custom_table = [ + (lambda df: get_with_suffix(df, "outputs.PV.size_kw", ""), "PV Size (kW)"), + (lambda df: get_with_suffix(df, "outputs.Wind.size_kw", ""), "Wind Size (kW)"), + (lambda df: get_with_suffix(df, "outputs.CHP.size_kw", ""), "CHP Size (kW)"), + (lambda df: get_with_suffix(df, "outputs.PV.annual_energy_produced_kwh", ""), "PV Total Electricity Produced (kWh)"), + (lambda df: get_with_suffix(df, "outputs.PV.electric_to_grid_series_kw", ""), "PV Exported to Grid (kWh)"), + (lambda df: get_with_suffix(df, "outputs.PV.electric_to_load_series_kw", ""), "PV Serving Load (kWh)"), + (lambda df: get_with_suffix(df, "outputs.Financial.lifecycle_capital_costs", ""), "Gross Capital Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_us_dollars", ""), "Federal Tax Incentive (30%)"), + (lambda df: get_with_suffix(df, "outputs.Financial.iac_grant_us_dollars", ""), "IAC Grant ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_value_us_dollars", ""), "Incentive Value ($)"), + (lambda df: get_with_suffix(df, "outputs.Financial.net_capital_cost_us_dollars", ""), "Net Capital Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau", ""), "CO2 (%) savings "), + (lambda df: get_with_suffix(df, "outputs.Financial.npv", ""), "NPV"), + (lambda df: get_with_suffix(df, "inputs.PV.federal_itc_fraction", ""), "PV Federal Tax Incentive (%)"), + (lambda df: get_with_suffix(df, "inputs.ElectricStorage.total_itc_fraction", ""), "Storage Federal Tax Incentive (%)") +] + ita_custom_table = [ (lambda df: get_with_suffix(df, "outputs.PV.size_kw", ""), "PV Size (kW)"), (lambda df: get_with_suffix(df, "outputs.Wind.size_kw", ""), "Wind Size (kW)"), From 00c97d5853cea18648cd20a2376cab0abbcbae82 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Mon, 19 Aug 2024 16:03:52 -0600 Subject: [PATCH 11/44] updated code to retrieve BAU values --- reoptjl/views.py | 490 ++++++++++++++++++++++++++--------------------- 1 file changed, 276 insertions(+), 214 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index f80a13fb3..08975373d 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1191,269 +1191,328 @@ def easiur_costs(request): ############################################################### def generate_data_dict(config, df_gen, suffix): - data_dict = defaultdict(list) - for var_key, col_name in config: - if callable(var_key): - val = var_key(df_gen) - else: - val = get_with_suffix(df_gen, var_key, suffix, "-") - data_dict[col_name].append(val) - return data_dict + try: + data_dict = defaultdict(list) + for var_key, col_name in config: + if callable(var_key): + val = var_key(df_gen) + else: + val = get_with_suffix(df_gen, var_key, suffix, "-") + data_dict[col_name].append(val) + return data_dict + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + err.save_to_db() + raise def get_REopt_data(data_f, scenario_name, config): - scenario_name_str = str(scenario_name) - suffix = "_bau" if re.search(r"(?i)\bBAU\b", scenario_name_str) else "" - - df_gen = flatten_dict(data_f) - data_dict = generate_data_dict(config, df_gen, suffix) - data_dict["Scenario"] = [scenario_name_str] + try: + scenario_name_str = str(scenario_name) + suffix = "_bau" if re.search(r"(?i)\bBAU\b", scenario_name_str) else "" + + df_gen = flatten_dict(data_f) 
+ data_dict = generate_data_dict(config, df_gen, suffix) + data_dict["Scenario"] = [scenario_name_str] - col_order = ["Scenario"] + [col_name for _, col_name in config] - df_res = pd.DataFrame(data_dict) - df_res = df_res[col_order] + col_order = ["Scenario"] + [col_name for _, col_name in config] + df_res = pd.DataFrame(data_dict) + df_res = df_res[col_order] - return df_res + return df_res + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + err.save_to_db() + raise -# def get_bau_values(mock_scenarios, config): -# bau_values = {col_name: None for _, col_name in config} -# for scenario in mock_scenarios: -# df_gen = flatten_dict(scenario) -# for var_key, col_name in config: -# try: -# key = var_key.__code__.co_consts[1] -# except IndexError: -# continue - -# key_bau = f"{key}_bau" -# if key_bau in df_gen: -# value = df_gen[key_bau] -# if bau_values[col_name] is None: -# bau_values[col_name] = value -# elif bau_values[col_name] != value: -# raise ValueError(f"Inconsistent BAU values for {col_name}. This should only be used for portfolio cases with the same Site, ElectricLoad, and ElectricTariff for energy consumption and energy costs.") -# return bau_values +# def get_bau_values(mock_scenarios, config): +# try: +# bau_values = {col_name: None for _, col_name in config} + +# consistent_fields = { +# "full_data.inputs.Site.latitude": None, +# "full_data.inputs.Site.longitude": None, +# "full_data.inputs.ElectricLoad.doe_reference_name": None, +# "full_data.inputs.ElectricTariff.urdb_label": None +# } + +# for scenario_index, scenario in enumerate(mock_scenarios): +# df_gen = flatten_dict(scenario) + +# if scenario_index == 0: +# for key in consistent_fields: +# consistent_fields[key] = df_gen.get(key) + +# else: +# for key, reference_value in consistent_fields.items(): +# current_value = df_gen.get(key) +# if current_value != reference_value: +# raise ValueError( +# f"Inconsistent scenario input values found across scenarios. " +# f"Scenario {scenario_index + 1} has {current_value} " +# f"while reference scenario has {reference_value}. " +# "This should only be used for portfolio cases with the same Site, " +# "ElectricLoad, and ElectricTariff for energy consumption and energy costs." +# ) + +# for var_key, col_name in config: +# try: +# key = var_key.__code__.co_consts[1] +# except IndexError: +# continue + +# key_bau = f"{key}_bau" +# if key_bau in df_gen: +# value = df_gen[key_bau] +# if bau_values[col_name] is None: +# bau_values[col_name] = value +# elif bau_values[col_name] != value: +# raise ValueError(f"Inconsistent BAU values for {col_name}. 
This should only be used for portfolio cases with the same Site, ElectricLoad, and ElectricTariff for energy consumption and energy costs.") + +# return bau_values +# except ValueError as e: +# raise +# except Exception as e: +# exc_type, exc_value, exc_traceback = sys.exc_info() +# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') +# err.save_to_db() +# raise def get_bau_values(mock_scenarios, config): - # Initialize bau_values for the config keys - bau_values = {col_name: None for _, col_name in config} - - # Fields that should be consistent across all scenarios - consistent_fields = { - "full_data.inputs.Site.latitude": None, - "full_data.inputs.Site.longitude": None, - "full_data.inputs.ElectricLoad.doe_reference_name": None, - "full_data.inputs.ElectricTariff.urdb_label": None - } - - # Iterate through all scenarios and flatten them - for scenario_index, scenario in enumerate(mock_scenarios): - df_gen = flatten_dict(scenario) + try: + bau_values = {col_name: None for _, col_name in config} - # On the first pass, store the reference values - if scenario_index == 0: - for key in consistent_fields: - consistent_fields[key] = df_gen.get(key) + # Assuming the first scenario has the BAU data + first_scenario = mock_scenarios[0] + df_gen = flatten_dict(first_scenario['full_data']) - # On subsequent passes, compare against the reference values - else: - for key, reference_value in consistent_fields.items(): - current_value = df_gen.get(key) - # Compare with the reference value - if current_value != reference_value: - raise ValueError( - f"Inconsistent scenario input values found across scenarios. " - f"Scenario {scenario_index + 1} has {current_value} " - f"while reference scenario has {reference_value}. " - "This should only be used for portfolio cases with the same Site, " - "ElectricLoad, and ElectricTariff for energy consumption and energy costs." - ) - - # Process the scenario with existing logic for BAU values for var_key, col_name in config: - try: - key = var_key.__code__.co_consts[1] - except IndexError: - continue + if callable(var_key): + # Extract the key being referenced in the lambda function + try: + key = var_key.__code__.co_consts[1] + except IndexError: + continue + else: + key = var_key + # Append the '_bau' suffix to match BAU values key_bau = f"{key}_bau" - if key_bau in df_gen: - value = df_gen[key_bau] - if bau_values[col_name] is None: - bau_values[col_name] = value - elif bau_values[col_name] != value: - raise ValueError(f"Inconsistent BAU values for {col_name}. 
This should only be used for portfolio cases with the same Site, ElectricLoad, and ElectricTariff for energy consumption and energy costs.") + value = df_gen.get(key_bau) - return bau_values + if value is not None: + bau_values[col_name] = value + + return bau_values + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + err.save_to_db() + raise def access_raw_data(run_uuids, request): - full_summary_dict = {"scenarios": []} - for run_uuid in run_uuids: - scenario_data = { - "run_uuid": str(run_uuid), - "full_data": process_raw_data(request, run_uuid) + try: + full_summary_dict = {"scenarios": []} + + # Fetch UserProvidedMeta data for the relevant run_uuids + usermeta = UserProvidedMeta.objects.filter(meta__run_uuid__in=run_uuids).only( + 'meta__run_uuid', + 'description', + 'address' + ) + + # Create a dictionary to map run_uuids to their associated meta data + meta_data_dict = { + um.meta.run_uuid: { + "description": um.description, + "address": um.address + } + for um in usermeta } - full_summary_dict["scenarios"].append(scenario_data) - return full_summary_dict + + for run_uuid in run_uuids: + scenario_data = { + "run_uuid": str(run_uuid), + "full_data": process_raw_data(request, run_uuid), + "meta_data": meta_data_dict.get(run_uuid, {}) + } + full_summary_dict["scenarios"].append(scenario_data) + + return full_summary_dict + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + err.save_to_db() + raise + def process_raw_data(request, run_uuid): - response = results(request, run_uuid) - if response.status_code == 200: - result_data = json.loads(response.content) - processed_data = sum_vectors(result_data) - return processed_data - else: - return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} + try: + response = results(request, run_uuid) + if response.status_code == 200: + result_data = json.loads(response.content) + processed_data = sum_vectors(result_data) + return processed_data + else: + return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + err.save_to_db() + raise def process_scenarios(scenarios, reopt_data_config): - config = reopt_data_config - bau_values = get_bau_values(scenarios, config) - combined_df = pd.DataFrame() - for scenario in scenarios: - run_uuid = scenario['run_uuid'] - df_result = get_REopt_data(scenario['full_data'], run_uuid, config) - df_result = df_result.set_index('Scenario').T - df_result.columns = [run_uuid] - combined_df = df_result if combined_df.empty else combined_df.join(df_result, how='outer') - - bau_data = {key: [value] for key, value in bau_values.items()} - bau_data["Scenario"] = ["BAU"] - df_bau = pd.DataFrame(bau_data) - - combined_df = pd.concat([df_bau, combined_df.T]).reset_index(drop=True) - combined_df = clean_data_dict(combined_df.to_dict(orient="list")) - combined_df = pd.DataFrame(combined_df) - combined_df = combined_df[["Scenario"] + [col for col in combined_df.columns if col != "Scenario"]] - - return combined_df + try: + config = reopt_data_config + bau_values = get_bau_values(scenarios, config) + combined_df = pd.DataFrame() + + for scenario in scenarios: + run_uuid = 
scenario['run_uuid'] + df_result = get_REopt_data(scenario['full_data'], run_uuid, config) + df_result = df_result.set_index('Scenario').T + df_result.columns = [run_uuid] + combined_df = df_result if combined_df.empty else combined_df.join(df_result, how='outer') + + # Adding BAU data as the first row in the DataFrame + bau_data = {key: [value] for key, value in bau_values.items()} + bau_data["Scenario"] = ["BAU"] + df_bau = pd.DataFrame(bau_data) + + # Combine BAU data with scenario results + combined_df = pd.concat([df_bau, combined_df.T]).reset_index(drop=True) + combined_df = clean_data_dict(combined_df.to_dict(orient="list")) + combined_df = pd.DataFrame(combined_df) + combined_df = combined_df[["Scenario"] + [col for col in combined_df.columns if col != "Scenario"]] + + return combined_df + except ValueError as e: + raise + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + err.save_to_db() + raise def create_custom_table_excel(df, custom_table, calculations, output): - workbook = xlsxwriter.Workbook(output, {'in_memory': True}) - worksheet = workbook.add_worksheet('Custom Table') - - # Formats - data_format = workbook.add_format({'align': 'center', 'valign': 'center', 'border': 1}) - formula_format = workbook.add_format({'bg_color': '#FECF86', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'red'}) - error_format = workbook.add_format({'bg_color': '#FFC7CE', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'black'}) # For missing data - scenario_header_format = workbook.add_format({'bold': True, 'bg_color': '#0079C2', 'border': 1, 'align': 'center', 'font_color': 'white'}) - variable_name_format = workbook.add_format({'bold': True, 'bg_color': '#DEE2E5', 'border': 1, 'align': 'left'}) - - # Add warning note - worksheet.write(1, len(df.columns) + 2, "Values in red are formulas. 
Do not input anything.", formula_format) - - column_width = 35 - for col_num in range(len(df.columns) + 3): - worksheet.set_column(col_num, col_num, column_width) - - # Write headers - worksheet.write('A1', 'Scenario', scenario_header_format) - for col_num, header in enumerate(df.columns): - worksheet.write(0, col_num + 1, header, scenario_header_format) - - # Write variable names - for row_num, variable in enumerate(df.index): - worksheet.write(row_num + 1, 0, variable, variable_name_format) - - # Write data values - for row_num, row_data in enumerate(df.itertuples(index=False)): - for col_num, value in enumerate(row_data): - worksheet.write(row_num + 1, col_num + 1, "" if pd.isnull(value) or value == '-' else value, data_format) - - headers = {header: idx for idx, header in enumerate(df.index)} - - # Define BAU cells - bau_cells = { - 'grid_value': f'{colnum_string(2)}{headers["Grid Purchased Electricity (kWh)"] + 2}' if "Grid Purchased Electricity (kWh)" in headers else None, - 'net_cost_value': f'{colnum_string(2)}{headers["Net Electricity Cost ($)"] + 2}' if "Net Electricity Cost ($)" in headers else None, - 'ng_reduction_value': f'{colnum_string(2)}{headers["Total Fuel (MMBtu)"] + 2}' if "Total Fuel (MMBtu)" in headers else None, - 'util_cost_value': f'{colnum_string(2)}{headers["Total Utility Cost ($)"] + 2}' if "Total Utility Cost ($)" in headers else None, - 'co2_reduction_value': f'{colnum_string(2)}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None - } + try: + workbook = xlsxwriter.Workbook(output, {'in_memory': True}) + worksheet = workbook.add_worksheet('Custom Table') - relevant_columns = [col_name for _, col_name in custom_table] - relevant_calculations = [calc for calc in calculations if calc["name"] in relevant_columns] + data_format = workbook.add_format({'align': 'center', 'valign': 'center', 'border': 1}) + formula_format = workbook.add_format({'bg_color': '#FECF86', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'red'}) + error_format = workbook.add_format({'bg_color': '#FFC7CE', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'black'}) # For missing data + scenario_header_format = workbook.add_format({'bold': True, 'bg_color': '#0079C2', 'border': 1, 'align': 'center', 'font_color': 'white'}) + variable_name_format = workbook.add_format({'bold': True, 'bg_color': '#DEE2E5', 'border': 1, 'align': 'left'}) - logged_messages = set() # Set to track unique error messages - missing_entries = [] + worksheet.write(1, len(df.columns) + 2, "Values in red are formulas. 
Do not input anything.", formula_format) - for col in range(2, len(df.columns) + 2): - col_letter = colnum_string(col) - for calc in relevant_calculations: - try: - # Check if all required keys are present in headers or bau_cells - if all(key in headers or key in bau_cells for key in calc["formula"].__code__.co_names): - row_idx = headers.get(calc["name"]) - if row_idx is not None: - formula = calc["formula"](col_letter, bau_cells, headers) - worksheet.write_formula(row_idx + 1, col-1, formula, formula_format) - else: - missing_entries.append(calc["name"]) - else: - # Identify missing keys and set the cell to "MISSING DATA" - missing_keys = [key for key in calc["formula"].__code__.co_names if key not in headers and key not in bau_cells] - if missing_keys: + column_width = 35 + for col_num in range(len(df.columns) + 3): + worksheet.set_column(col_num, col_num, column_width) + + worksheet.write('A1', 'Scenario', scenario_header_format) + for col_num, header in enumerate(df.columns): + worksheet.write(0, col_num + 1, header, scenario_header_format) + + for row_num, variable in enumerate(df.index): + worksheet.write(row_num + 1, 0, variable, variable_name_format) + + for row_num, row_data in enumerate(df.itertuples(index=False)): + for col_num, value in enumerate(row_data): + worksheet.write(row_num + 1, col_num + 1, "" if pd.isnull(value) or value == '-' else value, data_format) + + headers = {header: idx for idx, header in enumerate(df.index)} + + bau_cells = { + 'grid_value': f'{colnum_string(2)}{headers["Grid Purchased Electricity (kWh)"] + 2}' if "Grid Purchased Electricity (kWh)" in headers else None, + 'net_cost_value': f'{colnum_string(2)}{headers["Net Electricity Cost ($)"] + 2}' if "Net Electricity Cost ($)" in headers else None, + 'ng_reduction_value': f'{colnum_string(2)}{headers["Total Fuel (MMBtu)"] + 2}' if "Total Fuel (MMBtu)" in headers else None, + 'util_cost_value': f'{colnum_string(2)}{headers["Total Utility Cost ($)"] + 2}' if "Total Utility Cost ($)" in headers else None, + 'co2_reduction_value': f'{colnum_string(2)}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None + } + + relevant_columns = [col_name for _, col_name in custom_table] + relevant_calculations = [calc for calc in calculations if calc["name"] in relevant_columns] + + logged_messages = set() + missing_entries = [] + + for col in range(2, len(df.columns) + 2): + col_letter = colnum_string(col) + for calc in relevant_calculations: + try: + if all(key in headers or key in bau_cells for key in calc["formula"].__code__.co_names): row_idx = headers.get(calc["name"]) if row_idx is not None: - worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) - message = f"Cannot calculate '{calc['name']}' because the required fields are missing: {', '.join(missing_keys)} in the Custom Table configuration. Update the Custom Table to include {missing_keys}. Writing 'MISSING DATA' instead." - if message not in logged_messages: - print(message) - logged_messages.add(message) - missing_entries.append(calc["name"]) - except KeyError as e: - missing_field = str(e) - message = f"Cannot calculate '{calc['name']}' because the field '{missing_field}' is missing in the Custom Table configuration. Update the Custom Table to include {missing_field} . Writing 'MISSING DATA' instead." 
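For context on the cell references used by these formulas: bau_cells always points at worksheet column B, because the BAU scenario is the first data column after the transpose. A minimal sketch of how one such reference is built, using the colnum_string helper from custom_table_helpers and a hypothetical header position:

    # Illustrative only; the header row index 3 is hypothetical.
    headers = {"Grid Purchased Electricity (kWh)": 3}
    ref = f"{colnum_string(2)}{headers['Grid Purchased Electricity (kWh)'] + 2}"
    # colnum_string(2) == "B", and the +2 offset accounts for the header row and 1-based Excel rows,
    # so ref == "B5": the BAU value for that row.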
- if message not in logged_messages: - print(message) - logged_messages.add(message) - row_idx = headers.get(calc["name"]) - if row_idx is not None: - worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) - missing_entries.append(calc["name"]) - - if missing_entries: - print(f"Missing entries in the input table: {', '.join(set(missing_entries))}. Please update the configuration if necessary.") - - workbook.close() + formula = calc["formula"](col_letter, bau_cells, headers) + worksheet.write_formula(row_idx + 1, col-1, formula, formula_format) + else: + missing_entries.append(calc["name"]) + else: + missing_keys = [key for key in calc["formula"].__code__.co_names if key not in headers and key not in bau_cells] + if missing_keys: + row_idx = headers.get(calc["name"]) + if row_idx is not None: + worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) + message = f"Cannot calculate '{calc['name']}' because the required fields are missing: {', '.join(missing_keys)} in the Custom Table configuration. Update the Custom Table to include {missing_keys}. Writing 'MISSING DATA' instead." + if message not in logged_messages: + print(message) + logged_messages.add(message) + missing_entries.append(calc["name"]) + except KeyError as e: + missing_field = str(e) + message = f"Cannot calculate '{calc['name']}' because the field '{missing_field}' is missing in the Custom Table configuration. Update the Custom Table to include {missing_field}. Writing 'MISSING DATA' instead." + if message not in logged_messages: + print(message) + logged_messages.add(message) + row_idx = headers.get(calc["name"]) + if row_idx is not None: + worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) + missing_entries.append(calc["name"]) + + if missing_entries: + print(f"Missing entries in the input table: {', '.join(set(missing_entries))}. 
Please update the configuration if necessary.") + + workbook.close() + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + err.save_to_db() + raise def create_custom_comparison_table(request, run_uuids): if request.method == 'GET': - print(f"Handling GET request with run_uuids: {run_uuids}") + try: + print(f"Handling GET request with run_uuids: {run_uuids}") - # Convert the string of UUIDs back into a list - run_uuids = run_uuids.split(';') + run_uuids = run_uuids.split(';') + target_custom_table = ita_custom_table - #### Selected Table - target_custom_table = ita_custom_table - # Validate that all run UUIDs are valid - for r_uuid in run_uuids: - try: - uuid.UUID(r_uuid) # raises ValueError if not a valid UUID - except ValueError as e: - return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) + for r_uuid in run_uuids: + try: + uuid.UUID(r_uuid) + except ValueError as e: + return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) - try: - # Access raw data using the list of UUIDs scenarios = access_raw_data(run_uuids, request) if 'scenarios' not in scenarios: return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) - # Process scenarios final_df = process_scenarios(scenarios['scenarios'], target_custom_table) final_df.iloc[1:, 0] = run_uuids - # Transpose and format DataFrame final_df_transpose = final_df.transpose() final_df_transpose.columns = final_df_transpose.iloc[0] final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - # Create Excel file output = io.BytesIO() create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) output.seek(0) - # Set up the HTTP response filename = "comparison_table.xlsx" response = HttpResponse( output, @@ -1471,6 +1530,7 @@ def create_custom_comparison_table(request, run_uuids): return JsonResponse({"Error": "Method not allowed"}, status=405) + # Configuration # Set up table needed along with REopt dictionaries to grab data @@ -1482,11 +1542,13 @@ def create_custom_comparison_table(request, run_uuids): (lambda df: get_with_suffix(df, "outputs.PV.electric_to_grid_series_kw", ""), "PV Exported to Grid (kWh)"), (lambda df: get_with_suffix(df, "outputs.PV.electric_to_load_series_kw", ""), "PV Serving Load (kWh)"), (lambda df: get_with_suffix(df, "outputs.Financial.lifecycle_capital_costs", ""), "Gross Capital Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax", ""), "Electricity Energy Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax", ""), "Electricity Demand Cost ($)"), + (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax", ""), "Utility Fixed Cost ($)"), (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_us_dollars", ""), "Federal Tax Incentive (30%)"), (lambda df: get_with_suffix(df, "outputs.Financial.iac_grant_us_dollars", ""), "IAC Grant ($)"), (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_value_us_dollars", ""), "Incentive Value ($)"), (lambda df: get_with_suffix(df, "outputs.Financial.net_capital_cost_us_dollars", ""), "Net Capital Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau", ""), "CO2 (%) savings "), (lambda df: get_with_suffix(df, "outputs.Financial.npv", ""), 
"NPV"), (lambda df: get_with_suffix(df, "inputs.PV.federal_itc_fraction", ""), "PV Federal Tax Incentive (%)"), (lambda df: get_with_suffix(df, "inputs.ElectricStorage.total_itc_fraction", ""), "Storage Federal Tax Incentive (%)") From 8811db8fd929a7522d8f85c323d531d7db388019 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Mon, 19 Aug 2024 16:26:06 -0600 Subject: [PATCH 12/44] revert docker file to original --- docker-compose.yml | 4 +++- julia_src/Dockerfile | 12 +++--------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 2dead0986..81d32040f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,3 +1,5 @@ +version: "2.1" + services: redis: @@ -69,4 +71,4 @@ services: ports: - "8081:8081" volumes: - - ./julia_src:/opt/julia_src + - ./julia_src:/opt/julia_src \ No newline at end of file diff --git a/julia_src/Dockerfile b/julia_src/Dockerfile index c626110c5..b0573d7af 100644 --- a/julia_src/Dockerfile +++ b/julia_src/Dockerfile @@ -5,20 +5,14 @@ ARG NREL_ROOT_CERT_URL_ROOT="" RUN set -x && if [ -n "$NREL_ROOT_CERT_URL_ROOT" ]; then curl -fsSLk -o /usr/local/share/ca-certificates/nrel_root.crt "${NREL_ROOT_CERT_URL_ROOT}/nrel_root.pem" && curl -fsSLk -o /usr/local/share/ca-certificates/nrel_xca1.crt "${NREL_ROOT_CERT_URL_ROOT}/nrel_xca1.pem" && update-ca-certificates; fi ENV REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt -# # Optionally disable SSL verification for Git operations -ENV JULIA_SSL_NO_VERIFY_HOSTS="github.com" - # Install Julia packages +ENV JULIA_NUM_THREADS=2 ENV XPRESS_JL_SKIP_LIB_CHECK=True -# Set the working directory and copy files WORKDIR /opt/julia_src COPY . . - -# Install Julia packages RUN julia --project=/opt/julia_src -e 'import Pkg; Pkg.instantiate();' RUN julia --project=/opt/julia_src precompile.jl - - EXPOSE 8081 -CMD ["bash"] + +CMD ["bash"] \ No newline at end of file From e3818f8d8cfcafbc731ab207115ab58b4e4cefd8 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 21 Aug 2024 12:24:59 -0600 Subject: [PATCH 13/44] updated query pattern to match simulate load --- reoptjl/urls.py | 16 +----- reoptjl/views.py | 130 +++++++++++++++++++++++++---------------------- 2 files changed, 71 insertions(+), 75 deletions(-) diff --git a/reoptjl/urls.py b/reoptjl/urls.py index f9fdb8294..28eab3bc2 100644 --- a/reoptjl/urls.py +++ b/reoptjl/urls.py @@ -1,19 +1,7 @@ # REopt®, Copyright (c) Alliance for Sustainable Energy, LLC. See also https://github.com/NREL/REopt_API/blob/master/LICENSE. from . 
import views from reo import views as reoviews -from django.urls import register_converter, re_path - -class UUIDListConverter: - regex = r'([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})(;([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}))*' - - def to_python(self, value): - return value.split(';') - - def to_url(self, value): - return ';'.join(value) - -# Register the custom converter -register_converter(UUIDListConverter, 'uuidlist') +from django.urls import re_path urlpatterns = [ re_path(r'^job/(?P[0-9a-f-]+)/results/?$', views.results), @@ -34,5 +22,5 @@ def to_url(self, value): re_path(r'^invalid_urdb/?$', reoviews.invalid_urdb), re_path(r'^schedule_stats/?$', reoviews.schedule_stats), re_path(r'^get_existing_chiller_default_cop/?$', views.get_existing_chiller_default_cop), - re_path(r'^job/comparison_table/(?P[0-9a-f\-;]+)/$', views.create_custom_comparison_table), + re_path(r'^job/comparison_table/?$', views.create_custom_comparison_table), ] diff --git a/reoptjl/views.py b/reoptjl/views.py index 08975373d..be80a6590 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1226,60 +1226,6 @@ def get_REopt_data(data_f, scenario_name, config): err.save_to_db() raise - -# def get_bau_values(mock_scenarios, config): -# try: -# bau_values = {col_name: None for _, col_name in config} - -# consistent_fields = { -# "full_data.inputs.Site.latitude": None, -# "full_data.inputs.Site.longitude": None, -# "full_data.inputs.ElectricLoad.doe_reference_name": None, -# "full_data.inputs.ElectricTariff.urdb_label": None -# } - -# for scenario_index, scenario in enumerate(mock_scenarios): -# df_gen = flatten_dict(scenario) - -# if scenario_index == 0: -# for key in consistent_fields: -# consistent_fields[key] = df_gen.get(key) - -# else: -# for key, reference_value in consistent_fields.items(): -# current_value = df_gen.get(key) -# if current_value != reference_value: -# raise ValueError( -# f"Inconsistent scenario input values found across scenarios. " -# f"Scenario {scenario_index + 1} has {current_value} " -# f"while reference scenario has {reference_value}. " -# "This should only be used for portfolio cases with the same Site, " -# "ElectricLoad, and ElectricTariff for energy consumption and energy costs." -# ) - -# for var_key, col_name in config: -# try: -# key = var_key.__code__.co_consts[1] -# except IndexError: -# continue - -# key_bau = f"{key}_bau" -# if key_bau in df_gen: -# value = df_gen[key_bau] -# if bau_values[col_name] is None: -# bau_values[col_name] = value -# elif bau_values[col_name] != value: -# raise ValueError(f"Inconsistent BAU values for {col_name}. 
This should only be used for portfolio cases with the same Site, ElectricLoad, and ElectricTariff for energy consumption and energy costs.") - -# return bau_values -# except ValueError as e: -# raise -# except Exception as e: -# exc_type, exc_value, exc_traceback = sys.exc_info() -# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') -# err.save_to_db() -# raise - def get_bau_values(mock_scenarios, config): try: bau_values = {col_name: None for _, col_name in config} @@ -1312,7 +1258,6 @@ def get_bau_values(mock_scenarios, config): err.save_to_db() raise - def access_raw_data(run_uuids, request): try: full_summary_dict = {"scenarios": []} @@ -1348,7 +1293,6 @@ def access_raw_data(run_uuids, request): err.save_to_db() raise - def process_raw_data(request, run_uuid): try: response = results(request, run_uuid) @@ -1484,22 +1428,85 @@ def create_custom_table_excel(df, custom_table, calculations, output): err.save_to_db() raise -def create_custom_comparison_table(request, run_uuids): +# def create_custom_comparison_table(request): +# if request.method == 'GET': +# try: +# # Extract the run_uuid[] values from the query parameters +# run_uuids = request.GET.getlist('run_uuid[]') +# print(f"Handling GET request with run_uuids: {run_uuids}") + +# # Validate each UUID +# for r_uuid in run_uuids: +# try: +# uuid.UUID(r_uuid) +# except ValueError as e: +# return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) + +# target_custom_table = other_custom_table + +# # Process scenarios and generate the custom table +# scenarios = access_raw_data(run_uuids, request) +# if 'scenarios' not in scenarios: +# return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) + +# final_df = process_scenarios(scenarios['scenarios'], target_custom_table) +# final_df.iloc[1:, 0] = run_uuids + +# final_df_transpose = final_df.transpose() +# final_df_transpose.columns = final_df_transpose.iloc[0] +# final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + +# # Create and send the Excel file +# output = io.BytesIO() +# create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) +# output.seek(0) + +# filename = "comparison_table.xlsx" +# response = HttpResponse( +# output, +# content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' +# ) +# response['Content-Disposition'] = f'attachment; filename={filename}' + +# return response + +# except Exception as e: +# exc_type, exc_value, exc_traceback = sys.exc_info() +# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table', run_uuids=run_uuids) +# err.save_to_db() +# return JsonResponse({"Error": str(err.message)}, status=500) + +# return JsonResponse({"Error": "Method not allowed"}, status=405) + +def create_custom_comparison_table(request): if request.method == 'GET': try: + # Log the entire request GET parameters + print(f"GET parameters: {request.GET}") + + # Manually collect the run_uuid values by iterating over the keys + run_uuids = [] + for key in request.GET.keys(): + if key.startswith('run_uuid['): + run_uuids.append(request.GET[key]) + print(f"Handling GET request with run_uuids: {run_uuids}") - run_uuids = run_uuids.split(';') - target_custom_table = ita_custom_table + if not run_uuids: + return JsonResponse({"Error": "No run_uuids provided"}, status=400) + # Validate each UUID for r_uuid in run_uuids: try: uuid.UUID(r_uuid) except ValueError as e: 
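                    # Illustrative request shape (assumed for clarity, not part of the handler): the
                    # run_uuids collected above arrive as query parameters, e.g.
                    #   GET /job/comparison_table/?run_uuid[0]=<uuid-1>&run_uuid[1]=<uuid-2>
                    # so any entry that is not a well-formed UUID is rejected here with a 400.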
return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) + target_custom_table = other_custom_table + + # Process scenarios and generate the custom table scenarios = access_raw_data(run_uuids, request) - if 'scenarios' not in scenarios: + if 'scenarios' not in scenarios or not scenarios['scenarios']: return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) final_df = process_scenarios(scenarios['scenarios'], target_custom_table) @@ -1509,6 +1516,7 @@ def create_custom_comparison_table(request, run_uuids): final_df_transpose.columns = final_df_transpose.iloc[0] final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + # Create and send the Excel file output = io.BytesIO() create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) output.seek(0) @@ -1524,7 +1532,7 @@ def create_custom_comparison_table(request, run_uuids): except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table', run_uuids=run_uuids) + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') err.save_to_db() return JsonResponse({"Error": str(err.message)}, status=500) From ee73e6ac36c4d72b318a93bbd18e643fc33c19d8 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 21 Aug 2024 14:51:00 -0600 Subject: [PATCH 14/44] default table set to ita_custom_table --- reoptjl/views.py | 53 +----------------------------------------------- 1 file changed, 1 insertion(+), 52 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index be80a6590..cb88a4e44 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1428,56 +1428,6 @@ def create_custom_table_excel(df, custom_table, calculations, output): err.save_to_db() raise -# def create_custom_comparison_table(request): -# if request.method == 'GET': -# try: -# # Extract the run_uuid[] values from the query parameters -# run_uuids = request.GET.getlist('run_uuid[]') -# print(f"Handling GET request with run_uuids: {run_uuids}") - -# # Validate each UUID -# for r_uuid in run_uuids: -# try: -# uuid.UUID(r_uuid) -# except ValueError as e: -# return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) - -# target_custom_table = other_custom_table - -# # Process scenarios and generate the custom table -# scenarios = access_raw_data(run_uuids, request) -# if 'scenarios' not in scenarios: -# return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) - -# final_df = process_scenarios(scenarios['scenarios'], target_custom_table) -# final_df.iloc[1:, 0] = run_uuids - -# final_df_transpose = final_df.transpose() -# final_df_transpose.columns = final_df_transpose.iloc[0] -# final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - -# # Create and send the Excel file -# output = io.BytesIO() -# create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) -# output.seek(0) - -# filename = "comparison_table.xlsx" -# response = HttpResponse( -# output, -# content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' -# ) -# response['Content-Disposition'] = f'attachment; filename={filename}' - -# return response - -# except Exception as e: -# exc_type, exc_value, exc_traceback = sys.exc_info() -# err = UnexpectedError(exc_type, exc_value, exc_traceback, 
task='create_custom_comparison_table', run_uuids=run_uuids) -# err.save_to_db() -# return JsonResponse({"Error": str(err.message)}, status=500) - -# return JsonResponse({"Error": "Method not allowed"}, status=405) - def create_custom_comparison_table(request): if request.method == 'GET': try: @@ -1502,7 +1452,7 @@ def create_custom_comparison_table(request): except ValueError as e: return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) - target_custom_table = other_custom_table + target_custom_table = ita_custom_table # Process scenarios and generate the custom table scenarios = access_raw_data(run_uuids, request) @@ -1538,7 +1488,6 @@ def create_custom_comparison_table(request): return JsonResponse({"Error": "Method not allowed"}, status=405) - # Configuration # Set up table needed along with REopt dictionaries to grab data From 50647dcf6aa905f20168dd8b015be9a72499a7cc Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 21 Aug 2024 16:29:01 -0600 Subject: [PATCH 15/44] updated error logs --- reoptjl/views.py | 74 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 71 insertions(+), 3 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index cb88a4e44..9e9a23bae 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1428,11 +1428,71 @@ def create_custom_table_excel(df, custom_table, calculations, output): err.save_to_db() raise +# def create_custom_comparison_table(request): +# if request.method == 'GET': +# try: +# # Log the entire request GET parameters +# print(f"GET parameters: {request.GET}") + +# # Manually collect the run_uuid values by iterating over the keys +# run_uuids = [] +# for key in request.GET.keys(): +# if key.startswith('run_uuid['): +# run_uuids.append(request.GET[key]) + +# print(f"Handling GET request with run_uuids: {run_uuids}") + +# if not run_uuids: +# return JsonResponse({"Error": "No run_uuids provided"}, status=400) + +# # Validate each UUID +# for r_uuid in run_uuids: +# try: +# uuid.UUID(r_uuid) +# except ValueError as e: +# return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) + +# target_custom_table = ita_custom_table + +# # Process scenarios and generate the custom table +# scenarios = access_raw_data(run_uuids, request) +# if 'scenarios' not in scenarios or not scenarios['scenarios']: +# return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) + +# final_df = process_scenarios(scenarios['scenarios'], target_custom_table) +# final_df.iloc[1:, 0] = run_uuids + +# final_df_transpose = final_df.transpose() +# final_df_transpose.columns = final_df_transpose.iloc[0] +# final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + +# # Create and send the Excel file +# output = io.BytesIO() +# create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) +# output.seek(0) + +# filename = "comparison_table.xlsx" +# response = HttpResponse( +# output, +# content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' +# ) +# response['Content-Disposition'] = f'attachment; filename={filename}' + +# return response + +# except Exception as e: +# exc_type, exc_value, exc_traceback = sys.exc_info() +# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') +# err.save_to_db() +# return JsonResponse({"Error": str(err.message)}, status=500) + +# return JsonResponse({"Error": "Method not allowed"}, 
status=405) + def create_custom_comparison_table(request): if request.method == 'GET': try: # Log the entire request GET parameters - print(f"GET parameters: {request.GET}") + log.debug(f"GET parameters: {request.GET}") # Manually collect the run_uuid values by iterating over the keys run_uuids = [] @@ -1440,7 +1500,7 @@ def create_custom_comparison_table(request): if key.startswith('run_uuid['): run_uuids.append(request.GET[key]) - print(f"Handling GET request with run_uuids: {run_uuids}") + log.debug(f"Handling GET request with run_uuids: {run_uuids}") if not run_uuids: return JsonResponse({"Error": "No run_uuids provided"}, status=400) @@ -1450,6 +1510,7 @@ def create_custom_comparison_table(request): try: uuid.UUID(r_uuid) except ValueError as e: + log.debug(f"Invalid UUID format: {r_uuid}") return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) target_custom_table = ita_custom_table @@ -1457,6 +1518,7 @@ def create_custom_comparison_table(request): # Process scenarios and generate the custom table scenarios = access_raw_data(run_uuids, request) if 'scenarios' not in scenarios or not scenarios['scenarios']: + log.debug("Failed to fetch scenarios") return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) final_df = process_scenarios(scenarios['scenarios'], target_custom_table) @@ -1480,14 +1542,20 @@ def create_custom_comparison_table(request): return response - except Exception as e: + except ValueError as e: + log.debug(f"ValueError: {str(e.args[0])}") + return JsonResponse({"Error": str(e.args[0])}, status=500) + + except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() + log.debug(f"exc_type: {exc_type}; exc_value: {exc_value}; exc_traceback: {tb.format_tb(exc_traceback)}") err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') err.save_to_db() return JsonResponse({"Error": str(err.message)}, status=500) return JsonResponse({"Error": "Method not allowed"}, status=405) + # Configuration # Set up table needed along with REopt dictionaries to grab data From 357b356f1d9b7e6220ed2361f097f2fbdbe75e47 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 28 Aug 2024 14:52:21 -0600 Subject: [PATCH 16/44] updated function to check for bau consistency and to allow for specified table configurations --- reoptjl/custom_table_helpers.py | 40 +- reoptjl/views.py | 1030 +++++++++++++++++++++---------- 2 files changed, 747 insertions(+), 323 deletions(-) diff --git a/reoptjl/custom_table_helpers.py b/reoptjl/custom_table_helpers.py index 233a518b1..efb6f3d53 100644 --- a/reoptjl/custom_table_helpers.py +++ b/reoptjl/custom_table_helpers.py @@ -1,10 +1,4 @@ # custom table helpers.py -def get_with_suffix(df, key, suffix, default_val=0): - """Fetch value from dataframe with an optional retriaval of _bau suffix.""" - if not key.endswith("_bau"): - key = f"{key}{suffix}" - return df.get(key, default_val) - def flatten_dict(d, parent_key='', sep='.'): """Flatten nested dictionary.""" items = [] @@ -45,3 +39,37 @@ def colnum_string(n): n, remainder = divmod(n - 1, 26) string = chr(65 + remainder) + string return string + +def safe_get(df, key, default=0): + return df.get(key, default) + +def check_bau_consistency(scenarios): + """Check the consistency of BAU values across all scenarios.""" + bau_values_list = [] + all_bau_keys = set() + + for scenario in scenarios: + df_gen = flatten_dict(scenario['full_data']) + + 
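+        # For reference (illustrative value only): flatten_dict collapses nested results into
+        # dotted keys, e.g. {"outputs": {"Site": {"lifecycle_emissions_tonnes_CO2_bau": 100.0}}}
+        # flattens to {"outputs.Site.lifecycle_emissions_tonnes_CO2_bau": 100.0}, so every key
+        # ending in "_bau" can be collected here and compared across scenarios.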
current_bau_values = {} + for key, value in df_gen.items(): + if key.endswith('_bau'): + current_bau_values[key] = value + all_bau_keys.add(key) + + bau_values_list.append(current_bau_values) + + # Perform consistency check across all `_bau` values + first_bau_values = bau_values_list[0] + for idx, other_bau_values in enumerate(bau_values_list[1:], start=1): + differences = { + key: (first_bau_values[key], other_bau_values[key]) + for key in all_bau_keys + if first_bau_values.get(key) != other_bau_values.get(key) + } + + if differences: + diff_message = "\n".join( + [f" - {key}: {first_bau_values[key]} vs {other_bau_values[key]}" for key in differences] + ) + raise ValueError(f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1}:\n{diff_message}") diff --git a/reoptjl/views.py b/reoptjl/views.py index 9e9a23bae..6e69619f0 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -21,7 +21,7 @@ import pandas as pd import json import logging -from reoptjl.custom_table_helpers import get_with_suffix, flatten_dict, clean_data_dict, sum_vectors, colnum_string +from reoptjl.custom_table_helpers import safe_get,flatten_dict, clean_data_dict, sum_vectors, colnum_string, check_bau_consistency import xlsxwriter from collections import defaultdict import io @@ -1190,133 +1190,144 @@ def easiur_costs(request): ################ START Custom Table ########################### ############################################################### -def generate_data_dict(config, df_gen, suffix): +def access_raw_data(run_uuids, request): + try: + # Fetch UserProvidedMeta data for the relevant run_uuids + usermeta = UserProvidedMeta.objects.filter(meta__run_uuid__in=run_uuids).only( + 'meta__run_uuid', 'description', 'address' + ) + + # Create a dictionary to map run_uuids to their associated meta data + meta_data_dict = {um.meta.run_uuid: {"description": um.description, "address": um.address} for um in usermeta} + + full_summary_dict = { + "scenarios": [ + { + "run_uuid": str(run_uuid), + "full_data": process_raw_data(request, run_uuid), + "meta_data": meta_data_dict.get(run_uuid, {}) + } + for run_uuid in run_uuids + ] + } + + # Perform the BAU consistency check + check_bau_consistency(full_summary_dict['scenarios']) + + return full_summary_dict + + except ValueError as e: + log.error(f"ValueError in access_raw_data: {e}") + raise + except Exception: + log.error(f"Error in access_raw_data: {tb.format_exc()}") + raise + +def process_raw_data(request, run_uuid): + try: + response = results(request, run_uuid) + if response.status_code == 200: + return sum_vectors(json.loads(response.content)) + return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} + except Exception: + err = UnexpectedError(*sys.exc_info(), task='create_custom_comparison_table') + err.save_to_db() + raise + +def generate_data_dict(config, df_gen, suffix=""): try: data_dict = defaultdict(list) - for var_key, col_name in config: - if callable(var_key): - val = var_key(df_gen) - else: - val = get_with_suffix(df_gen, var_key, suffix, "-") - data_dict[col_name].append(val) + for entry in config: + val = entry["scenario_value"](df_gen) + data_dict[entry["label"]].append(val) + log.debug(f"Generated data_dict: {data_dict}") return data_dict - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() + except Exception: + log.error(f"Error in generate_data_dict: {tb.format_exc()}") raise def 
get_REopt_data(data_f, scenario_name, config): try: scenario_name_str = str(scenario_name) - suffix = "_bau" if re.search(r"(?i)\bBAU\b", scenario_name_str) else "" - + suffix = "_bau" if "BAU" in scenario_name_str.upper() else "" + df_gen = flatten_dict(data_f) + log.debug(f"Flattened data_f in get_REopt_data: {df_gen}") data_dict = generate_data_dict(config, df_gen, suffix) data_dict["Scenario"] = [scenario_name_str] - col_order = ["Scenario"] + [col_name for _, col_name in config] - df_res = pd.DataFrame(data_dict) - df_res = df_res[col_order] + col_order = ["Scenario"] + [entry["label"] for entry in config] + df_res = pd.DataFrame(data_dict)[col_order] + log.debug(f"Generated DataFrame in get_REopt_data: {df_res}") return df_res - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() + except Exception: + log.error(f"Error in get_REopt_data: {tb.format_exc()}") raise -def get_bau_values(mock_scenarios, config): +def get_bau_values(scenarios, config): try: - bau_values = {col_name: None for _, col_name in config} - - # Assuming the first scenario has the BAU data - first_scenario = mock_scenarios[0] - df_gen = flatten_dict(first_scenario['full_data']) - - for var_key, col_name in config: - if callable(var_key): - # Extract the key being referenced in the lambda function - try: - key = var_key.__code__.co_consts[1] - except IndexError: - continue - else: - key = var_key + bau_values = {entry["label"]: None for entry in config} + log.debug(f"Initialized BAU values: {bau_values}") - # Append the '_bau' suffix to match BAU values - key_bau = f"{key}_bau" - value = df_gen.get(key_bau) + # Extract and compare BAU values across all scenarios + bau_values_list = [] - if value is not None: - bau_values[col_name] = value - - return bau_values - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() - raise - -def access_raw_data(run_uuids, request): - try: - full_summary_dict = {"scenarios": []} - - # Fetch UserProvidedMeta data for the relevant run_uuids - usermeta = UserProvidedMeta.objects.filter(meta__run_uuid__in=run_uuids).only( - 'meta__run_uuid', - 'description', - 'address' - ) - - # Create a dictionary to map run_uuids to their associated meta data - meta_data_dict = { - um.meta.run_uuid: { - "description": um.description, - "address": um.address + for scenario in scenarios: + df_gen = flatten_dict(scenario['full_data']) + log.debug(f"Flattened data for scenario {scenario['run_uuid']}: {df_gen}") + + current_bau_values = {} + for entry in config: + bau_func = entry.get("bau_value") + value = bau_func(df_gen) if bau_func else df_gen.get(f"{entry['key']}_bau") + current_bau_values[entry["label"]] = value + + bau_values_list.append(current_bau_values) + + # Check consistency of BAU values across all scenarios + first_bau_values = bau_values_list[0] + for idx, other_bau_values in enumerate(bau_values_list[1:], start=1): + differences = { + key: (first_bau_values[key], other_bau_values[key]) + for key in first_bau_values + if first_bau_values[key] != other_bau_values[key] } - for um in usermeta - } - for run_uuid in run_uuids: - scenario_data = { - "run_uuid": str(run_uuid), - "full_data": process_raw_data(request, run_uuid), - "meta_data": meta_data_dict.get(run_uuid, {}) - } - 
full_summary_dict["scenarios"].append(scenario_data) + if differences: + # Log each difference in a user-friendly way + diff_message = "\n".join( + [f" - {key}: {first_bau_values[key]} vs {other_bau_values[key]}" for key in differences] + ) + log.warning( + f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1}:\n{diff_message}" + ) + raise ValueError( + "Inconsistent BAU values across scenarios. Please check the differences in the logs." + ) + + # If consistent, use the first set of BAU values + bau_values.update(first_bau_values) + log.debug(f"Final consolidated BAU values: {bau_values}") + return bau_values - return full_summary_dict - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() + except ValueError as e: + log.error(f"ValueError in get_bau_values: {e}") raise - -def process_raw_data(request, run_uuid): - try: - response = results(request, run_uuid) - if response.status_code == 200: - result_data = json.loads(response.content) - processed_data = sum_vectors(result_data) - return processed_data - else: - return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() + except Exception: + log.error(f"Error in get_bau_values: {tb.format_exc()}") raise def process_scenarios(scenarios, reopt_data_config): try: - config = reopt_data_config - bau_values = get_bau_values(scenarios, config) + log.debug(f"Starting process_scenarios with config: {reopt_data_config}") + bau_values = get_bau_values(scenarios, reopt_data_config) + log.debug(f"BAU values: {bau_values}") combined_df = pd.DataFrame() for scenario in scenarios: run_uuid = scenario['run_uuid'] - df_result = get_REopt_data(scenario['full_data'], run_uuid, config) + df_result = get_REopt_data(scenario['full_data'], run_uuid, reopt_data_config) df_result = df_result.set_index('Scenario').T df_result.columns = [run_uuid] combined_df = df_result if combined_df.empty else combined_df.join(df_result, how='outer') @@ -1328,30 +1339,191 @@ def process_scenarios(scenarios, reopt_data_config): # Combine BAU data with scenario results combined_df = pd.concat([df_bau, combined_df.T]).reset_index(drop=True) + log.debug(f"Final DataFrame before clean_data_dict:\n{combined_df}") + combined_df = clean_data_dict(combined_df.to_dict(orient="list")) combined_df = pd.DataFrame(combined_df) combined_df = combined_df[["Scenario"] + [col for col in combined_df.columns if col != "Scenario"]] + log.debug(f"Final DataFrame in process_scenarios:\n{combined_df}") return combined_df - except ValueError as e: - raise - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() + except Exception: + log.error(f"Error in process_scenarios: {tb.format_exc()}") raise +def create_custom_comparison_table(request): + if request.method == 'GET': + try: + # Log the entire request GET parameters + log.debug(f"GET parameters: {request.GET}") + + # Get the table configuration name from the query parameters + table_config_name = request.GET.get('table_config_name', 'example_table_config') # Default to 'example_table_config' if not provided + log.debug(f"Using table configuration: 
{table_config_name}") + + # Manually collect the run_uuid values by iterating over the keys + run_uuids = [] + for key in request.GET.keys(): + if key.startswith('run_uuid['): + run_uuids.append(request.GET[key]) + + log.debug(f"Handling GET request with run_uuids: {run_uuids}") + + if not run_uuids: + return JsonResponse({"Error": "No run_uuids provided"}, status=400) + + # Validate each UUID + for r_uuid in run_uuids: + try: + uuid.UUID(r_uuid) + except ValueError as e: + log.debug(f"Invalid UUID format: {r_uuid}") + return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) + + # Dynamically select the table configuration + if table_config_name in globals(): + target_custom_table = globals()[table_config_name] + else: + return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}"}, status=400) + + # Process scenarios and generate the custom table + scenarios = access_raw_data(run_uuids, request) + if 'scenarios' not in scenarios or not scenarios['scenarios']: + log.debug("Failed to fetch scenarios") + return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) + + final_df = process_scenarios(scenarios['scenarios'], target_custom_table) + log.debug(f"Final DataFrame (before transpose):\n{final_df}") + + final_df.iloc[1:, 0] = run_uuids + + final_df_transpose = final_df.transpose() + final_df_transpose.columns = final_df_transpose.iloc[0] + final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + + # Create and send the Excel file + output = io.BytesIO() + create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) + output.seek(0) + + filename = "comparison_table.xlsx" + response = HttpResponse( + output, + content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + ) + response['Content-Disposition'] = f'attachment; filename={filename}' + + return response + + except ValueError as e: + log.debug(f"ValueError: {str(e.args[0])}") + return JsonResponse({"Error": str(e.args[0])}, status=500) + + except Exception: + exc_type, exc_value, exc_traceback = sys.exc_info() + log.debug(f"exc_type: {exc_type}; exc_value: {exc_value}; exc_traceback: {tb.format_tb(exc_traceback)}") + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + err.save_to_db() + return JsonResponse({"Error": str(err.message)}, status=500) + + return JsonResponse({"Error": "Method not allowed"}, status=405) + def create_custom_table_excel(df, custom_table, calculations, output): try: workbook = xlsxwriter.Workbook(output, {'in_memory': True}) worksheet = workbook.add_worksheet('Custom Table') - data_format = workbook.add_format({'align': 'center', 'valign': 'center', 'border': 1}) - formula_format = workbook.add_format({'bg_color': '#FECF86', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'red'}) - error_format = workbook.add_format({'bg_color': '#FFC7CE', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'black'}) # For missing data - scenario_header_format = workbook.add_format({'bold': True, 'bg_color': '#0079C2', 'border': 1, 'align': 'center', 'font_color': 'white'}) - variable_name_format = workbook.add_format({'bold': True, 'bg_color': '#DEE2E5', 'border': 1, 'align': 'left'}) - + # Formats for general data, percentages, and currency values + # General formatting + data_format = workbook.add_format({ + 'align' : 'center', + 'valign' : 'center', + 'border' : 1, + 'font_size': 10 + }) + + 
# Formatting for formulas + formula_format = workbook.add_format({ + 'bg_color' : '#FFE599', # Light yellow background + 'align' : 'center', + 'valign' : 'center', + 'border' : 1, + 'font_color': 'red', + 'font_size' : 10, + 'italic' : True # Italic to highlight it's a formula + }) + + # Formatting for errors + error_format = workbook.add_format({ + 'bg_color' : '#FFC7CE', # Light red background + 'align' : 'center', + 'valign' : 'center', + 'border' : 1, + 'font_color': 'black', + 'font_size' : 10 + }) + + # Formatting for percentages, showing as whole numbers (e.g., 9%) + percent_format = workbook.add_format({ + 'num_format': '0%', # Whole number percentage (e.g., 9%) + 'align' : 'center', + 'valign' : 'center', + 'border' : 1, + 'font_size' : 10 + }) + + # Formatting for currency values with two decimal places + currency_format = workbook.add_format({ + 'num_format': '$#,##0.00', # Currency with two decimal places + 'align' : 'center', + 'valign' : 'center', + 'border' : 1, + 'font_size' : 10 + }) + + # Formatting for formulas that are percentages + formula_percent_format = workbook.add_format({ + 'bg_color' : '#FFE599', # Light yellow background + 'num_format': '0%', # Whole number percentage + 'align' : 'center', + 'valign' : 'center', + 'border' : 1, + 'font_color': 'red', + 'font_size' : 10, + 'italic' : True + }) + + # Formatting for formulas that are currency values + formula_currency_format = workbook.add_format({ + 'bg_color' : '#FFE599', # Light yellow background + 'num_format': '$#,##0.00', # Currency with two decimal places + 'align' : 'center', + 'valign' : 'center', + 'border' : 1, + 'font_color': 'red', + 'font_size' : 10, + 'italic' : True + }) + + # Header format for the scenario column + scenario_header_format = workbook.add_format({ + 'bold' : True, + 'bg_color' : '#0079C2', # Dark blue background + 'border' : 1, + 'align' : 'center', + 'font_color': 'white', + 'font_size' : 10 + }) + + # Format for the variable names in the first column + variable_name_format = workbook.add_format({ + 'bold' : True, + 'bg_color' : '#DEE2E5', # Light gray background + 'border' : 1, + 'align' : 'left', + 'font_size': 10 + }) + worksheet.write(1, len(df.columns) + 2, "Values in red are formulas. 
Do not input anything.", formula_format) column_width = 35 @@ -1365,9 +1537,24 @@ def create_custom_table_excel(df, custom_table, calculations, output): for row_num, variable in enumerate(df.index): worksheet.write(row_num + 1, 0, variable, variable_name_format) - for row_num, row_data in enumerate(df.itertuples(index=False)): - for col_num, value in enumerate(row_data): - worksheet.write(row_num + 1, col_num + 1, "" if pd.isnull(value) or value == '-' else value, data_format) + # Use the custom table to determine format + def get_format(label): + entry = next((item for item in custom_table if item["label"] == label), None) + if entry: + if '$' in entry["label"]: + return currency_format, formula_currency_format + elif '%' in entry["label"]: + return percent_format, formula_percent_format + return data_format, formula_format + + # Writing data to cells with the appropriate format + for row_num, variable in enumerate(df.index): + cell_format, cell_formula_format = get_format(variable) + for col_num, value in enumerate(df.loc[variable]): + if pd.isnull(value) or value == '-': + worksheet.write(row_num + 1, col_num + 1, "", data_format) + else: + worksheet.write(row_num + 1, col_num + 1, value, cell_format) headers = {header: idx for idx, header in enumerate(df.index)} @@ -1376,10 +1563,12 @@ def create_custom_table_excel(df, custom_table, calculations, output): 'net_cost_value': f'{colnum_string(2)}{headers["Net Electricity Cost ($)"] + 2}' if "Net Electricity Cost ($)" in headers else None, 'ng_reduction_value': f'{colnum_string(2)}{headers["Total Fuel (MMBtu)"] + 2}' if "Total Fuel (MMBtu)" in headers else None, 'util_cost_value': f'{colnum_string(2)}{headers["Total Utility Cost ($)"] + 2}' if "Total Utility Cost ($)" in headers else None, - 'co2_reduction_value': f'{colnum_string(2)}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None - } + 'co2_reduction_value': f'{colnum_string(2)}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None, + # New placeholders added based on Example 6 and 7 calculations + 'placeholder1_value': f'{colnum_string(2)}{headers["Placeholder1"] + 2}' if "Placeholder1" in headers else None, + } - relevant_columns = [col_name for _, col_name in custom_table] + relevant_columns = [entry["label"] for entry in custom_table] relevant_calculations = [calc for calc in calculations if calc["name"] in relevant_columns] logged_messages = set() @@ -1393,7 +1582,8 @@ def create_custom_table_excel(df, custom_table, calculations, output): row_idx = headers.get(calc["name"]) if row_idx is not None: formula = calc["formula"](col_letter, bau_cells, headers) - worksheet.write_formula(row_idx + 1, col-1, formula, formula_format) + cell_format, cell_formula_format = get_format(calc["name"]) + worksheet.write_formula(row_idx + 1, col-1, formula, cell_formula_format) else: missing_entries.append(calc["name"]) else: @@ -1402,14 +1592,14 @@ def create_custom_table_excel(df, custom_table, calculations, output): row_idx = headers.get(calc["name"]) if row_idx is not None: worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) - message = f"Cannot calculate '{calc['name']}' because the required fields are missing: {', '.join(missing_keys)} in the Custom Table configuration. Update the Custom Table to include {missing_keys}. Writing 'MISSING DATA' instead." + message = f"Cannot calculate '{calc['name']}' because the required fields are missing: {', '.join(missing_keys)} in the Table configuration provided. 
Update the Table to include {missing_keys}. Writing 'MISSING DATA' instead." if message not in logged_messages: print(message) logged_messages.add(message) missing_entries.append(calc["name"]) except KeyError as e: missing_field = str(e) - message = f"Cannot calculate '{calc['name']}' because the field '{missing_field}' is missing in the Custom Table configuration. Update the Custom Table to include {missing_field}. Writing 'MISSING DATA' instead." + message = f"Cannot calculate '{calc['name']}' because the field '{missing_field}' is missing in the Table configuration provided. Update the Table to include {missing_field}. Writing 'MISSING DATA' instead." if message not in logged_messages: print(message) logged_messages.add(message) @@ -1428,207 +1618,399 @@ def create_custom_table_excel(df, custom_table, calculations, output): err.save_to_db() raise -# def create_custom_comparison_table(request): -# if request.method == 'GET': -# try: -# # Log the entire request GET parameters -# print(f"GET parameters: {request.GET}") - -# # Manually collect the run_uuid values by iterating over the keys -# run_uuids = [] -# for key in request.GET.keys(): -# if key.startswith('run_uuid['): -# run_uuids.append(request.GET[key]) - -# print(f"Handling GET request with run_uuids: {run_uuids}") - -# if not run_uuids: -# return JsonResponse({"Error": "No run_uuids provided"}, status=400) - -# # Validate each UUID -# for r_uuid in run_uuids: -# try: -# uuid.UUID(r_uuid) -# except ValueError as e: -# return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) - -# target_custom_table = ita_custom_table - -# # Process scenarios and generate the custom table -# scenarios = access_raw_data(run_uuids, request) -# if 'scenarios' not in scenarios or not scenarios['scenarios']: -# return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) - -# final_df = process_scenarios(scenarios['scenarios'], target_custom_table) -# final_df.iloc[1:, 0] = run_uuids - -# final_df_transpose = final_df.transpose() -# final_df_transpose.columns = final_df_transpose.iloc[0] -# final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - -# # Create and send the Excel file -# output = io.BytesIO() -# create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) -# output.seek(0) - -# filename = "comparison_table.xlsx" -# response = HttpResponse( -# output, -# content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' -# ) -# response['Content-Disposition'] = f'attachment; filename={filename}' - -# return response - -# except Exception as e: -# exc_type, exc_value, exc_traceback = sys.exc_info() -# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') -# err.save_to_db() -# return JsonResponse({"Error": str(err.message)}, status=500) - -# return JsonResponse({"Error": "Method not allowed"}, status=405) - -def create_custom_comparison_table(request): - if request.method == 'GET': - try: - # Log the entire request GET parameters - log.debug(f"GET parameters: {request.GET}") - - # Manually collect the run_uuid values by iterating over the keys - run_uuids = [] - for key in request.GET.keys(): - if key.startswith('run_uuid['): - run_uuids.append(request.GET[key]) - - log.debug(f"Handling GET request with run_uuids: {run_uuids}") - - if not run_uuids: - return JsonResponse({"Error": "No run_uuids provided"}, status=400) - - # Validate each UUID - for r_uuid in run_uuids: - try: 
- uuid.UUID(r_uuid) - except ValueError as e: - log.debug(f"Invalid UUID format: {r_uuid}") - return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) - - target_custom_table = ita_custom_table - - # Process scenarios and generate the custom table - scenarios = access_raw_data(run_uuids, request) - if 'scenarios' not in scenarios or not scenarios['scenarios']: - log.debug("Failed to fetch scenarios") - return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) - - final_df = process_scenarios(scenarios['scenarios'], target_custom_table) - final_df.iloc[1:, 0] = run_uuids - - final_df_transpose = final_df.transpose() - final_df_transpose.columns = final_df_transpose.iloc[0] - final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - - # Create and send the Excel file - output = io.BytesIO() - create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) - output.seek(0) - - filename = "comparison_table.xlsx" - response = HttpResponse( - output, - content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' - ) - response['Content-Disposition'] = f'attachment; filename={filename}' - - return response - - except ValueError as e: - log.debug(f"ValueError: {str(e.args[0])}") - return JsonResponse({"Error": str(e.args[0])}, status=500) - - except Exception: - exc_type, exc_value, exc_traceback = sys.exc_info() - log.debug(f"exc_type: {exc_type}; exc_value: {exc_value}; exc_traceback: {tb.format_tb(exc_traceback)}") - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() - return JsonResponse({"Error": str(err.message)}, status=500) - - return JsonResponse({"Error": "Method not allowed"}, status=405) - - # Configuration # Set up table needed along with REopt dictionaries to grab data -other_custom_table = [ - (lambda df: get_with_suffix(df, "outputs.PV.size_kw", ""), "PV Size (kW)"), - (lambda df: get_with_suffix(df, "outputs.Wind.size_kw", ""), "Wind Size (kW)"), - (lambda df: get_with_suffix(df, "outputs.CHP.size_kw", ""), "CHP Size (kW)"), - (lambda df: get_with_suffix(df, "outputs.PV.annual_energy_produced_kwh", ""), "PV Total Electricity Produced (kWh)"), - (lambda df: get_with_suffix(df, "outputs.PV.electric_to_grid_series_kw", ""), "PV Exported to Grid (kWh)"), - (lambda df: get_with_suffix(df, "outputs.PV.electric_to_load_series_kw", ""), "PV Serving Load (kWh)"), - (lambda df: get_with_suffix(df, "outputs.Financial.lifecycle_capital_costs", ""), "Gross Capital Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax", ""), "Electricity Energy Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax", ""), "Electricity Demand Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax", ""), "Utility Fixed Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_us_dollars", ""), "Federal Tax Incentive (30%)"), - (lambda df: get_with_suffix(df, "outputs.Financial.iac_grant_us_dollars", ""), "IAC Grant ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_value_us_dollars", ""), "Incentive Value ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.net_capital_cost_us_dollars", ""), "Net Capital Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.npv", ""), "NPV"), - (lambda df: get_with_suffix(df, 
"inputs.PV.federal_itc_fraction", ""), "PV Federal Tax Incentive (%)"), - (lambda df: get_with_suffix(df, "inputs.ElectricStorage.total_itc_fraction", ""), "Storage Federal Tax Incentive (%)") -] +# Example Custom Table Configuration +example_table = [ + # Example 1: Basic Key Retrieval with Data Values + { + "label": "Site Name", + "key": "site", + "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), + "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") + }, + { + "label": "Site Location", + "key": "site_lat_long", + "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", + "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" + }, + # Example 2: Concatenating Strings + { + "label": "Site Address", + "key": "site_address", + "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), + "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") + }, + + # Example 3: Calculated Value (Sum of Two Fields), this does not show up in formulas + { + "label": "Combined Renewable Size (kW)", + "key": "combined_renewable_size", + "bau_value": lambda df: 0, + "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") + safe_get(df, "outputs.Wind.size_kw") #NOTE: These calculations will not show up as in the excel calculations + }, + + # Example 4: Hardcoded Values + { + "label": "Hardcoded Values (kWh)", + "key": "hardcoded_value", + "bau_value": lambda df: 500, # BAU scenario + "scenario_value": lambda df: 1000 # Regular scenarios + }, -ita_custom_table = [ - (lambda df: get_with_suffix(df, "outputs.PV.size_kw", ""), "PV Size (kW)"), - (lambda df: get_with_suffix(df, "outputs.Wind.size_kw", ""), "Wind Size (kW)"), - (lambda df: get_with_suffix(df, "outputs.CHP.size_kw", ""), "CHP Size (kW)"), - (lambda df: get_with_suffix(df, "outputs.PV.annual_energy_produced_kwh", ""), "PV Total Electricity Produced (kWh)"), - (lambda df: get_with_suffix(df, "outputs.PV.electric_to_grid_series_kw", ""), "PV Exported to Grid (kWh)"), - (lambda df: get_with_suffix(df, "outputs.PV.electric_to_load_series_kw", ""), "PV Serving Load (kWh)"), - (lambda df: get_with_suffix(df, "outputs.Wind.annual_energy_produced_kwh", ""), "Wind Total Electricity Produced (kWh)"), - (lambda df: get_with_suffix(df, "outputs.Wind.electric_to_grid_series_kw", ""), "Wind Exported to Grid (kWh)"), - (lambda df: get_with_suffix(df, "outputs.Wind.electric_to_load_series_kw", ""), "Wind Serving Load (kWh)"), - (lambda df: get_with_suffix(df, "outputs.CHP.annual_electric_production_kwh", ""), "CHP Total Electricity Produced (kWh)"), - (lambda df: get_with_suffix(df, "outputs.CHP.electric_to_grid_series_kw", ""), "CHP Exported to Grid (kWh)"), - (lambda df: get_with_suffix(df, "outputs.CHP.electric_to_load_series_kw", ""), "CHP Serving Load (kWh)"), - (lambda df: get_with_suffix(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour", ""), "CHP Serving Thermal Load (MMBtu)"), - (lambda df: get_with_suffix(df, "outputs.ElectricUtility.annual_energy_supplied_kwh", ""), "Grid Purchased Electricity (kWh)"), - (lambda df: get_with_suffix(df, "outputs.ElectricUtility.electric_to_load_series_kw", ""), "Total Site Electricity Use (kWh)"), - (lambda df: get_with_suffix(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf", ""), "Net Purchased Electricity Reduction (%)"), - (lambda df: get_with_suffix(df, 
"outputs.ElectricTariff.year_one_energy_cost_before_tax", ""), "Electricity Energy Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax", ""), "Electricity Demand Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax", ""), "Utility Fixed Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_bill_before_tax", ""), "Purchased Electricity Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax", ""), "Electricity Export Benefit ($)"), - (lambda df: get_with_suffix(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax", ""), "Net Electricity Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau", ""), "Electricity Cost Savings ($/year)"), - (lambda df: get_with_suffix(df, "outputs.Boiler.fuel_used_mmbtu", ""), "Boiler Fuel (MMBtu)"), - (lambda df: get_with_suffix(df, "outputs.CHP.annual_fuel_consumption_mmbtu", ""), "CHP Fuel (MMBtu)"), - (lambda df: get_with_suffix(df, "outputs.ElectricUtility.total_energy_supplied_kwh", ""), "Total Fuel (MMBtu)"), - (lambda df: get_with_suffix(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau", ""), "Natural Gas Reduction (%)"), - (lambda df: get_with_suffix(df, "outputs.Boiler.annual_thermal_production_mmbtu", ""), "Boiler Thermal Production (MMBtu)"), - (lambda df: get_with_suffix(df, "outputs.CHP.annual_thermal_production_mmbtu", ""), "CHP Thermal Production (MMBtu)"), - (lambda df: get_with_suffix(df, "outputs.CHP.annual_thermal_production_mmbtu", ""), "Total Thermal Production (MMBtu)"), - (lambda df: get_with_suffix(df, "outputs.Site.heating_system_fuel_cost_us_dollars", ""), "Heating System Fuel Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.CHP.year_one_fuel_cost_before_tax", ""), "CHP Fuel Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.Site.total_fuel_cost_us_dollars", ""), "Total Fuel (NG) Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.Site.total_utility_cost_us_dollars", ""), "Total Utility Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.om_and_replacement_present_cost_after_tax", ""), "O&M Cost Increase ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.simple_payback_years", ""), "Payback Period (years)"), - (lambda df: get_with_suffix(df, "outputs.Financial.lifecycle_capital_costs", ""), "Gross Capital Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_us_dollars", ""), "Federal Tax Incentive (30%)"), - (lambda df: get_with_suffix(df, "outputs.Financial.iac_grant_us_dollars", ""), "IAC Grant ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.total_incentives_value_us_dollars", ""), "Incentive Value ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.net_capital_cost_us_dollars", ""), "Net Capital Cost ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.annual_cost_savings_us_dollars", ""), "Annual Cost Savings ($)"), - (lambda df: get_with_suffix(df, "outputs.Financial.simple_payback_years", ""), "Simple Payback (years)"), - (lambda df: get_with_suffix(df, "outputs.Site.annual_emissions_tonnes_CO2", ""), "CO2 Emissions (tonnes)"), - (lambda df: get_with_suffix(df, "outputs.Site.lifecycle_emissions_tonnes_CO2", ""), "CO2 Reduction (tonnes)"), - (lambda df: get_with_suffix(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau", ""), "CO2 (%) savings "), - (lambda df: get_with_suffix(df, 
"outputs.Financial.npv", ""), "NPV"), - (lambda df: get_with_suffix(df, "inputs.PV.federal_itc_fraction", ""), "PV Federal Tax Incentive (%)"), - (lambda df: get_with_suffix(df, "inputs.ElectricStorage.total_itc_fraction", ""), "Storage Federal Tax Incentive (%)") + # Example 5: Conditional Formatting + { + "label": "PV Size Status", + "key": "pv_size_status", + "bau_value": lambda df: 0, + "scenario_value": lambda df: "Above Threshold" if safe_get(df, "outputs.PV.size_kw") > 2500 else "Below Threshold" + }, + #Example 6 and 7: First define any data that might need to be referenced, Here I've defined two placeholders + # Define Placeholder1 + { + "label": "Placeholder1", + "key": "placeholder1", + "bau_value": lambda df: 100, # BAU value + "scenario_value": lambda df: 200 # Scenario value + }, + # Define Placeholder2 + { + "label": "Placeholder2", + "key": "placeholder2", + "bau_value": lambda df: 50, # BAU value + "scenario_value": lambda df: 100 # Scenario value + }, + # Example 6: Calculation Without Reference to BAU + { + "label": "Placeholder Calculation Without BAU Reference", + "key": "placeholder_calculation_without_bau", + "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel + "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel + }, + # Example 7: Calculation With Reference to BAU + { + "label": "Placeholder Calculation With BAU Reference", + "key": "placeholder_calculation_with_bau", + "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel + "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel + }] + +# TASC/Single Site Configuration +single_site_custom_table = [ + { + "label": "Site Name", + "key": "site", + "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), + "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") + }, + { + "label": "Site Location", + "key": "site_lat_long", + "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", + "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" + }, + { + "label": "Site Address", + "key": "site_address", + "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), + "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") + }, + { + "label": "PV Size (kW)", + "key": "pv_size", + "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") + }, + { + "label": "Wind Size (kW)", + "key": "wind_size", + "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") + }, + { + "label": "CHP Size (kW)", + "key": "chp_size", + "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") + }, + { + "label": "PV Total Electricity Produced (kWh)", + "key": "pv_total_electricity_produced", + "bau_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh") + }, + { + "label": "PV Exported to Grid (kWh)", + "key": "pv_exported_to_grid", + "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw") + }, + { + "label": "PV Serving Load (kWh)", 
+ "key": "pv_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw") + }, + { + "label": "Wind Total Electricity Produced (kWh)", + "key": "wind_total_electricity_produced", + "bau_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh") + }, + { + "label": "Wind Exported to Grid (kWh)", + "key": "wind_exported_to_grid", + "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw") + }, + { + "label": "Wind Serving Load (kWh)", + "key": "wind_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw") + }, + { + "label": "CHP Total Electricity Produced (kWh)", + "key": "chp_total_electricity_produced", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh") + }, + { + "label": "CHP Exported to Grid (kWh)", + "key": "chp_exported_to_grid", + "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw") + }, + { + "label": "CHP Serving Load (kWh)", + "key": "chp_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw") + }, + { + "label": "CHP Serving Thermal Load (MMBtu)", + "key": "chp_serving_thermal_load", + "bau_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour") + }, + { + "label": "Grid Purchased Electricity (kWh)", + "key": "grid_purchased_electricity", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") + }, + { + "label": "Total Site Electricity Use (kWh)", + "key": "total_site_electricity_use", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw") + }, + { + "label": "Net Purchased Electricity Reduction (%)", + "key": "net_purchased_electricity_reduction", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf") + }, + { + "label": "Electricity Energy Cost ($)", + "key": "electricity_energy_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax") + }, + { + "label": "Electricity Demand Cost ($)", + "key": "electricity_demand_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax") + }, + { + 
"label": "Utility Fixed Cost ($)", + "key": "utility_fixed_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax") + }, + { + "label": "Purchased Electricity Cost ($)", + "key": "purchased_electricity_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax") + }, + { + "label": "Electricity Export Benefit ($)", + "key": "electricity_export_benefit", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax") + }, + { + "label": "Net Electricity Cost ($)", + "key": "net_electricity_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax") + }, + { + "label": "Electricity Cost Savings ($/year)", + "key": "electricity_cost_savings", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau") + }, + { + "label": "Boiler Fuel (MMBtu)", + "key": "boiler_fuel", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu") + }, + { + "label": "CHP Fuel (MMBtu)", + "key": "chp_fuel", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu") + }, + { + "label": "Total Fuel (MMBtu)", + "key": "total_fuel", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh") + }, + { + "label": "Natural Gas Reduction (%)", + "key": "natural_gas_reduction", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau") + }, + { + "label": "Boiler Thermal Production (MMBtu)", + "key": "boiler_thermal_production", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") + }, + { + "label": "CHP Thermal Production (MMBtu)", + "key": "chp_thermal_production", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") + }, + { + "label": "Total Thermal Production (MMBtu)", + "key": "total_thermal_production", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") + }, + { + "label": "Heating System Fuel Cost ($)", + "key": "heating_system_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, 
"outputs.Site.heating_system_fuel_cost_us_dollars") + }, + { + "label": "CHP Fuel Cost ($)", + "key": "chp_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") + }, + { + "label": "Total Fuel (NG) Cost ($)", + "key": "total_fuel_ng_cost", + "bau_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars") + }, + { + "label": "Total Utility Cost ($)", + "key": "total_utility_cost", + "bau_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars") + }, + { + "label": "O&M Cost Increase ($)", + "key": "om_cost_increase", + "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") + }, + { + "label": "Payback Period (years)", + "key": "payback_period", + "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") + }, + { + "label": "Gross Capital Cost ($)", + "key": "gross_capital_cost", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") + }, + { + "label": "Federal Tax Incentive (30%)", + "key": "federal_tax_incentive", + "bau_value": lambda df: 0.3, + "scenario_value": lambda df: 0.3 + }, + { + "label": "Additional Grant ($)", + "key": "additional_grant", + "bau_value": lambda df: 0, + "scenario_value": lambda df: 0 + }, + { + "label": "Incentive Value ($)", + "key": "incentive_value", + "bau_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars") + }, + { + "label": "Net Capital Cost ($)", + "key": "net_capital_cost", + "bau_value": lambda df: safe_get(df, "outputs.Financial.net_capital_cost_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.net_capital_cost_us_dollars") + }, + { + "label": "Annual Cost Savings ($)", + "key": "annual_cost_savings", + "bau_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars") + }, + { + "label": "Simple Payback (years)", + "key": "simple_payback", + "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") + }, + { + "label": "CO2 Emissions (tonnes)", + "key": "co2_emissions", + "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") + }, + { + "label": "CO2 Reduction (tonnes)", + "key": "co2_reduction", + "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") + }, + { + "label": "CO2 (%) savings", + "key": "co2_savings_percentage", + "bau_value": lambda df: 0, + "scenario_value": 
lambda df: 0 + }, + { + "label": "NPV ($)", + "key": "npv", + "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") + }, + { + "label": "PV Federal Tax Incentive (%)", + "key": "pv_federal_tax_incentive", + "bau_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction_bau"), + "scenario_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction") + }, + { + "label": "Storage Federal Tax Incentive (%)", + "key": "storage_federal_tax_incentive", + "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), + "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") + } ] # Configuration for calculations @@ -1675,7 +2057,7 @@ def create_custom_comparison_table(request): }, { "name": "Incentive Value ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Federal Tax Incentive (30%)"] + 2}*{col}{headers["Gross Capital Cost ($)"] + 2}+{col}{headers["IAC Grant ($)"] + 2}' + "formula": lambda col, bau, headers: f'=({col}{headers["Federal Tax Incentive (30%)"] + 2}*{col}{headers["Gross Capital Cost ($)"] + 2})+{col}{headers["Additional Grant ($)"] + 2}' }, { "name": "Net Capital Cost ($)", @@ -1694,8 +2076,22 @@ def create_custom_comparison_table(request): "formula": lambda col, bau, headers: f'={bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2}' }, { - "name": "CO2 (%) savings ", + "name": "CO2 (%) savings", "formula": lambda col, bau, headers: f'=({bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2})/{bau["co2_reduction_value"]}' + }, + #Example Calculations + # Calculation Without Reference to bau_cells + { + "name": "Placeholder Calculation Without BAU Reference", + "formula": lambda col, bau, headers: f'={col}{headers["Placeholder1"] + 2}+{col}{headers["Placeholder2"] + 2}' + # This formula adds Placeholder1 and Placeholder2 values from the scenario. + }, + + # Calculation With Reference to bau_cells + { + "name": "Placeholder Calculation With BAU Reference", + "formula": lambda col, bau, headers: f'=({bau["placeholder1_value"]}-{col}{headers["Placeholder2"] + 2})/{bau["placeholder1_value"]}' + # This formula calculates the percentage change of Placeholder2 using Placeholder1's BAU value as the reference. } ] From bd98fa28e89cde70354d04cd850dde9b636f2f88 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 28 Aug 2024 15:55:39 -0600 Subject: [PATCH 17/44] default set to single site config and bau values have a 10% tolerance --- reoptjl/custom_table_helpers.py | 31 +++++++++++++++++++++++-------- reoptjl/views.py | 2 +- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/reoptjl/custom_table_helpers.py b/reoptjl/custom_table_helpers.py index efb6f3d53..9dfb9828d 100644 --- a/reoptjl/custom_table_helpers.py +++ b/reoptjl/custom_table_helpers.py @@ -43,8 +43,15 @@ def colnum_string(n): def safe_get(df, key, default=0): return df.get(key, default) -def check_bau_consistency(scenarios): - """Check the consistency of BAU values across all scenarios.""" +def check_bau_consistency(scenarios, tolerance_percentage=0.1): + """ + Check the consistency of BAU values across all scenarios with a percentage-based tolerance. + + Args: + scenarios (list): List of scenario dictionaries to check. + tolerance_percentage (float): Tolerance percentage for allowable differences. + For example, 0.1 for 0.1% tolerance. 
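A minimal sketch of the percentage-based tolerance described above, assuming tolerance_percentage is given in percent (0.1 means 0.1%), with the zero-reference case handled the same way as in the helper:

def within_tolerance(first_value, other_value, tolerance_percentage=0.1):
    if first_value != 0:
        tolerance = abs(first_value) * (tolerance_percentage / 100)
    else:
        # With a zero reference value, fall back to the raw percentage as an absolute bound.
        tolerance = tolerance_percentage
    return abs(first_value - other_value) <= tolerance

print(within_tolerance(100000, 100050))  # True: a 0.05% difference is inside a 0.1% tolerance
print(within_tolerance(100000, 100500))  # False: a 0.5% difference is outside it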
+ """ bau_values_list = [] all_bau_keys = set() @@ -62,14 +69,22 @@ def check_bau_consistency(scenarios): # Perform consistency check across all `_bau` values first_bau_values = bau_values_list[0] for idx, other_bau_values in enumerate(bau_values_list[1:], start=1): - differences = { - key: (first_bau_values[key], other_bau_values[key]) - for key in all_bau_keys - if first_bau_values.get(key) != other_bau_values.get(key) - } + differences = {} + + for key in all_bau_keys: + first_value = first_bau_values.get(key, 0) + other_value = other_bau_values.get(key, 0) + if first_value != 0: # Avoid division by zero + difference = abs(first_value - other_value) + tolerance = abs(first_value) * (tolerance_percentage / 100) + if difference > tolerance: + differences[key] = (first_value, other_value) + else: # Handle the case where the first value is 0 + if abs(other_value) > tolerance: + differences[key] = (first_value, other_value) if differences: diff_message = "\n".join( [f" - {key}: {first_bau_values[key]} vs {other_bau_values[key]}" for key in differences] ) - raise ValueError(f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1}:\n{diff_message}") + raise ValueError(f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1} (tolerance: {tolerance_percentage}%):\n{diff_message}") diff --git a/reoptjl/views.py b/reoptjl/views.py index 6e69619f0..fd5fb0023 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1358,7 +1358,7 @@ def create_custom_comparison_table(request): log.debug(f"GET parameters: {request.GET}") # Get the table configuration name from the query parameters - table_config_name = request.GET.get('table_config_name', 'example_table_config') # Default to 'example_table_config' if not provided + table_config_name = request.GET.get('table_config_name', 'single_site_custom_table') # Default to 'example_table_config' if not provided log.debug(f"Using table configuration: {table_config_name}") # Manually collect the run_uuid values by iterating over the keys From 4a91f86e6cd19e27d668f01f55fc5c52a725e1dc Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 28 Aug 2024 16:31:50 -0600 Subject: [PATCH 18/44] default set to single site config and bau values have a 10% tolerance --- reoptjl/views.py | 53 +++++++++++------------------------------------- 1 file changed, 12 insertions(+), 41 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index fd5fb0023..b1a4b141f 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1220,7 +1220,10 @@ def access_raw_data(run_uuids, request): log.error(f"ValueError in access_raw_data: {e}") raise except Exception: - log.error(f"Error in access_raw_data: {tb.format_exc()}") + exc_type, exc_value, exc_traceback = sys.exc_info() + log.error(f"Error in access_raw_data: {exc_value}, traceback: {tb.format_tb(exc_traceback)}") + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='access_raw_data') + err.save_to_db() raise def process_raw_data(request, run_uuid): @@ -1270,50 +1273,18 @@ def get_bau_values(scenarios, config): bau_values = {entry["label"]: None for entry in config} log.debug(f"Initialized BAU values: {bau_values}") - # Extract and compare BAU values across all scenarios - bau_values_list = [] + # Extract BAU values from the first scenario + df_gen = flatten_dict(scenarios[0]['full_data']) + log.debug(f"Flattened data for scenario {scenarios[0]['run_uuid']}: {df_gen}") + + for entry in config: + bau_func = entry.get("bau_value") + value = 
bau_func(df_gen) if bau_func else df_gen.get(f"{entry['key']}_bau") + bau_values[entry["label"]] = value - for scenario in scenarios: - df_gen = flatten_dict(scenario['full_data']) - log.debug(f"Flattened data for scenario {scenario['run_uuid']}: {df_gen}") - - current_bau_values = {} - for entry in config: - bau_func = entry.get("bau_value") - value = bau_func(df_gen) if bau_func else df_gen.get(f"{entry['key']}_bau") - current_bau_values[entry["label"]] = value - - bau_values_list.append(current_bau_values) - - # Check consistency of BAU values across all scenarios - first_bau_values = bau_values_list[0] - for idx, other_bau_values in enumerate(bau_values_list[1:], start=1): - differences = { - key: (first_bau_values[key], other_bau_values[key]) - for key in first_bau_values - if first_bau_values[key] != other_bau_values[key] - } - - if differences: - # Log each difference in a user-friendly way - diff_message = "\n".join( - [f" - {key}: {first_bau_values[key]} vs {other_bau_values[key]}" for key in differences] - ) - log.warning( - f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1}:\n{diff_message}" - ) - raise ValueError( - "Inconsistent BAU values across scenarios. Please check the differences in the logs." - ) - - # If consistent, use the first set of BAU values - bau_values.update(first_bau_values) log.debug(f"Final consolidated BAU values: {bau_values}") return bau_values - except ValueError as e: - log.error(f"ValueError in get_bau_values: {e}") - raise except Exception: log.error(f"Error in get_bau_values: {tb.format_exc()}") raise From 45e24dc2af973c6dd77fb7cd1182931854ee07a8 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 28 Aug 2024 16:37:06 -0600 Subject: [PATCH 19/44] simplified message for bau inconsistencies --- reoptjl/custom_table_helpers.py | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/reoptjl/custom_table_helpers.py b/reoptjl/custom_table_helpers.py index 9dfb9828d..00e4f0d10 100644 --- a/reoptjl/custom_table_helpers.py +++ b/reoptjl/custom_table_helpers.py @@ -44,14 +44,6 @@ def safe_get(df, key, default=0): return df.get(key, default) def check_bau_consistency(scenarios, tolerance_percentage=0.1): - """ - Check the consistency of BAU values across all scenarios with a percentage-based tolerance. - - Args: - scenarios (list): List of scenario dictionaries to check. - tolerance_percentage (float): Tolerance percentage for allowable differences. - For example, 0.1 for 0.1% tolerance. - """ bau_values_list = [] all_bau_keys = set() @@ -69,8 +61,6 @@ def check_bau_consistency(scenarios, tolerance_percentage=0.1): # Perform consistency check across all `_bau` values first_bau_values = bau_values_list[0] for idx, other_bau_values in enumerate(bau_values_list[1:], start=1): - differences = {} - for key in all_bau_keys: first_value = first_bau_values.get(key, 0) other_value = other_bau_values.get(key, 0) @@ -78,13 +68,7 @@ def check_bau_consistency(scenarios, tolerance_percentage=0.1): difference = abs(first_value - other_value) tolerance = abs(first_value) * (tolerance_percentage / 100) if difference > tolerance: - differences[key] = (first_value, other_value) + raise ValueError(f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1}. 
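To illustrate how a table entry reads the flattened results, here is a small self-contained sketch: the dotted keys below are the shape flatten_dict produces from the nested REopt payload, safe_get_local mirrors the safe_get helper, and the values are made up for the example:

flat = {
    "outputs.PV.size_kw": 250.0,
    "outputs.PV.size_kw_bau": 0.0,
    "inputs.Meta.description": "Example site",
}

def safe_get_local(df, key, default=0):
    # Missing keys fall back to a default instead of raising, like safe_get.
    return df.get(key, default)

entry = {
    "label": "PV Size (kW)",
    "key": "pv_size",
    "bau_value": lambda df: safe_get_local(df, "outputs.PV.size_kw_bau"),
    "scenario_value": lambda df: safe_get_local(df, "outputs.PV.size_kw"),
}
print(entry["bau_value"](flat))                       # 0.0
print(entry["scenario_value"](flat))                  # 250.0
print(safe_get_local(flat, "outputs.Wind.size_kw"))   # 0, key absent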
Please check the BAU values for consistency.") else: # Handle the case where the first value is 0 if abs(other_value) > tolerance: - differences[key] = (first_value, other_value) - - if differences: - diff_message = "\n".join( - [f" - {key}: {first_bau_values[key]} vs {other_bau_values[key]}" for key in differences] - ) - raise ValueError(f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1} (tolerance: {tolerance_percentage}%):\n{diff_message}") + raise ValueError(f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1}. Please check the BAU values for consistency.") From 814f20b14f99017bbb7fe69ea65a1e50514a0aed Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 28 Aug 2024 16:44:12 -0600 Subject: [PATCH 20/44] check bau consistencies checks financial bau outputs --- reoptjl/custom_table_helpers.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/reoptjl/custom_table_helpers.py b/reoptjl/custom_table_helpers.py index 00e4f0d10..5d776d03f 100644 --- a/reoptjl/custom_table_helpers.py +++ b/reoptjl/custom_table_helpers.py @@ -44,6 +44,14 @@ def safe_get(df, key, default=0): return df.get(key, default) def check_bau_consistency(scenarios, tolerance_percentage=0.1): + """ + Check the consistency of BAU values within the 'outputs.Financial' section across all scenarios with a percentage-based tolerance. + + Args: + scenarios (list): List of scenario dictionaries to check. + tolerance_percentage (float): Tolerance percentage for allowable differences. + For example, 0.1 for 0.1% tolerance. + """ bau_values_list = [] all_bau_keys = set() @@ -52,13 +60,14 @@ def check_bau_consistency(scenarios, tolerance_percentage=0.1): current_bau_values = {} for key, value in df_gen.items(): - if key.endswith('_bau'): + # Focus only on keys related to 'outputs.Financial' and ending with '_bau' + if key.startswith('outputs.Financial') and key.endswith('_bau'): current_bau_values[key] = value all_bau_keys.add(key) bau_values_list.append(current_bau_values) - # Perform consistency check across all `_bau` values + # Perform consistency check across all `_bau` values within 'outputs.Financial' first_bau_values = bau_values_list[0] for idx, other_bau_values in enumerate(bau_values_list[1:], start=1): for key in all_bau_keys: From d54e152760129ce46c1daf7dcfe6754826d783df Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 28 Aug 2024 17:38:54 -0600 Subject: [PATCH 21/44] updated response for error catching --- reoptjl/views.py | 129 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 95 insertions(+), 34 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index b1a4b141f..7d9b16f11 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1322,46 +1322,111 @@ def process_scenarios(scenarios, reopt_data_config): log.error(f"Error in process_scenarios: {tb.format_exc()}") raise +# def create_custom_comparison_table(request): +# if request.method == 'GET': +# try: +# log.debug(f"GET parameters: {request.GET}") + +# table_config_name = request.GET.get('table_config_name', 'single_site_custom_table') +# log.debug(f"Using table configuration: {table_config_name}") + +# run_uuids = [request.GET[key] for key in request.GET.keys() if key.startswith('run_uuid[')] +# log.debug(f"Handling GET request with run_uuids: {run_uuids}") + +# if not run_uuids: +# return JsonResponse({"Error": "No run_uuids provided. 
Please include at least one run_uuid in the request."}, status=400) + +# for r_uuid in run_uuids: +# try: +# uuid.UUID(r_uuid) +# except ValueError: +# log.debug(f"Invalid UUID format: {r_uuid}") +# return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}. Ensure that each run_uuid is a valid UUID."}, status=400) + +# if table_config_name in globals(): +# target_custom_table = globals()[table_config_name] +# else: +# return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. Please provide a valid configuration name."}, status=400) + +# scenarios = access_raw_data(run_uuids, request) +# if 'scenarios' not in scenarios or not scenarios['scenarios']: +# log.debug("Failed to fetch scenarios") +# return JsonResponse({'Error': 'Failed to fetch scenarios. The provided run_uuids might be incorrect or not associated with any data.'}, content_type='application/json', status=404) + +# final_df = process_scenarios(scenarios['scenarios'], target_custom_table) +# log.debug(f"Final DataFrame (before transpose):\n{final_df}") + +# final_df.iloc[1:, 0] = run_uuids + +# final_df_transpose = final_df.transpose() +# final_df_transpose.columns = final_df_transpose.iloc[0] +# final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + +# output = io.BytesIO() +# create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) +# output.seek(0) + +# filename = "comparison_table.xlsx" +# response = HttpResponse( +# output, +# content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' +# ) +# response['Content-Disposition'] = f'attachment; filename={filename}' + +# return response + +# except ValueError as e: +# log.debug(f"ValueError: {str(e)}") +# return JsonResponse({"Error": f"A ValueError occurred: {str(e)}. Please check the input values and try again."}, status=500) + +# except Exception as e: +# exc_type, exc_value, exc_traceback = sys.exc_info() +# log.debug(f"exc_type: {exc_type}; exc_value: {exc_value}; exc_traceback: {tb.format_tb(exc_traceback)}") +# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') +# err.save_to_db() +# return JsonResponse({"Error": f"An unexpected error occurred while creating the comparison table. Please try again later or contact support if the issue persists. Error details: {str(e)}"}, status=500) + +# return JsonResponse({"Error": "Method not allowed. This endpoint only supports GET requests."}, status=405) + def create_custom_comparison_table(request): - if request.method == 'GET': - try: - # Log the entire request GET parameters + """ + Create a custom comparison table based on user-provided run UUIDs. 
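From the client side, a hedged usage sketch with the requests library; the host and path below are placeholders, since the exact URL route for this view is not shown here, and the UUIDs are arbitrary:

import requests

url = "http://localhost:8000/v3/job/comparison_table/"  # hypothetical route
params = {
    "table_config_name": "single_site_custom_table",     # default when omitted
    "run_uuid[0]": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
    "run_uuid[1]": "7c9e6679-7425-40de-944b-e07fc1f90ae7",
}
resp = requests.get(url, params=params)
if resp.ok:
    # On success the view returns an .xlsx attachment.
    with open("comparison_table.xlsx", "wb") as f:
        f.write(resp.content)
else:
    print(resp.status_code, resp.json().get("Error"))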
+ """ + try: + if request.method == 'GET': log.debug(f"GET parameters: {request.GET}") - # Get the table configuration name from the query parameters - table_config_name = request.GET.get('table_config_name', 'single_site_custom_table') # Default to 'example_table_config' if not provided + table_config_name = request.GET.get('table_config_name', 'single_site_custom_table') log.debug(f"Using table configuration: {table_config_name}") - # Manually collect the run_uuid values by iterating over the keys - run_uuids = [] - for key in request.GET.keys(): - if key.startswith('run_uuid['): - run_uuids.append(request.GET[key]) - + run_uuids = [request.GET[key] for key in request.GET.keys() if key.startswith('run_uuid[')] log.debug(f"Handling GET request with run_uuids: {run_uuids}") if not run_uuids: - return JsonResponse({"Error": "No run_uuids provided"}, status=400) + return JsonResponse({"Error": "No run_uuids provided. Please include at least one run_uuid in the request."}, status=400) - # Validate each UUID for r_uuid in run_uuids: try: uuid.UUID(r_uuid) except ValueError as e: - log.debug(f"Invalid UUID format: {r_uuid}") - return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}"}, status=400) + if e.args[0] == "badly formed hexadecimal UUID string": + resp = {"Error": f"Invalid UUID format: {r_uuid}. Ensure that each run_uuid is a valid UUID."} + return JsonResponse(resp, status=400) + else: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value.args[0], tb.format_tb(exc_traceback), task='create_custom_comparison_table', run_uuid=r_uuid) + err.save_to_db() + return JsonResponse({"Error": str(err.args[0])}, status=400) - # Dynamically select the table configuration if table_config_name in globals(): target_custom_table = globals()[table_config_name] else: - return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}"}, status=400) + return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. Please provide a valid configuration name."}, status=400) - # Process scenarios and generate the custom table scenarios = access_raw_data(run_uuids, request) if 'scenarios' not in scenarios or not scenarios['scenarios']: - log.debug("Failed to fetch scenarios") - return JsonResponse({'Error': 'Failed to fetch scenarios'}, content_type='application/json', status=404) + response = JsonResponse({'Error': 'Failed to fetch scenarios. 
The provided run_uuids might be incorrect or not associated with any data.'}, content_type='application/json', status=404) + return response final_df = process_scenarios(scenarios['scenarios'], target_custom_table) log.debug(f"Final DataFrame (before transpose):\n{final_df}") @@ -1372,7 +1437,6 @@ def create_custom_comparison_table(request): final_df_transpose.columns = final_df_transpose.iloc[0] final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - # Create and send the Excel file output = io.BytesIO() create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) output.seek(0) @@ -1386,18 +1450,15 @@ def create_custom_comparison_table(request): return response - except ValueError as e: - log.debug(f"ValueError: {str(e.args[0])}") - return JsonResponse({"Error": str(e.args[0])}, status=500) - - except Exception: - exc_type, exc_value, exc_traceback = sys.exc_info() - log.debug(f"exc_type: {exc_type}; exc_value: {exc_value}; exc_traceback: {tb.format_tb(exc_traceback)}") - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() - return JsonResponse({"Error": str(err.message)}, status=500) + else: + return JsonResponse({"Error": "Method not allowed. This endpoint only supports GET requests."}, status=405) - return JsonResponse({"Error": "Method not allowed"}, status=405) + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = UnexpectedError(exc_type, exc_value.args[0], tb.format_tb(exc_traceback), task='create_custom_comparison_table') + err.save_to_db() + resp = {"Error": err.message} + return JsonResponse(resp, status=500) def create_custom_table_excel(df, custom_table, calculations, output): try: @@ -1673,7 +1734,7 @@ def get_format(label): { "label": "Site Name", "key": "site", - "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), + "bau_value": lambda df: "", "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") }, { @@ -1715,7 +1776,7 @@ def get_format(label): { "label": "PV Exported to Grid (kWh)", "key": "pv_exported_to_grid", - "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw_bau"), + "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw"), "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw") }, { From b6362dbe1d17b06cd311cbd7f9b245d57e83c44b Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 28 Aug 2024 17:39:16 -0600 Subject: [PATCH 22/44] updated response for catching errors --- reoptjl/views.py | 66 ------------------------------------------------ 1 file changed, 66 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 7d9b16f11..f7d454bab 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1322,72 +1322,6 @@ def process_scenarios(scenarios, reopt_data_config): log.error(f"Error in process_scenarios: {tb.format_exc()}") raise -# def create_custom_comparison_table(request): -# if request.method == 'GET': -# try: -# log.debug(f"GET parameters: {request.GET}") - -# table_config_name = request.GET.get('table_config_name', 'single_site_custom_table') -# log.debug(f"Using table configuration: {table_config_name}") - -# run_uuids = [request.GET[key] for key in request.GET.keys() if key.startswith('run_uuid[')] -# log.debug(f"Handling GET request with run_uuids: {run_uuids}") - -# if not run_uuids: -# return JsonResponse({"Error": "No 
run_uuids provided. Please include at least one run_uuid in the request."}, status=400) - -# for r_uuid in run_uuids: -# try: -# uuid.UUID(r_uuid) -# except ValueError: -# log.debug(f"Invalid UUID format: {r_uuid}") -# return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}. Ensure that each run_uuid is a valid UUID."}, status=400) - -# if table_config_name in globals(): -# target_custom_table = globals()[table_config_name] -# else: -# return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. Please provide a valid configuration name."}, status=400) - -# scenarios = access_raw_data(run_uuids, request) -# if 'scenarios' not in scenarios or not scenarios['scenarios']: -# log.debug("Failed to fetch scenarios") -# return JsonResponse({'Error': 'Failed to fetch scenarios. The provided run_uuids might be incorrect or not associated with any data.'}, content_type='application/json', status=404) - -# final_df = process_scenarios(scenarios['scenarios'], target_custom_table) -# log.debug(f"Final DataFrame (before transpose):\n{final_df}") - -# final_df.iloc[1:, 0] = run_uuids - -# final_df_transpose = final_df.transpose() -# final_df_transpose.columns = final_df_transpose.iloc[0] -# final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - -# output = io.BytesIO() -# create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) -# output.seek(0) - -# filename = "comparison_table.xlsx" -# response = HttpResponse( -# output, -# content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' -# ) -# response['Content-Disposition'] = f'attachment; filename={filename}' - -# return response - -# except ValueError as e: -# log.debug(f"ValueError: {str(e)}") -# return JsonResponse({"Error": f"A ValueError occurred: {str(e)}. Please check the input values and try again."}, status=500) - -# except Exception as e: -# exc_type, exc_value, exc_traceback = sys.exc_info() -# log.debug(f"exc_type: {exc_type}; exc_value: {exc_value}; exc_traceback: {tb.format_tb(exc_traceback)}") -# err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') -# err.save_to_db() -# return JsonResponse({"Error": f"An unexpected error occurred while creating the comparison table. Please try again later or contact support if the issue persists. Error details: {str(e)}"}, status=500) - -# return JsonResponse({"Error": "Method not allowed. This endpoint only supports GET requests."}, status=405) - def create_custom_comparison_table(request): """ Create a custom comparison table based on user-provided run UUIDs. From 757afcb9164073d4fb124fa74adbbb0f71686bd9 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 28 Aug 2024 18:40:32 -0600 Subject: [PATCH 23/44] updated error catching --- reoptjl/views.py | 45 +++++++++++++++++++-------------------------- 1 file changed, 19 insertions(+), 26 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index f7d454bab..635e477f8 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1323,11 +1323,8 @@ def process_scenarios(scenarios, reopt_data_config): raise def create_custom_comparison_table(request): - """ - Create a custom comparison table based on user-provided run UUIDs. 
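As a side note on the reshaping step used before the Excel export (labels become rows, scenarios become columns), a minimal pandas sketch with made-up values:

import pandas as pd

final_df = pd.DataFrame({
    "Scenario": ["BAU", "run-uuid-1", "run-uuid-2"],
    "PV Size (kW)": [0.0, 250.0, 400.0],
    "NPV ($)": ["", 125000.0, 210000.0],
})

# Transpose, then promote the first row (the scenario names) to the column header.
t = final_df.transpose()
t.columns = t.iloc[0]
t = t.drop(t.index[0])
# t now has one row per label ("PV Size (kW)", "NPV ($)") and one column per scenario.
print(t)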
- """ - try: - if request.method == 'GET': + if request.method == 'GET': + try: log.debug(f"GET parameters: {request.GET}") table_config_name = request.GET.get('table_config_name', 'single_site_custom_table') @@ -1342,15 +1339,9 @@ def create_custom_comparison_table(request): for r_uuid in run_uuids: try: uuid.UUID(r_uuid) - except ValueError as e: - if e.args[0] == "badly formed hexadecimal UUID string": - resp = {"Error": f"Invalid UUID format: {r_uuid}. Ensure that each run_uuid is a valid UUID."} - return JsonResponse(resp, status=400) - else: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value.args[0], tb.format_tb(exc_traceback), task='create_custom_comparison_table', run_uuid=r_uuid) - err.save_to_db() - return JsonResponse({"Error": str(err.args[0])}, status=400) + except ValueError: + log.debug(f"Invalid UUID format: {r_uuid}") + return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}. Ensure that each run_uuid is a valid UUID."}, status=400) if table_config_name in globals(): target_custom_table = globals()[table_config_name] @@ -1359,11 +1350,10 @@ def create_custom_comparison_table(request): scenarios = access_raw_data(run_uuids, request) if 'scenarios' not in scenarios or not scenarios['scenarios']: - response = JsonResponse({'Error': 'Failed to fetch scenarios. The provided run_uuids might be incorrect or not associated with any data.'}, content_type='application/json', status=404) - return response + log.debug("Failed to fetch scenarios") + return JsonResponse({'Error': 'Failed to fetch scenarios. The provided run_uuids contains inconsistent BAU data. This should be used for scenarios with the same site inputs'}, content_type='application/json', status=404) final_df = process_scenarios(scenarios['scenarios'], target_custom_table) - log.debug(f"Final DataFrame (before transpose):\n{final_df}") final_df.iloc[1:, 0] = run_uuids @@ -1384,15 +1374,18 @@ def create_custom_comparison_table(request): return response - else: - return JsonResponse({"Error": "Method not allowed. This endpoint only supports GET requests."}, status=405) + except ValueError as e: + log.debug(f"ValueError: {str(e)}") + return JsonResponse({"Error": f"A ValueError occurred: {str(e)}. Please check the input values and try again."}, status=500) - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value.args[0], tb.format_tb(exc_traceback), task='create_custom_comparison_table') - err.save_to_db() - resp = {"Error": err.message} - return JsonResponse(resp, status=500) + except Exception as e: + exc_type, exc_value, exc_traceback = sys.exc_info() + log.debug(f"exc_type: {exc_type}; exc_value: {exc_value}; exc_traceback: {tb.format_tb(exc_traceback)}") + err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + err.save_to_db() + return JsonResponse({"Error": f"An unexpected error occurred while creating the comparison table. Please try again later or contact support if the issue persists. Error details: {str(e)}"}, status=500) + + return JsonResponse({"Error": "Method not allowed. 
This endpoint only supports GET requests."}, status=405) def create_custom_table_excel(df, custom_table, calculations, output): try: @@ -1593,7 +1586,7 @@ def get_format(label): { "label": "Site Name", "key": "site", - "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), + "bau_value": lambda df: "", "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") }, { From 4d01d40c10214de59572de22ff2d453a88b5657c Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 28 Aug 2024 19:02:00 -0600 Subject: [PATCH 24/44] fixed bau check error --- reoptjl/custom_table_helpers.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/reoptjl/custom_table_helpers.py b/reoptjl/custom_table_helpers.py index 5d776d03f..c36db182a 100644 --- a/reoptjl/custom_table_helpers.py +++ b/reoptjl/custom_table_helpers.py @@ -73,9 +73,12 @@ def check_bau_consistency(scenarios, tolerance_percentage=0.1): for key in all_bau_keys: first_value = first_bau_values.get(key, 0) other_value = other_bau_values.get(key, 0) + + # Assign a default tolerance value before performing checks + tolerance = abs(first_value) * (tolerance_percentage / 100) if first_value != 0 else tolerance_percentage + if first_value != 0: # Avoid division by zero difference = abs(first_value - other_value) - tolerance = abs(first_value) * (tolerance_percentage / 100) if difference > tolerance: raise ValueError(f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1}. Please check the BAU values for consistency.") else: # Handle the case where the first value is 0 From 97bb30102d3f80583df98504e5cd802e2d43cd4b Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Thu, 29 Aug 2024 09:57:54 -0600 Subject: [PATCH 25/44] updated table formatting for better visuals --- reoptjl/views.py | 208 +++++++++++++++++++---------------------------- 1 file changed, 82 insertions(+), 126 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 635e477f8..e39dedd74 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1376,7 +1376,7 @@ def create_custom_comparison_table(request): except ValueError as e: log.debug(f"ValueError: {str(e)}") - return JsonResponse({"Error": f"A ValueError occurred: {str(e)}. 
Please check the input values and try again."}, status=500) + return JsonResponse({"Error": f"A ValueError occurred: {str(e)} Please check the input values and try again."}, status=500) except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() @@ -1392,129 +1392,85 @@ def create_custom_table_excel(df, custom_table, calculations, output): workbook = xlsxwriter.Workbook(output, {'in_memory': True}) worksheet = workbook.add_worksheet('Custom Table') - # Formats for general data, percentages, and currency values - # General formatting - data_format = workbook.add_format({ - 'align' : 'center', - 'valign' : 'center', - 'border' : 1, - 'font_size': 10 - }) - - # Formatting for formulas - formula_format = workbook.add_format({ - 'bg_color' : '#FFE599', # Light yellow background - 'align' : 'center', - 'valign' : 'center', - 'border' : 1, - 'font_color': 'red', - 'font_size' : 10, - 'italic' : True # Italic to highlight it's a formula - }) - - # Formatting for errors - error_format = workbook.add_format({ - 'bg_color' : '#FFC7CE', # Light red background - 'align' : 'center', - 'valign' : 'center', - 'border' : 1, - 'font_color': 'black', - 'font_size' : 10 - }) - - # Formatting for percentages, showing as whole numbers (e.g., 9%) - percent_format = workbook.add_format({ - 'num_format': '0%', # Whole number percentage (e.g., 9%) - 'align' : 'center', - 'valign' : 'center', - 'border' : 1, - 'font_size' : 10 - }) - - # Formatting for currency values with two decimal places - currency_format = workbook.add_format({ - 'num_format': '$#,##0.00', # Currency with two decimal places - 'align' : 'center', - 'valign' : 'center', - 'border' : 1, - 'font_size' : 10 - }) - - # Formatting for formulas that are percentages - formula_percent_format = workbook.add_format({ - 'bg_color' : '#FFE599', # Light yellow background - 'num_format': '0%', # Whole number percentage - 'align' : 'center', - 'valign' : 'center', - 'border' : 1, - 'font_color': 'red', - 'font_size' : 10, - 'italic' : True - }) - - # Formatting for formulas that are currency values - formula_currency_format = workbook.add_format({ - 'bg_color' : '#FFE599', # Light yellow background - 'num_format': '$#,##0.00', # Currency with two decimal places - 'align' : 'center', - 'valign' : 'center', - 'border' : 1, - 'font_color': 'red', - 'font_size' : 10, - 'italic' : True - }) - - # Header format for the scenario column - scenario_header_format = workbook.add_format({ - 'bold' : True, - 'bg_color' : '#0079C2', # Dark blue background - 'border' : 1, - 'align' : 'center', - 'font_color': 'white', - 'font_size' : 10 - }) - - # Format for the variable names in the first column - variable_name_format = workbook.add_format({ - 'bold' : True, - 'bg_color' : '#DEE2E5', # Light gray background - 'border' : 1, - 'align' : 'left', - 'font_size': 10 + # Scenario header formatting with colors + scenario_colors = ['#0079C2', '#00A2E8', '#22B573', '#FFB300', '#E05A24', '#FF5050'] + scenario_formats = [workbook.add_format({'bold': True, 'bg_color': color, 'border': 1, 'align': 'center', 'font_color': 'white', 'font_size': 10}) for color in scenario_colors] + + # Row alternating colors + row_colors = ['#d1d5d8', '#fafbfb'] + + # Base formats for errors, percentages, and currency values + error_format = workbook.add_format({'bg_color': '#FFC7CE', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'white', 'bold': True, 'font_size': 10}) + base_percent_format = {'num_format': '0%', 'align': 'center', 'valign': 'center', 'border': 1, 
'font_size': 10} + base_currency_format = {'num_format': '$#,##0.00', 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} + + # Formula formats using a medium-dark orange + formula_color = '#FF8C00' + formula_format = workbook.add_format({'bg_color': '#FFE599', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) + formula_percent_format = workbook.add_format({'bg_color': '#FFE599', 'num_format': '0%', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) + formula_currency_format = workbook.add_format({'bg_color': '#FFE599', 'num_format': '$#,##0.00', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) + + # Message format to match formula style + message_format = workbook.add_format({ + 'bg_color': '#FFE599', # Light yellow background to match formula cells + 'align': 'center', + 'valign': 'center', + 'border': 1, + 'font_color': formula_color, # Match the formula text color + 'bold': True, # Bold to make it stand out + 'font_size': 12, # Larger font size for visibility + 'italic': True # Italic to match formula cells }) - worksheet.write(1, len(df.columns) + 2, "Values in red are formulas. Do not input anything.", formula_format) - + # Combine row color with cell format, excluding formulas + def get_combined_format(label, row_color, is_formula=False): + if is_formula: + if '$' in label: + return formula_currency_format + elif '%' in label: + return formula_percent_format + return formula_format + base_data_format = {'bg_color': row_color, 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} + if label: + if '$' in label: + return workbook.add_format({**base_currency_format, 'bg_color': row_color}) + elif '%' in label: + return workbook.add_format({**base_percent_format, 'bg_color': row_color}) + return workbook.add_format(base_data_format) + + # Setting column widths and writing headers column_width = 35 for col_num in range(len(df.columns) + 3): worksheet.set_column(col_num, col_num, column_width) - worksheet.write('A1', 'Scenario', scenario_header_format) + # Write scenario headers with different colors + worksheet.write('A1', 'Scenario', scenario_formats[0]) for col_num, header in enumerate(df.columns): - worksheet.write(0, col_num + 1, header, scenario_header_format) + worksheet.write(0, col_num + 1, header, scenario_formats[col_num % len(scenario_formats)]) + # Write variable names and data with full-row formatting for row_num, variable in enumerate(df.index): - worksheet.write(row_num + 1, 0, variable, variable_name_format) - - # Use the custom table to determine format - def get_format(label): - entry = next((item for item in custom_table if item["label"] == label), None) - if entry: - if '$' in entry["label"]: - return currency_format, formula_currency_format - elif '%' in entry["label"]: - return percent_format, formula_percent_format - return data_format, formula_format - - # Writing data to cells with the appropriate format - for row_num, variable in enumerate(df.index): - cell_format, cell_formula_format = get_format(variable) + row_color = row_colors[row_num % 2] + worksheet.write(row_num + 1, 0, variable, workbook.add_format({'bg_color': row_color, 'border': 1})) + + # Determine the format for each data cell for col_num, value in enumerate(df.loc[variable]): + is_formula = False # Logic to detect if this cell should be a formula + if isinstance(value, str) 
and "formula" in value.lower(): # Example logic for formulas + is_formula = True + + cell_format = get_combined_format(variable, row_color, is_formula) if pd.isnull(value) or value == '-': - worksheet.write(row_num + 1, col_num + 1, "", data_format) + worksheet.write(row_num + 1, col_num + 1, "", cell_format) else: worksheet.write(row_num + 1, col_num + 1, value, cell_format) + worksheet.merge_range(len(df.index) + 2, 0, len(df.index) + 2, len(df.columns), "Values in orange are formulas. Do not input anything.", message_format) + + # Adjust row heights for better readability + for row_num in range(1, len(df.index) + 2): + worksheet.set_row(row_num, 20) + headers = {header: idx for idx, header in enumerate(df.index)} bau_cells = { @@ -1523,9 +1479,8 @@ def get_format(label): 'ng_reduction_value': f'{colnum_string(2)}{headers["Total Fuel (MMBtu)"] + 2}' if "Total Fuel (MMBtu)" in headers else None, 'util_cost_value': f'{colnum_string(2)}{headers["Total Utility Cost ($)"] + 2}' if "Total Utility Cost ($)" in headers else None, 'co2_reduction_value': f'{colnum_string(2)}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None, - # New placeholders added based on Example 6 and 7 calculations 'placeholder1_value': f'{colnum_string(2)}{headers["Placeholder1"] + 2}' if "Placeholder1" in headers else None, - } + } relevant_columns = [entry["label"] for entry in custom_table] relevant_calculations = [calc for calc in calculations if calc["name"] in relevant_columns] @@ -1541,8 +1496,8 @@ def get_format(label): row_idx = headers.get(calc["name"]) if row_idx is not None: formula = calc["formula"](col_letter, bau_cells, headers) - cell_format, cell_formula_format = get_format(calc["name"]) - worksheet.write_formula(row_idx + 1, col-1, formula, cell_formula_format) + cell_format = get_combined_format(calc["name"], row_colors[row_idx % 2], is_formula=True) + worksheet.write_formula(row_idx + 1, col-1, formula, cell_format) else: missing_entries.append(calc["name"]) else: @@ -1568,14 +1523,16 @@ def get_format(label): missing_entries.append(calc["name"]) if missing_entries: - print(f"Missing entries in the input table: {', '.join(set(missing_entries))}. Please update the configuration if necessary.") + print(f"missing_entries in the input table: {', '.join(set(missing_entries))}. 
Please update the configuration if necessary.") workbook.close() + except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() - raise + err.save + + # Configuration # Set up table needed along with REopt dictionaries to grab data @@ -1589,20 +1546,19 @@ def get_format(label): "bau_value": lambda df: "", "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") }, - { - "label": "Site Location", - "key": "site_lat_long", - "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", - "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" - }, - # Example 2: Concatenating Strings { "label": "Site Address", "key": "site_address", "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") }, - + # Example 2: Concatenating Strings + { + "label": "Site Location", + "key": "site_lat_long", + "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", + "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" + }, # Example 3: Calculated Value (Sum of Two Fields), this does not show up in formulas { "label": "Combined Renewable Size (kW)", @@ -1616,7 +1572,7 @@ def get_format(label): "label": "Hardcoded Values (kWh)", "key": "hardcoded_value", "bau_value": lambda df: 500, # BAU scenario - "scenario_value": lambda df: 1000 # Regular scenarios + "scenario_value": lambda df: 1000 # other scenarios }, # Example 5: Conditional Formatting From 162dd4f82a3d2f3777911ca846f8b21ce8eae6e9 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:36:28 -0600 Subject: [PATCH 26/44] Added functionality for portfolio config --- reoptjl/views.py | 129 +++++++++++++++++++++++++---------------------- 1 file changed, 68 insertions(+), 61 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index e39dedd74..afce652b5 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1190,7 +1190,7 @@ def easiur_costs(request): ################ START Custom Table ########################### ############################################################### -def access_raw_data(run_uuids, request): +def access_raw_data(run_uuids, request, table_config_name): try: # Fetch UserProvidedMeta data for the relevant run_uuids usermeta = UserProvidedMeta.objects.filter(meta__run_uuid__in=run_uuids).only( @@ -1211,15 +1211,17 @@ def access_raw_data(run_uuids, request): ] } - # Perform the BAU consistency check - check_bau_consistency(full_summary_dict['scenarios']) + config = globals().get(table_config_name) + if not config: + raise ValueError(f"Invalid table configuration: {table_config_name}. 
Please provide a valid configuration name.") + + # Check if the BAU consistency check should be performed + if any("bau_value" in entry for entry in config): + check_bau_consistency(full_summary_dict['scenarios']) return full_summary_dict - except ValueError as e: - log.error(f"ValueError in access_raw_data: {e}") - raise - except Exception: + except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() log.error(f"Error in access_raw_data: {exc_value}, traceback: {tb.format_tb(exc_traceback)}") err = UnexpectedError(exc_type, exc_value, exc_traceback, task='access_raw_data') @@ -1243,7 +1245,6 @@ def generate_data_dict(config, df_gen, suffix=""): for entry in config: val = entry["scenario_value"](df_gen) data_dict[entry["label"]].append(val) - log.debug(f"Generated data_dict: {data_dict}") return data_dict except Exception: log.error(f"Error in generate_data_dict: {tb.format_exc()}") @@ -1255,14 +1256,12 @@ def get_REopt_data(data_f, scenario_name, config): suffix = "_bau" if "BAU" in scenario_name_str.upper() else "" df_gen = flatten_dict(data_f) - log.debug(f"Flattened data_f in get_REopt_data: {df_gen}") data_dict = generate_data_dict(config, df_gen, suffix) data_dict["Scenario"] = [scenario_name_str] col_order = ["Scenario"] + [entry["label"] for entry in config] df_res = pd.DataFrame(data_dict)[col_order] - log.debug(f"Generated DataFrame in get_REopt_data: {df_res}") return df_res except Exception: log.error(f"Error in get_REopt_data: {tb.format_exc()}") @@ -1271,18 +1270,20 @@ def get_REopt_data(data_f, scenario_name, config): def get_bau_values(scenarios, config): try: bau_values = {entry["label"]: None for entry in config} - log.debug(f"Initialized BAU values: {bau_values}") + + # Only proceed if at least one config entry has a bau_value + if not any("bau_value" in entry for entry in config): + return None # Extract BAU values from the first scenario df_gen = flatten_dict(scenarios[0]['full_data']) - log.debug(f"Flattened data for scenario {scenarios[0]['run_uuid']}: {df_gen}") for entry in config: bau_func = entry.get("bau_value") - value = bau_func(df_gen) if bau_func else df_gen.get(f"{entry['key']}_bau") - bau_values[entry["label"]] = value + if bau_func: # Only extract BAU values if `bau_value` exists + value = bau_func(df_gen) + bau_values[entry["label"]] = value - log.debug(f"Final consolidated BAU values: {bau_values}") return bau_values except Exception: @@ -1291,9 +1292,8 @@ def get_bau_values(scenarios, config): def process_scenarios(scenarios, reopt_data_config): try: - log.debug(f"Starting process_scenarios with config: {reopt_data_config}") + # Check if BAU values exist bau_values = get_bau_values(scenarios, reopt_data_config) - log.debug(f"BAU values: {bau_values}") combined_df = pd.DataFrame() for scenario in scenarios: @@ -1303,21 +1303,25 @@ def process_scenarios(scenarios, reopt_data_config): df_result.columns = [run_uuid] combined_df = df_result if combined_df.empty else combined_df.join(df_result, how='outer') - # Adding BAU data as the first row in the DataFrame - bau_data = {key: [value] for key, value in bau_values.items()} - bau_data["Scenario"] = ["BAU"] - df_bau = pd.DataFrame(bau_data) + if bau_values: + # Single site scenario with BAU data + bau_data = {key: [value] for key, value in bau_values.items()} + bau_data["Scenario"] = ["BAU"] + df_bau = pd.DataFrame(bau_data) - # Combine BAU data with scenario results - combined_df = pd.concat([df_bau, combined_df.T]).reset_index(drop=True) - log.debug(f"Final DataFrame before 
clean_data_dict:\n{combined_df}") + # Combine BAU data with scenario results + combined_df = pd.concat([df_bau, combined_df.T]).reset_index(drop=True) + else: + # Portfolio scenario without BAU data + combined_df = combined_df.T.reset_index() + combined_df.rename(columns={'index': 'Scenario'}, inplace=True) combined_df = clean_data_dict(combined_df.to_dict(orient="list")) combined_df = pd.DataFrame(combined_df) combined_df = combined_df[["Scenario"] + [col for col in combined_df.columns if col != "Scenario"]] - log.debug(f"Final DataFrame in process_scenarios:\n{combined_df}") return combined_df + except Exception: log.error(f"Error in process_scenarios: {tb.format_exc()}") raise @@ -1325,14 +1329,10 @@ def process_scenarios(scenarios, reopt_data_config): def create_custom_comparison_table(request): if request.method == 'GET': try: - log.debug(f"GET parameters: {request.GET}") - + # Set default table configuration name to 'single_site_custom_table' table_config_name = request.GET.get('table_config_name', 'single_site_custom_table') - log.debug(f"Using table configuration: {table_config_name}") run_uuids = [request.GET[key] for key in request.GET.keys() if key.startswith('run_uuid[')] - log.debug(f"Handling GET request with run_uuids: {run_uuids}") - if not run_uuids: return JsonResponse({"Error": "No run_uuids provided. Please include at least one run_uuid in the request."}, status=400) @@ -1340,22 +1340,24 @@ def create_custom_comparison_table(request): try: uuid.UUID(r_uuid) except ValueError: - log.debug(f"Invalid UUID format: {r_uuid}") return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}. Ensure that each run_uuid is a valid UUID."}, status=400) - if table_config_name in globals(): - target_custom_table = globals()[table_config_name] - else: - return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. Please provide a valid configuration name."}, status=400) + # Access raw data and check for BAU consistency if needed + scenarios = access_raw_data(run_uuids, request, table_config_name) + if not scenarios.get('scenarios'): + return JsonResponse({'Error': 'Failed to fetch scenarios or inconsistent BAU data. Ensure that scenarios have consistent site inputs.'}, content_type='application/json', status=404) - scenarios = access_raw_data(run_uuids, request) - if 'scenarios' not in scenarios or not scenarios['scenarios']: - log.debug("Failed to fetch scenarios") - return JsonResponse({'Error': 'Failed to fetch scenarios. The provided run_uuids contains inconsistent BAU data. This should be used for scenarios with the same site inputs'}, content_type='application/json', status=404) + target_custom_table = globals().get(table_config_name) + if not target_custom_table: + return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. 
Please provide a valid configuration name."}, status=400) final_df = process_scenarios(scenarios['scenarios'], target_custom_table) - final_df.iloc[1:, 0] = run_uuids + # Ensure correct alignment of run_uuids with the DataFrame + if len(run_uuids) == final_df.shape[0] - 1: # Exclude BAU row if present + final_df.iloc[1:, 0] = run_uuids + elif len(run_uuids) == final_df.shape[0]: + final_df.iloc[:, 0] = run_uuids final_df_transpose = final_df.transpose() final_df_transpose.columns = final_df_transpose.iloc[0] @@ -1365,22 +1367,18 @@ def create_custom_comparison_table(request): create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) output.seek(0) - filename = "comparison_table.xlsx" response = HttpResponse( output, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' ) - response['Content-Disposition'] = f'attachment; filename={filename}' - + response['Content-Disposition'] = 'attachment; filename="comparison_table.xlsx"' return response except ValueError as e: - log.debug(f"ValueError: {str(e)}") return JsonResponse({"Error": f"A ValueError occurred: {str(e)} Please check the input values and try again."}, status=500) except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() - log.debug(f"exc_type: {exc_type}; exc_value: {exc_value}; exc_traceback: {tb.format_tb(exc_traceback)}") err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') err.save_to_db() return JsonResponse({"Error": f"An unexpected error occurred while creating the comparison table. Please try again later or contact support if the issue persists. Error details: {str(e)}"}, status=500) @@ -1412,14 +1410,14 @@ def create_custom_table_excel(df, custom_table, calculations, output): # Message format to match formula style message_format = workbook.add_format({ - 'bg_color': '#FFE599', # Light yellow background to match formula cells + 'bg_color': '#FFE599', 'align': 'center', 'valign': 'center', 'border': 1, - 'font_color': formula_color, # Match the formula text color - 'bold': True, # Bold to make it stand out - 'font_size': 12, # Larger font size for visibility - 'italic': True # Italic to match formula cells + 'font_color': formula_color, + 'bold': True, + 'font_size': 12, + 'italic': True }) # Combine row color with cell format, excluding formulas @@ -1453,10 +1451,9 @@ def get_combined_format(label, row_color, is_formula=False): row_color = row_colors[row_num % 2] worksheet.write(row_num + 1, 0, variable, workbook.add_format({'bg_color': row_color, 'border': 1})) - # Determine the format for each data cell for col_num, value in enumerate(df.loc[variable]): - is_formula = False # Logic to detect if this cell should be a formula - if isinstance(value, str) and "formula" in value.lower(): # Example logic for formulas + is_formula = False + if isinstance(value, str) and "formula" in value.lower(): is_formula = True cell_format = get_combined_format(variable, row_color, is_formula) @@ -1467,10 +1464,6 @@ def get_combined_format(label, row_color, is_formula=False): worksheet.merge_range(len(df.index) + 2, 0, len(df.index) + 2, len(df.columns), "Values in orange are formulas. 
Do not input anything.", message_format) - # Adjust row heights for better readability - for row_num in range(1, len(df.index) + 2): - worksheet.set_row(row_num, 20) - headers = {header: idx for idx, header in enumerate(df.index)} bau_cells = { @@ -1508,14 +1501,12 @@ def get_combined_format(label, row_color, is_formula=False): worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) message = f"Cannot calculate '{calc['name']}' because the required fields are missing: {', '.join(missing_keys)} in the Table configuration provided. Update the Table to include {missing_keys}. Writing 'MISSING DATA' instead." if message not in logged_messages: - print(message) logged_messages.add(message) missing_entries.append(calc["name"]) except KeyError as e: missing_field = str(e) message = f"Cannot calculate '{calc['name']}' because the field '{missing_field}' is missing in the Table configuration provided. Update the Table to include {missing_field}. Writing 'MISSING DATA' instead." if message not in logged_messages: - print(message) logged_messages.add(message) row_idx = headers.get(calc["name"]) if row_idx is not None: @@ -1532,10 +1523,26 @@ def get_combined_format(label, row_color, is_formula=False): err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') err.save - - # Configuration # Set up table needed along with REopt dictionaries to grab data +# Portfolio configuration should not include "bau_value" in the keys +example_table_portfolio = [ + { + "label": "Site Name", + "key": "site", + "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") + }, + { + "label": "Site Address", + "key": "site_address", + "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") + }, + { + "label": "Site Location", + "key": "site_lat_long", + "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" + } +] # Example Custom Table Configuration example_table = [ From ea8820576643f3eef728eb6f987983f7fec463c5 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Fri, 30 Aug 2024 15:16:47 -0600 Subject: [PATCH 27/44] added portfolio configuration, updated single site config --- reoptjl/views.py | 203 ++++++++++++++++++++++++++++++----------------- 1 file changed, 131 insertions(+), 72 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index afce652b5..35f257660 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1402,23 +1402,14 @@ def create_custom_table_excel(df, custom_table, calculations, output): base_percent_format = {'num_format': '0%', 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} base_currency_format = {'num_format': '$#,##0.00', 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} - # Formula formats using a medium-dark orange - formula_color = '#FF8C00' - formula_format = workbook.add_format({'bg_color': '#FFE599', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) - formula_percent_format = workbook.add_format({'bg_color': '#FFE599', 'num_format': '0%', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) - formula_currency_format = workbook.add_format({'bg_color': '#FFE599', 'num_format': '$#,##0.00', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) + # 
Formula formats using dark blue background + formula_color = '#F8F8FF' + formula_format = workbook.add_format({'bg_color': '#0B5E90', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) + formula_percent_format = workbook.add_format({'bg_color': '#0B5E90', 'num_format': '0%', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) + formula_currency_format = workbook.add_format({'bg_color': '#0B5E90', 'num_format': '$#,##0.00', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) # Message format to match formula style - message_format = workbook.add_format({ - 'bg_color': '#FFE599', - 'align': 'center', - 'valign': 'center', - 'border': 1, - 'font_color': formula_color, - 'bold': True, - 'font_size': 12, - 'italic': True - }) + message_format = workbook.add_format({'bg_color': '#0B5E90', 'align': 'center','valign': 'center','border': 1,'font_color': formula_color, 'bold': True, 'font_size': 12, 'italic': True }) # Combine row color with cell format, excluding formulas def get_combined_format(label, row_color, is_formula=False): @@ -1462,7 +1453,7 @@ def get_combined_format(label, row_color, is_formula=False): else: worksheet.write(row_num + 1, col_num + 1, value, cell_format) - worksheet.merge_range(len(df.index) + 2, 0, len(df.index) + 2, len(df.columns), "Values in orange are formulas. Do not input anything.", message_format) + worksheet.merge_range(len(df.index) + 2, 0, len(df.index) + 2, len(df.columns), "Values in white are formulas. Do not input anything.", message_format) headers = {header: idx for idx, header in enumerate(df.index)} @@ -1498,7 +1489,7 @@ def get_combined_format(label, row_color, is_formula=False): if missing_keys: row_idx = headers.get(calc["name"]) if row_idx is not None: - worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) + worksheet.write(row_idx + 1, col - 1, "MISSING REFERENCE IN FORMULA", error_format) message = f"Cannot calculate '{calc['name']}' because the required fields are missing: {', '.join(missing_keys)} in the Table configuration provided. Update the Table to include {missing_keys}. Writing 'MISSING DATA' instead." 
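# --- Editor's sketch (not part of the committed patches) --------------------
# The table configurations below call safe_get(df, "outputs.PV.size_kw"),
# safe_get(df, "inputs.Meta.description", "None provided"), and similar dotted
# keys, but safe_get itself is not defined anywhere in this excerpt. The sketch
# below is only an assumption about its behavior, consistent with how it is
# called: a plain dict lookup over the flattened results with a default for
# missing or None values. The real helper lives elsewhere in reoptjl and may differ.
def safe_get(df, key, default=0):
    """Return the value stored under a flattened dotted key, or a default (hypothetical sketch)."""
    value = df.get(key, default)
    # Treat an explicit None like a missing key so the config lambdas always get a usable value.
    return default if value is None else value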
if message not in logged_messages: logged_messages.add(message) @@ -1627,12 +1618,6 @@ def get_combined_format(label, row_color, is_formula=False): "bau_value": lambda df: "", "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") }, - { - "label": "Site Location", - "key": "site_lat_long", - "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", - "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" - }, { "label": "Site Address", "key": "site_address", @@ -1640,34 +1625,22 @@ def get_combined_format(label, row_color, is_formula=False): "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") }, { - "label": "PV Size (kW)", - "key": "pv_size", - "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") - }, - { - "label": "Wind Size (kW)", - "key": "wind_size", - "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") - }, - { - "label": "CHP Size (kW)", - "key": "chp_size", - "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") + "label": "Site Location", + "key": "site_lat_long", + "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", + "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" }, { - "label": "PV Total Electricity Produced (kWh)", - "key": "pv_total_electricity_produced", - "bau_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh") + "label": "PV Nameplate capacity (kW), purchased", + "key": "pv_size_purchased", + "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") }, { - "label": "PV Exported to Grid (kWh)", - "key": "pv_exported_to_grid", - "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw") + "label": "PV Nameplate capacity (kW), existing", + "key": "pv_size_existing", + "bau_value": lambda df: safe_get(df, "outputs.PV.existing_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.existing_kw") }, { "label": "PV Serving Load (kWh)", @@ -1676,16 +1649,10 @@ def get_combined_format(label, row_color, is_formula=False): "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw") }, { - "label": "Wind Total Electricity Produced (kWh)", - "key": "wind_total_electricity_produced", - "bau_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh") - }, - { - "label": "Wind Exported to Grid (kWh)", - "key": "wind_exported_to_grid", - "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw") + "label": "Wind Nameplate capacity (kW), purchased", + "key": "wind_size_purchased", + "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") }, { "label": "Wind 
Serving Load (kWh)", @@ -1694,16 +1661,46 @@ def get_combined_format(label, row_color, is_formula=False): "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw") }, { - "label": "CHP Total Electricity Produced (kWh)", - "key": "chp_total_electricity_produced", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh") + "label": "Backup Generator Nameplate capacity (kW), purchased", + "key": "backup_generator_capacity_purchased", + "bau_value": lambda df: safe_get(df, "outputs.Generator.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.size_kw") + }, + { + "label": "Backup Generator Nameplate capacity (kW), existing", + "key": "backup_generator_capacity_existing", + "bau_value": lambda df: safe_get(df, "outputs.Generator.existing_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.existing_kw") + }, + { + "label": "Backup Generator Serving Load (kWh)", + "key": "backup_generator_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw") }, { - "label": "CHP Exported to Grid (kWh)", - "key": "chp_exported_to_grid", - "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw") + "label": "Battery power (kW)", + "key": "battery_power", + "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw") + }, + { + "label": "Battery capacity (kWh)", + "key": "battery_capacity", + "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh") + }, + { + "label": "Battery Serving Load (kWh)", + "key": "battery_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw") + }, + { + "label": "CHP capacity (kW)", + "key": "chp_capacity", + "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") }, { "label": "CHP Serving Load (kWh)", @@ -1712,10 +1709,64 @@ def get_combined_format(label, row_color, is_formula=False): "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw") }, { - "label": "CHP Serving Thermal Load (MMBtu)", - "key": "chp_serving_thermal_load", - "bau_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour") + "label": "Absorption chiller capacity (tons)", + "key": "absorption_chiller_capacity", + "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton") + }, + { + "label": "Absorption Chiller Serving Load (ton)", + "key": "absorption_chiller_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton") + }, + { + "label": "Chilled water 
TES capacity (gallons)", + "key": "chilled_water_tes_capacity", + "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal") + }, + { + "label": "Chilled Water TES Serving Load (ton)", + "key": "chilled_water_tes_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.storage_to_load_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.storage_to_load_series_ton") + }, + { + "label": "Hot water TES capacity (gallons)", + "key": "hot_water_tes_capacity", + "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal") + }, + { + "label": "Hot Water TES Serving Load (MMBtu)", + "key": "hot_water_tes_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour") + }, + { + "label": "Steam turbine capacity (kW)", + "key": "steam_turbine_capacity", + "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw") + }, + { + "label": "Steam Turbine Serving Load (kWh)", + "key": "steam_turbine_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw") + }, + { + "label": "GHP heat pump capacity (ton)", + "key": "ghp_heat_pump_capacity", + "bau_value": lambda df: safe_get(df, "outputs.GHP.size_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_ton") + }, + { + "label": "GHP ground heat exchanger size (ft)", + "key": "ghp_ground_heat_exchanger_size", + "bau_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft") }, { "label": "Grid Purchased Electricity (kWh)", @@ -1726,8 +1777,8 @@ def get_combined_format(label, row_color, is_formula=False): { "label": "Total Site Electricity Use (kWh)", "key": "total_site_electricity_use", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw") + "bau_value": lambda df: 0, + "scenario_value": lambda df: 0 }, { "label": "Net Purchased Electricity Reduction (%)", @@ -1939,7 +1990,15 @@ def get_combined_format(label, row_color, is_formula=False): calculations = [ { "name": "Total Site Electricity Use (kWh)", - "formula": lambda col, bau, headers: f'={col}{headers["PV Serving Load (kWh)"] + 2}+{col}{headers["Wind Serving Load (kWh)"] + 2}+{col}{headers["CHP Serving Load (kWh)"] + 2}+{col}{headers["Grid Purchased Electricity (kWh)"] + 2}' + "formula": lambda col, bau, headers: ( + f'={col}{headers["PV Serving Load (kWh)"] + 2}+' + f'{col}{headers["Wind Serving Load (kWh)"] + 2}+' + f'{col}{headers["CHP Serving Load (kWh)"] + 2}+' + f'{col}{headers["Battery Serving Load (kWh)"] + 2}+' + f'{col}{headers["Backup Generator Serving Load (kWh)"] + 2}+' + f'{col}{headers["Steam Turbine Serving Load (kWh)"] + 2}+' + f'{col}{headers["Grid Purchased Electricity (kWh)"] + 2}' + ) }, { "name": "Net Purchased Electricity Reduction (%)", From 
d342a1d27a9b4a1fbf70fb512420eab2961b3fcc Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Tue, 3 Sep 2024 16:12:23 -0600 Subject: [PATCH 28/44] Added link to results to config --- reoptjl/views.py | 71 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 22 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 35f257660..3f1e65f3c 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1456,7 +1456,8 @@ def get_combined_format(label, row_color, is_formula=False): worksheet.merge_range(len(df.index) + 2, 0, len(df.index) + 2, len(df.columns), "Values in white are formulas. Do not input anything.", message_format) headers = {header: idx for idx, header in enumerate(df.index)} - + headers["Scenario"] = 0 + bau_cells = { 'grid_value': f'{colnum_string(2)}{headers["Grid Purchased Electricity (kWh)"] + 2}' if "Grid Purchased Electricity (kWh)" in headers else None, 'net_cost_value': f'{colnum_string(2)}{headers["Net Electricity Cost ($)"] + 2}' if "Net Electricity Cost ($)" in headers else None, @@ -1516,28 +1517,16 @@ def get_combined_format(label, row_color, is_formula=False): # Configuration # Set up table needed along with REopt dictionaries to grab data -# Portfolio configuration should not include "bau_value" in the keys -example_table_portfolio = [ - { - "label": "Site Name", - "key": "site", - "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") - }, - { - "label": "Site Address", - "key": "site_address", - "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") - }, - { - "label": "Site Location", - "key": "site_lat_long", - "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" - } -] - # Example Custom Table Configuration example_table = [ + { + "label": "Results URL", + "key": "url", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, # Example 1: Basic Key Retrieval with Data Values + { "label": "Site Name", "key": "site", @@ -1608,7 +1597,14 @@ def get_combined_format(label, row_color, is_formula=False): "key": "placeholder_calculation_with_bau", "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel - }] + }, + { + "label": "Results URL", + "key": "url", + "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid"), + "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid") + }, + ] # TASC/Single Site Configuration single_site_custom_table = [ @@ -1983,7 +1979,13 @@ def get_combined_format(label, row_color, is_formula=False): "key": "storage_federal_tax_incentive", "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") - } + }, + { + "label": "Results URL", + "key": "url", + "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid"), + "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid") + }, ] # Configuration for calculations @@ -2076,6 +2078,31 @@ def get_combined_format(label, row_color, is_formula=False): } ] + +# Portfolio configuration should not include "bau_value" in the keys +example_table_portfolio = [ + { + 
"label": "Site Name", + "key": "site", + "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") + }, + { + "label": "Site Address", + "key": "site_address", + "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") + }, + { + "label": "Site Location", + "key": "site_lat_long", + "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" + }, + { + "label": "Results URL", + "key": "url", + "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid") + }, +] + ############################################################### ################ END Custom Table ############################# ############################################################### \ No newline at end of file From a1730c8f39734f9cb6f5d70fa6f535f55f2ecf26 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Tue, 3 Sep 2024 16:46:38 -0600 Subject: [PATCH 29/44] fixed example table config and url link --- reoptjl/views.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 3f1e65f3c..6ec563623 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1519,14 +1519,6 @@ def get_combined_format(label, row_color, is_formula=False): # Set up table needed along with REopt dictionaries to grab data # Example Custom Table Configuration example_table = [ - { - "label": "Results URL", - "key": "url", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - # Example 1: Basic Key Retrieval with Data Values - { "label": "Site Name", "key": "site", @@ -1601,9 +1593,9 @@ def get_combined_format(label, row_color, is_formula=False): { "label": "Results URL", "key": "url", - "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid"), - "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid") - }, + "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid"), + "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") + } ] # TASC/Single Site Configuration @@ -1983,8 +1975,8 @@ def get_combined_format(label, row_color, is_formula=False): { "label": "Results URL", "key": "url", - "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid"), - "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid") + "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid"), + "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") }, ] @@ -2099,7 +2091,7 @@ def get_combined_format(label, row_color, is_formula=False): { "label": "Results URL", "key": "url", - "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/"+safe_get(df, "run_uuid") + "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") }, ] From e9ed5490ea0a00f483bf3d34546ab64e340a0ae5 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Mon, 9 Sep 
2024 16:18:14 -0600 Subject: [PATCH 30/44] updated code for single implementation of portfolio and single site --- reoptjl/views.py | 281 ++++++++++++++++++++++++++++------------------- 1 file changed, 167 insertions(+), 114 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 6ec563623..12dc4f41d 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1186,11 +1186,11 @@ def easiur_costs(request): # log.error(debug_msg) # return JsonResponse({"Error": "Unexpected Error. Please check your input parameters and contact reopt@nrel.gov if problems persist."}, status=500) -############################################################### -################ START Custom Table ########################### -############################################################### +############################################################################################################################## +################################################# START Custom Table ######################################################### +############################################################################################################################## -def access_raw_data(run_uuids, request, table_config_name): +def access_raw_data(run_uuids, request): try: # Fetch UserProvidedMeta data for the relevant run_uuids usermeta = UserProvidedMeta.objects.filter(meta__run_uuid__in=run_uuids).only( @@ -1211,14 +1211,6 @@ def access_raw_data(run_uuids, request, table_config_name): ] } - config = globals().get(table_config_name) - if not config: - raise ValueError(f"Invalid table configuration: {table_config_name}. Please provide a valid configuration name.") - - # Check if the BAU consistency check should be performed - if any("bau_value" in entry for entry in config): - check_bau_consistency(full_summary_dict['scenarios']) - return full_summary_dict except Exception as e: @@ -1269,22 +1261,20 @@ def get_REopt_data(data_f, scenario_name, config): def get_bau_values(scenarios, config): try: - bau_values = {entry["label"]: None for entry in config} - - # Only proceed if at least one config entry has a bau_value - if not any("bau_value" in entry for entry in config): - return None + # Dictionary to store BAU values for each scenario + bau_values_per_scenario = {scenario['run_uuid']: {entry["label"]: None for entry in config} for scenario in scenarios} - # Extract BAU values from the first scenario - df_gen = flatten_dict(scenarios[0]['full_data']) + for scenario in scenarios: + run_uuid = scenario['run_uuid'] + df_gen = flatten_dict(scenario['full_data']) - for entry in config: - bau_func = entry.get("bau_value") - if bau_func: # Only extract BAU values if `bau_value` exists - value = bau_func(df_gen) - bau_values[entry["label"]] = value + for entry in config: + bau_func = entry.get("bau_value") + if bau_func: # Only extract BAU values if `bau_value` exists + value = bau_func(df_gen) + bau_values_per_scenario[run_uuid][entry["label"]] = value - return bau_values + return bau_values_per_scenario except Exception: log.error(f"Error in get_bau_values: {tb.format_exc()}") @@ -1292,32 +1282,38 @@ def get_bau_values(scenarios, config): def process_scenarios(scenarios, reopt_data_config): try: - # Check if BAU values exist - bau_values = get_bau_values(scenarios, reopt_data_config) + # Fetch BAU values for each scenario + bau_values_per_scenario = get_bau_values(scenarios, reopt_data_config) combined_df = pd.DataFrame() - for scenario in scenarios: + for idx, scenario in enumerate(scenarios): run_uuid = 
scenario['run_uuid'] + + # Process scenario data df_result = get_REopt_data(scenario['full_data'], run_uuid, reopt_data_config) - df_result = df_result.set_index('Scenario').T - df_result.columns = [run_uuid] - combined_df = df_result if combined_df.empty else combined_df.join(df_result, how='outer') - - if bau_values: - # Single site scenario with BAU data - bau_data = {key: [value] for key, value in bau_values.items()} - bau_data["Scenario"] = ["BAU"] + + # Ensure the run_uuid is assigned to the Scenario column + df_result["Scenario"] = run_uuid + + # Create BAU DataFrame for this scenario + bau_data = {key: [value] for key, value in bau_values_per_scenario[run_uuid].items()} + bau_data["Scenario"] = [f"BAU {idx + 1}"] # Assign distinct BAU labels (BAU 1, BAU 2) df_bau = pd.DataFrame(bau_data) - # Combine BAU data with scenario results - combined_df = pd.concat([df_bau, combined_df.T]).reset_index(drop=True) - else: - # Portfolio scenario without BAU data - combined_df = combined_df.T.reset_index() - combined_df.rename(columns={'index': 'Scenario'}, inplace=True) + # Append BAU row followed by scenario result row, preserve UUIDs + if combined_df.empty: + combined_df = pd.concat([df_bau, df_result], axis=0) + else: + combined_df = pd.concat([combined_df, df_bau, df_result], axis=0) + # Reset index and remove any misalignment + combined_df.reset_index(drop=True, inplace=True) + + # Clean up and format the data for final output combined_df = clean_data_dict(combined_df.to_dict(orient="list")) combined_df = pd.DataFrame(combined_df) + + # Ensure 'Scenario' is the first column, with others following combined_df = combined_df[["Scenario"] + [col for col in combined_df.columns if col != "Scenario"]] return combined_df @@ -1342,31 +1338,27 @@ def create_custom_comparison_table(request): except ValueError: return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}. Ensure that each run_uuid is a valid UUID."}, status=400) - # Access raw data and check for BAU consistency if needed - scenarios = access_raw_data(run_uuids, request, table_config_name) - if not scenarios.get('scenarios'): - return JsonResponse({'Error': 'Failed to fetch scenarios or inconsistent BAU data. Ensure that scenarios have consistent site inputs.'}, content_type='application/json', status=404) + # Access raw data + scenarios = access_raw_data(run_uuids, request) + # Process scenarios and generate the final DataFrame target_custom_table = globals().get(table_config_name) if not target_custom_table: return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. 
Please provide a valid configuration name."}, status=400) final_df = process_scenarios(scenarios['scenarios'], target_custom_table) - # Ensure correct alignment of run_uuids with the DataFrame - if len(run_uuids) == final_df.shape[0] - 1: # Exclude BAU row if present - final_df.iloc[1:, 0] = run_uuids - elif len(run_uuids) == final_df.shape[0]: - final_df.iloc[:, 0] = run_uuids - + # Transpose the final DataFrame final_df_transpose = final_df.transpose() final_df_transpose.columns = final_df_transpose.iloc[0] final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + # Create the Excel file output = io.BytesIO() create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) output.seek(0) + # Return the Excel file as a response response = HttpResponse( output, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' @@ -1391,8 +1383,8 @@ def create_custom_table_excel(df, custom_table, calculations, output): worksheet = workbook.add_worksheet('Custom Table') # Scenario header formatting with colors - scenario_colors = ['#0079C2', '#00A2E8', '#22B573', '#FFB300', '#E05A24', '#FF5050'] - scenario_formats = [workbook.add_format({'bold': True, 'bg_color': color, 'border': 1, 'align': 'center', 'font_color': 'white', 'font_size': 10}) for color in scenario_colors] + scenario_colors = ['#0B5E90','#00A4E4' ,'#F7A11A', '#D9531E', '#FFB300', '#D1D5D8', '#FF5050'] + scenario_formats = [workbook.add_format({'bold': True, 'bg_color': color, 'border': 1, 'align': 'center', 'font_color': 'white', 'font_size': 12}) for color in scenario_colors] # Row alternating colors row_colors = ['#d1d5d8', '#fafbfb'] @@ -1411,6 +1403,9 @@ def create_custom_table_excel(df, custom_table, calculations, output): # Message format to match formula style message_format = workbook.add_format({'bg_color': '#0B5E90', 'align': 'center','valign': 'center','border': 1,'font_color': formula_color, 'bold': True, 'font_size': 12, 'italic': True }) + # Separator format for rows that act as visual dividers + separator_format = workbook.add_format({'bg_color': '#8CC63F', 'bold': True, 'border': 1,'font_size': 11}) + # Combine row color with cell format, excluding formulas def get_combined_format(label, row_color, is_formula=False): if is_formula: @@ -1427,45 +1422,93 @@ def get_combined_format(label, row_color, is_formula=False): return workbook.add_format({**base_percent_format, 'bg_color': row_color}) return workbook.add_format(base_data_format) - # Setting column widths and writing headers - column_width = 35 - for col_num in range(len(df.columns) + 3): - worksheet.set_column(col_num, col_num, column_width) + # Set column width for the first column (labels column) + worksheet.set_column(0, 0, 35) - # Write scenario headers with different colors + # Setting column widths and writing headers for other columns + column_width = 25 + columns_to_hide = set() + + # Loop through BAU columns and check if all values are identical across all BAU columns + bau_columns = [i for i, header in enumerate(df.columns) if "BAU" in header] + + # Only proceed if there are BAU columns + if bau_columns: + identical_bau_columns = True # Assume all BAU columns are identical unless proven otherwise + + # Loop through each row and check the values across BAU columns + for row_num in range(len(df)): + row_values = df.iloc[row_num, bau_columns].values # Get all BAU values for this row + + # Check if all BAU values in this row are the same + first_bau_value = row_values[0] + if not all(value == 
first_bau_value for value in row_values): + identical_bau_columns = False + break # If any row has different BAU values, stop checking further + + # If all BAU columns are identical across all rows, hide all but the first BAU column + if identical_bau_columns: + for col_num in bau_columns[1:]: + columns_to_hide.add(col_num) + + # Now set the column properties for hiding BAU columns and leaving others unchanged + for col_num, header in enumerate(df.columns): + if "BAU" in header and col_num in columns_to_hide: + # Hide the BAU columns that have been marked + worksheet.set_column(col_num + 1, col_num + 1, column_width, None, {'hidden': True}) + else: + # Set the normal column width for non-hidden columns + worksheet.set_column(col_num + 1, col_num + 1, column_width) + + # Write scenario headers worksheet.write('A1', 'Scenario', scenario_formats[0]) for col_num, header in enumerate(df.columns): - worksheet.write(0, col_num + 1, header, scenario_formats[col_num % len(scenario_formats)]) + worksheet.write(0, col_num + 1, header, scenario_formats[(col_num // 2) % (len(scenario_formats) - 1) + 1]) # Write variable names and data with full-row formatting - for row_num, variable in enumerate(df.index): - row_color = row_colors[row_num % 2] - worksheet.write(row_num + 1, 0, variable, workbook.add_format({'bg_color': row_color, 'border': 1})) - - for col_num, value in enumerate(df.loc[variable]): - is_formula = False - if isinstance(value, str) and "formula" in value.lower(): - is_formula = True - - cell_format = get_combined_format(variable, row_color, is_formula) - if pd.isnull(value) or value == '-': - worksheet.write(row_num + 1, col_num + 1, "", cell_format) - else: - worksheet.write(row_num + 1, col_num + 1, value, cell_format) + row_offset = 0 # To keep track of the current row in the worksheet + for row_num, entry in enumerate(custom_table): + key = entry['key'] # Extract the key from custom_table + + # Check if the key contains 'separator' + if 'separator' in key.lower(): + # Merge the first few columns for the separator + worksheet.merge_range(row_num + 1 + row_offset, 0, row_num + 1 + row_offset, len(df.columns), entry['label'], separator_format) + else: + # Regular row data writing + row_color = row_colors[(row_num + row_offset) % 2] # Alternating row colors + + # Write the label in the first column + worksheet.write(row_num + 1 + row_offset, 0, entry['label'], workbook.add_format({'bg_color': row_color, 'border': 1})) + + # Write the data for each column + variable = entry['label'] # Assuming df index or columns match the label + for col_num, value in enumerate(df.loc[variable]): + is_formula = False # Detect if this cell contains a formula + if isinstance(value, str) and "formula" in value.lower(): + is_formula = True + + cell_format = get_combined_format(variable, row_color, is_formula) + if pd.isnull(value) or value == '-': + worksheet.write(row_num + 1 + row_offset, col_num + 1, "", cell_format) + else: + worksheet.write(row_num + 1 + row_offset, col_num + 1, value, cell_format) + + # Update the message to include clear information about BAU values being hidden for novice users + message_text = ( + "Values in white are formulas, so please do not enter anything in those cells." + ) - worksheet.merge_range(len(df.index) + 2, 0, len(df.index) + 2, len(df.columns), "Values in white are formulas. 
Do not input anything.", message_format) + # Merge the range and apply the updated message + worksheet.merge_range(len(df.index) + 2, 0, len(df.index) + 2, len(df.columns), message_text, message_format) headers = {header: idx for idx, header in enumerate(df.index)} headers["Scenario"] = 0 - - bau_cells = { - 'grid_value': f'{colnum_string(2)}{headers["Grid Purchased Electricity (kWh)"] + 2}' if "Grid Purchased Electricity (kWh)" in headers else None, - 'net_cost_value': f'{colnum_string(2)}{headers["Net Electricity Cost ($)"] + 2}' if "Net Electricity Cost ($)" in headers else None, - 'ng_reduction_value': f'{colnum_string(2)}{headers["Total Fuel (MMBtu)"] + 2}' if "Total Fuel (MMBtu)" in headers else None, - 'util_cost_value': f'{colnum_string(2)}{headers["Total Utility Cost ($)"] + 2}' if "Total Utility Cost ($)" in headers else None, - 'co2_reduction_value': f'{colnum_string(2)}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None, - 'placeholder1_value': f'{colnum_string(2)}{headers["Placeholder1"] + 2}' if "Placeholder1" in headers else None, - } + + # Function to get the correct BAU reference column dynamically + def get_bau_column(col): + # BAU column will always be right before the corresponding scenario column + return col - 1 if col > 1 else 1 relevant_columns = [entry["label"] for entry in custom_table] relevant_calculations = [calc for calc in calculations if calc["name"] in relevant_columns] @@ -1474,7 +1517,23 @@ def get_combined_format(label, row_color, is_formula=False): missing_entries = [] for col in range(2, len(df.columns) + 2): + # Skip BAU columns (BAU columns should not have formulas) + if col % 2 == 0: + continue # Skip the BAU column + col_letter = colnum_string(col) + bau_col = get_bau_column(col) # Get the corresponding BAU column + bau_col_letter = colnum_string(bau_col) # Convert the column number to letter for Excel reference + + bau_cells = { + 'grid_value': f'{bau_col_letter}{headers["Grid Purchased Electricity (kWh)"] + 2}' if "Grid Purchased Electricity (kWh)" in headers else None, + 'net_cost_value': f'{bau_col_letter}{headers["Net Electricity Cost ($)"] + 2}' if "Net Electricity Cost ($)" in headers else None, + 'ng_reduction_value': f'{bau_col_letter}{headers["Total Fuel (MMBtu)"] + 2}' if "Total Fuel (MMBtu)" in headers else None, + 'util_cost_value': f'{bau_col_letter}{headers["Total Utility Cost ($)"] + 2}' if "Total Utility Cost ($)" in headers else None, + 'co2_reduction_value': f'{bau_col_letter}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None, + 'placeholder1_value': f'{bau_col_letter}{headers["Placeholder1"] + 2}' if "Placeholder1" in headers else None, + } + for calc in relevant_calculations: try: if all(key in headers or key in bau_cells for key in calc["formula"].__code__.co_names): @@ -1482,7 +1541,7 @@ def get_combined_format(label, row_color, is_formula=False): if row_idx is not None: formula = calc["formula"](col_letter, bau_cells, headers) cell_format = get_combined_format(calc["name"], row_colors[row_idx % 2], is_formula=True) - worksheet.write_formula(row_idx + 1, col-1, formula, cell_format) + worksheet.write_formula(row_idx + 1, col - 1, formula, cell_format) else: missing_entries.append(calc["name"]) else: @@ -1513,7 +1572,7 @@ def get_combined_format(label, row_color, is_formula=False): except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') 
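# --- Editor's sketch (not part of the committed patches) --------------------
# colnum_string(), used above to build Excel cell references such as
# f'{colnum_string(2)}{headers["Grid Purchased Electricity (kWh)"] + 2}', is not
# defined in this excerpt. Its usage implies a 1-based column-index-to-letter
# conversion (column 2 -> "B"); the implementation below is an assumed sketch of
# that behavior, e.g. colnum_string(1) == "A", colnum_string(28) == "AB".
def colnum_string(n):
    """Convert a 1-based column index into an Excel column letter (hypothetical sketch)."""
    letters = ""
    while n > 0:
        n, remainder = divmod(n - 1, 26)
        letters = chr(ord("A") + remainder) + letters
    return letters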
- err.save + # Configuration # Set up table needed along with REopt dictionaries to grab data @@ -1538,6 +1597,12 @@ def get_combined_format(label, row_color, is_formula=False): "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" }, + { + "label": "Technology Sizing", # This is your separator label + "key": "tech_separator", #MUST HAVE "separator" somewhere in the name + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, # Example 3: Calculated Value (Sum of Two Fields), this does not show up in formulas { "label": "Combined Renewable Size (kW)", @@ -1619,7 +1684,13 @@ def get_combined_format(label, row_color, is_formula=False): "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" }, { - "label": "PV Nameplate capacity (kW), purchased", + "label": "Technology Sizing", # This is your separator label + "key": "tech_separator", #MUST HAVE "separator" somewhere in the name + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "PV Nameplate capacity (kW), new", "key": "pv_size_purchased", "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") @@ -1637,7 +1708,7 @@ def get_combined_format(label, row_color, is_formula=False): "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw") }, { - "label": "Wind Nameplate capacity (kW), purchased", + "label": "Wind Nameplate capacity (kW), new", "key": "wind_size_purchased", "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") @@ -1649,7 +1720,7 @@ def get_combined_format(label, row_color, is_formula=False): "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw") }, { - "label": "Backup Generator Nameplate capacity (kW), purchased", + "label": "Backup Generator Nameplate capacity (kW), new", "key": "backup_generator_capacity_purchased", "bau_value": lambda df: safe_get(df, "outputs.Generator.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Generator.size_kw") @@ -1774,6 +1845,12 @@ def get_combined_format(label, row_color, is_formula=False): "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf") }, + { + "label": "Financials", # This is your separator label + "key": "fin_separator", #MUST HAVE "separator" somewhere in the name + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, { "label": "Electricity Energy Cost ($)", "key": "electricity_energy_cost", @@ -1994,6 +2071,7 @@ def get_combined_format(label, row_color, is_formula=False): f'{col}{headers["Grid Purchased Electricity (kWh)"] + 2}' ) }, + { "name": "Net Purchased Electricity Reduction (%)", "formula": lambda col, bau, headers: f'=({bau["grid_value"]}-{col}{headers["Grid Purchased Electricity (kWh)"] + 2})/{bau["grid_value"]}' @@ -2070,31 +2148,6 @@ def get_combined_format(label, row_color, is_formula=False): } ] - -# Portfolio configuration should not include "bau_value" in the keys -example_table_portfolio = [ - { - "label": "Site Name", - "key": "site", - "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") - }, 
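The calculation entries above follow a small convention: each "formula" lambda receives the scenario column letter, the bau_cells mapping, and the headers row lookup, and returns a literal Excel formula string. A minimal sketch of how one such entry expands; the row index and cell addresses below are illustrative assumptions, not values taken from a real workbook.

# Illustrative inputs only: in the real code these come from the DataFrame layout.
headers = {"Grid Purchased Electricity (kWh)": 25}   # row position of the metric within the table body
bau_cells = {"grid_value": "B27"}                     # BAU cell for that row (same +2 header offset)
calc = {
    "name": "Net Purchased Electricity Reduction (%)",
    "formula": lambda col, bau, headers: (
        f'=({bau["grid_value"]}-{col}{headers["Grid Purchased Electricity (kWh)"] + 2})/{bau["grid_value"]}'
    ),
}
print(calc["formula"]("C", bau_cells, headers))
# -> =(B27-C27)/B27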
- { - "label": "Site Address", - "key": "site_address", - "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") - }, - { - "label": "Site Location", - "key": "site_lat_long", - "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" - }, - { - "label": "Results URL", - "key": "url", - "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") - }, -] - ############################################################### ################ END Custom Table ############################# ############################################################### \ No newline at end of file From 0d4df01776138c825069471820ba73796a091dd8 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Mon, 9 Sep 2024 16:36:49 -0600 Subject: [PATCH 31/44] fixed automatic hidden columns --- reoptjl/views.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 12dc4f41d..69f6e065b 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1429,7 +1429,7 @@ def get_combined_format(label, row_color, is_formula=False): column_width = 25 columns_to_hide = set() - # Loop through BAU columns and check if all values are identical across all BAU columns + # Loop through BAU columns and check if all numerical values are identical across all BAU columns bau_columns = [i for i, header in enumerate(df.columns) if "BAU" in header] # Only proceed if there are BAU columns @@ -1440,11 +1440,15 @@ def get_combined_format(label, row_color, is_formula=False): for row_num in range(len(df)): row_values = df.iloc[row_num, bau_columns].values # Get all BAU values for this row - # Check if all BAU values in this row are the same - first_bau_value = row_values[0] - if not all(value == first_bau_value for value in row_values): - identical_bau_columns = False - break # If any row has different BAU values, stop checking further + # Filter only numerical values for comparison + numerical_values = [value for value in row_values if isinstance(value, (int, float))] + + # Check if all numerical BAU values in this row are the same + if numerical_values: # Proceed only if there are numerical values to compare + first_bau_value = numerical_values[0] + if not all(value == first_bau_value for value in numerical_values): + identical_bau_columns = False + break # If any row has different BAU values, stop checking further # If all BAU columns are identical across all rows, hide all but the first BAU column if identical_bau_columns: From 3ba11f6cd629450b81327dbc1f77840054e1ade3 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Mon, 9 Sep 2024 16:57:25 -0600 Subject: [PATCH 32/44] updated color scheme for table --- reoptjl/views.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 69f6e065b..c6aef9686 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1383,7 +1383,8 @@ def create_custom_table_excel(df, custom_table, calculations, output): worksheet = workbook.add_worksheet('Custom Table') # Scenario header formatting with colors - scenario_colors = ['#0B5E90','#00A4E4' ,'#F7A11A', '#D9531E', '#FFB300', '#D1D5D8', '#FF5050'] + scenario_colors = ['#0B5E90', '#00A4E4','#f46d43','#fdae61', '#66c2a5', '#d53e4f', '#3288bd'] + scenario_formats = [workbook.add_format({'bold': True, 'bg_color': 
color, 'border': 1, 'align': 'center', 'font_color': 'white', 'font_size': 12}) for color in scenario_colors] # Row alternating colors From bf147243f091d6b8e984b35150a4a23daa1a65f9 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 11 Sep 2024 10:22:47 -0600 Subject: [PATCH 33/44] updated configuration for webtool --- reoptjl/views.py | 1294 ++++++++++++++++++++++++++++++---------------- 1 file changed, 850 insertions(+), 444 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index c6aef9686..95382cb73 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1189,18 +1189,19 @@ def easiur_costs(request): ############################################################################################################################## ################################################# START Custom Table ######################################################### ############################################################################################################################## +def log_and_raise_error(task_name): + exc_type, exc_value, exc_traceback = sys.exc_info() + log.error(f"Error in {task_name}: {exc_value}, traceback: {tb.format_tb(exc_traceback)}") + err = UnexpectedError(exc_type, exc_value, exc_traceback, task=task_name) + err.save_to_db() + raise def access_raw_data(run_uuids, request): try: - # Fetch UserProvidedMeta data for the relevant run_uuids - usermeta = UserProvidedMeta.objects.filter(meta__run_uuid__in=run_uuids).only( - 'meta__run_uuid', 'description', 'address' - ) - - # Create a dictionary to map run_uuids to their associated meta data + usermeta = UserProvidedMeta.objects.filter(meta__run_uuid__in=run_uuids).only('meta__run_uuid', 'description', 'address') meta_data_dict = {um.meta.run_uuid: {"description": um.description, "address": um.address} for um in usermeta} - full_summary_dict = { + return { "scenarios": [ { "run_uuid": str(run_uuid), @@ -1210,8 +1211,8 @@ def access_raw_data(run_uuids, request): for run_uuid in run_uuids ] } - - return full_summary_dict + except Exception: + log_and_raise_error('access_raw_data') except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() @@ -1227,9 +1228,7 @@ def process_raw_data(request, run_uuid): return sum_vectors(json.loads(response.content)) return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} except Exception: - err = UnexpectedError(*sys.exc_info(), task='create_custom_comparison_table') - err.save_to_db() - raise + log_and_raise_error('process_raw_data') def generate_data_dict(config, df_gen, suffix=""): try: @@ -1239,144 +1238,97 @@ def generate_data_dict(config, df_gen, suffix=""): data_dict[entry["label"]].append(val) return data_dict except Exception: - log.error(f"Error in generate_data_dict: {tb.format_exc()}") - raise + log_and_raise_error('generate_data_dict') def get_REopt_data(data_f, scenario_name, config): try: scenario_name_str = str(scenario_name) - suffix = "_bau" if "BAU" in scenario_name_str.upper() else "" - df_gen = flatten_dict(data_f) - data_dict = generate_data_dict(config, df_gen, suffix) + data_dict = generate_data_dict(config, df_gen, "_bau" if "BAU" in scenario_name_str.upper() else "") data_dict["Scenario"] = [scenario_name_str] - col_order = ["Scenario"] + [entry["label"] for entry in config] - df_res = pd.DataFrame(data_dict)[col_order] - - return df_res + return pd.DataFrame(data_dict)[col_order] except Exception: - log.error(f"Error in get_REopt_data: {tb.format_exc()}") - raise + 
log_and_raise_error('get_REopt_data') def get_bau_values(scenarios, config): try: - # Dictionary to store BAU values for each scenario - bau_values_per_scenario = {scenario['run_uuid']: {entry["label"]: None for entry in config} for scenario in scenarios} + bau_values_per_scenario = { + scenario['run_uuid']: {entry["label"]: None for entry in config} for scenario in scenarios + } for scenario in scenarios: run_uuid = scenario['run_uuid'] df_gen = flatten_dict(scenario['full_data']) - for entry in config: bau_func = entry.get("bau_value") - if bau_func: # Only extract BAU values if `bau_value` exists - value = bau_func(df_gen) - bau_values_per_scenario[run_uuid][entry["label"]] = value + if bau_func: + bau_values_per_scenario[run_uuid][entry["label"]] = bau_func(df_gen) return bau_values_per_scenario - except Exception: - log.error(f"Error in get_bau_values: {tb.format_exc()}") - raise + log_and_raise_error('get_bau_values') def process_scenarios(scenarios, reopt_data_config): try: - # Fetch BAU values for each scenario bau_values_per_scenario = get_bau_values(scenarios, reopt_data_config) combined_df = pd.DataFrame() for idx, scenario in enumerate(scenarios): run_uuid = scenario['run_uuid'] - - # Process scenario data df_result = get_REopt_data(scenario['full_data'], run_uuid, reopt_data_config) - - # Ensure the run_uuid is assigned to the Scenario column df_result["Scenario"] = run_uuid - # Create BAU DataFrame for this scenario bau_data = {key: [value] for key, value in bau_values_per_scenario[run_uuid].items()} - bau_data["Scenario"] = [f"BAU {idx + 1}"] # Assign distinct BAU labels (BAU 1, BAU 2) + bau_data["Scenario"] = [f"BAU {idx + 1}"] df_bau = pd.DataFrame(bau_data) - # Append BAU row followed by scenario result row, preserve UUIDs - if combined_df.empty: - combined_df = pd.concat([df_bau, df_result], axis=0) - else: - combined_df = pd.concat([combined_df, df_bau, df_result], axis=0) + combined_df = pd.concat([combined_df, df_bau, df_result], axis=0) if not combined_df.empty else pd.concat([df_bau, df_result], axis=0) - # Reset index and remove any misalignment combined_df.reset_index(drop=True, inplace=True) - - # Clean up and format the data for final output - combined_df = clean_data_dict(combined_df.to_dict(orient="list")) - combined_df = pd.DataFrame(combined_df) - - # Ensure 'Scenario' is the first column, with others following - combined_df = combined_df[["Scenario"] + [col for col in combined_df.columns if col != "Scenario"]] - - return combined_df - + combined_df = pd.DataFrame(clean_data_dict(combined_df.to_dict(orient="list"))) + return combined_df[["Scenario"] + [col for col in combined_df.columns if col != "Scenario"]] except Exception: - log.error(f"Error in process_scenarios: {tb.format_exc()}") - raise + log_and_raise_error('process_scenarios') def create_custom_comparison_table(request): - if request.method == 'GET': - try: - # Set default table configuration name to 'single_site_custom_table' - table_config_name = request.GET.get('table_config_name', 'single_site_custom_table') - - run_uuids = [request.GET[key] for key in request.GET.keys() if key.startswith('run_uuid[')] - if not run_uuids: - return JsonResponse({"Error": "No run_uuids provided. Please include at least one run_uuid in the request."}, status=400) - - for r_uuid in run_uuids: - try: - uuid.UUID(r_uuid) - except ValueError: - return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}. 
Ensure that each run_uuid is a valid UUID."}, status=400) - - # Access raw data - scenarios = access_raw_data(run_uuids, request) - - # Process scenarios and generate the final DataFrame - target_custom_table = globals().get(table_config_name) - if not target_custom_table: - return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. Please provide a valid configuration name."}, status=400) - - final_df = process_scenarios(scenarios['scenarios'], target_custom_table) - - # Transpose the final DataFrame - final_df_transpose = final_df.transpose() - final_df_transpose.columns = final_df_transpose.iloc[0] - final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) - - # Create the Excel file - output = io.BytesIO() - create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) - output.seek(0) - - # Return the Excel file as a response - response = HttpResponse( - output, - content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' - ) - response['Content-Disposition'] = 'attachment; filename="comparison_table.xlsx"' - return response - - except ValueError as e: - return JsonResponse({"Error": f"A ValueError occurred: {str(e)} Please check the input values and try again."}, status=500) - - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') - err.save_to_db() - return JsonResponse({"Error": f"An unexpected error occurred while creating the comparison table. Please try again later or contact support if the issue persists. Error details: {str(e)}"}, status=500) - - return JsonResponse({"Error": "Method not allowed. This endpoint only supports GET requests."}, status=405) + if request.method != 'GET': + return JsonResponse({"Error": "Method not allowed. This endpoint only supports GET requests."}, status=405) + try: + table_config_name = request.GET.get('table_config_name', 'webtool_table') + run_uuids = [request.GET[key] for key in request.GET.keys() if key.startswith('run_uuid[')] + if not run_uuids: + return JsonResponse({"Error": "No run_uuids provided. Please include at least one run_uuid in the request."}, status=400) + + for r_uuid in run_uuids: + try: + uuid.UUID(r_uuid) + except ValueError: + return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}. Ensure that each run_uuid is a valid UUID."}, status=400) + + scenarios = access_raw_data(run_uuids, request) + target_custom_table = globals().get(table_config_name) + if not target_custom_table: + return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. 
Please provide a valid configuration name."}, status=400) + + final_df = process_scenarios(scenarios['scenarios'], target_custom_table) + final_df_transpose = final_df.transpose() + final_df_transpose.columns = final_df_transpose.iloc[0] + final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) + + output = io.BytesIO() + create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) + output.seek(0) + + response = HttpResponse(output, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename="comparison_table.xlsx"' + return response + except ValueError as e: + log_and_return_error(e, 'create_custom_comparison_table', 500) + except Exception: + log_and_raise_error('create_custom_comparison_table') + def create_custom_table_excel(df, custom_table, calculations, output): try: workbook = xlsxwriter.Workbook(output, {'in_memory': True}) @@ -1397,7 +1349,7 @@ def create_custom_table_excel(df, custom_table, calculations, output): # Formula formats using dark blue background formula_color = '#F8F8FF' - formula_format = workbook.add_format({'bg_color': '#0B5E90', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) + formula_format = workbook.add_format({'num_format': '#,##0.00','bg_color': '#0B5E90', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) formula_percent_format = workbook.add_format({'bg_color': '#0B5E90', 'num_format': '0%', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) formula_currency_format = workbook.add_format({'bg_color': '#0B5E90', 'num_format': '$#,##0.00', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) @@ -1405,7 +1357,7 @@ def create_custom_table_excel(df, custom_table, calculations, output): message_format = workbook.add_format({'bg_color': '#0B5E90', 'align': 'center','valign': 'center','border': 1,'font_color': formula_color, 'bold': True, 'font_size': 12, 'italic': True }) # Separator format for rows that act as visual dividers - separator_format = workbook.add_format({'bg_color': '#8CC63F', 'bold': True, 'border': 1,'font_size': 11}) + separator_format = workbook.add_format({'bg_color': '#5D6A71', 'bold': True, 'border': 1,'font_size': 11,'font_color': 'white'}) # Combine row color with cell format, excluding formulas def get_combined_format(label, row_color, is_formula=False): @@ -1415,7 +1367,7 @@ def get_combined_format(label, row_color, is_formula=False): elif '%' in label: return formula_percent_format return formula_format - base_data_format = {'bg_color': row_color, 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} + base_data_format = {'num_format': '#,##0.00','bg_color': row_color, 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} if label: if '$' in label: return workbook.add_format({**base_currency_format, 'bg_color': row_color}) @@ -1510,9 +1462,7 @@ def get_combined_format(label, row_color, is_formula=False): headers = {header: idx for idx, header in enumerate(df.index)} headers["Scenario"] = 0 - # Function to get the correct BAU reference column dynamically def get_bau_column(col): - # BAU column will always be right before the corresponding scenario column return col - 1 if col > 1 else 1 relevant_columns = [entry["label"] for entry 
in custom_table] @@ -1532,9 +1482,11 @@ def get_bau_column(col): bau_cells = { 'grid_value': f'{bau_col_letter}{headers["Grid Purchased Electricity (kWh)"] + 2}' if "Grid Purchased Electricity (kWh)" in headers else None, - 'net_cost_value': f'{bau_col_letter}{headers["Net Electricity Cost ($)"] + 2}' if "Net Electricity Cost ($)" in headers else None, + 'elec_cost_value': f'{bau_col_letter}{headers["Purchased Electricity Cost ($)"] + 2}' if "Purchased Electricity Cost ($)" in headers else None, 'ng_reduction_value': f'{bau_col_letter}{headers["Total Fuel (MMBtu)"] + 2}' if "Total Fuel (MMBtu)" in headers else None, - 'util_cost_value': f'{bau_col_letter}{headers["Total Utility Cost ($)"] + 2}' if "Total Utility Cost ($)" in headers else None, + # 'util_cost_value': f'{bau_col_letter}{headers["Total Utility Costs ($)"] + 2}' if "Total Utility Costs ($)" in headers else None, + 'total_elec_costs': f'{bau_col_letter}{headers["Total Electric Costs ($)"] + 2}' if "Total Electric Costs ($)" in headers else None, + 'total_fuel_costs': f'{bau_col_letter}{headers["Total Fuel Costs ($)"] + 2}' if "Total Fuel Costs ($)" in headers else None, 'co2_reduction_value': f'{bau_col_letter}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None, 'placeholder1_value': f'{bau_col_letter}{headers["Placeholder1"] + 2}' if "Placeholder1" in headers else None, } @@ -1574,302 +1526,766 @@ def get_bau_column(col): workbook.close() - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='create_custom_comparison_table') + except Exception: + log_and_raise_error('create_custom_table_excel') # Configuration # Set up table needed along with REopt dictionaries to grab data # Example Custom Table Configuration -example_table = [ - { - "label": "Site Name", - "key": "site", - "bau_value": lambda df: "", +# example_table = [ +# { +# "label": "Site Name", +# "key": "site", +# "bau_value": lambda df: "", +# "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") +# }, +# { +# "label": "Site Address", +# "key": "site_address", +# "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), +# "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") +# }, +# # Example 2: Concatenating Strings +# { +# "label": "Site Location", +# "key": "site_lat_long", +# "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", +# "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" +# }, +# { +# "label": "Technology Sizing", # This is your separator label +# "key": "tech_separator", #MUST HAVE "separator" somewhere in the name +# "bau_value": lambda df: "", +# "scenario_value": lambda df: "" +# }, +# # Example 3: Calculated Value (Sum of Two Fields), this does not show up in formulas +# { +# "label": "Combined Renewable Size (kW)", +# "key": "combined_renewable_size", +# "bau_value": lambda df: 0, +# "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") + safe_get(df, "outputs.Wind.size_kw") #NOTE: These calculations will not show up as in the excel calculations +# }, + +# # Example 4: Hardcoded Values +# { +# "label": "Hardcoded Values (kWh)", +# "key": "hardcoded_value", +# "bau_value": lambda df: 500, # BAU scenario +# "scenario_value": lambda df: 1000 # other scenarios +# }, + +# # Example 5: Conditional Formatting +# { +# 
"label": "PV Size Status", +# "key": "pv_size_status", +# "bau_value": lambda df: 0, +# "scenario_value": lambda df: "Above Threshold" if safe_get(df, "outputs.PV.size_kw") > 2500 else "Below Threshold" +# }, +# #Example 6 and 7: First define any data that might need to be referenced, Here I've defined two placeholders +# # Define Placeholder1 +# { +# "label": "Placeholder1", +# "key": "placeholder1", +# "bau_value": lambda df: 100, # BAU value +# "scenario_value": lambda df: 200 # Scenario value +# }, +# # Define Placeholder2 +# { +# "label": "Placeholder2", +# "key": "placeholder2", +# "bau_value": lambda df: 50, # BAU value +# "scenario_value": lambda df: 100 # Scenario value +# }, +# # Example 6: Calculation Without Reference to BAU +# { +# "label": "Placeholder Calculation Without BAU Reference", +# "key": "placeholder_calculation_without_bau", +# "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel +# "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel +# }, +# # Example 7: Calculation With Reference to BAU +# { +# "label": "Placeholder Calculation With BAU Reference", +# "key": "placeholder_calculation_with_bau", +# "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel +# "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel +# }, +# { +# "label": "Results URL", +# "key": "url", +# "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid"), +# "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") +# } +# ] + +# # TASC/Single Site Configuration +# single_site_custom_table = [ +# { +# "label": "Site Name", +# "key": "site", +# "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), +# "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") +# }, +# { +# "label": "Site Address", +# "key": "site_address", +# "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), +# "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") +# }, +# { +# "label": "Site Location", +# "key": "site_lat_long", +# "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", +# "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" +# }, +# { +# "label": "Technology Sizing", # This is your separator label +# "key": "tech_separator", #MUST HAVE "separator" somewhere in the name to be identified correctly as a section separator +# "bau_value": lambda df: "", +# "scenario_value": lambda df: "" +# }, +# { +# "label": "PV Nameplate capacity (kW), new", +# "key": "pv_size_purchased", +# "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") +# }, +# { +# "label": "PV Nameplate capacity (kW), existing", +# "key": "pv_size_existing", +# "bau_value": lambda df: safe_get(df, "outputs.PV.existing_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.PV.existing_kw") +# }, +# { +# "label": "PV Serving Load (kWh)", +# "key": "pv_serving_load", +# "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw") +# }, +# { +# "label": "Wind Nameplate capacity (kW), new", +# "key": "wind_size_purchased", +# 
"bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") +# }, +# { +# "label": "Wind Serving Load (kWh)", +# "key": "wind_serving_load", +# "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw") +# }, +# { +# "label": "Backup Generator Nameplate capacity (kW), new", +# "key": "backup_generator_capacity_purchased", +# "bau_value": lambda df: safe_get(df, "outputs.Generator.size_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Generator.size_kw") +# }, +# { +# "label": "Backup Generator Nameplate capacity (kW), existing", +# "key": "backup_generator_capacity_existing", +# "bau_value": lambda df: safe_get(df, "outputs.Generator.existing_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Generator.existing_kw") +# }, +# { +# "label": "Backup Generator Serving Load (kWh)", +# "key": "backup_generator_serving_load", +# "bau_value": lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw") +# }, +# { +# "label": "Battery power (kW)", +# "key": "battery_power", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw") +# }, +# { +# "label": "Battery capacity (kWh)", +# "key": "battery_capacity", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh") +# }, +# { +# "label": "Battery Serving Load (kWh)", +# "key": "battery_serving_load", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw") +# }, +# { +# "label": "CHP capacity (kW)", +# "key": "chp_capacity", +# "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") +# }, +# { +# "label": "CHP Serving Load (kWh)", +# "key": "chp_serving_load", +# "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw") +# }, +# { +# "label": "Absorption chiller capacity (tons)", +# "key": "absorption_chiller_capacity", +# "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton") +# }, +# { +# "label": "Absorption Chiller Serving Load (ton)", +# "key": "absorption_chiller_serving_load", +# "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton") +# }, +# { +# "label": "Chilled water TES capacity (gallons)", +# "key": "chilled_water_tes_capacity", +# "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal") +# }, +# { +# "label": "Chilled Water TES Serving Load (ton)", +# "key": "chilled_water_tes_serving_load", +# "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.storage_to_load_series_ton_bau"), +# "scenario_value": lambda df: safe_get(df, 
"outputs.ColdThermalStorage.storage_to_load_series_ton") +# }, +# { +# "label": "Hot water TES capacity (gallons)", +# "key": "hot_water_tes_capacity", +# "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal") +# }, +# { +# "label": "Hot Water TES Serving Load (MMBtu)", +# "key": "hot_water_tes_serving_load", +# "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour") +# }, +# { +# "label": "Steam turbine capacity (kW)", +# "key": "steam_turbine_capacity", +# "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw") +# }, +# { +# "label": "Steam Turbine Serving Load (kWh)", +# "key": "steam_turbine_serving_load", +# "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw") +# }, +# { +# "label": "GHP heat pump capacity (ton)", +# "key": "ghp_heat_pump_capacity", +# "bau_value": lambda df: safe_get(df, "outputs.GHP.size_ton_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_ton") +# }, +# { +# "label": "GHP ground heat exchanger size (ft)", +# "key": "ghp_ground_heat_exchanger_size", +# "bau_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft") +# }, +# { +# "label": "Grid Purchased Electricity (kWh)", +# "key": "grid_purchased_electricity", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") +# }, +# { +# "label": "Total Site Electricity Use (kWh)", +# "key": "total_site_electricity_use", +# "bau_value": lambda df: 0, +# "scenario_value": lambda df: 0 +# }, +# { +# "label": "Net Purchased Electricity Reduction (%)", +# "key": "net_purchased_electricity_reduction", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf") +# }, +# { +# "label": "Financials", # This is your separator label +# "key": "fin_separator", #MUST HAVE "separator" somewhere in the name +# "bau_value": lambda df: "", +# "scenario_value": lambda df: "" +# }, +# { +# "label": "Electricity Energy Cost ($)", +# "key": "electricity_energy_cost", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax") +# }, +# { +# "label": "Electricity Demand Cost ($)", +# "key": "electricity_demand_cost", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax") +# }, +# { +# "label": "Utility Fixed Cost ($)", +# "key": "utility_fixed_cost", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax") +# }, +# { +# 
"label": "Purchased Electricity Cost ($)", +# "key": "purchased_electricity_cost", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax") +# }, +# { +# "label": "Electricity Export Benefit ($)", +# "key": "electricity_export_benefit", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax") +# }, +# { +# "label": "Net Electricity Cost ($)", +# "key": "net_electricity_cost", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax") +# }, +# { +# "label": "Electricity Cost Savings ($/year)", +# "key": "electricity_cost_savings", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau") +# }, +# { +# "label": "Boiler Fuel (MMBtu)", +# "key": "boiler_fuel", +# "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu") +# }, +# { +# "label": "CHP Fuel (MMBtu)", +# "key": "chp_fuel", +# "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu") +# }, +# { +# "label": "Total Fuel (MMBtu)", +# "key": "total_fuel", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh") +# }, +# { +# "label": "Natural Gas Reduction (%)", +# "key": "natural_gas_reduction", +# "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau") +# }, +# { +# "label": "Boiler Thermal Production (MMBtu)", +# "key": "boiler_thermal_production", +# "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") +# }, +# { +# "label": "CHP Thermal Production (MMBtu)", +# "key": "chp_thermal_production", +# "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") +# }, +# { +# "label": "Total Thermal Production (MMBtu)", +# "key": "total_thermal_production", +# "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") +# }, +# { +# "label": "Heating System Fuel Cost ($)", +# "key": "heating_system_fuel_cost", +# "bau_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars") +# }, +# { +# "label": "CHP Fuel Cost ($)", +# "key": "chp_fuel_cost", +# "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), +# 
"scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") +# }, +# { +# "label": "Total Fuel (NG) Cost ($)", +# "key": "total_fuel_ng_cost", +# "bau_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars") +# }, +# { +# "label": "Total Utility Cost ($)", +# "key": "total_utility_cost", +# "bau_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars") +# }, +# { +# "label": "O&M Cost Increase ($)", +# "key": "om_cost_increase", +# "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") +# }, +# { +# "label": "Payback Period (years)", +# "key": "payback_period", +# "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") +# }, +# { +# "label": "Gross Capital Cost ($)", +# "key": "gross_capital_cost", +# "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") +# }, +# { +# "label": "Federal Tax Incentive (30%)", +# "key": "federal_tax_incentive", +# "bau_value": lambda df: 0.3, +# "scenario_value": lambda df: 0.3 +# }, +# { +# "label": "Additional Grant ($)", +# "key": "additional_grant", +# "bau_value": lambda df: 0, +# "scenario_value": lambda df: 0 +# }, +# { +# "label": "Incentive Value ($)", +# "key": "incentive_value", +# "bau_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars") +# }, +# { +# "label": "Net Capital Cost ($)", +# "key": "net_capital_cost", +# "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") +# }, +# { +# "label": "Annual Cost Savings ($)", +# "key": "annual_cost_savings", +# "bau_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars") +# }, +# { +# "label": "Simple Payback (years)", +# "key": "simple_payback", +# "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") +# }, +# { +# "label": "CO2 Emissions (tonnes)", +# "key": "co2_emissions", +# "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") +# }, +# { +# "label": "CO2 Reduction (tonnes)", +# "key": "co2_reduction", +# "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), +# "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") +# }, +# { +# "label": "CO2 (%) savings", +# "key": "co2_savings_percentage", +# "bau_value": lambda df: 0, +# "scenario_value": lambda df: 0 +# }, +# { +# "label": "NPV ($)", +# "key": "npv", +# "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), +# 
"scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") +# }, +# { +# "label": "PV Federal Tax Incentive (%)", +# "key": "pv_federal_tax_incentive", +# "bau_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction_bau"), +# "scenario_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction") +# }, +# { +# "label": "Storage Federal Tax Incentive (%)", +# "key": "storage_federal_tax_incentive", +# "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), +# "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") +# }, +# { +# "label": "Results URL", +# "key": "url", +# "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid"), +# "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") +# }, +# ] + +webtool_table = [ + { + "label": "Evaluation Name", + "key": "evaluation_name", + "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") }, { - "label": "Site Address", - "key": "site_address", - "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), - "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") + "label": "BAU or Optimal Case?", + "key": "bau_or_optimal_case", + "bau_value": lambda df: "BAU", + "scenario_value": lambda df: "Optimal" }, - # Example 2: Concatenating Strings { "label": "Site Location", - "key": "site_lat_long", + "key": "site_location", "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" }, - { - "label": "Technology Sizing", # This is your separator label - "key": "tech_separator", #MUST HAVE "separator" somewhere in the name - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - # Example 3: Calculated Value (Sum of Two Fields), this does not show up in formulas - { - "label": "Combined Renewable Size (kW)", - "key": "combined_renewable_size", - "bau_value": lambda df: 0, - "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") + safe_get(df, "outputs.Wind.size_kw") #NOTE: These calculations will not show up as in the excel calculations - }, - - # Example 4: Hardcoded Values - { - "label": "Hardcoded Values (kWh)", - "key": "hardcoded_value", - "bau_value": lambda df: 500, # BAU scenario - "scenario_value": lambda df: 1000 # other scenarios - }, - - # Example 5: Conditional Formatting - { - "label": "PV Size Status", - "key": "pv_size_status", - "bau_value": lambda df: 0, - "scenario_value": lambda df: "Above Threshold" if safe_get(df, "outputs.PV.size_kw") > 2500 else "Below Threshold" - }, - #Example 6 and 7: First define any data that might need to be referenced, Here I've defined two placeholders - # Define Placeholder1 - { - "label": "Placeholder1", - "key": "placeholder1", - "bau_value": lambda df: 100, # BAU value - "scenario_value": lambda df: 200 # Scenario value - }, - # Define Placeholder2 - { - "label": "Placeholder2", - "key": "placeholder2", - "bau_value": lambda df: 50, # BAU value - "scenario_value": lambda df: 100 # Scenario value - }, - # Example 6: Calculation Without Reference to BAU - { - "label": "Placeholder Calculation Without BAU Reference", - "key": 
"placeholder_calculation_without_bau", - "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel - "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel - }, - # Example 7: Calculation With Reference to BAU - { - "label": "Placeholder Calculation With BAU Reference", - "key": "placeholder_calculation_with_bau", - "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel - "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel - }, { "label": "Results URL", "key": "url", - "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid"), - "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") - } - ] - -# TASC/Single Site Configuration -single_site_custom_table = [ - { - "label": "Site Name", - "key": "site", - "bau_value": lambda df: "", - "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") + "bau_value": lambda df: f'=HYPERLINK("https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/{safe_get(df, "webtool_uuid")}", "Results Link")', + "scenario_value": lambda df: f'=HYPERLINK("https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/{safe_get(df, "webtool_uuid")}", "Results Link")' }, { - "label": "Site Address", - "key": "site_address", - "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), - "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") - }, - { - "label": "Site Location", - "key": "site_lat_long", - "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", - "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" - }, - { - "label": "Technology Sizing", # This is your separator label - "key": "tech_separator", #MUST HAVE "separator" somewhere in the name + "label": "System Capacities", + "key": "system_capacities_separator", "bau_value": lambda df: "", "scenario_value": lambda df: "" }, { - "label": "PV Nameplate capacity (kW), new", - "key": "pv_size_purchased", + "label": "PV capacity, new (kW)", + "key": "pv_capacity_new", "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") }, { - "label": "PV Nameplate capacity (kW), existing", - "key": "pv_size_existing", + "label": "PV capacity, existing (kW)", + "key": "pv_size_purchased", "bau_value": lambda df: safe_get(df, "outputs.PV.existing_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.PV.existing_kw") }, { - "label": "PV Serving Load (kWh)", - "key": "pv_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw") - }, - { - "label": "Wind Nameplate capacity (kW), new", - "key": "wind_size_purchased", - "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), + "label": "Wind Capacity (kW)", + "key": "wind_capacity", + "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw"), "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") }, { - "label": "Wind Serving Load (kWh)", - "key": "wind_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw") - }, - 
{ - "label": "Backup Generator Nameplate capacity (kW), new", - "key": "backup_generator_capacity_purchased", + "label": "Backup Generator Capacity, New (kW)", + "key": "backup_generator_new", "bau_value": lambda df: safe_get(df, "outputs.Generator.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Generator.size_kw") }, { - "label": "Backup Generator Nameplate capacity (kW), existing", - "key": "backup_generator_capacity_existing", + "label": "Backup Generator Capacity, Existing (kW)", + "key": "backup_generator_existing", "bau_value": lambda df: safe_get(df, "outputs.Generator.existing_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Generator.existing_kw") }, { - "label": "Backup Generator Serving Load (kWh)", - "key": "backup_generator_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw") + "label": "Generator Annual Fuel Consumption (gallons)", + "key": "backup_generator_fuel_consumption", + "bau_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal") + }, + { + "label": "Generator Fuel Cost ($)", + "key": "backup_generator_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") + }, + { + "label": "Generator Lifecycle Fuel Cost ($)", + "key": "lifecycle_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.Generator.lifecycle_fuel_cost_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.lifecycle_fuel_cost_after_tax") }, { - "label": "Battery power (kW)", - "key": "battery_power", + "label": "Battery Power Capacity (kW)", + "key": "battery_power_capacity", "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw") }, { - "label": "Battery capacity (kWh)", - "key": "battery_capacity", + "label": "Battery Energy Capacity (kWh)", + "key": "battery_energy_capacity", "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh") }, { - "label": "Battery Serving Load (kWh)", - "key": "battery_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw") - }, - { - "label": "CHP capacity (kW)", + "label": "CHP Capacity (kW)", "key": "chp_capacity", "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") }, { - "label": "CHP Serving Load (kWh)", - "key": "chp_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw") - }, - { - "label": "Absorption chiller capacity (tons)", + "label": "Absorption Chiller Capacity (tons)", "key": "absorption_chiller_capacity", "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton_bau"), "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton") }, { - "label": "Absorption Chiller Serving Load (ton)", - "key": 
"absorption_chiller_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton") - }, - { - "label": "Chilled water TES capacity (gallons)", + "label": "Chilled Water TES Capacity (gallons)", "key": "chilled_water_tes_capacity", "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal") }, { - "label": "Chilled Water TES Serving Load (ton)", - "key": "chilled_water_tes_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.storage_to_load_series_ton_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.storage_to_load_series_ton") - }, - { - "label": "Hot water TES capacity (gallons)", + "label": "Hot Water TES Capacity (gallons)", "key": "hot_water_tes_capacity", "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal_bau"), "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal") }, { - "label": "Hot Water TES Serving Load (MMBtu)", - "key": "hot_water_tes_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour") - }, - { - "label": "Steam turbine capacity (kW)", + "label": "Steam Turbine Capacity (kW)", "key": "steam_turbine_capacity", "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw") }, { - "label": "Steam Turbine Serving Load (kWh)", - "key": "steam_turbine_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw") - }, - { - "label": "GHP heat pump capacity (ton)", + "label": "GHP Heat Pump Capacity (ton)", "key": "ghp_heat_pump_capacity", "bau_value": lambda df: safe_get(df, "outputs.GHP.size_ton_bau"), "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_ton") }, { - "label": "GHP ground heat exchanger size (ft)", + "label": "GHP Ground Heat Exchanger Size (ft)", "key": "ghp_ground_heat_exchanger_size", "bau_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft_bau"), "scenario_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft") }, { - "label": "Grid Purchased Electricity (kWh)", - "key": "grid_purchased_electricity", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") + "label": "Summary Financial Metrics", + "key": "summary_financial_metrics_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" }, { - "label": "Total Site Electricity Use (kWh)", - "key": "total_site_electricity_use", - "bau_value": lambda df: 0, - "scenario_value": lambda df: 0 + "label": "Gross Capital Costs, Before Incentives ($)", + "key": "gross_capital_costs_before_incentives", + "bau_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs") + }, + { + "label": "Present Value of Incentives ($)", + "key": "present_value_of_incentives", + "bau_value": 
lambda df: safe_get(df, "outputs.Financial.production_incentive_max_benefit_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.production_incentive_max_benefit") + }, + { + "label": "Net Capital Cost ($)", + "key": "net_capital_cost", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") + }, + { + "label": "Year 1 O&M Cost, Before Tax ($)", + "key": "year_1_om_cost_before_tax", + "bau_value": lambda df: safe_get(df, "outputs.Financial.year_one_om_costs_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.year_one_om_costs_before_tax") + }, + { + "label": "Total Life Cycle Costs ($)", + "key": "total_life_cycle_costs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lcc_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lcc") + }, + { + "label": "Net Present Value ($)", + "key": "npv", + "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") }, { - "label": "Net Purchased Electricity Reduction (%)", - "key": "net_purchased_electricity_reduction", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf") + "label": "Payback Period (years)", + "key": "payback_period", + "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") }, { - "label": "Financials", # This is your separator label - "key": "fin_separator", #MUST HAVE "separator" somewhere in the name + "label": "Simple Payback (years)", + "key": "simple_payback_period", + "bau_value": lambda df: safe_get(df, ""), + "scenario_value": lambda df: safe_get(df, "") + }, + { + "label": "Internal Rate of Return (%)", + "key": "internal_rate_of_return", + "bau_value": lambda df: safe_get(df, "outputs.Financial.internal_rate_of_return_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.internal_rate_of_return") + }, + { + "label": "Life Cycle Cost Breakdown", + "key": "lifecycle_cost_breakdown_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Technology Capital Costs + Replacements, After Incentives ($)", + "key": "technology_capital_costs_after_incentives", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_generation_tech_capital_costs_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_generation_tech_capital_costs") + }, + { + "label": "O&M Costs ($)", + "key": "om_costs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") + }, + { + "label": "Total Electric Costs ($)", + "key": "total_electric_utility_costs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax") + }, + { + "label": "Total Fuel Costs ($)", + "key": "total_fuel_costs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, 
"outputs.Financial.lifecycle_fuel_costs_after_tax") + }, + { + "label": "Total Utility Costs ($)", + "key": "total_fuel_costs", "bau_value": lambda df: "", "scenario_value": lambda df: "" }, { - "label": "Electricity Energy Cost ($)", + "label": "Total Emissions Costs ($)", + "key": "total_emissions_costs", + "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_cost_health_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_cost_health") + }, + { + "label": "LCC ($)", + "key": "lcc", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lcc_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lcc") + }, + { + "label": "NPV as a % of BAU LCC (%)", + "key": "npv_bau_percent", + "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_as_bau_lcc_percent_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv_as_bau_lcc_percent") + }, + { + "label": "Year 1 Electric Bill", + "key": "year_1_electric_bill_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Electric Grid Purchases (kWh)", + "key": "electric_grid_purchases", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") + }, + { + "label": "Energy Charges ($)", "key": "electricity_energy_cost", "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax") }, { - "label": "Electricity Demand Cost ($)", + "label": "Demand Charges ($)", "key": "electricity_demand_cost", "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax") }, { - "label": "Utility Fixed Cost ($)", + "label": "Fixed Charges ($)", "key": "utility_fixed_cost", "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax") @@ -1881,166 +2297,148 @@ def get_bau_column(col): "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax") }, { - "label": "Electricity Export Benefit ($)", - "key": "electricity_export_benefit", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax") + "label": "Annual Cost Savings ($)", + "key": "annual_cost_savings", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" }, { - "label": "Net Electricity Cost ($)", - "key": "net_electricity_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax") + "label": "Year 1 Fuel Costs & Consumption", + "key": "year_1_fuel_costs_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" }, { - "label": "Electricity Cost Savings ($/year)", - "key": "electricity_cost_savings", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau_bau"), - "scenario_value": lambda df: safe_get(df, 
"outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau") + "label": "Boiler Fuel Consumption (mmbtu)", + "key": "boiler_fuel_consumption", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_fuel_consumption_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_fuel_consumption_mmbtu") }, { - "label": "Boiler Fuel (MMBtu)", - "key": "boiler_fuel", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu") + "label": "Boiler Fuel Costs ($)", + "key": "boiler_fuel_costs", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax") }, { - "label": "CHP Fuel (MMBtu)", - "key": "chp_fuel", + "label": "CHP Fuel Consumption (mmbtu)", + "key": "chp_fuel_consumption", "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu_bau"), "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu") }, { - "label": "Total Fuel (MMBtu)", - "key": "total_fuel", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh") - }, - { - "label": "Natural Gas Reduction (%)", - "key": "natural_gas_reduction", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau") - }, - { - "label": "Boiler Thermal Production (MMBtu)", - "key": "boiler_thermal_production", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") - }, - { - "label": "CHP Thermal Production (MMBtu)", - "key": "chp_thermal_production", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") + "label": "CHP Fuel Cost ($)", + "key": "chp_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") }, { - "label": "Total Thermal Production (MMBtu)", - "key": "total_thermal_production", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") + "label": "Backup Generator Fuel Consumption (gallons)", + "key": "backup_generator_fuel_consumption", + "bau_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal") }, { - "label": "Heating System Fuel Cost ($)", - "key": "heating_system_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars") + "label": "Backup Generator Fuel Cost ($)", + "key": "backup_generator_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: 
safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") }, { - "label": "CHP Fuel Cost ($)", - "key": "chp_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") + "label": "Renewable Energy & Emissions", + "key": "renewable_energy_emissions_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" }, { - "label": "Total Fuel (NG) Cost ($)", - "key": "total_fuel_ng_cost", - "bau_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars") + "label": "Annual % Renewable Electricity (%)", + "key": "annual_renewable_electricity", + "bau_value": lambda df: safe_get(df, "outputs.Site.renewable_electricity_fraction_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.renewable_electricity_fraction") }, { - "label": "Total Utility Cost ($)", - "key": "total_utility_cost", - "bau_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars") + "label": "Year 1 CO2 Emissions (tonnes)", + "key": "year_1_co2_emissions", + "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") }, { - "label": "O&M Cost Increase ($)", - "key": "om_cost_increase", - "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") + "label": "CO2 Emissions (tonnes)", + "key": "co2_emissions", + "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") }, { - "label": "Payback Period (years)", - "key": "payback_period", - "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") + "label": "CO2 (%) savings", + "key": "co2_savings_percentage", + "bau_value": lambda df: 0, + "scenario_value": lambda df: 0 }, { - "label": "Gross Capital Cost ($)", - "key": "gross_capital_cost", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") + "label": "Annual Energy Production & Throughput", + "key": "energy_production_throughput_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" }, { - "label": "Federal Tax Incentive (30%)", - "key": "federal_tax_incentive", - "bau_value": lambda df: 0.3, - "scenario_value": lambda df: 0.3 + "label": "PV (kWh)", + "key": "pv_kwh", + "bau_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh") }, { - "label": "Additional Grant ($)", - "key": "additional_grant", - "bau_value": lambda df: 0, - "scenario_value": lambda df: 0 + "label": "Wind (kWh)", + "key": "wind_kwh", + "bau_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh") }, { - "label": "Incentive Value ($)", - 
"key": "incentive_value", - "bau_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars") + "label": "CHP (kWh)", + "key": "chp_kwh", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh") }, { - "label": "Net Capital Cost ($)", - "key": "net_capital_cost", - "bau_value": lambda df: safe_get(df, "outputs.Financial.net_capital_cost_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.net_capital_cost_us_dollars") + "label": "CHP (MMBtu)", + "key": "chp_mmbtu", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") }, { - "label": "Annual Cost Savings ($)", - "key": "annual_cost_savings", - "bau_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars") + "label": "Boiler (MMBtu)", + "key": "boiler_mmbtu", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") }, { - "label": "Simple Payback (years)", - "key": "simple_payback", - "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") + "label": "Battery (kWh)", + "key": "battery_kwh", + "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw") }, { - "label": "CO2 Emissions (tonnes)", - "key": "co2_emissions", - "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") + "label": "HW-TES (MMBtu)", + "key": "hw_tes_mmbtu", + "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.annual_energy_produced_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.annual_energy_produced_mmbtu") }, { - "label": "CO2 Reduction (tonnes)", - "key": "co2_reduction", - "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") + "label": "CW-TES (MMBtu)", + "key": "cw_tes_mmbtu", + "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.annual_energy_produced_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.annual_energy_produced_mmbtu") }, { - "label": "CO2 (%) savings", - "key": "co2_savings_percentage", - "bau_value": lambda df: 0, - "scenario_value": lambda df: 0 + "label": "Breakdown of Incentives", + "key": "breakdown_of_incentives_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" }, { - "label": "NPV ($)", - "key": "npv", - "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") + "label": "Federal Tax Incentive (30%)", + "key": "federal_tax_incentive_30", + "bau_value": lambda df: 0.3, + "scenario_value": lambda 
df: 0.3 }, { "label": "PV Federal Tax Incentive (%)", @@ -2054,14 +2452,22 @@ def get_bau_column(col): "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") }, + # { + # "label": "Incentive Value ($)", + # "key": "incentive_value", + # "bau_value": lambda df: safe_get(df, "outputs.Financial.breakeven_cost_of_emissions_reduction_per_tonne_CO2_bau"), + # "scenario_value": lambda df: safe_get(df, "outputs.Financial.breakeven_cost_of_emissions_reduction_per_tonne_CO2") + # }, { - "label": "Results URL", - "key": "url", - "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid"), - "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") - }, + "label": "Additional Grant ($)", + "key": "iac_grant", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_production_incentive_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_production_incentive_after_tax") + } ] + + # Configuration for calculations calculations = [ { @@ -2083,7 +2489,7 @@ def get_bau_column(col): }, { "name": "Purchased Electricity Cost ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Electricity Energy Cost ($)"] + 2}+{col}{headers["Electricity Demand Cost ($)"] + 2}+{col}{headers["Utility Fixed Cost ($)"] + 2}' + "formula": lambda col, bau, headers: f'={col}{headers["Energy Charges ($)"] + 2}+{col}{headers["Demand Charges ($)"] + 2}+{col}{headers["Fixed Charges ($)"] + 2}' }, { "name": "Net Electricity Cost ($)", @@ -2091,7 +2497,7 @@ def get_bau_column(col): }, { "name": "Electricity Cost Savings ($/year)", - "formula": lambda col, bau, headers: f'={bau["net_cost_value"]}-{col}{headers["Net Electricity Cost ($)"] + 2}' + "formula": lambda col, bau, headers: f'={bau["elec_cost_value"]}-{col}{headers["Purchased Electricity Cost ($)"] + 2}' }, { "name": "Total Fuel (MMBtu)", @@ -2105,34 +2511,34 @@ def get_bau_column(col): "name": "Total Thermal Production (MMBtu)", "formula": lambda col, bau, headers: f'={col}{headers["Boiler Thermal Production (MMBtu)"] + 2}+{col}{headers["CHP Thermal Production (MMBtu)"] + 2}' }, + # { + # "name": "Total Fuel Costs ($)", + # "formula": lambda col, bau, headers: f'={col}{headers["Heating System Fuel Cost ($)"] + 2}+{col}{headers["CHP Fuel Cost ($)"] + 2}' + # }, { - "name": "Total Fuel (NG) Cost ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Heating System Fuel Cost ($)"] + 2}+{col}{headers["CHP Fuel Cost ($)"] + 2}' - }, - { - "name": "Total Utility Cost ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Net Electricity Cost ($)"] + 2}+{col}{headers["Total Fuel (NG) Cost ($)"] + 2}' - }, - { - "name": "Incentive Value ($)", - "formula": lambda col, bau, headers: f'=({col}{headers["Federal Tax Incentive (30%)"] + 2}*{col}{headers["Gross Capital Cost ($)"] + 2})+{col}{headers["Additional Grant ($)"] + 2}' - }, - { - "name": "Net Capital Cost ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Gross Capital Cost ($)"] + 2}-{col}{headers["Incentive Value ($)"] + 2}' + "name": "Total Utility Costs ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Total Electric Costs ($)"] + 2}+{col}{headers["Total Fuel Costs ($)"] + 2}' }, + # { + # "name": "Incentive Value ($)", + # "formula": lambda col, bau, headers: 
f'=({col}{headers["Federal Tax Incentive (30%)"] + 2}*{col}{headers["Gross Capital Cost ($)"] + 2})+{col}{headers["Additional Grant ($)"] + 2}' + # }, + # { + # "name": "Net Capital Cost ($)", + # "formula": lambda col, bau, headers: f'={col}{headers["Gross Capital Cost ($)"] + 2}-{col}{headers["Incentive Value ($)"] + 2}' + # }, { "name": "Annual Cost Savings ($)", - "formula": lambda col, bau, headers: f'={bau["util_cost_value"]}-{col}{headers["Total Utility Cost ($)"] + 2}+{col}{headers["O&M Cost Increase ($)"] + 2}' + "formula": lambda col, bau, headers: f'={bau["elec_cost_value"]}+-{col}{headers["Purchased Electricity Cost ($)"] + 2}' }, { "name": "Simple Payback (years)", "formula": lambda col, bau, headers: f'={col}{headers["Net Capital Cost ($)"] + 2}/{col}{headers["Annual Cost Savings ($)"] + 2}' }, - { - "name": "CO2 Reduction (tonnes)", - "formula": lambda col, bau, headers: f'={bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2}' - }, + # { + # "name": "CO2 Reduction (tonnes)", + # "formula": lambda col, bau, headers: f'={bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2}' + # }, { "name": "CO2 (%) savings", "formula": lambda col, bau, headers: f'=({bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2})/{bau["co2_reduction_value"]}' From e1b6c407e7c05e50ba5fda86a89819fc3919b986 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Tue, 24 Sep 2024 09:43:04 -0600 Subject: [PATCH 34/44] updated configuration --- julia_src/Dockerfile | 4 +- reoptjl/custom_table_config.py | 1024 ++++++++++++++++++++++++++++ reoptjl/custom_table_helpers.py | 75 +- reoptjl/views.py | 1130 ++----------------------------- 4 files changed, 1086 insertions(+), 1147 deletions(-) create mode 100644 reoptjl/custom_table_config.py diff --git a/julia_src/Dockerfile b/julia_src/Dockerfile index b0573d7af..7c25ad573 100644 --- a/julia_src/Dockerfile +++ b/julia_src/Dockerfile @@ -5,8 +5,10 @@ ARG NREL_ROOT_CERT_URL_ROOT="" RUN set -x && if [ -n "$NREL_ROOT_CERT_URL_ROOT" ]; then curl -fsSLk -o /usr/local/share/ca-certificates/nrel_root.crt "${NREL_ROOT_CERT_URL_ROOT}/nrel_root.pem" && curl -fsSLk -o /usr/local/share/ca-certificates/nrel_xca1.crt "${NREL_ROOT_CERT_URL_ROOT}/nrel_xca1.pem" && update-ca-certificates; fi ENV REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt +ENV JULIA_SSL_NO_VERIFY_HOSTS="github.com" + # Install Julia packages -ENV JULIA_NUM_THREADS=2 +# ENV JULIA_NUM_THREADS=2 ENV XPRESS_JL_SKIP_LIB_CHECK=True WORKDIR /opt/julia_src diff --git a/reoptjl/custom_table_config.py b/reoptjl/custom_table_config.py new file mode 100644 index 000000000..22ccd82f5 --- /dev/null +++ b/reoptjl/custom_table_config.py @@ -0,0 +1,1024 @@ +# custom_table_config.py +from reoptjl.custom_table_helpers import safe_get + +""" +1. Naming Convention for Tables: +------------------------------- +To prevent namespace pollution and keep table configurations well-organized, use the following naming convention when adding new tables: + +Structure: + custom_table_ + +- `custom_table_`: A prefix to indicate that this variable represents a custom table configuration. +- ``: A descriptive word representing the feature, tool, or module the table is associated with the table configuration. + +Examples: +- custom_table_webtool: A table configuration for the webtool feature. +- custom_table_simple: A table configuration for the simple results. +- custom_table_iedo: A table configuration for the IEDO. 
+ +Guidelines: +- Use lowercase letters and underscores to separate words. +- Avoid numbering unless necessary to differentiate versions. +- Ensure each table configuration is descriptive enough to understand its context or feature. +---------------------------------- +""" +# Example table configuration +custom_table_example = [ + { + "label": "Site Name", + "key": "site", + "bau_value": lambda df: "", + "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") + }, + { + "label": "Site Address", + "key": "site_address", + "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), + "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") + }, + # Example 2: Concatenating Strings + { + "label": "Site Location", + "key": "site_lat_long", + "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", + "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" + }, + { + "label": "Technology Sizing", # This is your separator label + "key": "tech_separator", #MUST HAVE "separator" somewhere in the name + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + # Example 3: Calculated Value (Sum of Two Fields), this does not show up in formulas + { + "label": "Combined Renewable Size (kW)", + "key": "combined_renewable_size", + "bau_value": lambda df: 0, + "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") + safe_get(df, "outputs.Wind.size_kw") #NOTE: These calculations will not show up as in the excel calculations + }, + + # Example 4: Hardcoded Values + { + "label": "Hardcoded Values (kWh)", + "key": "hardcoded_value", + "bau_value": lambda df: 500, # BAU scenario + "scenario_value": lambda df: 1000 # other scenarios + }, + + # Example 5: Conditional Formatting + { + "label": "PV Size Status", + "key": "pv_size_status", + "bau_value": lambda df: 0, + "scenario_value": lambda df: "Above Threshold" if safe_get(df, "outputs.PV.size_kw") > 2500 else "Below Threshold" + }, + #Example 6 and 7: First define any data that might need to be referenced, Here I've defined two placeholders + # Define Placeholder1 + { + "label": "Placeholder1", + "key": "placeholder1", + "bau_value": lambda df: 100, # BAU value + "scenario_value": lambda df: 200 # Scenario value + }, + # Define Placeholder2 + { + "label": "Placeholder2", + "key": "placeholder2", + "bau_value": lambda df: 50, # BAU value + "scenario_value": lambda df: 100 # Scenario value + }, + # Example 6: Calculation Without Reference to BAU + { + "label": "Placeholder Calculation Without BAU Reference", + "key": "placeholder_calculation_without_bau", + "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel + "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel + }, + # Example 7: Calculation With Reference to BAU + { + "label": "Placeholder Calculation With BAU Reference", + "key": "placeholder_calculation_with_bau", + "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel + "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel + }, + { + "label": "Results URL", + "key": "url", + "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid"), + "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") + } + ] + +# Webtool table configuration 
+custom_table_webtool = [ + { + "label": "Evaluation Name", + "key": "evaluation_name", + "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), + "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") + }, + { + "label": "BAU or Optimal Case?", + "key": "bau_or_optimal_case", + "bau_value": lambda df: "BAU", + "scenario_value": lambda df: "Optimal" + }, + { + "label": "Site Location", + "key": "site_location", + "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", + "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" + }, + { + "label": "Results URL", + "key": "url", + "bau_value": lambda df: '', + "scenario_value": lambda df: f'=HYPERLINK("https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/{safe_get(df, "webtool_uuid")}", "Results Link")' + }, + { + "label": "System Capacities", + "key": "system_capacities_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "PV capacity, new (kW)", + "key": "pv_capacity_new", + "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") + }, + { + "label": "PV capacity, existing (kW)", + "key": "pv_size_purchased", + "bau_value": lambda df: safe_get(df, "outputs.PV.existing_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.existing_kw") + }, + { + "label": "Wind Capacity (kW)", + "key": "wind_capacity", + "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") + }, + { + "label": "Backup Generator Capacity, New (kW)", + "key": "backup_generator_new", + "bau_value": lambda df: safe_get(df, "outputs.Generator.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.size_kw") + }, + { + "label": "Backup Generator Capacity, Existing (kW)", + "key": "backup_generator_existing", + "bau_value": lambda df: safe_get(df, "outputs.Generator.existing_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.existing_kw") + }, + { + "label": "Generator Annual Fuel Consumption (gallons)", + "key": "backup_generator_fuel_consumption", + "bau_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal") + }, + { + "label": "Generator Fuel Cost ($)", + "key": "backup_generator_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") + }, + { + "label": "Generator Lifecycle Fuel Cost ($)", + "key": "lifecycle_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.Generator.lifecycle_fuel_cost_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.lifecycle_fuel_cost_after_tax") + }, + { + "label": "Battery Power Capacity (kW)", + "key": "battery_power_capacity", + "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw") + }, + { + "label": "Battery Energy Capacity (kWh)", + "key": "battery_energy_capacity", + "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh_bau"), + "scenario_value": lambda df: safe_get(df, 
"outputs.ElectricStorage.size_kwh") + }, + { + "label": "CHP Capacity (kW)", + "key": "chp_capacity", + "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") + }, + { + "label": "Absorption Chiller Capacity (tons)", + "key": "absorption_chiller_capacity", + "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton") + }, + { + "label": "Chilled Water TES Capacity (gallons)", + "key": "chilled_water_tes_capacity", + "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal") + }, + { + "label": "Hot Water TES Capacity (gallons)", + "key": "hot_water_tes_capacity", + "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal") + }, + { + "label": "Steam Turbine Capacity (kW)", + "key": "steam_turbine_capacity", + "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw") + }, + { + "label": "GHP Heat Pump Capacity (ton)", + "key": "ghp_heat_pump_capacity", + "bau_value": lambda df: safe_get(df, "outputs.GHP.size_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_ton") + }, + { + "label": "GHP Ground Heat Exchanger Size (ft)", + "key": "ghp_ground_heat_exchanger_size", + "bau_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft") + }, + { + "label": "Summary Financial Metrics", + "key": "summary_financial_metrics_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Gross Capital Costs, Before Incentives ($)", + "key": "gross_capital_costs_before_incentives", + "bau_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs") + }, + { + "label": "Present Value of Incentives ($)", + "key": "present_value_of_incentives", + "bau_value": lambda df: safe_get(df, "outputs.Financial.production_incentive_max_benefit_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.production_incentive_max_benefit") + }, + { + "label": "Net Capital Cost ($)", + "key": "net_capital_cost", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") + }, + { + "label": "Year 1 O&M Cost, Before Tax ($)", + "key": "year_1_om_cost_before_tax", + "bau_value": lambda df: safe_get(df, "outputs.Financial.year_one_om_costs_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.year_one_om_costs_before_tax") + }, + { + "label": "Total Life Cycle Costs ($)", + "key": "total_life_cycle_costs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lcc_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lcc") + }, + { + "label": "Net Present Value ($)", + "key": "npv", + "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") + }, + { + "label": "Payback Period (years)", + "key": "payback_period", + "bau_value": lambda df: safe_get(df, 
"outputs.Financial.simple_payback_years_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") + }, + { + "label": "Simple Payback (years)", + "key": "simple_payback_period", + "bau_value": lambda df: safe_get(df, ""), + "scenario_value": lambda df: safe_get(df, "") + }, + { + "label": "Internal Rate of Return (%)", + "key": "internal_rate_of_return", + "bau_value": lambda df: safe_get(df, "outputs.Financial.internal_rate_of_return_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.internal_rate_of_return") + }, + { + "label": "Life Cycle Cost Breakdown", + "key": "lifecycle_cost_breakdown_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Technology Capital Costs + Replacements, After Incentives ($)", + "key": "technology_capital_costs_after_incentives", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_generation_tech_capital_costs_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_generation_tech_capital_costs") + }, + { + "label": "O&M Costs ($)", + "key": "om_costs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") + }, + { + "label": "Total Electric Costs ($)", + "key": "total_electric_utility_costs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax") + }, + { + "label": "Total Fuel Costs ($)", + "key": "total_fuel_costs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax") + }, + { + "label": "Total Utility Costs ($)", + "key": "total_fuel_costs", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Total Emissions Costs ($)", + "key": "total_emissions_costs", + "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_cost_health_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_cost_health") + }, + { + "label": "LCC ($)", + "key": "lcc", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lcc_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lcc") + }, + { + "label": "NPV as a % of BAU LCC (%)", + "key": "npv_bau_percent", + "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_as_bau_lcc_percent_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv_as_bau_lcc_percent") + }, + { + "label": "Year 1 Electric Bill", + "key": "year_1_electric_bill_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Electric Grid Purchases (kWh)", + "key": "electric_grid_purchases", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") + }, + { + "label": "Energy Charges ($)", + "key": "electricity_energy_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax") + }, + { + "label": "Demand Charges ($)", + "key": "electricity_demand_cost", + "bau_value": 
lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax") + }, + { + "label": "Fixed Charges ($)", + "key": "utility_fixed_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax") + }, + { + "label": "Purchased Electricity Cost ($)", + "key": "purchased_electricity_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax") + }, + { + "label": "Annual Cost Savings ($)", + "key": "annual_cost_savings", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Year 1 Fuel Costs & Consumption", + "key": "year_1_fuel_costs_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Boiler Fuel Consumption (mmbtu)", + "key": "boiler_fuel_consumption", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_fuel_consumption_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_fuel_consumption_mmbtu") + }, + { + "label": "Boiler Fuel Costs ($)", + "key": "boiler_fuel_costs", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax") + }, + { + "label": "CHP Fuel Consumption (mmbtu)", + "key": "chp_fuel_consumption", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu") + }, + { + "label": "CHP Fuel Cost ($)", + "key": "chp_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") + }, + { + "label": "Backup Generator Fuel Consumption (gallons)", + "key": "backup_generator_fuel_consumption", + "bau_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal") + }, + { + "label": "Backup Generator Fuel Cost ($)", + "key": "backup_generator_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") + }, + { + "label": "Renewable Energy & Emissions", + "key": "renewable_energy_emissions_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Annual % Renewable Electricity (%)", + "key": "annual_renewable_electricity", + "bau_value": lambda df: safe_get(df, "outputs.Site.renewable_electricity_fraction_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.renewable_electricity_fraction") + }, + { + "label": "Year 1 CO2 Emissions (tonnes)", + "key": "year_1_co2_emissions", + "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") + }, + { + "label": "CO2 Emissions (tonnes)", + "key": "co2_emissions", + "bau_value": lambda df: safe_get(df, 
"outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") + }, + { + "label": "CO2 (%) savings", + "key": "co2_savings_percentage", + "bau_value": lambda df: 0, + "scenario_value": lambda df: 0 + }, + { + "label": "Annual Energy Production & Throughput", + "key": "energy_production_throughput_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "PV (kWh)", + "key": "pv_kwh", + "bau_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh") + }, + { + "label": "Wind (kWh)", + "key": "wind_kwh", + "bau_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh") + }, + { + "label": "CHP (kWh)", + "key": "chp_kwh", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh") + }, + { + "label": "CHP (MMBtu)", + "key": "chp_mmbtu", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") + }, + { + "label": "Boiler (MMBtu)", + "key": "boiler_mmbtu", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") + }, + { + "label": "Battery (kWh)", + "key": "battery_kwh", + "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw") + }, + { + "label": "HW-TES (MMBtu)", + "key": "hw_tes_mmbtu", + "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.annual_energy_produced_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.annual_energy_produced_mmbtu") + }, + { + "label": "CW-TES (MMBtu)", + "key": "cw_tes_mmbtu", + "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.annual_energy_produced_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.annual_energy_produced_mmbtu") + }, + { + "label": "Breakdown of Incentives", + "key": "breakdown_of_incentives_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Federal Tax Incentive (30%)", + "key": "federal_tax_incentive_30", + "bau_value": lambda df: 0.3, + "scenario_value": lambda df: 0.3 + }, + { + "label": "PV Federal Tax Incentive (%)", + "key": "pv_federal_tax_incentive", + "bau_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction_bau"), + "scenario_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction") + }, + { + "label": "Storage Federal Tax Incentive (%)", + "key": "storage_federal_tax_incentive", + "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), + "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") + }, + # { + # "label": "Incentive Value ($)", + # "key": "incentive_value", + # "bau_value": lambda df: safe_get(df, "outputs.Financial.breakeven_cost_of_emissions_reduction_per_tonne_CO2_bau"), + # "scenario_value": lambda df: safe_get(df, 
"outputs.Financial.breakeven_cost_of_emissions_reduction_per_tonne_CO2") + # }, + { + "label": "Additional Grant ($)", + "key": "iac_grant", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_production_incentive_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_production_incentive_after_tax") + } +] + +# IEDO TASC Configuration +custom_table_tasc = [ + { + "label": "Site Name", + "key": "site", + "bau_value": lambda df: "", + "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") + }, + { + "label": "Site Location", + "key": "site_lat_long", + "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", + "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" + }, + { + "label": "Site Address", + "key": "site_address", + "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), + "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") + }, + { + "label": "PV Size (kW)", + "key": "pv_size", + "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") + }, + { + "label": "Wind Size (kW)", + "key": "wind_size", + "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") + }, + { + "label": "CHP Size (kW)", + "key": "chp_size", + "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") + }, + { + "label": "PV Total Electricity Produced (kWh)", + "key": "pv_total_electricity_produced", + "bau_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh") + }, + { + "label": "PV Exported to Grid (kWh)", + "key": "pv_exported_to_grid", + "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw") + }, + { + "label": "PV Serving Load (kWh)", + "key": "pv_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw") + }, + { + "label": "Wind Total Electricity Produced (kWh)", + "key": "wind_total_electricity_produced", + "bau_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh") + }, + { + "label": "Wind Exported to Grid (kWh)", + "key": "wind_exported_to_grid", + "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw") + }, + { + "label": "Wind Serving Load (kWh)", + "key": "wind_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw") + }, + { + "label": "CHP Total Electricity Produced (kWh)", + "key": "chp_total_electricity_produced", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh") + }, + { + "label": "CHP Exported to Grid (kWh)", + 
"key": "chp_exported_to_grid", + "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw") + }, + { + "label": "CHP Serving Load (kWh)", + "key": "chp_serving_load", + "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw") + }, + { + "label": "CHP Serving Thermal Load (MMBtu)", + "key": "chp_serving_thermal_load", + "bau_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour") + }, + { + "label": "Grid Purchased Electricity (kWh)", + "key": "grid_purchased_electricity", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") + }, + { + "label": "Total Site Electricity Use (kWh)", + "key": "total_site_electricity_use", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw") + }, + { + "label": "Net Purchased Electricity Reduction (%)", + "key": "net_purchased_electricity_reduction", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf") + }, + { + "label": "Electricity Energy Cost ($)", + "key": "electricity_energy_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax") + }, + { + "label": "Electricity Demand Cost ($)", + "key": "electricity_demand_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax") + }, + { + "label": "Utility Fixed Cost ($)", + "key": "utility_fixed_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax") + }, + { + "label": "Purchased Electricity Cost ($)", + "key": "purchased_electricity_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax") + }, + { + "label": "Electricity Export Benefit ($)", + "key": "electricity_export_benefit", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax") + }, + { + "label": "Net Electricity Cost ($)", + "key": "net_electricity_cost", + "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax") + }, + { + "label": "Electricity Cost Savings ($/year)", + "key": "electricity_cost_savings", + "bau_value": lambda df: safe_get(df, 
"outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau") + }, + { + "label": "Boiler Fuel (MMBtu)", + "key": "boiler_fuel", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu") + }, + { + "label": "CHP Fuel (MMBtu)", + "key": "chp_fuel", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu") + }, + { + "label": "Total Fuel (MMBtu)", + "key": "total_fuel", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh") + }, + { + "label": "Natural Gas Reduction (%)", + "key": "natural_gas_reduction", + "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau") + }, + { + "label": "Boiler Thermal Production (MMBtu)", + "key": "boiler_thermal_production", + "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") + }, + { + "label": "CHP Thermal Production (MMBtu)", + "key": "chp_thermal_production", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") + }, + { + "label": "Total Thermal Production (MMBtu)", + "key": "total_thermal_production", + "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") + }, + { + "label": "Heating System Fuel Cost ($)", + "key": "heating_system_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars") + }, + { + "label": "CHP Fuel Cost ($)", + "key": "chp_fuel_cost", + "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") + }, + { + "label": "Total Fuel (NG) Cost ($)", + "key": "total_fuel_ng_cost", + "bau_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars") + }, + { + "label": "Total Utility Cost ($)", + "key": "total_utility_cost", + "bau_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars") + }, + { + "label": "O&M Cost Increase ($)", + "key": "om_cost_increase", + "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") + }, + { + "label": "Payback Period (years)", + "key": "payback_period", + "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), + "scenario_value": lambda 
df: safe_get(df, "outputs.Financial.simple_payback_years") + }, + { + "label": "Gross Capital Cost ($)", + "key": "gross_capital_cost", + "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") + }, + { + "label": "Federal Tax Incentive (30%)", + "key": "federal_tax_incentive", + "bau_value": lambda df: 0.3, + "scenario_value": lambda df: 0.3 + }, + { + "label": "Additional Grant ($)", + "key": "additional_grant", + "bau_value": lambda df: 0, + "scenario_value": lambda df: 0 + }, + { + "label": "Incentive Value ($)", + "key": "incentive_value", + "bau_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars") + }, + { + "label": "Net Capital Cost ($)", + "key": "net_capital_cost", + "bau_value": lambda df: safe_get(df, "outputs.Financial.net_capital_cost_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.net_capital_cost_us_dollars") + }, + { + "label": "Annual Cost Savings ($)", + "key": "annual_cost_savings", + "bau_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars") + }, + { + "label": "Simple Payback (years)", + "key": "simple_payback", + "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") + }, + { + "label": "CO2 Emissions (tonnes)", + "key": "co2_emissions", + "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") + }, + { + "label": "CO2 Reduction (tonnes)", + "key": "co2_reduction", + "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") + }, + { + "label": "CO2 (%) savings", + "key": "co2_savings_percentage", + "bau_value": lambda df: 0, + "scenario_value": lambda df: 0 + }, + { + "label": "NPV ($)", + "key": "npv", + "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") + }, + { + "label": "PV Federal Tax Incentive (%)", + "key": "pv_federal_tax_incentive", + "bau_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction_bau"), + "scenario_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction") + }, + { + "label": "Storage Federal Tax Incentive (%)", + "key": "storage_federal_tax_incentive", + "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), + "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") + } +] + +''' + 2. Defining BAU Columns: +------------------------ +- If your calculation involves BAU (Business As Usual) columns, ensure that the relevant BAU columns are included in the `bau_cells_config` dictionary. Each key in this dictionary represents a BAU variable used in calculations, and the value is the corresponding table label. 
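+
+- Before any formulas are evaluated, each configured label is resolved into a concrete
+  BAU cell reference (the BAU column letter plus the label's worksheet row). The exact
+  wiring lives in views.py and may differ, but conceptually it is close to the sketch
+  below (`bau_col_letter`, `headers`, and the example values are illustrative only):
+
+    headers = {"Grid Purchased Electricity (kWh)": 10}  # label -> row index (illustrative)
+    bau_col_letter = "B"                                # BAU scenario column (illustrative)
+    bau_cells = {
+        var_name: f"{bau_col_letter}{headers[label] + 2}"
+        for var_name, label in bau_cells_config.items()
+        if label in headers
+    }
+    # e.g. {"grid_value": "B12"}, which the formulas splice in as bau["grid_value"]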
+ +- Example `bau_cells_config` for BAU references: + bau_cells_config = { + "grid_value": "Grid Purchased Electricity (kWh)", + "elec_cost_value": "Purchased Electricity Cost ($)", + } + +- When defining calculations that use BAU columns, reference the BAU values using the `bau` dictionary. For example, to calculate the optimal reduction in grid electricity purchases compared to BAU: + "formula": lambda col, bau, headers: f'=({bau["grid_value"]} - {col}{headers["Grid Purchased Electricity (kWh)"] + 2}) / {bau["grid_value"]}' + +- Note: the bau cell has to be associated with a variable name in the custom table +- Note: It is safe to define bau cells that are not being used. If they are not associated with an entry in the custom table, they will be safely ignored +''' + +# Define bau_cells configuration for calculations that reference bau cells +bau_cells_config = { + "grid_value" : "Grid Purchased Electricity (kWh)", + "elec_cost_value" : "Purchased Electricity Cost ($)", + "ng_reduction_value" : "Total Fuel (MMBtu)", + "total_elec_costs" : "Total Electric Costs ($)", + "total_fuel_costs" : "Total Fuel Costs ($)", + "co2_reduction_value": "CO2 Emissions (tonnes)", + "placeholder1_value" : "Placeholder1" +} + +''' +3. Defining Calculations: +------------------------- +- Each calculation should be defined using a dictionary with the following structure: + { + "name": , # The name of the calculation (matches the label in the table) + "formula": # A lambda function that calculates the desired value + } + +- The lambda function receives the following parameters: + - `col`: The column letter for the scenario data in Excel. + - `bau`: A dictionary of BAU cell references (if applicable). + - `headers`: A dictionary containing the row indices for relevant table headers. 
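+  - Note on the "+ 2" offset used throughout the formulas: it assumes `headers` holds
+    zero-based row positions and that the worksheet has a single header row, so a
+    label's data lands in Excel row index + 2 (e.g. index 10 -> row 12). If the
+    worksheet layout changes, adjust the offset accordingly.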
+ + Example Calculation: + { + "name": "Net Purchased Electricity Reduction (%)", + "formula": lambda col, bau, headers: f'=({bau["grid_value"]} - {col}{headers["Grid Purchased Electricity (kWh)"] + 2}) / {bau["grid_value"]}' + } + - Note: The calculation name has to be the same as a variable in the custom table + - Note: It is safe to define calculations that are not being used, if they are not associated with an entry in the custom table, it will be ignored + ''' + +# Calculation logic +calculations_config = [ + { + "name": "Total Site Electricity Use (kWh)", + "formula": lambda col, bau, headers: ( + f'={col}{headers["PV Serving Load (kWh)"] + 2}+' + f'{col}{headers["Wind Serving Load (kWh)"] + 2}+' + f'{col}{headers["CHP Serving Load (kWh)"] + 2}+' + f'{col}{headers["Battery Serving Load (kWh)"] + 2}+' + f'{col}{headers["Backup Generator Serving Load (kWh)"] + 2}+' + f'{col}{headers["Steam Turbine Serving Load (kWh)"] + 2}+' + f'{col}{headers["Grid Purchased Electricity (kWh)"] + 2}' + ) + }, + + { + "name": "Net Purchased Electricity Reduction (%)", + "formula": lambda col, bau, headers: f'=({bau["grid_value"]}-{col}{headers["Grid Purchased Electricity (kWh)"] + 2})/{bau["grid_value"]}' + }, + { + "name": "Purchased Electricity Cost ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Energy Charges ($)"] + 2}+{col}{headers["Demand Charges ($)"] + 2}+{col}{headers["Fixed Charges ($)"] + 2}' + }, + { + "name": "Net Electricity Cost ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Purchased Electricity Cost ($)"] + 2}-{col}{headers["Electricity Export Benefit ($)"] + 2}' + }, + { + "name": "Electricity Cost Savings ($/year)", + "formula": lambda col, bau, headers: f'={bau["elec_cost_value"]}-{col}{headers["Purchased Electricity Cost ($)"] + 2}' + }, + { + "name": "Total Fuel (MMBtu)", + "formula": lambda col, bau, headers: f'={col}{headers["Boiler Fuel (MMBtu)"] + 2}+{col}{headers["CHP Fuel (MMBtu)"] + 2}' + }, + { + "name": "Natural Gas Reduction (%)", + "formula": lambda col, bau, headers: f'=({bau["ng_reduction_value"]}-{col}{headers["Total Fuel (MMBtu)"] + 2})/{bau["ng_reduction_value"]}' + }, + { + "name": "Total Thermal Production (MMBtu)", + "formula": lambda col, bau, headers: f'={col}{headers["Boiler Thermal Production (MMBtu)"] + 2}+{col}{headers["CHP Thermal Production (MMBtu)"] + 2}' + }, + # { + # "name": "Total Fuel Costs ($)", + # "formula": lambda col, bau, headers: f'={col}{headers["Heating System Fuel Cost ($)"] + 2}+{col}{headers["CHP Fuel Cost ($)"] + 2}' + # }, + { + "name": "Total Utility Costs ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Total Electric Costs ($)"] + 2}+{col}{headers["Total Fuel Costs ($)"] + 2}' + }, + # { + # "name": "Incentive Value ($)", + # "formula": lambda col, bau, headers: f'=({col}{headers["Federal Tax Incentive (30%)"] + 2}*{col}{headers["Gross Capital Cost ($)"] + 2})+{col}{headers["Additional Grant ($)"] + 2}' + # }, + # { + # "name": "Net Capital Cost ($)", + # "formula": lambda col, bau, headers: f'={col}{headers["Gross Capital Cost ($)"] + 2}-{col}{headers["Incentive Value ($)"] + 2}' + # }, + { + "name": "Annual Cost Savings ($)", + "formula": lambda col, bau, headers: f'={bau["elec_cost_value"]}+-{col}{headers["Purchased Electricity Cost ($)"] + 2}' + }, + { + "name": "Simple Payback (years)", + "formula": lambda col, bau, headers: f'={col}{headers["Net Capital Cost ($)"] + 2}/{col}{headers["Annual Cost Savings ($)"] + 2}' + }, + # { + # "name": "CO2 Reduction (tonnes)", + # "formula": 
lambda col, bau, headers: f'={bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2}' + # }, + { + "name": "CO2 (%) savings", + "formula": lambda col, bau, headers: f'=({bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2})/{bau["co2_reduction_value"]}' + }, + #Example Calculations + # Calculation Without Reference to bau_cells + { + "name": "Placeholder Calculation Without BAU Reference", + "formula": lambda col, bau, headers: f'={col}{headers["Placeholder1"] + 2}+{col}{headers["Placeholder2"] + 2}' + # This formula adds Placeholder1 and Placeholder2 values from the scenario. + }, + + # Calculation With Reference to bau_cells + { + "name": "Placeholder Calculation With BAU Reference", + "formula": lambda col, bau, headers: f'=({bau["placeholder1_value"]}-{col}{headers["Placeholder2"] + 2})/{bau["placeholder1_value"]}' + # This formula calculates the percentage change of Placeholder2 using Placeholder1's BAU value as the reference. + } +] \ No newline at end of file diff --git a/reoptjl/custom_table_helpers.py b/reoptjl/custom_table_helpers.py index c36db182a..a27a3a649 100644 --- a/reoptjl/custom_table_helpers.py +++ b/reoptjl/custom_table_helpers.py @@ -1,5 +1,7 @@ -# custom table helpers.py -def flatten_dict(d, parent_key='', sep='.'): +# custom_table_helpers.py +from typing import Dict, Any, List, Union + +def flatten_dict(d: Dict[str, Any], parent_key: str = '', sep: str = '.') -> Dict[str, Any]: """Flatten nested dictionary.""" items = [] for k, v in d.items(): @@ -10,29 +12,24 @@ def flatten_dict(d, parent_key='', sep='.'): items.append((new_key, v)) return dict(items) -def clean_data_dict(data_dict): +def clean_data_dict(data_dict: Dict[str, List[Any]]) -> Dict[str, List[Any]]: """Clean data dictionary by removing default values.""" - for key, value_array in data_dict.items(): - new_value_array = [ - "" if v in [0, float("nan"), "NaN", "0", "0.0", "$0.0", -0, "-0", "-0.0", "-$0.0", None] else v - for v in value_array - ] - data_dict[key] = new_value_array - return data_dict + default_values = {0, float("nan"), "NaN", "0", "0.0", "$0.0", -0, "-0", "-0.0", "-$0.0", None} + return { + key: ["" if v in default_values else v for v in value_array] + for key, value_array in data_dict.items() + } -def sum_vectors(data): +def sum_vectors(data: Union[Dict[str, Any], List[Any]]) -> Union[Dict[str, Any], List[Any], Any]: """Sum numerical vectors within a nested data structure.""" if isinstance(data, dict): return {key: sum_vectors(value) for key, value in data.items()} elif isinstance(data, list): - if all(isinstance(item, (int, float)) for item in data): - return sum(data) - else: - return [sum_vectors(item) for item in data] + return sum(data) if all(isinstance(item, (int, float)) for item in data) else [sum_vectors(item) for item in data] else: return data -def colnum_string(n): +def colnum_string(n: int) -> str: """Convert a column number to an Excel-style column string.""" string = "" while n > 0: @@ -40,47 +37,5 @@ def colnum_string(n): string = chr(65 + remainder) + string return string -def safe_get(df, key, default=0): - return df.get(key, default) - -def check_bau_consistency(scenarios, tolerance_percentage=0.1): - """ - Check the consistency of BAU values within the 'outputs.Financial' section across all scenarios with a percentage-based tolerance. - - Args: - scenarios (list): List of scenario dictionaries to check. - tolerance_percentage (float): Tolerance percentage for allowable differences. - For example, 0.1 for 0.1% tolerance. 
- """ - bau_values_list = [] - all_bau_keys = set() - - for scenario in scenarios: - df_gen = flatten_dict(scenario['full_data']) - - current_bau_values = {} - for key, value in df_gen.items(): - # Focus only on keys related to 'outputs.Financial' and ending with '_bau' - if key.startswith('outputs.Financial') and key.endswith('_bau'): - current_bau_values[key] = value - all_bau_keys.add(key) - - bau_values_list.append(current_bau_values) - - # Perform consistency check across all `_bau` values within 'outputs.Financial' - first_bau_values = bau_values_list[0] - for idx, other_bau_values in enumerate(bau_values_list[1:], start=1): - for key in all_bau_keys: - first_value = first_bau_values.get(key, 0) - other_value = other_bau_values.get(key, 0) - - # Assign a default tolerance value before performing checks - tolerance = abs(first_value) * (tolerance_percentage / 100) if first_value != 0 else tolerance_percentage - - if first_value != 0: # Avoid division by zero - difference = abs(first_value - other_value) - if difference > tolerance: - raise ValueError(f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1}. Please check the BAU values for consistency.") - else: # Handle the case where the first value is 0 - if abs(other_value) > tolerance: - raise ValueError(f"Inconsistent BAU values found between scenario 1 and scenario {idx + 1}. Please check the BAU values for consistency.") +def safe_get(df: Dict[str, Any], key: str, default: Any = 0) -> Any: + return df.get(key, default) \ No newline at end of file diff --git a/reoptjl/views.py b/reoptjl/views.py index 95382cb73..ee82b2b13 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1,6 +1,7 @@ # REopt®, Copyright (c) Alliance for Sustainable Energy, LLC. See also https://github.com/NREL/REopt_API/blob/master/LICENSE. 
from django.db import models import uuid +from typing import List, Dict, Any import sys import traceback as tb import re @@ -21,12 +22,22 @@ import pandas as pd import json import logging -from reoptjl.custom_table_helpers import safe_get,flatten_dict, clean_data_dict, sum_vectors, colnum_string, check_bau_consistency + +from reoptjl.custom_table_helpers import flatten_dict, clean_data_dict, sum_vectors, colnum_string +from reoptjl.custom_table_config import * + import xlsxwriter from collections import defaultdict import io log = logging.getLogger(__name__) +class CustomTableError(Exception): + pass + +def log_and_raise_error(task_name: str) -> None: + exc_type, exc_value, exc_traceback = sys.exc_info() + log.error(f"Error in {task_name}: {exc_value}, traceback: {tb.format_tb(exc_traceback)}") + raise CustomTableError(f"Error in {task_name}") def make_error_resp(msg): resp = dict() @@ -117,7 +128,6 @@ def outputs(request): except Exception as e: return JsonResponse({"Error": "Unexpected error in help endpoint: {}".format(e.args[0])}, status=500) - def results(request, run_uuid): """ results endpoint for reoptjl jobs @@ -1189,14 +1199,7 @@ def easiur_costs(request): ############################################################################################################################## ################################################# START Custom Table ######################################################### ############################################################################################################################## -def log_and_raise_error(task_name): - exc_type, exc_value, exc_traceback = sys.exc_info() - log.error(f"Error in {task_name}: {exc_value}, traceback: {tb.format_tb(exc_traceback)}") - err = UnexpectedError(exc_type, exc_value, exc_traceback, task=task_name) - err.save_to_db() - raise - -def access_raw_data(run_uuids, request): +def access_raw_data(run_uuids: List[str], request: Any) -> Dict[str, List[Dict[str, Any]]]: try: usermeta = UserProvidedMeta.objects.filter(meta__run_uuid__in=run_uuids).only('meta__run_uuid', 'description', 'address') meta_data_dict = {um.meta.run_uuid: {"description": um.description, "address": um.address} for um in usermeta} @@ -1205,7 +1208,7 @@ def access_raw_data(run_uuids, request): "scenarios": [ { "run_uuid": str(run_uuid), - "full_data": process_raw_data(request, run_uuid), + "full_data": summarize_vector_data(request, run_uuid), "meta_data": meta_data_dict.get(run_uuid, {}) } for run_uuid in run_uuids @@ -1213,24 +1216,17 @@ def access_raw_data(run_uuids, request): } except Exception: log_and_raise_error('access_raw_data') - - except Exception as e: - exc_type, exc_value, exc_traceback = sys.exc_info() - log.error(f"Error in access_raw_data: {exc_value}, traceback: {tb.format_tb(exc_traceback)}") - err = UnexpectedError(exc_type, exc_value, exc_traceback, task='access_raw_data') - err.save_to_db() - raise -def process_raw_data(request, run_uuid): +def summarize_vector_data(request: Any, run_uuid: str) -> Dict[str, Any]: try: response = results(request, run_uuid) if response.status_code == 200: return sum_vectors(json.loads(response.content)) return {"error": f"Failed to fetch data for run_uuid {run_uuid}"} except Exception: - log_and_raise_error('process_raw_data') + log_and_raise_error('summarize_vector_data') -def generate_data_dict(config, df_gen, suffix=""): +def generate_data_dict(config: List[Dict[str, Any]], df_gen: Dict[str, Any]) -> Dict[str, List[Any]]: try: data_dict = defaultdict(list) for entry in config: 
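As a rough illustration of the pipeline these hunks refactor (results are vector-summed, flattened, then read by the config lambdas), here is a minimal, self-contained sketch. It is illustrative only: the payload and the single config entry are invented, and the final loop only approximates what the refactored generate_data_dict does with each entry, since its full body is not shown in this hunk. flatten_dict and safe_get are reproduced from custom_table_helpers.py so the sketch runs on its own.

    from collections import defaultdict
    from typing import Any, Dict

    def flatten_dict(d: Dict[str, Any], parent_key: str = '', sep: str = '.') -> Dict[str, Any]:
        """Flatten nested dictionary (same logic as custom_table_helpers.py)."""
        items = []
        for k, v in d.items():
            new_key = f"{parent_key}{sep}{k}" if parent_key else k
            if isinstance(v, dict):
                items.extend(flatten_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    def safe_get(df: Dict[str, Any], key: str, default: Any = 0) -> Any:
        return df.get(key, default)

    # Invented single-entry config and payload, for illustration only.
    config = [{
        "label": "PV capacity, new (kW)",
        "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"),
        "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw"),
    }]
    payload = {"outputs": {"PV": {"size_kw": 120.0, "size_kw_bau": 0.0}}}

    df_gen = flatten_dict(payload)            # {'outputs.PV.size_kw': 120.0, 'outputs.PV.size_kw_bau': 0.0}
    data_dict = defaultdict(list)
    for entry in config:                      # approximates the generate_data_dict loop
        data_dict[entry["label"]].append(entry["scenario_value"](df_gen))
    print(dict(data_dict))                    # {'PV capacity, new (kW)': [120.0]}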
@@ -1240,18 +1236,18 @@ def generate_data_dict(config, df_gen, suffix=""): except Exception: log_and_raise_error('generate_data_dict') -def get_REopt_data(data_f, scenario_name, config): +def generate_reopt_dataframe(data_f: Dict[str, Any], scenario_name: str, config: List[Dict[str, Any]]) -> pd.DataFrame: try: scenario_name_str = str(scenario_name) df_gen = flatten_dict(data_f) - data_dict = generate_data_dict(config, df_gen, "_bau" if "BAU" in scenario_name_str.upper() else "") + data_dict = generate_data_dict(config, df_gen) data_dict["Scenario"] = [scenario_name_str] col_order = ["Scenario"] + [entry["label"] for entry in config] return pd.DataFrame(data_dict)[col_order] except Exception: - log_and_raise_error('get_REopt_data') + log_and_raise_error('generate_reopt_dataframe') -def get_bau_values(scenarios, config): +def get_bau_values(scenarios: List[Dict[str, Any]], config: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]: try: bau_values_per_scenario = { scenario['run_uuid']: {entry["label"]: None for entry in config} for scenario in scenarios @@ -1268,15 +1264,15 @@ def get_bau_values(scenarios, config): return bau_values_per_scenario except Exception: log_and_raise_error('get_bau_values') - -def process_scenarios(scenarios, reopt_data_config): + +def process_scenarios(scenarios: List[Dict[str, Any]], reopt_data_config: List[Dict[str, Any]]) -> pd.DataFrame: try: bau_values_per_scenario = get_bau_values(scenarios, reopt_data_config) combined_df = pd.DataFrame() for idx, scenario in enumerate(scenarios): run_uuid = scenario['run_uuid'] - df_result = get_REopt_data(scenario['full_data'], run_uuid, reopt_data_config) + df_result = generate_reopt_dataframe(scenario['full_data'], run_uuid, reopt_data_config) df_result["Scenario"] = run_uuid bau_data = {key: [value] for key, value in bau_values_per_scenario[run_uuid].items()} @@ -1291,12 +1287,12 @@ def process_scenarios(scenarios, reopt_data_config): except Exception: log_and_raise_error('process_scenarios') -def create_custom_comparison_table(request): +def generate_custom_comparison_table(request: Any) -> HttpResponse: if request.method != 'GET': return JsonResponse({"Error": "Method not allowed. This endpoint only supports GET requests."}, status=405) try: - table_config_name = request.GET.get('table_config_name', 'webtool_table') + table_config_name = request.GET.get('table_config_name', 'custom_table_webtool') run_uuids = [request.GET[key] for key in request.GET.keys() if key.startswith('run_uuid[')] if not run_uuids: return JsonResponse({"Error": "No run_uuids provided. Please include at least one run_uuid in the request."}, status=400) @@ -1307,29 +1303,30 @@ def create_custom_comparison_table(request): except ValueError: return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}. Ensure that each run_uuid is a valid UUID."}, status=400) - scenarios = access_raw_data(run_uuids, request) target_custom_table = globals().get(table_config_name) if not target_custom_table: return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. 
Please provide a valid configuration name."}, status=400) + scenarios = access_raw_data(run_uuids, request) final_df = process_scenarios(scenarios['scenarios'], target_custom_table) final_df_transpose = final_df.transpose() final_df_transpose.columns = final_df_transpose.iloc[0] final_df_transpose = final_df_transpose.drop(final_df_transpose.index[0]) output = io.BytesIO() - create_custom_table_excel(final_df_transpose, target_custom_table, calculations, output) + generate_excel_workbook(final_df_transpose, target_custom_table, output) output.seek(0) response = HttpResponse(output, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') response['Content-Disposition'] = 'attachment; filename="comparison_table.xlsx"' return response - except ValueError as e: - log_and_return_error(e, 'create_custom_comparison_table', 500) - except Exception: - log_and_raise_error('create_custom_comparison_table') - -def create_custom_table_excel(df, custom_table, calculations, output): + except CustomTableError as e: + return JsonResponse({"Error": str(e)}, status=500) + except Exception as e: + log.error(f"Unexpected error in generate_custom_comparison_table: {e}") + return JsonResponse({"Error": "An unexpected error occurred. Please try again later."}, status=500) + +def generate_excel_workbook(df: pd.DataFrame, custom_table: List[Dict[str, Any]], output: io.BytesIO) -> None: try: workbook = xlsxwriter.Workbook(output, {'in_memory': True}) worksheet = workbook.add_worksheet('Custom Table') @@ -1466,7 +1463,7 @@ def get_bau_column(col): return col - 1 if col > 1 else 1 relevant_columns = [entry["label"] for entry in custom_table] - relevant_calculations = [calc for calc in calculations if calc["name"] in relevant_columns] + relevant_calculations = [calc for calc in calculations_config if calc["name"] in relevant_columns] logged_messages = set() missing_entries = [] @@ -1480,16 +1477,7 @@ def get_bau_column(col): bau_col = get_bau_column(col) # Get the corresponding BAU column bau_col_letter = colnum_string(bau_col) # Convert the column number to letter for Excel reference - bau_cells = { - 'grid_value': f'{bau_col_letter}{headers["Grid Purchased Electricity (kWh)"] + 2}' if "Grid Purchased Electricity (kWh)" in headers else None, - 'elec_cost_value': f'{bau_col_letter}{headers["Purchased Electricity Cost ($)"] + 2}' if "Purchased Electricity Cost ($)" in headers else None, - 'ng_reduction_value': f'{bau_col_letter}{headers["Total Fuel (MMBtu)"] + 2}' if "Total Fuel (MMBtu)" in headers else None, - # 'util_cost_value': f'{bau_col_letter}{headers["Total Utility Costs ($)"] + 2}' if "Total Utility Costs ($)" in headers else None, - 'total_elec_costs': f'{bau_col_letter}{headers["Total Electric Costs ($)"] + 2}' if "Total Electric Costs ($)" in headers else None, - 'total_fuel_costs': f'{bau_col_letter}{headers["Total Fuel Costs ($)"] + 2}' if "Total Fuel Costs ($)" in headers else None, - 'co2_reduction_value': f'{bau_col_letter}{headers["CO2 Emissions (tonnes)"] + 2}' if "CO2 Emissions (tonnes)" in headers else None, - 'placeholder1_value': f'{bau_col_letter}{headers["Placeholder1"] + 2}' if "Placeholder1" in headers else None, - } + bau_cells = {cell_name: f'{bau_col_letter}{headers[header] + 2}' for cell_name, header in bau_cells_config.items() if header in headers} for calc in relevant_calculations: try: @@ -1507,18 +1495,18 @@ def get_bau_column(col): row_idx = headers.get(calc["name"]) if row_idx is not None: worksheet.write(row_idx + 1, col - 1, "MISSING REFERENCE IN FORMULA", 
error_format) - message = f"Cannot calculate '{calc['name']}' because the required fields are missing: {', '.join(missing_keys)} in the Table configuration provided. Update the Table to include {missing_keys}. Writing 'MISSING DATA' instead." + message = f"Cannot calculate '{calc['name']}' because the required fields are missing: {', '.join(missing_keys)} in the Table configuration provided. Update the Table to include {missing_keys}." if message not in logged_messages: logged_messages.add(message) missing_entries.append(calc["name"]) except KeyError as e: missing_field = str(e) - message = f"Cannot calculate '{calc['name']}' because the field '{missing_field}' is missing in the Table configuration provided. Update the Table to include {missing_field}. Writing 'MISSING DATA' instead." + message = f"Cannot calculate '{calc['name']}' because the field '{missing_field}' is missing in the Table configuration provided. Update the Table to include {missing_field}." if message not in logged_messages: logged_messages.add(message) row_idx = headers.get(calc["name"]) if row_idx is not None: - worksheet.write(row_idx + 1, col - 1, "MISSING DATA", error_format) + worksheet.write(row_idx + 1, col - 1, "MISSING REFERENCE IN FORMULA", error_format) missing_entries.append(calc["name"]) if missing_entries: @@ -1527,1038 +1515,8 @@ def get_bau_column(col): workbook.close() except Exception: - log_and_raise_error('create_custom_table_excel') - - -# Configuration -# Set up table needed along with REopt dictionaries to grab data -# Example Custom Table Configuration -# example_table = [ -# { -# "label": "Site Name", -# "key": "site", -# "bau_value": lambda df: "", -# "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") -# }, -# { -# "label": "Site Address", -# "key": "site_address", -# "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), -# "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") -# }, -# # Example 2: Concatenating Strings -# { -# "label": "Site Location", -# "key": "site_lat_long", -# "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", -# "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" -# }, -# { -# "label": "Technology Sizing", # This is your separator label -# "key": "tech_separator", #MUST HAVE "separator" somewhere in the name -# "bau_value": lambda df: "", -# "scenario_value": lambda df: "" -# }, -# # Example 3: Calculated Value (Sum of Two Fields), this does not show up in formulas -# { -# "label": "Combined Renewable Size (kW)", -# "key": "combined_renewable_size", -# "bau_value": lambda df: 0, -# "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") + safe_get(df, "outputs.Wind.size_kw") #NOTE: These calculations will not show up as in the excel calculations -# }, - -# # Example 4: Hardcoded Values -# { -# "label": "Hardcoded Values (kWh)", -# "key": "hardcoded_value", -# "bau_value": lambda df: 500, # BAU scenario -# "scenario_value": lambda df: 1000 # other scenarios -# }, - -# # Example 5: Conditional Formatting -# { -# "label": "PV Size Status", -# "key": "pv_size_status", -# "bau_value": lambda df: 0, -# "scenario_value": lambda df: "Above Threshold" if safe_get(df, "outputs.PV.size_kw") > 2500 else "Below Threshold" -# }, -# #Example 6 and 7: First define any data that might need to be referenced, Here I've defined two placeholders -# # Define Placeholder1 -# { -# 
"label": "Placeholder1", -# "key": "placeholder1", -# "bau_value": lambda df: 100, # BAU value -# "scenario_value": lambda df: 200 # Scenario value -# }, -# # Define Placeholder2 -# { -# "label": "Placeholder2", -# "key": "placeholder2", -# "bau_value": lambda df: 50, # BAU value -# "scenario_value": lambda df: 100 # Scenario value -# }, -# # Example 6: Calculation Without Reference to BAU -# { -# "label": "Placeholder Calculation Without BAU Reference", -# "key": "placeholder_calculation_without_bau", -# "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel -# "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel -# }, -# # Example 7: Calculation With Reference to BAU -# { -# "label": "Placeholder Calculation With BAU Reference", -# "key": "placeholder_calculation_with_bau", -# "bau_value": lambda df: 0, # Placeholder, replaced by formula in Excel -# "scenario_value": lambda df: 0 # Placeholder, replaced by formula in Excel -# }, -# { -# "label": "Results URL", -# "key": "url", -# "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid"), -# "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") -# } -# ] - -# # TASC/Single Site Configuration -# single_site_custom_table = [ -# { -# "label": "Site Name", -# "key": "site", -# "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), -# "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") -# }, -# { -# "label": "Site Address", -# "key": "site_address", -# "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), -# "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") -# }, -# { -# "label": "Site Location", -# "key": "site_lat_long", -# "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", -# "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" -# }, -# { -# "label": "Technology Sizing", # This is your separator label -# "key": "tech_separator", #MUST HAVE "separator" somewhere in the name to be identified correctly as a section separator -# "bau_value": lambda df: "", -# "scenario_value": lambda df: "" -# }, -# { -# "label": "PV Nameplate capacity (kW), new", -# "key": "pv_size_purchased", -# "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") -# }, -# { -# "label": "PV Nameplate capacity (kW), existing", -# "key": "pv_size_existing", -# "bau_value": lambda df: safe_get(df, "outputs.PV.existing_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.PV.existing_kw") -# }, -# { -# "label": "PV Serving Load (kWh)", -# "key": "pv_serving_load", -# "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw") -# }, -# { -# "label": "Wind Nameplate capacity (kW), new", -# "key": "wind_size_purchased", -# "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") -# }, -# { -# "label": "Wind Serving Load (kWh)", -# "key": "wind_serving_load", -# "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw_bau"), -# "scenario_value": lambda df: safe_get(df, 
"outputs.Wind.electric_to_load_series_kw") -# }, -# { -# "label": "Backup Generator Nameplate capacity (kW), new", -# "key": "backup_generator_capacity_purchased", -# "bau_value": lambda df: safe_get(df, "outputs.Generator.size_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Generator.size_kw") -# }, -# { -# "label": "Backup Generator Nameplate capacity (kW), existing", -# "key": "backup_generator_capacity_existing", -# "bau_value": lambda df: safe_get(df, "outputs.Generator.existing_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Generator.existing_kw") -# }, -# { -# "label": "Backup Generator Serving Load (kWh)", -# "key": "backup_generator_serving_load", -# "bau_value": lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw") -# }, -# { -# "label": "Battery power (kW)", -# "key": "battery_power", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw") -# }, -# { -# "label": "Battery capacity (kWh)", -# "key": "battery_capacity", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh") -# }, -# { -# "label": "Battery Serving Load (kWh)", -# "key": "battery_serving_load", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw") -# }, -# { -# "label": "CHP capacity (kW)", -# "key": "chp_capacity", -# "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") -# }, -# { -# "label": "CHP Serving Load (kWh)", -# "key": "chp_serving_load", -# "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw") -# }, -# { -# "label": "Absorption chiller capacity (tons)", -# "key": "absorption_chiller_capacity", -# "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton") -# }, -# { -# "label": "Absorption Chiller Serving Load (ton)", -# "key": "absorption_chiller_serving_load", -# "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton") -# }, -# { -# "label": "Chilled water TES capacity (gallons)", -# "key": "chilled_water_tes_capacity", -# "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal") -# }, -# { -# "label": "Chilled Water TES Serving Load (ton)", -# "key": "chilled_water_tes_serving_load", -# "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.storage_to_load_series_ton_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.storage_to_load_series_ton") -# }, -# { -# "label": "Hot water TES capacity (gallons)", -# "key": "hot_water_tes_capacity", -# "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal") -# }, -# { -# "label": "Hot Water 
TES Serving Load (MMBtu)", -# "key": "hot_water_tes_serving_load", -# "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour") -# }, -# { -# "label": "Steam turbine capacity (kW)", -# "key": "steam_turbine_capacity", -# "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw") -# }, -# { -# "label": "Steam Turbine Serving Load (kWh)", -# "key": "steam_turbine_serving_load", -# "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw") -# }, -# { -# "label": "GHP heat pump capacity (ton)", -# "key": "ghp_heat_pump_capacity", -# "bau_value": lambda df: safe_get(df, "outputs.GHP.size_ton_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_ton") -# }, -# { -# "label": "GHP ground heat exchanger size (ft)", -# "key": "ghp_ground_heat_exchanger_size", -# "bau_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft") -# }, -# { -# "label": "Grid Purchased Electricity (kWh)", -# "key": "grid_purchased_electricity", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") -# }, -# { -# "label": "Total Site Electricity Use (kWh)", -# "key": "total_site_electricity_use", -# "bau_value": lambda df: 0, -# "scenario_value": lambda df: 0 -# }, -# { -# "label": "Net Purchased Electricity Reduction (%)", -# "key": "net_purchased_electricity_reduction", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf") -# }, -# { -# "label": "Financials", # This is your separator label -# "key": "fin_separator", #MUST HAVE "separator" somewhere in the name -# "bau_value": lambda df: "", -# "scenario_value": lambda df: "" -# }, -# { -# "label": "Electricity Energy Cost ($)", -# "key": "electricity_energy_cost", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax") -# }, -# { -# "label": "Electricity Demand Cost ($)", -# "key": "electricity_demand_cost", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax") -# }, -# { -# "label": "Utility Fixed Cost ($)", -# "key": "utility_fixed_cost", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax") -# }, -# { -# "label": "Purchased Electricity Cost ($)", -# "key": "purchased_electricity_cost", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax") -# }, -# { -# "label": "Electricity Export Benefit ($)", -# "key": 
"electricity_export_benefit", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax") -# }, -# { -# "label": "Net Electricity Cost ($)", -# "key": "net_electricity_cost", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax") -# }, -# { -# "label": "Electricity Cost Savings ($/year)", -# "key": "electricity_cost_savings", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau") -# }, -# { -# "label": "Boiler Fuel (MMBtu)", -# "key": "boiler_fuel", -# "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu") -# }, -# { -# "label": "CHP Fuel (MMBtu)", -# "key": "chp_fuel", -# "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu") -# }, -# { -# "label": "Total Fuel (MMBtu)", -# "key": "total_fuel", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh") -# }, -# { -# "label": "Natural Gas Reduction (%)", -# "key": "natural_gas_reduction", -# "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau") -# }, -# { -# "label": "Boiler Thermal Production (MMBtu)", -# "key": "boiler_thermal_production", -# "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") -# }, -# { -# "label": "CHP Thermal Production (MMBtu)", -# "key": "chp_thermal_production", -# "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") -# }, -# { -# "label": "Total Thermal Production (MMBtu)", -# "key": "total_thermal_production", -# "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") -# }, -# { -# "label": "Heating System Fuel Cost ($)", -# "key": "heating_system_fuel_cost", -# "bau_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars") -# }, -# { -# "label": "CHP Fuel Cost ($)", -# "key": "chp_fuel_cost", -# "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") -# }, -# { -# "label": "Total Fuel (NG) Cost ($)", -# "key": "total_fuel_ng_cost", -# "bau_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars") 
-# }, -# { -# "label": "Total Utility Cost ($)", -# "key": "total_utility_cost", -# "bau_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars") -# }, -# { -# "label": "O&M Cost Increase ($)", -# "key": "om_cost_increase", -# "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") -# }, -# { -# "label": "Payback Period (years)", -# "key": "payback_period", -# "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") -# }, -# { -# "label": "Gross Capital Cost ($)", -# "key": "gross_capital_cost", -# "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") -# }, -# { -# "label": "Federal Tax Incentive (30%)", -# "key": "federal_tax_incentive", -# "bau_value": lambda df: 0.3, -# "scenario_value": lambda df: 0.3 -# }, -# { -# "label": "Additional Grant ($)", -# "key": "additional_grant", -# "bau_value": lambda df: 0, -# "scenario_value": lambda df: 0 -# }, -# { -# "label": "Incentive Value ($)", -# "key": "incentive_value", -# "bau_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars") -# }, -# { -# "label": "Net Capital Cost ($)", -# "key": "net_capital_cost", -# "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") -# }, -# { -# "label": "Annual Cost Savings ($)", -# "key": "annual_cost_savings", -# "bau_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars") -# }, -# { -# "label": "Simple Payback (years)", -# "key": "simple_payback", -# "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") -# }, -# { -# "label": "CO2 Emissions (tonnes)", -# "key": "co2_emissions", -# "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") -# }, -# { -# "label": "CO2 Reduction (tonnes)", -# "key": "co2_reduction", -# "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") -# }, -# { -# "label": "CO2 (%) savings", -# "key": "co2_savings_percentage", -# "bau_value": lambda df: 0, -# "scenario_value": lambda df: 0 -# }, -# { -# "label": "NPV ($)", -# "key": "npv", -# "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), -# "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") -# }, -# { -# "label": "PV Federal Tax Incentive (%)", -# "key": "pv_federal_tax_incentive", -# "bau_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction_bau"), -# "scenario_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction") -# }, -# { -# "label": 
"Storage Federal Tax Incentive (%)", -# "key": "storage_federal_tax_incentive", -# "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), -# "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") -# }, -# { -# "label": "Results URL", -# "key": "url", -# "bau_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid"), -# "scenario_value": lambda df: f"https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/"+safe_get(df, "webtool_uuid") -# }, -# ] - -webtool_table = [ - { - "label": "Evaluation Name", - "key": "evaluation_name", - "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), - "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") - }, - { - "label": "BAU or Optimal Case?", - "key": "bau_or_optimal_case", - "bau_value": lambda df: "BAU", - "scenario_value": lambda df: "Optimal" - }, - { - "label": "Site Location", - "key": "site_location", - "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", - "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" - }, - { - "label": "Results URL", - "key": "url", - "bau_value": lambda df: f'=HYPERLINK("https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/{safe_get(df, "webtool_uuid")}", "Results Link")', - "scenario_value": lambda df: f'=HYPERLINK("https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/{safe_get(df, "webtool_uuid")}", "Results Link")' - }, - { - "label": "System Capacities", - "key": "system_capacities_separator", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - { - "label": "PV capacity, new (kW)", - "key": "pv_capacity_new", - "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") - }, - { - "label": "PV capacity, existing (kW)", - "key": "pv_size_purchased", - "bau_value": lambda df: safe_get(df, "outputs.PV.existing_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.existing_kw") - }, - { - "label": "Wind Capacity (kW)", - "key": "wind_capacity", - "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") - }, - { - "label": "Backup Generator Capacity, New (kW)", - "key": "backup_generator_new", - "bau_value": lambda df: safe_get(df, "outputs.Generator.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.size_kw") - }, - { - "label": "Backup Generator Capacity, Existing (kW)", - "key": "backup_generator_existing", - "bau_value": lambda df: safe_get(df, "outputs.Generator.existing_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.existing_kw") - }, - { - "label": "Generator Annual Fuel Consumption (gallons)", - "key": "backup_generator_fuel_consumption", - "bau_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal") - }, - { - "label": "Generator Fuel Cost ($)", - "key": "backup_generator_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") - }, - { - "label": "Generator Lifecycle Fuel Cost ($)", - 
"key": "lifecycle_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.Generator.lifecycle_fuel_cost_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.lifecycle_fuel_cost_after_tax") - }, - { - "label": "Battery Power Capacity (kW)", - "key": "battery_power_capacity", - "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw") - }, - { - "label": "Battery Energy Capacity (kWh)", - "key": "battery_energy_capacity", - "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh") - }, - { - "label": "CHP Capacity (kW)", - "key": "chp_capacity", - "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") - }, - { - "label": "Absorption Chiller Capacity (tons)", - "key": "absorption_chiller_capacity", - "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton") - }, - { - "label": "Chilled Water TES Capacity (gallons)", - "key": "chilled_water_tes_capacity", - "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal") - }, - { - "label": "Hot Water TES Capacity (gallons)", - "key": "hot_water_tes_capacity", - "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal") - }, - { - "label": "Steam Turbine Capacity (kW)", - "key": "steam_turbine_capacity", - "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw") - }, - { - "label": "GHP Heat Pump Capacity (ton)", - "key": "ghp_heat_pump_capacity", - "bau_value": lambda df: safe_get(df, "outputs.GHP.size_ton_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_ton") - }, - { - "label": "GHP Ground Heat Exchanger Size (ft)", - "key": "ghp_ground_heat_exchanger_size", - "bau_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft") - }, - { - "label": "Summary Financial Metrics", - "key": "summary_financial_metrics_separator", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - { - "label": "Gross Capital Costs, Before Incentives ($)", - "key": "gross_capital_costs_before_incentives", - "bau_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs") - }, - { - "label": "Present Value of Incentives ($)", - "key": "present_value_of_incentives", - "bau_value": lambda df: safe_get(df, "outputs.Financial.production_incentive_max_benefit_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.production_incentive_max_benefit") - }, - { - "label": "Net Capital Cost ($)", - "key": "net_capital_cost", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") - }, - { - "label": "Year 1 O&M Cost, Before Tax ($)", - "key": "year_1_om_cost_before_tax", - "bau_value": lambda df: safe_get(df, 
"outputs.Financial.year_one_om_costs_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.year_one_om_costs_before_tax") - }, - { - "label": "Total Life Cycle Costs ($)", - "key": "total_life_cycle_costs", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lcc_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lcc") - }, - { - "label": "Net Present Value ($)", - "key": "npv", - "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") - }, - { - "label": "Payback Period (years)", - "key": "payback_period", - "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") - }, - { - "label": "Simple Payback (years)", - "key": "simple_payback_period", - "bau_value": lambda df: safe_get(df, ""), - "scenario_value": lambda df: safe_get(df, "") - }, - { - "label": "Internal Rate of Return (%)", - "key": "internal_rate_of_return", - "bau_value": lambda df: safe_get(df, "outputs.Financial.internal_rate_of_return_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.internal_rate_of_return") - }, - { - "label": "Life Cycle Cost Breakdown", - "key": "lifecycle_cost_breakdown_separator", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - { - "label": "Technology Capital Costs + Replacements, After Incentives ($)", - "key": "technology_capital_costs_after_incentives", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_generation_tech_capital_costs_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_generation_tech_capital_costs") - }, - { - "label": "O&M Costs ($)", - "key": "om_costs", - "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") - }, - { - "label": "Total Electric Costs ($)", - "key": "total_electric_utility_costs", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax") - }, - { - "label": "Total Fuel Costs ($)", - "key": "total_fuel_costs", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax") - }, - { - "label": "Total Utility Costs ($)", - "key": "total_fuel_costs", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - { - "label": "Total Emissions Costs ($)", - "key": "total_emissions_costs", - "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_cost_health_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_cost_health") - }, - { - "label": "LCC ($)", - "key": "lcc", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lcc_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lcc") - }, - { - "label": "NPV as a % of BAU LCC (%)", - "key": "npv_bau_percent", - "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_as_bau_lcc_percent_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv_as_bau_lcc_percent") - }, - { - "label": "Year 1 Electric Bill", - "key": "year_1_electric_bill_separator", - "bau_value": lambda df: "", - 
"scenario_value": lambda df: "" - }, - { - "label": "Electric Grid Purchases (kWh)", - "key": "electric_grid_purchases", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") - }, - { - "label": "Energy Charges ($)", - "key": "electricity_energy_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax") - }, - { - "label": "Demand Charges ($)", - "key": "electricity_demand_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax") - }, - { - "label": "Fixed Charges ($)", - "key": "utility_fixed_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax") - }, - { - "label": "Purchased Electricity Cost ($)", - "key": "purchased_electricity_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax") - }, - { - "label": "Annual Cost Savings ($)", - "key": "annual_cost_savings", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - { - "label": "Year 1 Fuel Costs & Consumption", - "key": "year_1_fuel_costs_separator", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - { - "label": "Boiler Fuel Consumption (mmbtu)", - "key": "boiler_fuel_consumption", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_fuel_consumption_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_fuel_consumption_mmbtu") - }, - { - "label": "Boiler Fuel Costs ($)", - "key": "boiler_fuel_costs", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax") - }, - { - "label": "CHP Fuel Consumption (mmbtu)", - "key": "chp_fuel_consumption", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu") - }, - { - "label": "CHP Fuel Cost ($)", - "key": "chp_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") - }, - { - "label": "Backup Generator Fuel Consumption (gallons)", - "key": "backup_generator_fuel_consumption", - "bau_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal") - }, - { - "label": "Backup Generator Fuel Cost ($)", - "key": "backup_generator_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") - }, - { - "label": "Renewable Energy & Emissions", - "key": "renewable_energy_emissions_separator", - "bau_value": lambda df: "", - "scenario_value": lambda 
df: "" - }, - { - "label": "Annual % Renewable Electricity (%)", - "key": "annual_renewable_electricity", - "bau_value": lambda df: safe_get(df, "outputs.Site.renewable_electricity_fraction_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.renewable_electricity_fraction") - }, - { - "label": "Year 1 CO2 Emissions (tonnes)", - "key": "year_1_co2_emissions", - "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") - }, - { - "label": "CO2 Emissions (tonnes)", - "key": "co2_emissions", - "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") - }, - { - "label": "CO2 (%) savings", - "key": "co2_savings_percentage", - "bau_value": lambda df: 0, - "scenario_value": lambda df: 0 - }, - { - "label": "Annual Energy Production & Throughput", - "key": "energy_production_throughput_separator", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - { - "label": "PV (kWh)", - "key": "pv_kwh", - "bau_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh") - }, - { - "label": "Wind (kWh)", - "key": "wind_kwh", - "bau_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh") - }, - { - "label": "CHP (kWh)", - "key": "chp_kwh", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh") - }, - { - "label": "CHP (MMBtu)", - "key": "chp_mmbtu", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") - }, - { - "label": "Boiler (MMBtu)", - "key": "boiler_mmbtu", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") - }, - { - "label": "Battery (kWh)", - "key": "battery_kwh", - "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw") - }, - { - "label": "HW-TES (MMBtu)", - "key": "hw_tes_mmbtu", - "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.annual_energy_produced_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.annual_energy_produced_mmbtu") - }, - { - "label": "CW-TES (MMBtu)", - "key": "cw_tes_mmbtu", - "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.annual_energy_produced_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.annual_energy_produced_mmbtu") - }, - { - "label": "Breakdown of Incentives", - "key": "breakdown_of_incentives_separator", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" - }, - { - "label": "Federal Tax Incentive (30%)", - "key": "federal_tax_incentive_30", - "bau_value": lambda df: 0.3, - "scenario_value": lambda df: 0.3 - }, - { - "label": "PV Federal Tax Incentive (%)", - "key": "pv_federal_tax_incentive", - "bau_value": lambda df: safe_get(df, 
"inputs.PV.federal_itc_fraction_bau"), - "scenario_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction") - }, - { - "label": "Storage Federal Tax Incentive (%)", - "key": "storage_federal_tax_incentive", - "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), - "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") - }, - # { - # "label": "Incentive Value ($)", - # "key": "incentive_value", - # "bau_value": lambda df: safe_get(df, "outputs.Financial.breakeven_cost_of_emissions_reduction_per_tonne_CO2_bau"), - # "scenario_value": lambda df: safe_get(df, "outputs.Financial.breakeven_cost_of_emissions_reduction_per_tonne_CO2") - # }, - { - "label": "Additional Grant ($)", - "key": "iac_grant", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_production_incentive_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_production_incentive_after_tax") - } -] - - - -# Configuration for calculations -calculations = [ - { - "name": "Total Site Electricity Use (kWh)", - "formula": lambda col, bau, headers: ( - f'={col}{headers["PV Serving Load (kWh)"] + 2}+' - f'{col}{headers["Wind Serving Load (kWh)"] + 2}+' - f'{col}{headers["CHP Serving Load (kWh)"] + 2}+' - f'{col}{headers["Battery Serving Load (kWh)"] + 2}+' - f'{col}{headers["Backup Generator Serving Load (kWh)"] + 2}+' - f'{col}{headers["Steam Turbine Serving Load (kWh)"] + 2}+' - f'{col}{headers["Grid Purchased Electricity (kWh)"] + 2}' - ) - }, - - { - "name": "Net Purchased Electricity Reduction (%)", - "formula": lambda col, bau, headers: f'=({bau["grid_value"]}-{col}{headers["Grid Purchased Electricity (kWh)"] + 2})/{bau["grid_value"]}' - }, - { - "name": "Purchased Electricity Cost ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Energy Charges ($)"] + 2}+{col}{headers["Demand Charges ($)"] + 2}+{col}{headers["Fixed Charges ($)"] + 2}' - }, - { - "name": "Net Electricity Cost ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Purchased Electricity Cost ($)"] + 2}-{col}{headers["Electricity Export Benefit ($)"] + 2}' - }, - { - "name": "Electricity Cost Savings ($/year)", - "formula": lambda col, bau, headers: f'={bau["elec_cost_value"]}-{col}{headers["Purchased Electricity Cost ($)"] + 2}' - }, - { - "name": "Total Fuel (MMBtu)", - "formula": lambda col, bau, headers: f'={col}{headers["Boiler Fuel (MMBtu)"] + 2}+{col}{headers["CHP Fuel (MMBtu)"] + 2}' - }, - { - "name": "Natural Gas Reduction (%)", - "formula": lambda col, bau, headers: f'=({bau["ng_reduction_value"]}-{col}{headers["Total Fuel (MMBtu)"] + 2})/{bau["ng_reduction_value"]}' - }, - { - "name": "Total Thermal Production (MMBtu)", - "formula": lambda col, bau, headers: f'={col}{headers["Boiler Thermal Production (MMBtu)"] + 2}+{col}{headers["CHP Thermal Production (MMBtu)"] + 2}' - }, - # { - # "name": "Total Fuel Costs ($)", - # "formula": lambda col, bau, headers: f'={col}{headers["Heating System Fuel Cost ($)"] + 2}+{col}{headers["CHP Fuel Cost ($)"] + 2}' - # }, - { - "name": "Total Utility Costs ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Total Electric Costs ($)"] + 2}+{col}{headers["Total Fuel Costs ($)"] + 2}' - }, - # { - # "name": "Incentive Value ($)", - # "formula": lambda col, bau, headers: f'=({col}{headers["Federal Tax Incentive (30%)"] + 2}*{col}{headers["Gross Capital Cost ($)"] + 2})+{col}{headers["Additional Grant ($)"] + 2}' - # }, - # { - # "name": "Net Capital Cost ($)", - # 
"formula": lambda col, bau, headers: f'={col}{headers["Gross Capital Cost ($)"] + 2}-{col}{headers["Incentive Value ($)"] + 2}' - # }, - { - "name": "Annual Cost Savings ($)", - "formula": lambda col, bau, headers: f'={bau["elec_cost_value"]}+-{col}{headers["Purchased Electricity Cost ($)"] + 2}' - }, - { - "name": "Simple Payback (years)", - "formula": lambda col, bau, headers: f'={col}{headers["Net Capital Cost ($)"] + 2}/{col}{headers["Annual Cost Savings ($)"] + 2}' - }, - # { - # "name": "CO2 Reduction (tonnes)", - # "formula": lambda col, bau, headers: f'={bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2}' - # }, - { - "name": "CO2 (%) savings", - "formula": lambda col, bau, headers: f'=({bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2})/{bau["co2_reduction_value"]}' - }, - #Example Calculations - # Calculation Without Reference to bau_cells - { - "name": "Placeholder Calculation Without BAU Reference", - "formula": lambda col, bau, headers: f'={col}{headers["Placeholder1"] + 2}+{col}{headers["Placeholder2"] + 2}' - # This formula adds Placeholder1 and Placeholder2 values from the scenario. - }, - - # Calculation With Reference to bau_cells - { - "name": "Placeholder Calculation With BAU Reference", - "formula": lambda col, bau, headers: f'=({bau["placeholder1_value"]}-{col}{headers["Placeholder2"] + 2})/{bau["placeholder1_value"]}' - # This formula calculates the percentage change of Placeholder2 using Placeholder1's BAU value as the reference. - } -] + log_and_raise_error('generate_excel_workbook') -############################################################### -################ END Custom Table ############################# -############################################################### \ No newline at end of file +############################################################################################################################## +################################################### END Custom Table ######################################################### +############################################################################################################################## \ No newline at end of file From 48c8f628b4132cbf44ca21d129a7c19a2b75d8b6 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Tue, 24 Sep 2024 17:18:06 -0600 Subject: [PATCH 35/44] updated webtool table configuration --- julia_src/http.jl | 1 - reoptjl/custom_table_config.py | 945 +++++++++++++++++++++++---------- reoptjl/views.py | 172 +++++- 3 files changed, 820 insertions(+), 298 deletions(-) diff --git a/julia_src/http.jl b/julia_src/http.jl index 6702bd6ed..6af9762ac 100644 --- a/julia_src/http.jl +++ b/julia_src/http.jl @@ -572,6 +572,5 @@ HTTP.register!(ROUTER, "GET", "/ghp_efficiency_thermal_factors", ghp_efficiency_ HTTP.register!(ROUTER, "GET", "/ground_conductivity", ground_conductivity) HTTP.register!(ROUTER, "GET", "/health", health) HTTP.register!(ROUTER, "GET", "/get_existing_chiller_default_cop", get_existing_chiller_default_cop) -HTTP.register!(ROUTER, "GET", "/generate_custom_comparison_table", generate_custom_comparison_table) HTTP.register!(ROUTER, "GET", "/get_ashp_defaults", get_ashp_defaults) HTTP.serve(ROUTER, "0.0.0.0", 8081, reuseaddr=true) diff --git a/reoptjl/custom_table_config.py b/reoptjl/custom_table_config.py index 22ccd82f5..966d2ef89 100644 --- a/reoptjl/custom_table_config.py +++ b/reoptjl/custom_table_config.py @@ -112,456 +112,813 @@ # Webtool table 
configuration custom_table_webtool = [ + ##################################################################################################### + ################################ General Information ################################ + ##################################################################################################### + { - "label": "Evaluation Name", - "key": "evaluation_name", - "bau_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided"), + "label" : "Evaluation Name", + "key" : "evaluation_name", + "bau_value" : lambda df: safe_get(df, "inputs.Meta.description", "None provided"), "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") }, { - "label": "BAU or Optimal Case?", - "key": "bau_or_optimal_case", - "bau_value": lambda df: "BAU", + "label" : "BAU or Optimal Case?", + "key" : "bau_or_optimal_case", + "bau_value" : lambda df: "BAU", "scenario_value": lambda df: "Optimal" }, { - "label": "Site Location", - "key": "site_location", - "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", + "label" : "Site Location", + "key" : "site_location", + "bau_value" : lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" }, { - "label": "Results URL", - "key": "url", - "bau_value": lambda df: '', + "label" : "Results URL", + "key" : "url", + "bau_value" : lambda df: f'=HYPERLINK("https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/{safe_get(df, "webtool_uuid")}", "Results Link")', "scenario_value": lambda df: f'=HYPERLINK("https://custom-table-download-reopt-stage.its.nrel.gov/tool/results/{safe_get(df, "webtool_uuid")}", "Results Link")' }, + ##################################################################################################### + ######################### System Capacities ############################# + ##################################################################################################### { - "label": "System Capacities", - "key": "system_capacities_separator", - "bau_value": lambda df: "", + "label" : "System Capacities", + "key" : "system_capacities_separator", + "bau_value" : lambda df: "", "scenario_value": lambda df: "" }, { - "label": "PV capacity, new (kW)", - "key": "pv_capacity_new", - "bau_value": lambda df: safe_get(df, "outputs.PV.size_kw_bau"), + "label" : "PV capacity, new (kW)", + "key" : "pv_capacity_new", + "bau_value" : lambda df: safe_get(df, "outputs.PV.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") }, { - "label": "PV capacity, existing (kW)", - "key": "pv_size_purchased", - "bau_value": lambda df: safe_get(df, "outputs.PV.existing_kw_bau"), + "label" : "PV capacity, existing (kW)", + "key" : "pv_size_purchased", + "bau_value" : lambda df: safe_get(df, "outputs.PV.existing_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.PV.existing_kw") }, { - "label": "Wind Capacity (kW)", - "key": "wind_capacity", - "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), + "label" : "Wind Capacity (kW)", + "key" : "wind_capacity", + "bau_value" : lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") }, + # Moved Battery up in front of generator { - "label": "Backup Generator Capacity, New (kW)", - "key": "backup_generator_new", - "bau_value": lambda 
df: safe_get(df, "outputs.Generator.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.size_kw") - }, - { - "label": "Backup Generator Capacity, Existing (kW)", - "key": "backup_generator_existing", - "bau_value": lambda df: safe_get(df, "outputs.Generator.existing_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.existing_kw") + "label" : "Battery Power Capacity (kW)", + "key" : "battery_power_capacity", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricStorage.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw") }, { - "label": "Generator Annual Fuel Consumption (gallons)", - "key": "backup_generator_fuel_consumption", - "bau_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal") + "label" : "Battery Energy Capacity (kWh)", + "key" : "battery_energy_capacity", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh") }, { - "label": "Generator Fuel Cost ($)", - "key": "backup_generator_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") + "label" : "Backup Generator Capacity, New (kW)", + "key" : "backup_generator_new", + "bau_value" : lambda df: safe_get(df, "outputs.Generator.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.size_kw") }, { - "label": "Generator Lifecycle Fuel Cost ($)", - "key": "lifecycle_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.Generator.lifecycle_fuel_cost_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.lifecycle_fuel_cost_after_tax") + "label" : "Backup Generator Capacity, Existing (kW)", + "key" : "backup_generator_existing", + "bau_value" : lambda df: safe_get(df, "outputs.Generator.existing_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.existing_kw") }, { - "label": "Battery Power Capacity (kW)", - "key": "battery_power_capacity", - "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kw") + "label" : "CHP Capacity (kW)", + "key" : "chp_capacity", + "bau_value" : lambda df : safe_get(df, "outputs.CHP.size_kw_bau"), + "scenario_value": lambda df : safe_get(df, "outputs.CHP.size_kw") }, { - "label": "Battery Energy Capacity (kWh)", - "key": "battery_energy_capacity", - "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.size_kwh") + "label" : "Steam Turbine Capacity (kW)", + "key" : "steam_turbine_capacity", + "bau_value" : lambda df: safe_get(df, "outputs.SteamTurbine.size_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw") }, { - "label": "CHP Capacity (kW)", - "key": "chp_capacity", - "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") + "label" : "Hot Water TES Capacity (gallons)", + "key" : "hot_water_tes_capacity", + "bau_value" : lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal") }, { - 
"label": "Absorption Chiller Capacity (tons)", - "key": "absorption_chiller_capacity", - "bau_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton_bau"), + "label" : "Absorption Chiller Capacity (tons)", + "key" : "absorption_chiller_capacity", + "bau_value" : lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton_bau"), "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.size_ton") }, { - "label": "Chilled Water TES Capacity (gallons)", - "key": "chilled_water_tes_capacity", - "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal_bau"), + "label" : "Chilled Water TES Capacity (gallons)", + "key" : "chilled_water_tes_capacity", + "bau_value" : lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.size_gal") }, { - "label": "Hot Water TES Capacity (gallons)", - "key": "hot_water_tes_capacity", - "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.size_gal") + "label" : "GHP Heat Pump Capacity (ton)", + "key" : "ghp_heat_pump_capacity", + "bau_value" : lambda df: safe_get(df, "outputs.GHP.size_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_ton") }, { - "label": "Steam Turbine Capacity (kW)", - "key": "steam_turbine_capacity", - "bau_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.size_kw") + "label" : "GHP Ground Heat Exchanger Size (ft)", + "key" : "ghp_ground_heat_exchanger_size", + "bau_value" : lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft") }, + # New ASHP entries { - "label": "GHP Heat Pump Capacity (ton)", - "key": "ghp_heat_pump_capacity", - "bau_value": lambda df: safe_get(df, "outputs.GHP.size_ton_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_ton") + "label" : "ASHP Space Heating and Cooling Capacity (ton)", + "key" : "ashp_space_heating_cap", + "bau_value" : lambda df: safe_get(df, "outputs.ASHPSpaceHeater.size_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ASHPSpaceHeater.size_ton") }, { - "label": "GHP Ground Heat Exchanger Size (ft)", - "key": "ghp_ground_heat_exchanger_size", - "bau_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft") + "label" : "ASHP Water Heating Capacity (ton)", + "key" : "ashp_water_heating_cap", + "bau_value" : lambda df: safe_get(df, "outputs.ASHPWaterHeater.size_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ASHPWaterHeater.size_ton") }, + ##################################################################################################### + ########################### Summary Financial Metrics ########################### + ##################################################################################################### { - "label": "Summary Financial Metrics", - "key": "summary_financial_metrics_separator", - "bau_value": lambda df: "", + "label" : "Summary Financial Metrics", + "key" : "summary_financial_metrics_separator", + "bau_value" : lambda df: "", "scenario_value": lambda df: "" }, { - "label": "Gross Capital Costs, Before Incentives ($)", - "key": "gross_capital_costs_before_incentives", - "bau_value": lambda df: safe_get(df, 
"outputs.Financial.initial_capital_costs_bau"), + "label" : "Gross Upfront Capital Costs, Before Incentives ($)", + "key" : "gross_capital_costs_before_incentives", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs") }, { - "label": "Present Value of Incentives ($)", - "key": "present_value_of_incentives", - "bau_value": lambda df: safe_get(df, "outputs.Financial.production_incentive_max_benefit_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.production_incentive_max_benefit") + "label" : "Net Upfront Capital Cost, After Incentives ($)", + "key" : "net_upfront_capital_cost", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_after_incentives_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_after_incentives") }, + #CALCULATED VALUE { - "label": "Net Capital Cost ($)", - "key": "net_capital_cost", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") + "label" : "Present Value of Incentives ($)", + "key" : "present_value_of_incentives", + "bau_value" : lambda df: "", + "scenario_value": lambda df: "" }, { - "label": "Year 1 O&M Cost, Before Tax ($)", - "key": "year_1_om_cost_before_tax", - "bau_value": lambda df: safe_get(df, "outputs.Financial.year_one_om_costs_before_tax_bau"), + "label" : "Year 1 O&M Cost, Before Tax ($)", + "key" : "year_1_om_cost_before_tax", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.year_one_om_costs_before_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.year_one_om_costs_before_tax") }, { - "label": "Total Life Cycle Costs ($)", - "key": "total_life_cycle_costs", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lcc_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lcc") - }, - { - "label": "Net Present Value ($)", - "key": "npv", - "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), + "label" : "Net Present Value ($)", + "key" : "npv", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.npv_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") }, { - "label": "Payback Period (years)", - "key": "payback_period", - "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), + "label" : "Payback Period (years)", + "key" : "payback_period", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") }, { - "label": "Simple Payback (years)", - "key": "simple_payback_period", - "bau_value": lambda df: safe_get(df, ""), - "scenario_value": lambda df: safe_get(df, "") - }, - { - "label": "Internal Rate of Return (%)", - "key": "internal_rate_of_return", - "bau_value": lambda df: safe_get(df, "outputs.Financial.internal_rate_of_return_bau"), + "label" : "Internal Rate of Return (%)", + "key" : "internal_rate_of_return", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.internal_rate_of_return_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.internal_rate_of_return") }, + ##################################################################################################### + ############################ Life Cycle Cost Breakdown ########################### + 
##################################################################################################### { - "label": "Life Cycle Cost Breakdown", - "key": "lifecycle_cost_breakdown_separator", - "bau_value": lambda df: "", + "label" : "Life Cycle Cost Breakdown", + "key" : "lifecycle_cost_breakdown_separator", + "bau_value" : lambda df: "", "scenario_value": lambda df: "" }, { - "label": "Technology Capital Costs + Replacements, After Incentives ($)", - "key": "technology_capital_costs_after_incentives", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_generation_tech_capital_costs_bau"), + "label" : "Technology Capital Costs + Replacements, After Incentives ($)", + "key" : "technology_capital_costs_after_incentives", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.lifecycle_generation_tech_capital_costs_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_generation_tech_capital_costs") }, { - "label": "O&M Costs ($)", - "key": "om_costs", - "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), + "label" : "O&M Costs ($)", + "key" : "om_costs", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") }, { - "label": "Total Electric Costs ($)", - "key": "total_electric_utility_costs", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax_bau"), + "label" : "Total Electric Costs ($)", + "key" : "total_electric_utility_costs", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax") }, { - "label": "Total Fuel Costs ($)", - "key": "total_fuel_costs", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax_bau"), + "label" : "Total Fuel Costs ($)", + "key" : "total_fuel_costs", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax") }, { - "label": "Total Utility Costs ($)", - "key": "total_fuel_costs", - "bau_value": lambda df: "", - "scenario_value": lambda df: "" + "label" : "Total Utility Costs ($)", + "key" : "total_utility_costs", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax_bau")+ safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_fuel_costs_after_tax")+ safe_get(df, "outputs.Financial.lifecycle_elecbill_after_tax") }, { - "label": "Total Emissions Costs ($)", - "key": "total_emissions_costs", - "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_cost_health_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_cost_health") + "label" : "Total Hypothetical Emissions Costs (not included in LCC)", + "key" : "total_emissions_costs", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.lifecycle_emissions_cost_climate_bau") + safe_get(df, "outputs.Financial.lifecycle_emissions_cost_health_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_emissions_cost_climate") + safe_get(df, "outputs.Financial.lifecycle_emissions_cost_health") }, { - "label": "LCC ($)", - "key": "lcc", - "bau_value": lambda 
df: safe_get(df, "outputs.Financial.lcc_bau"), + "label" : "Lifecycle Costs ($)", + "key" : "lcc", + "bau_value" : lambda df: safe_get(df, "outputs.Financial.lcc_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Financial.lcc") }, + # Calculated NPV as a % of BAU LCC (%) { - "label": "NPV as a % of BAU LCC (%)", - "key": "npv_bau_percent", - "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_as_bau_lcc_percent_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv_as_bau_lcc_percent") + "label" : "NPV as a % of BAU LCC (%)", + "key" : "npv_bau_percent", + "bau_value" : lambda df: "", + "scenario_value": lambda df: "" }, + ##################################################################################################### + ############################ Year 1 Electric Bill ########################### + ##################################################################################################### { - "label": "Year 1 Electric Bill", - "key": "year_1_electric_bill_separator", - "bau_value": lambda df: "", + "label" : "Year 1 Electric Bill", + "key" : "year_1_electric_bill_separator", + "bau_value" : lambda df: "", "scenario_value": lambda df: "" }, { - "label": "Electric Grid Purchases (kWh)", - "key": "electric_grid_purchases", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), + "label" : "Electric Grid Purchases (kWh)", + "key" : "electric_grid_purchases", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") }, { - "label": "Energy Charges ($)", - "key": "electricity_energy_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), + "label" : "Energy Charges ($)", + "key" : "electricity_energy_cost", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax") }, { - "label": "Demand Charges ($)", - "key": "electricity_demand_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), + "label" : "Demand Charges ($)", + "key" : "electricity_demand_cost", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax") }, { - "label": "Fixed Charges ($)", - "key": "utility_fixed_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), + "label" : "Fixed Charges ($)", + "key" : "utility_fixed_cost", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax") }, { - "label": "Purchased Electricity Cost ($)", - "key": "purchased_electricity_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax_bau"), + "label" : "Purchased Electricity Cost ($)", + "key" : "purchased_electricity_cost", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax") }, { - "label": "Annual Cost Savings ($)", - "key": "annual_cost_savings", - "bau_value": 
lambda df: "", + "label" : "Electricity Cost Savings ($)", + "key" : "electricity_cost_savings", + "bau_value" : lambda df: "", "scenario_value": lambda df: "" }, + ##################################################################################################### + ############################ Year 1 Fuel Cost ########################### + ##################################################################################################### { - "label": "Year 1 Fuel Costs & Consumption", - "key": "year_1_fuel_costs_separator", - "bau_value": lambda df: "", + "label" : "Year 1 Fuel Cost", + "key" : "year_1_fuel_cost_separator", + "bau_value" : lambda df: "", "scenario_value": lambda df: "" }, { - "label": "Boiler Fuel Consumption (mmbtu)", - "key": "boiler_fuel_consumption", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_fuel_consumption_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_fuel_consumption_mmbtu") - }, - { - "label": "Boiler Fuel Costs ($)", - "key": "boiler_fuel_costs", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax_bau"), + "label" : "Boiler Fuel Cost ($)", + "key" : "boiler_fuel_cost", + "bau_value" : lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax_bau"), "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax") }, { - "label": "CHP Fuel Consumption (mmbtu)", - "key": "chp_fuel_consumption", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu") + "label" : "CHP Fuel Cost ($)", + "key" : "chp_fuel_cost", + "bau_value" : lambda df : safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df : safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") }, { - "label": "CHP Fuel Cost ($)", - "key": "chp_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") + "label" : "Backup Generator Fuel Cost ($)", + "key" : "backup_generator_fuel_cost", + "bau_value" : lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") }, { - "label": "Backup Generator Fuel Consumption (gallons)", - "key": "backup_generator_fuel_consumption", - "bau_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.annual_fuel_consumption_gal") + "label" : "Fuel Cost ($)", + "key" : "fuel_cost", + "bau_value" : lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax_bau")+safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau")+safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.year_one_fuel_cost_before_tax")+safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax")+safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") }, { - "label": "Backup Generator Fuel Cost ($)", - "key": "backup_generator_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Generator.year_one_fuel_cost_before_tax") + "label" : "Fuel Cost Savings 
($)", + "key" : "fuel_cost_savings", + "bau_value" : lambda df: "", + "scenario_value": lambda df: "" }, + ##################################################################################################### + ############################ Renewable Energy & Emissions ########################### + ##################################################################################################### { - "label": "Renewable Energy & Emissions", - "key": "renewable_energy_emissions_separator", - "bau_value": lambda df: "", + "label" : "Renewable Energy & Emissions", + "key" : "renewable_energy_emissions_separator", + "bau_value" : lambda df: "", "scenario_value": lambda df: "" }, { - "label": "Annual % Renewable Electricity (%)", - "key": "annual_renewable_electricity", - "bau_value": lambda df: safe_get(df, "outputs.Site.renewable_electricity_fraction_bau"), + "label" : "Annual % Renewable Electricity (%)", + "key" : "annual_renewable_electricity", + "bau_value" : lambda df: safe_get(df, "outputs.Site.renewable_electricity_fraction_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Site.renewable_electricity_fraction") }, { - "label": "Year 1 CO2 Emissions (tonnes)", - "key": "year_1_co2_emissions", - "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), + "label" : "Annual CO2 Emissions (tonnes)", + "key" : "annual_co2_emissions", + "bau_value" : lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") }, + # Added emissions from electricity and fuels { + "label" : "Annual CO2 Emissions from Electricity (tonnes)", + "key" : "annual_co2_emissions_electricity", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricUtility.annual_emissions_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_emissions_tonnes_CO2") + }, + { + "label" : "Annual CO2 Emissions from Fuel (tonnes)", + "key" : "annual_co2_emissions_fuel", + "bau_value" : lambda df: safe_get(df, "outputs.Site.annual_emissions_from_fuelburn_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_from_fuelburn_tonnes_CO2") + }, { - "label": "CO2 Emissions (tonnes)", - "key": "co2_emissions", - "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), + "label" : "CO2 Emissions (tonnes)", + "key" : "co2_emissions", + "bau_value" : lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") }, + # CO2 (%) savings calculation { - "label": "CO2 (%) savings", - "key": "co2_savings_percentage", - "bau_value": lambda df: 0, - "scenario_value": lambda df: 0 + "label" : "CO2 (%) savings", + "key" : "co2_savings_percentage", + "bau_value" : lambda df: "", + "scenario_value": lambda df: "" }, + #################################################################################################################################### + ##################### Playground - Explore Effect of Additional Incentives or Costs, outside of REopt ############################## + ##################################################################################################### { - "label": "Annual Energy Production & Throughput", - "key": "energy_production_throughput_separator", + "label": "Playground - Explore Effect of Additional Incentives or Costs, outside of REopt", + "key": "playground_separator",
"bau_value": lambda df: "", "scenario_value": lambda df: "" }, { - "label": "PV (kWh)", - "key": "pv_kwh", - "bau_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh") + "label": "Net Upfront Capital Cost After Incentives but without MACRS ($)", + "key": "net_upfront_capital_cost_without_macrs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_after_incentives_without_macrs_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_after_incentives_without_macrs") }, { - "label": "Wind (kWh)", - "key": "wind_kwh", - "bau_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh_bau"), + "label": "Net Upfront Capital Cost After Incentives with MACRS ($)", + "key": "net_upfront_capital_cost_with_macrs", + "bau_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_after_incentives_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Financial.initial_capital_costs_after_incentives") + }, + { + "label": "Additional Upfront Incentive ($)", + "key": "additional_upfront_incentive_input", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Additional Upfront Cost ($)", + "key": "additional_upfront_cost_input", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Additional Yearly Cost Savings ($/Year)", + "key": "additional_yearly_cost_savings_input", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Additional Yearly Cost ($/Year)", + "key": "additional_yearly_cost_input", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Modified Net Upfront Capital Cost ($)", + "key": "modified_net_upfront_capital_cost", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Modified Simple Payback Period (years)", + "key": "modified_simple_payback_period", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + #################################################################################################################################### + ##################### Playground - Consider Unaddressable Fuel Consumption in Emissions Reduction % Calculation #################### + ##################################################################################################################################### + { + "label": "Playground - Consider Unaddressable Fuel Consumption in Emissions Reduction % Calculation", + "key": "playground_emissions_reduction_separator", + "bau_value": lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label": "Unaddressable Heating Load (Mmbtu/Year)", + "key": "unaddressable_heating_load", + "bau_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_total_unaddressable_heating_load_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_total_unaddressable_heating_load_mmbtu") + }, + { + "label": "Unaddressable CO2 Emissions (tonnes)", + "key": "unaddressable_co2_emissions", + "bau_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_emissions_from_unaddressable_heating_load_tonnes_CO2_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_emissions_from_unaddressable_heating_load_tonnes_CO2") + }, + { + "label": "CO2 Savings Including Unaddressable (%)", + "key": "co2_savings_including_unaddressable", + "bau_value": 
lambda df: "", + "scenario_value": lambda df: "" + }, + ##################################################################################################### + ############################# Annual Electric Production ############################# + ##################################################################################################### + { + "label" : "Annual Electric Production", + "key" : "annual_electric_production_separator", + "bau_value" : lambda df: "", + "scenario_value": lambda df: "", + "comments" : "Split into Electric, Heating, and Cooling Sections" + }, + { + "label" : "Grid Serving Load (kWh)", + "key" : "grid_serving_load", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw") + }, + { + "label" : "Grid Charging Battery (kWh)", + "key" : "grid_charging_battery", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_storage_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_storage_series_kw") + }, + { + "label" : "PV Serving Load (kWh)", + "key" : "pv_serving_load", + "bau_value" : lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw") + }, + { + "label" : "PV Charging Battery (kWh)", + "key" : "pv_charging_battery", + "bau_value" : lambda df: safe_get(df, "outputs.PV.electric_to_storage_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_storage_series_kw") + }, + { + "label" : "PV Exported to Grid (kWh)", + "key" : "pv_exported_to_grid", + "bau_value" : lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw") + }, + { + "label" : "PV Curtailment (kWh)", + "key" : "pv_curtailment", + "bau_value" : lambda df: safe_get(df, "outputs.PV.electric_curtailed_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_curtailed_series_kw") + }, + { + "label" : "PV Year One Electricity Produced (kWh)", + "key" : "pv_year_one_electricity_produced", + "bau_value" : lambda df: safe_get(df, "outputs.PV.year_one_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.PV.year_one_energy_produced_kwh") + }, + { + "label" : "Wind Serving Load (kWh)", + "key" : "wind_serving_load", + "bau_value" : lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw") + }, + { + "label" : "Wind Charging Battery (kWh)", + "key" : "wind_charging_battery", + "bau_value" : lambda df: safe_get(df, "outputs.Wind.electric_to_storage_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_storage_series_kw") + }, + { + "label" : "Wind Exported to Grid (kWh)", + "key" : "wind_exported_to_grid", + "bau_value" : lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw") + }, + { + "label" : "Wind Curtailment (kWh)", + "key" : "wind_curtailment", + "bau_value" : lambda df: safe_get(df, "outputs.Wind.electric_curtailed_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_curtailed_series_kw") + }, + { + "label" : "Wind Total Electricity Produced (kWh)", + "key" : 
"wind_total_electricity_produced", + "bau_value" : lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh") }, { - "label": "CHP (kWh)", - "key": "chp_kwh", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh_bau"), + "label" : "Battery Serving Load (kWh)", + "key" : "battery_serving_load", + "bau_value" : lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw") + }, + { + "label" : "Generator Serving Load (kWh)", + "key" : "generator_serving_load", + "bau_value" : lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.electric_to_load_series_kw") + }, + { + "label" : "Generator Charging Battery (kWh)", + "key" : "generator_charging_battery", + "bau_value" : lambda df: safe_get(df, "outputs.Generator.electric_to_storage_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.electric_to_storage_series_kw") + }, + { + "label" : "Generator Exported to Grid (kWh)", + "key" : "generator_exported_to_grid", + "bau_value" : lambda df: safe_get(df, "outputs.Generator.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.electric_to_grid_series_kw") + }, + { + "label" : "Generator Total Electricity Produced (kWh)", + "key" : "generator_total_electricity_produced", + "bau_value" : lambda df: safe_get(df, "outputs.Generator.annual_energy_produced_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.Generator.annual_energy_produced_kwh") + }, + { + "label" : "CHP Serving Load (kWh)", + "key" : "chp_serving_load", + "bau_value" : lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw") + }, + { + "label" : "CHP Charging Battery (kWh)", + "key" : "chp_charging_battery", + "bau_value" : lambda df: safe_get(df, "outputs.CHP.electric_to_storage_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_storage_series_kw") + }, + { + "label" : "CHP Exported to Grid (kWh)", + "key" : "chp_exported_to_grid", + "bau_value" : lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw") + }, + { + "label" : "CHP Total Electricity Produced (kWh)", + "key" : "chp_total_electricity_produced", + "bau_value" : lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh_bau"), "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh") }, { - "label": "CHP (MMBtu)", - "key": "chp_mmbtu", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), + "label" : "Steam Turbine Serving Load (kWh)", + "key" : "steam_turbine_serving_load", + "bau_value" : lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_load_series_kw") + }, + { + "label" : "Steam Turbine Charging Battery (kWh)", + "key" : "steam_turbine_charging_battery", + "bau_value" : lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_storage_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_storage_series_kw") + }, + { 
+ "label" : "Steam Turbine Exported to Grid (kWh)", + "key" : "steam_turbine_exported_to_grid", + "bau_value" : lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_grid_series_kw_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.electric_to_grid_series_kw") + }, + { + "label" : "Steam Turbine Total Electricity Produced (kWh)", + "key" : "steam_turbine_total_electricity_produced", + "bau_value" : lambda df: safe_get(df, "outputs.SteamTurbine.annual_electric_production_kwh_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.annual_electric_production_kwh") + }, + ##################################################################################################### + ############################## Annual Heating Thermal Production ############################# + ##################################################################################################### + { + "label" : "Annual Heating Thermal Production", + "key" : "annual_heating_thermal_production_separator", + "bau_value" : lambda df: "", + "scenario_value": lambda df: "" + }, + { + "label" : "Existing Heating System Serving Thermal Load (MMBtu)", + "key" : "existing_heating_system_serving_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.ExistingBoiler.thermal_to_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.thermal_to_load_series_mmbtu_per_hour") + }, + { + "label" : "Existing Heating System Thermal to Steam Turbine (MMBtu)", + "key" : "existing_heating_system_thermal_to_steam_turbine", + "bau_value" : lambda df: safe_get(df, "outputs.ExistingBoiler.thermal_to_steamturbine_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.thermal_to_steamturbine_series_mmbtu_per_hour") + }, + { + "label" : "Existing Heating System Charging Hot Water Storage (MMBtu)", + "key" : "existing_heating_system_charging_hot_water_storage", + "bau_value" : lambda df: safe_get(df, "outputs.ExistingBoiler.thermal_to_storage_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.thermal_to_storage_series_mmbtu_per_hour") + }, + { + "label" : "Existing Heating System Total Thermal Produced (MMBtu)", + "key" : "existing_heating_system_total_thermal_produced", + "bau_value" : lambda df : safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df : safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") + }, + { + "label" : "CHP Serving Thermal Load (MMBtu)", + "key" : "chp_serving_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour") + }, + { + "label" : "CHP Charging Hot Water Storage (MMBtu)", + "key" : "chp_charging_hot_water_storage", + "bau_value" : lambda df: safe_get(df, "outputs.CHP.thermal_to_storage_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_storage_series_mmbtu_per_hour") + }, + { + "label" : "CHP Thermal to Steam Turbine (MMBtu)", + "key" : "chp_thermal_to_steam_turbine", + "bau_value" : lambda df: safe_get(df, "outputs.CHP.thermal_to_steamturbine_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_steamturbine_series_mmbtu_per_hour") + }, + { + "label" : "CHP Thermal Vented (MMBtu)", + "key" : "chp_thermal_vented", + "bau_value" : 
lambda df: safe_get(df, "outputs.CHP.thermal_curtailed_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.CHP.thermal_curtailed_series_mmbtu_per_hour") + }, + { + "label" : "CHP Total Thermal Produced (MMBtu)", + "key" : "chp_total_thermal_produced", + "bau_value" : lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") }, { - "label": "Boiler (MMBtu)", - "key": "boiler_mmbtu", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") + "label" : "Steam Turbine Serving Thermal Load (MMBtu)", + "key" : "steam_turbine_serving_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.SteamTurbine.thermal_to_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.thermal_to_load_series_mmbtu_per_hour") }, { - "label": "Battery (kWh)", - "key": "battery_kwh", - "bau_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw") + "label" : "Steam Turbine Charging Hot Water Storage (MMBtu)", + "key" : "steam_turbine_charging_hot_water_storage", + "bau_value" : lambda df: safe_get(df, "outputs.SteamTurbine.thermal_to_storage_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.thermal_to_storage_series_mmbtu_per_hour") }, { - "label": "HW-TES (MMBtu)", - "key": "hw_tes_mmbtu", - "bau_value": lambda df: safe_get(df, "outputs.HotThermalStorage.annual_energy_produced_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.annual_energy_produced_mmbtu") + "label" : "Steam Turbine Total Thermal Produced (MMBtu)", + "key" : "steam_turbine_total_thermal_produced", + "bau_value" : lambda df : safe_get(df, "outputs.SteamTurbine.annual_thermal_production_mmbtu_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.SteamTurbine.annual_thermal_production_mmbtu") }, { - "label": "CW-TES (MMBtu)", - "key": "cw_tes_mmbtu", - "bau_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.annual_energy_produced_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.annual_energy_produced_mmbtu") + "label" : "GHP Reduction of Thermal Load (MMBtu)", + "key" : "ghp_reduction_of_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.GHP.space_heating_thermal_load_reduction_with_ghp_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.space_heating_thermal_load_reduction_with_ghp_mmbtu_per_hour") }, { - "label": "Breakdown of Incentives", - "key": "breakdown_of_incentives_separator", - "bau_value": lambda df: "", + "label" : "GHP Serving Thermal Load (MMBtu)", + "key" : "ghp_serving_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.GHP.thermal_to_space_heating_load_series_mmbtu_per_hour_bau") + safe_get(df, "outputs.GHP.thermal_to_dhw_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.thermal_to_space_heating_load_series_mmbtu_per_hour") + safe_get(df, "outputs.GHP.thermal_to_dhw_load_series_mmbtu_per_hour") + }, + { + "label" : "ASHP Serving Thermal Load (MMBtu)", + "key" : "ashp_serving_thermal_load", + "bau_value" : lambda df : safe_get(df, 
"outputs.ASHPSpaceHeater.thermal_to_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df : safe_get(df, "outputs.ASHPSpaceHeater.thermal_to_load_series_mmbtu_per_hour") + }, + { + "label" : "ASHP Charging Hot Water Storage (MMBtu)", + "key" : "ashp_charging_hot_water_storage", + "bau_value" : lambda df: safe_get(df, "outputs.ASHPSpaceHeater.thermal_to_storage_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ASHPSpaceHeater.thermal_to_storage_series_mmbtu_per_hour") + }, + { + "label" : "ASHP Water Heater Serving Thermal Load (MMBtu)", + "key" : "ashp_water_heater_serving_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.ASHPWaterHeater.thermal_to_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ASHPWaterHeater.thermal_to_load_series_mmbtu_per_hour") + }, + { + "label" : "ASHP Water Heater Charging Hot Water Storage (MMBtu)", + "key" : "ashp_water_heater_charging_hot_water_storage", + "bau_value" : lambda df: safe_get(df, "outputs.ASHPWaterHeater.thermal_to_storage_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ASHPWaterHeater.thermal_to_storage_series_mmbtu_per_hour") + }, + { + "label" : "Hot Water Storage Serving Thermal Load (MMBtu)", + "key" : "hot_water_storage_serving_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.HotThermalStorage.storage_to_load_series_mmbtu_per_hour") + }, + ##################################################################################################### + ############################ Annual Cooling Thermal Production ############################ + ##################################################################################################### + + { + "label" : "Annual Cooling Thermal Production", + "key" : "annual_cooling_thermal_production_separator", + "bau_value" : lambda df: "", "scenario_value": lambda df: "" }, { - "label": "Federal Tax Incentive (30%)", - "key": "federal_tax_incentive_30", - "bau_value": lambda df: 0.3, - "scenario_value": lambda df: 0.3 + "label" : "Existing Cooling Plant Serving Thermal Load (ton-hr)", + "key" : "existing_cooling_plant_serving_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.ExistingChiller.thermal_to_load_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingChiller.thermal_to_load_series_ton") }, { - "label": "PV Federal Tax Incentive (%)", - "key": "pv_federal_tax_incentive", - "bau_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction_bau"), - "scenario_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction") + "label" : "Existing Cooling Plant Charging Chilled Water Storage (ton-hr)", + "key" : "existing_cooling_plant_charging_chilled_water_storage", + "bau_value" : lambda df: safe_get(df, "outputs.ExistingChiller.thermal_to_storage_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ExistingChiller.thermal_to_storage_series_ton") }, { - "label": "Storage Federal Tax Incentive (%)", - "key": "storage_federal_tax_incentive", - "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), - "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") + "label" : "GHP Reduction of Thermal Load (ton-hr)", + "key" : "ghp_reduction_of_thermal_load_cooling", + "bau_value" : lambda df: safe_get(df, 
"outputs.GHP.cooling_thermal_load_reduction_with_ghp_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.cooling_thermal_load_reduction_with_ghp_ton") }, - # { - # "label": "Incentive Value ($)", - # "key": "incentive_value", - # "bau_value": lambda df: safe_get(df, "outputs.Financial.breakeven_cost_of_emissions_reduction_per_tonne_CO2_bau"), - # "scenario_value": lambda df: safe_get(df, "outputs.Financial.breakeven_cost_of_emissions_reduction_per_tonne_CO2") - # }, { - "label": "Additional Grant ($)", - "key": "iac_grant", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_production_incentive_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_production_incentive_after_tax") - } + "label" : "GHP Serving Thermal Load (ton-hr)", + "key" : "ghp_serving_thermal_load_cooling", + "bau_value" : lambda df: safe_get(df, "outputs.GHP.thermal_to_load_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.thermal_to_load_series_ton") + }, + { + "label" : "ASHP Serving Thermal Load (ton-hr)", + "key" : "ashp_serving_thermal_load_cooling", + "bau_value" : lambda df: safe_get(df, "outputs.ASHPSpaceHeater.thermal_to_load_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ASHPSpaceHeater.thermal_to_load_series_ton") + }, + { + "label" : "ASHP Charging Chilled Water Storage (ton-hr)", + "key" : "ashp_charging_chilled_water_storage", + "bau_value" : lambda df: safe_get(df, "outputs.ASHPSpaceHeater.thermal_to_storage_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ASHPSpaceHeater.thermal_to_storage_series_ton") + }, + { + "label" : "Absorption Chiller Serving Thermal Load (ton-hr)", + "key" : "absorption_chiller_serving_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_load_series_ton") + }, + { + "label" : "Absorption Chiller Charging Chilled Water Storage (ton-hr)", + "key" : "absorption_chiller_charging_chilled_water_storage", + "bau_value" : lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_storage_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.AbsorptionChiller.thermal_to_storage_series_ton") + }, + { + "label" : "Chilled Water Storage Serving Thermal Load (ton-hr)", + "key" : "chilled_water_storage_serving_thermal_load", + "bau_value" : lambda df: safe_get(df, "outputs.ColdThermalStorage.storage_to_load_series_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.ColdThermalStorage.storage_to_load_series_ton") + }, ] # IEDO TASC Configuration @@ -904,9 +1261,10 @@ "elec_cost_value" : "Purchased Electricity Cost ($)", "ng_reduction_value" : "Total Fuel (MMBtu)", "total_elec_costs" : "Total Electric Costs ($)", - "total_fuel_costs" : "Total Fuel Costs ($)", + "fuel_costs" : "Fuel Cost ($)", "co2_reduction_value": "CO2 Emissions (tonnes)", - "placeholder1_value" : "Placeholder1" + "placeholder1_value" : "Placeholder1", + "lcc_value" : "Lifecycle Costs ($)" } ''' @@ -934,6 +1292,32 @@ # Calculation logic calculations_config = [ + { + "name": "Present Value of Incentives ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Gross Upfront Capital Costs, Before Incentives ($)"] + 2} - {col}{headers["Net Upfront Capital Cost, After Incentives ($)"] + 2}' + }, + { + "name": "Electricity Cost Savings ($)", + "formula": lambda col, bau, headers: 
f'={bau["elec_cost_value"]}-{col}{headers["Purchased Electricity Cost ($)"] + 2}' + }, + { + "name": "NPV as a % of BAU LCC (%)", + "formula": lambda col, bau, headers: f'=({col}{headers["Net Present Value ($)"] + 2}/{bau["lcc_value"]})' + }, + { + "name": "Fuel Cost Savings ($)", + "formula": lambda col, bau, headers: f'={bau["fuel_costs"]}-{col}{headers["Fuel Cost ($)"] + 2}' + }, + + { + "name": "Modified Net Upfront Capital Cost ($)", + "formula": lambda col, bau, headers: f'={col}{headers["Net Upfront Capital Cost After Incentives but without MACRS ($)"] + 2} - {col}{headers["Additional Upfront Incentive ($)"] + 2}+{col}{headers["Additional Upfront Cost ($)"] + 2}' + }, + + { + "name": "Modified Simple Payback Period (years)", + "formula": lambda col, bau, headers: f'=({col}{headers["Modified Net Upfront Capital Cost ($)"] + 2})/({col}{headers["Electricity Cost Savings ($)"] + 2}+{col}{headers["Fuel Cost Savings ($)"] + 2}+{col}{headers["Additional Yearly Cost Savings ($/Year)"] + 2}-{col}{headers["Year 1 O&M Cost, Before Tax ($)"] + 2}-{col}{headers["Additional Yearly Cost ($/Year)"] + 2})' + }, { "name": "Total Site Electricity Use (kWh)", "formula": lambda col, bau, headers: ( @@ -951,18 +1335,11 @@ "name": "Net Purchased Electricity Reduction (%)", "formula": lambda col, bau, headers: f'=({bau["grid_value"]}-{col}{headers["Grid Purchased Electricity (kWh)"] + 2})/{bau["grid_value"]}' }, - { - "name": "Purchased Electricity Cost ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Energy Charges ($)"] + 2}+{col}{headers["Demand Charges ($)"] + 2}+{col}{headers["Fixed Charges ($)"] + 2}' - }, { "name": "Net Electricity Cost ($)", "formula": lambda col, bau, headers: f'={col}{headers["Purchased Electricity Cost ($)"] + 2}-{col}{headers["Electricity Export Benefit ($)"] + 2}' }, - { - "name": "Electricity Cost Savings ($/year)", - "formula": lambda col, bau, headers: f'={bau["elec_cost_value"]}-{col}{headers["Purchased Electricity Cost ($)"] + 2}' - }, + { "name": "Total Fuel (MMBtu)", "formula": lambda col, bau, headers: f'={col}{headers["Boiler Fuel (MMBtu)"] + 2}+{col}{headers["CHP Fuel (MMBtu)"] + 2}' @@ -979,10 +1356,10 @@ # "name": "Total Fuel Costs ($)", # "formula": lambda col, bau, headers: f'={col}{headers["Heating System Fuel Cost ($)"] + 2}+{col}{headers["CHP Fuel Cost ($)"] + 2}' # }, - { - "name": "Total Utility Costs ($)", - "formula": lambda col, bau, headers: f'={col}{headers["Total Electric Costs ($)"] + 2}+{col}{headers["Total Fuel Costs ($)"] + 2}' - }, + # { + # "name": "Total Utility Costs ($)", + # "formula": lambda col, bau, headers: f'={col}{headers["Total Electric Costs ($)"] + 2}+{col}{headers["Total Fuel Costs ($)"] + 2}' + # }, # { # "name": "Incentive Value ($)", # "formula": lambda col, bau, headers: f'=({col}{headers["Federal Tax Incentive (30%)"] + 2}*{col}{headers["Gross Capital Cost ($)"] + 2})+{col}{headers["Additional Grant ($)"] + 2}' diff --git a/reoptjl/views.py b/reoptjl/views.py index e9dc3692c..1020d2688 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1269,7 +1269,7 @@ def easiur_costs(request): # return JsonResponse({"Error": "Unexpected Error. 
Please check your input parameters and contact reopt@nrel.gov if problems persist."}, status=500) ############################################################################################################################## -################################################# START Custom Table ######################################################### +################################################# START Results Table ######################################################### ############################################################################################################################## def access_raw_data(run_uuids: List[str], request: Any) -> Dict[str, List[Dict[str, Any]]]: try: @@ -1401,7 +1401,11 @@ def generate_custom_comparison_table(request: Any) -> HttpResponse: def generate_excel_workbook(df: pd.DataFrame, custom_table: List[Dict[str, Any]], output: io.BytesIO) -> None: try: workbook = xlsxwriter.Workbook(output, {'in_memory': True}) - worksheet = workbook.add_worksheet('Custom Table') + # Add the 'Instructions' worksheet + instructions_worksheet = workbook.add_worksheet('Instructions') + + # Add the 'Results Table' worksheet + worksheet = workbook.add_worksheet('Results Table') # Scenario header formatting with colors scenario_colors = ['#0B5E90', '#00A4E4','#f46d43','#fdae61', '#66c2a5', '#d53e4f', '#3288bd'] @@ -1422,12 +1426,33 @@ def generate_excel_workbook(df: pd.DataFrame, custom_table: List[Dict[str, Any]] formula_percent_format = workbook.add_format({'bg_color': '#0B5E90', 'num_format': '0%', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) formula_currency_format = workbook.add_format({'bg_color': '#0B5E90', 'num_format': '$#,##0.00', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) - # Message format to match formula style - message_format = workbook.add_format({'bg_color': '#0B5E90', 'align': 'center','valign': 'center','border': 1,'font_color': formula_color, 'bold': True, 'font_size': 12, 'italic': True }) + # Message format for formula cells (blue background with white text) + formula_message_format = workbook.add_format({ + 'bg_color': '#0B5E90', + 'font_color': '#F8F8FF', + 'align': 'center', + 'valign': 'center', + 'border': 1, + 'bold': True, + 'font_size': 12, + 'italic': True + }) + + # Message format for input cells (yellow background) + input_message_format = workbook.add_format({ + 'bg_color': '#FFFC79', + 'align': 'center', + 'valign': 'center', + 'border': 1, + 'bold': True, + 'font_size': 12 + }) # Separator format for rows that act as visual dividers separator_format = workbook.add_format({'bg_color': '#5D6A71', 'bold': True, 'border': 1,'font_size': 11,'font_color': 'white'}) + input_cell_format = workbook.add_format({'bg_color': '#FFFC79', 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10}) + # Combine row color with cell format, excluding formulas def get_combined_format(label, row_color, is_formula=False): if is_formula: @@ -1513,20 +1538,38 @@ def get_combined_format(label, row_color, is_formula=False): is_formula = False # Detect if this cell contains a formula if isinstance(value, str) and "formula" in value.lower(): is_formula = True - - cell_format = get_combined_format(variable, row_color, is_formula) + # Check if the key contains 'input' to apply the input format + if 'input' in key.lower(): + cell_format = input_cell_format + else: + cell_format = get_combined_format(variable, 
row_color, is_formula) + # cell_format = get_combined_format(variable, row_color, is_formula) if pd.isnull(value) or value == '-': worksheet.write(row_num + 1 + row_offset, col_num + 1, "", cell_format) else: worksheet.write(row_num + 1 + row_offset, col_num + 1, value, cell_format) - # Update the message to include clear information about BAU values being hidden for novice users - message_text = ( - "Values in white are formulas, so please do not enter anything in those cells." + # Update the messages + formula_message_text = "Values with white text on blue background are formulas; please do not edit these cells." + input_message_text = "Yellow cells are inputs; you can modify these to explore consideration of additional Incentives or Costs, outside of REopt." + + # Determine the placement of the messages + last_row = len(df.index) + 2 # Adjust the row index for message placement + + # Place the first message about formulas + worksheet.merge_range( + last_row, 0, + last_row, len(df.columns), + formula_message_text, formula_message_format ) + last_row += 1 # Move to the next row for the second message - # Merge the range and apply the updated message - worksheet.merge_range(len(df.index) + 2, 0, len(df.index) + 2, len(df.columns), message_text, message_format) + # Place the second message about inputs + worksheet.merge_range( + last_row, 0, + last_row, len(df.columns), + input_message_text, input_message_format + ) headers = {header: idx for idx, header in enumerate(df.index)} headers["Scenario"] = 0 @@ -1584,11 +1627,114 @@ def get_bau_column(col): if missing_entries: print(f"missing_entries in the input table: {', '.join(set(missing_entries))}. Please update the configuration if necessary.") - workbook.close() + # Formats for the instructions sheet + title_format = workbook.add_format({ + 'bold': True, 'font_size': 18, 'align': 'left', 'valign': 'top' + }) + subtitle_format = workbook.add_format({ + 'bold': True, 'font_size': 14, 'align': 'left', 'valign': 'top' + }) + text_format = workbook.add_format({ + 'font_size': 12, 'align': 'left', 'valign': 'top', 'text_wrap': True + }) + bullet_format = workbook.add_format({ + 'font_size': 12, 'align': 'left', 'valign': 'top', 'text_wrap': True, 'indent': 1 + }) + + # Set column width and default row height + instructions_worksheet.set_column(0, 0, 100) + instructions_worksheet.set_default_row(15) + + # Start writing instructions + row = 0 + instructions_worksheet.write(row, 0, "Instructions for Using the REopt Results Table Workbook", title_format) + row += 2 + + # General Introduction + general_instructions = ( + "Welcome to the REopt Results Table Workbook !\n\n" + "This workbook contains all of the results of your selected REopt analysis scenarios. " + "Please read the following instructions carefully to understand how to use this workbook effectively." + ) + instructions_worksheet.write(row, 0, general_instructions, text_format) + row += 4 + # Using the 'Results Table' Sheet with formula format + instructions_worksheet.write(row, 0, "Using the 'Results Table' Sheet", subtitle_format) + row += 1 + + custom_table_instructions = ( + "The 'Results Table' sheet displays the scenario results of your REopt analysis in a structured format. " + "Here's how to use it:" + ) + instructions_worksheet.write(row, 0, custom_table_instructions, text_format) + row += 2 + + steps = [ + "1. Review the Results: Browse through the table to understand the system capacities, financial metrics, and energy production details.", + "2. 
Identify Editable Fields: Look for yellow cells in the 'Playground' section where you can input additional incentives or costs.", + "3. Avoid Editing Formulas: Do not edit cells with blue background and white text, as they contain important formulas.", + "4. Interpreting BAU and Optimal Scenarios: 'BAU' stands for 'Business as Usual' and represents the baseline scenario without any new investments. 'Optimal' scenarios show the results with optimized investments.", + "5. Hidden BAU Columns: If all scenarios are for a single site, identical BAU columns may be hidden except for the first one. For multiple sites where financials and energy consumption differ, all BAU columns will be visible." + ] + for step in steps: + instructions_worksheet.write(row, 0, step, bullet_format) + row += 1 + row += 2 + + # Notes for the Playground Section + instructions_worksheet.write(row, 0, "Notes for the Playground Section", subtitle_format) + row += 1 + + playground_notes = ( + "The 'Playground' section allows you to explore the effects of additional incentives or costs on your project's financial metrics." + ) + instructions_worksheet.write(row, 0, playground_notes, text_format) + row += 2 + + playground_items = [ + "- Net Upfront Capital Cost After Incentives but without MACRS ($): Represents the upfront cost after incentives, excluding MACRS depreciation benefits.", + "- Net Upfront Capital Cost After Incentives with MACRS ($): Includes MACRS depreciation, which provides tax benefits over the first 5-7 years.", + "- Additional Upfront Incentive ($): Input any additional grants or incentives (e.g., IAC grant, state or local grants).", + "- Additional Upfront Cost ($): Input any extra upfront costs (e.g., interconnection upgrades, microgrid components).", + "- Additional Yearly Cost Savings ($/year): Input any ongoing yearly savings (e.g., improved productivity, product sales with ESG designation).", + "- Additional Yearly Cost ($/year): Input any additional yearly costs (e.g., microgrid operation and maintenance).", + "- Playground-Modified Net Upfront Capital Cost ($): This value recalculates based on your inputs.", + "- Playground-Modified Simple Payback Period (years): Recalculates the payback period based on your inputs, providing a more conventional 'simple' payback period." + ] + for item in playground_items: + instructions_worksheet.write(row, 0, item, bullet_format) + row += 1 + row += 2 + + # Unaddressable Heating Load and Emissions + instructions_worksheet.write(row, 0, "Unaddressable Heating Load and Emissions", subtitle_format) + row += 1 + + unaddressable_notes = ( + "In scenarios where there is an unaddressable heating load (heating demand that cannot be served by the technologies analyzed), " + "the associated fuel consumption and emissions are not accounted for in the standard REopt outputs.\n\n" + "The 'Unaddressable CO₂ Emissions' row in the 'Playground' section includes these emissions, providing a more comprehensive view of your site's total emissions. " + "Including unaddressable emissions results in a lower percentage reduction because the total emissions baseline is larger." + ) + instructions_worksheet.write(row, 0, unaddressable_notes, text_format) + row += 2 + + # Final Note and Contact Info + instructions_worksheet.write(row, 0, "Thank you for using the REopt Output Workbook!", text_format) + row += 1 + contact_info = "For support or feedback, please contact the REopt team at reopt@nrel.gov." 
+ instructions_worksheet.write(row, 0, contact_info, text_format) + + # Freeze panes to keep the title visible + instructions_worksheet.freeze_panes(1, 0) + + # Close the workbook after all sheets are written + workbook.close() + except Exception: log_and_raise_error('generate_excel_workbook') ############################################################################################################################## -################################################### END Custom Table ######################################################### +################################################### END Results Table ######################################################### ############################################################################################################################## \ No newline at end of file From 1d84f7450d56f1a51529970b09396f7faee04386 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Tue, 24 Sep 2024 19:30:21 -0600 Subject: [PATCH 36/44] final edits to configutation --- reoptjl/custom_table_config.py | 367 ++------------------------------- 1 file changed, 17 insertions(+), 350 deletions(-) diff --git a/reoptjl/custom_table_config.py b/reoptjl/custom_table_config.py index 966d2ef89..5a97ca458 100644 --- a/reoptjl/custom_table_config.py +++ b/reoptjl/custom_table_config.py @@ -476,7 +476,7 @@ "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_from_fuelburn_tonnes_CO2") }, { - "label" : "CO2 Emissions (tonnes)", + "label" : "Total CO2 Emissions (tonnes)", "key" : "co2_emissions", "bau_value" : lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") @@ -557,13 +557,13 @@ { "label": "Unaddressable Heating Load (Mmbtu/Year)", "key": "unaddressable_heating_load", - "bau_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_total_unaddressable_heating_load_mmbtu_bau"), + "bau_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_total_unaddressable_heating_load_mmbtu"), "scenario_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_total_unaddressable_heating_load_mmbtu") }, { "label": "Unaddressable CO2 Emissions (tonnes)", "key": "unaddressable_co2_emissions", - "bau_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_emissions_from_unaddressable_heating_load_tonnes_CO2_bau"), + "bau_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_emissions_from_unaddressable_heating_load_tonnes_CO2"), "scenario_value": lambda df: safe_get(df, "outputs.HeatingLoad.annual_emissions_from_unaddressable_heating_load_tonnes_CO2") }, { @@ -921,322 +921,6 @@ }, ] -# IEDO TASC Configuration -custom_table_tasc = [ - { - "label": "Site Name", - "key": "site", - "bau_value": lambda df: "", - "scenario_value": lambda df: safe_get(df, "inputs.Meta.description", "None provided") - }, - { - "label": "Site Location", - "key": "site_lat_long", - "bau_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})", - "scenario_value": lambda df: f"({safe_get(df, 'inputs.Site.latitude')}, {safe_get(df, 'inputs.Site.longitude')})" - }, - { - "label": "Site Address", - "key": "site_address", - "bau_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided"), - "scenario_value": lambda df: safe_get(df, "inputs.Meta.address", "None provided") - }, - { - "label": "PV Size (kW)", - "key": "pv_size", - "bau_value": lambda df: safe_get(df, 
"outputs.PV.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.size_kw") - }, - { - "label": "Wind Size (kW)", - "key": "wind_size", - "bau_value": lambda df: safe_get(df, "outputs.Wind.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.size_kw") - }, - { - "label": "CHP Size (kW)", - "key": "chp_size", - "bau_value": lambda df: safe_get(df, "outputs.CHP.size_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.size_kw") - }, - { - "label": "PV Total Electricity Produced (kWh)", - "key": "pv_total_electricity_produced", - "bau_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.annual_energy_produced_kwh") - }, - { - "label": "PV Exported to Grid (kWh)", - "key": "pv_exported_to_grid", - "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_grid_series_kw") - }, - { - "label": "PV Serving Load (kWh)", - "key": "pv_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.PV.electric_to_load_series_kw") - }, - { - "label": "Wind Total Electricity Produced (kWh)", - "key": "wind_total_electricity_produced", - "bau_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.annual_energy_produced_kwh") - }, - { - "label": "Wind Exported to Grid (kWh)", - "key": "wind_exported_to_grid", - "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_grid_series_kw") - }, - { - "label": "Wind Serving Load (kWh)", - "key": "wind_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Wind.electric_to_load_series_kw") - }, - { - "label": "CHP Total Electricity Produced (kWh)", - "key": "chp_total_electricity_produced", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_electric_production_kwh") - }, - { - "label": "CHP Exported to Grid (kWh)", - "key": "chp_exported_to_grid", - "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_grid_series_kw") - }, - { - "label": "CHP Serving Load (kWh)", - "key": "chp_serving_load", - "bau_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.electric_to_load_series_kw") - }, - { - "label": "CHP Serving Thermal Load (MMBtu)", - "key": "chp_serving_thermal_load", - "bau_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.thermal_to_load_series_mmbtu_per_hour") - }, - { - "label": "Grid Purchased Electricity (kWh)", - "key": "grid_purchased_electricity", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh") - }, - { - "label": "Total Site Electricity Use (kWh)", - "key": "total_site_electricity_use", - "bau_value": lambda df: safe_get(df, 
"outputs.ElectricUtility.electric_to_load_series_kw_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw") - }, - { - "label": "Net Purchased Electricity Reduction (%)", - "key": "net_purchased_electricity_reduction", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kwsdf") - }, - { - "label": "Electricity Energy Cost ($)", - "key": "electricity_energy_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_energy_cost_before_tax") - }, - { - "label": "Electricity Demand Cost ($)", - "key": "electricity_demand_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_demand_cost_before_tax") - }, - { - "label": "Utility Fixed Cost ($)", - "key": "utility_fixed_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_fixed_cost_before_tax") - }, - { - "label": "Purchased Electricity Cost ($)", - "key": "purchased_electricity_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_bill_before_tax") - }, - { - "label": "Electricity Export Benefit ($)", - "key": "electricity_export_benefit", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.year_one_export_benefit_before_tax") - }, - { - "label": "Net Electricity Cost ($)", - "key": "net_electricity_cost", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax") - }, - { - "label": "Electricity Cost Savings ($/year)", - "key": "electricity_cost_savings", - "bau_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricTariff.lifecycle_energy_cost_after_tax_bau") - }, - { - "label": "Boiler Fuel (MMBtu)", - "key": "boiler_fuel", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.fuel_used_mmbtu") - }, - { - "label": "CHP Fuel (MMBtu)", - "key": "chp_fuel", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_fuel_consumption_mmbtu") - }, - { - "label": "Total Fuel (MMBtu)", - "key": "total_fuel", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.total_energy_supplied_kwh") - }, - { - "label": "Natural Gas Reduction (%)", - "key": "natural_gas_reduction", - "bau_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ElectricUtility.annual_energy_supplied_kwh_bau") - }, - { - "label": 
"Boiler Thermal Production (MMBtu)", - "key": "boiler_thermal_production", - "bau_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.ExistingBoiler.annual_thermal_production_mmbtu") - }, - { - "label": "CHP Thermal Production (MMBtu)", - "key": "chp_thermal_production", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") - }, - { - "label": "Total Thermal Production (MMBtu)", - "key": "total_thermal_production", - "bau_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.annual_thermal_production_mmbtu") - }, - { - "label": "Heating System Fuel Cost ($)", - "key": "heating_system_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.heating_system_fuel_cost_us_dollars") - }, - { - "label": "CHP Fuel Cost ($)", - "key": "chp_fuel_cost", - "bau_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.CHP.year_one_fuel_cost_before_tax") - }, - { - "label": "Total Fuel (NG) Cost ($)", - "key": "total_fuel_ng_cost", - "bau_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.total_fuel_cost_us_dollars") - }, - { - "label": "Total Utility Cost ($)", - "key": "total_utility_cost", - "bau_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.total_utility_cost_us_dollars") - }, - { - "label": "O&M Cost Increase ($)", - "key": "om_cost_increase", - "bau_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.om_and_replacement_present_cost_after_tax") - }, - { - "label": "Payback Period (years)", - "key": "payback_period", - "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") - }, - { - "label": "Gross Capital Cost ($)", - "key": "gross_capital_cost", - "bau_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.lifecycle_capital_costs") - }, - { - "label": "Federal Tax Incentive (30%)", - "key": "federal_tax_incentive", - "bau_value": lambda df: 0.3, - "scenario_value": lambda df: 0.3 - }, - { - "label": "Additional Grant ($)", - "key": "additional_grant", - "bau_value": lambda df: 0, - "scenario_value": lambda df: 0 - }, - { - "label": "Incentive Value ($)", - "key": "incentive_value", - "bau_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.total_incentives_value_us_dollars") - }, - { - "label": "Net Capital Cost ($)", - "key": "net_capital_cost", - "bau_value": lambda df: safe_get(df, "outputs.Financial.net_capital_cost_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.net_capital_cost_us_dollars") - }, - { - "label": "Annual Cost Savings ($)", - "key": "annual_cost_savings", - 
"bau_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.annual_cost_savings_us_dollars") - }, - { - "label": "Simple Payback (years)", - "key": "simple_payback", - "bau_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.simple_payback_years") - }, - { - "label": "CO2 Emissions (tonnes)", - "key": "co2_emissions", - "bau_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.annual_emissions_tonnes_CO2") - }, - { - "label": "CO2 Reduction (tonnes)", - "key": "co2_reduction", - "bau_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Site.lifecycle_emissions_tonnes_CO2") - }, - { - "label": "CO2 (%) savings", - "key": "co2_savings_percentage", - "bau_value": lambda df: 0, - "scenario_value": lambda df: 0 - }, - { - "label": "NPV ($)", - "key": "npv", - "bau_value": lambda df: safe_get(df, "outputs.Financial.npv_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.Financial.npv") - }, - { - "label": "PV Federal Tax Incentive (%)", - "key": "pv_federal_tax_incentive", - "bau_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction_bau"), - "scenario_value": lambda df: safe_get(df, "inputs.PV.federal_itc_fraction") - }, - { - "label": "Storage Federal Tax Incentive (%)", - "key": "storage_federal_tax_incentive", - "bau_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction_bau"), - "scenario_value": lambda df: safe_get(df, "inputs.ElectricStorage.total_itc_fraction") - } -] - ''' 2. 
Defining BAU Columns: ------------------------ @@ -1257,14 +941,15 @@ # Define bau_cells configuration for calculations that reference bau cells bau_cells_config = { - "grid_value" : "Grid Purchased Electricity (kWh)", - "elec_cost_value" : "Purchased Electricity Cost ($)", - "ng_reduction_value" : "Total Fuel (MMBtu)", - "total_elec_costs" : "Total Electric Costs ($)", - "fuel_costs" : "Fuel Cost ($)", - "co2_reduction_value": "CO2 Emissions (tonnes)", - "placeholder1_value" : "Placeholder1", - "lcc_value" : "Lifecycle Costs ($)" + "grid_value" : "Grid Purchased Electricity (kWh)", + "elec_cost_value" : "Purchased Electricity Cost ($)", + "ng_reduction_value" : "Total Fuel (MMBtu)", + "total_elec_costs" : "Total Electric Costs ($)", + "fuel_costs" : "Fuel Cost ($)", + "total_co2_emission_value" : "Total CO2 Emissions (tonnes)", + "placeholder1_value" : "Placeholder1", + "lcc_value" : "Lifecycle Costs ($)", + "annual_co2_emissions_value": "Annual CO2 Emissions (tonnes)" } ''' @@ -1318,6 +1003,10 @@ "name": "Modified Simple Payback Period (years)", "formula": lambda col, bau, headers: f'=({col}{headers["Modified Net Upfront Capital Cost ($)"] + 2})/({col}{headers["Electricity Cost Savings ($)"] + 2}+{col}{headers["Fuel Cost Savings ($)"] + 2}+{col}{headers["Additional Yearly Cost Savings ($/Year)"] + 2}-{col}{headers["Year 1 O&M Cost, Before Tax ($)"] + 2}-{col}{headers["Additional Yearly Cost ($/Year)"] + 2})' }, + { + "name": "CO2 Savings Including Unaddressable (%)", + "formula": lambda col, bau, headers: f'=({bau["annual_co2_emissions_value"]}-{col}{headers["Annual CO2 Emissions (tonnes)"] + 2})/({bau["annual_co2_emissions_value"]}+{col}{headers["Unaddressable CO2 Emissions (tonnes)"] + 2})' + }, { "name": "Total Site Electricity Use (kWh)", "formula": lambda col, bau, headers: ( @@ -1330,7 +1019,6 @@ f'{col}{headers["Grid Purchased Electricity (kWh)"] + 2}' ) }, - { "name": "Net Purchased Electricity Reduction (%)", "formula": lambda col, bau, headers: f'=({bau["grid_value"]}-{col}{headers["Grid Purchased Electricity (kWh)"] + 2})/{bau["grid_value"]}' @@ -1339,7 +1027,6 @@ "name": "Net Electricity Cost ($)", "formula": lambda col, bau, headers: f'={col}{headers["Purchased Electricity Cost ($)"] + 2}-{col}{headers["Electricity Export Benefit ($)"] + 2}' }, - { "name": "Total Fuel (MMBtu)", "formula": lambda col, bau, headers: f'={col}{headers["Boiler Fuel (MMBtu)"] + 2}+{col}{headers["CHP Fuel (MMBtu)"] + 2}' @@ -1352,22 +1039,6 @@ "name": "Total Thermal Production (MMBtu)", "formula": lambda col, bau, headers: f'={col}{headers["Boiler Thermal Production (MMBtu)"] + 2}+{col}{headers["CHP Thermal Production (MMBtu)"] + 2}' }, - # { - # "name": "Total Fuel Costs ($)", - # "formula": lambda col, bau, headers: f'={col}{headers["Heating System Fuel Cost ($)"] + 2}+{col}{headers["CHP Fuel Cost ($)"] + 2}' - # }, - # { - # "name": "Total Utility Costs ($)", - # "formula": lambda col, bau, headers: f'={col}{headers["Total Electric Costs ($)"] + 2}+{col}{headers["Total Fuel Costs ($)"] + 2}' - # }, - # { - # "name": "Incentive Value ($)", - # "formula": lambda col, bau, headers: f'=({col}{headers["Federal Tax Incentive (30%)"] + 2}*{col}{headers["Gross Capital Cost ($)"] + 2})+{col}{headers["Additional Grant ($)"] + 2}' - # }, - # { - # "name": "Net Capital Cost ($)", - # "formula": lambda col, bau, headers: f'={col}{headers["Gross Capital Cost ($)"] + 2}-{col}{headers["Incentive Value ($)"] + 2}' - # }, { "name": "Annual Cost Savings ($)", "formula": lambda col, bau, headers: 
f'={bau["elec_cost_value"]}+-{col}{headers["Purchased Electricity Cost ($)"] + 2}' @@ -1376,13 +1047,9 @@ "name": "Simple Payback (years)", "formula": lambda col, bau, headers: f'={col}{headers["Net Capital Cost ($)"] + 2}/{col}{headers["Annual Cost Savings ($)"] + 2}' }, - # { - # "name": "CO2 Reduction (tonnes)", - # "formula": lambda col, bau, headers: f'={bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2}' - # }, { "name": "CO2 (%) savings", - "formula": lambda col, bau, headers: f'=({bau["co2_reduction_value"]}-{col}{headers["CO2 Emissions (tonnes)"] + 2})/{bau["co2_reduction_value"]}' + "formula": lambda col, bau, headers: f'=({bau["total_co2_emission_value"]}-{col}{headers["Total CO2 Emissions (tonnes)"] + 2})/{bau["total_co2_emission_value"]}' }, #Example Calculations # Calculation Without Reference to bau_cells From fc33b20a64deb4905132e60d6f6e5db41f67c8ba Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Wed, 25 Sep 2024 17:59:38 -0600 Subject: [PATCH 37/44] added more column spacing --- reoptjl/views.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 1020d2688..98a7ce0cc 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1470,7 +1470,7 @@ def get_combined_format(label, row_color, is_formula=False): return workbook.add_format(base_data_format) # Set column width for the first column (labels column) - worksheet.set_column(0, 0, 35) + worksheet.set_column(0, 0, 45) # Setting column widths and writing headers for other columns column_width = 25 @@ -1699,8 +1699,8 @@ def get_bau_column(col): "- Additional Upfront Cost ($): Input any extra upfront costs (e.g., interconnection upgrades, microgrid components).", "- Additional Yearly Cost Savings ($/year): Input any ongoing yearly savings (e.g., improved productivity, product sales with ESG designation).", "- Additional Yearly Cost ($/year): Input any additional yearly costs (e.g., microgrid operation and maintenance).", - "- Playground-Modified Net Upfront Capital Cost ($): This value recalculates based on your inputs.", - "- Playground-Modified Simple Payback Period (years): Recalculates the payback period based on your inputs, providing a more conventional 'simple' payback period." + "- Modified Net Upfront Capital Cost ($): This value recalculates based on your inputs.", + "- Modified Simple Payback Period (years): Recalculates the payback period based on your inputs, providing a more conventional 'simple' payback period." ] for item in playground_items: instructions_worksheet.write(row, 0, item, bullet_format) @@ -1721,7 +1721,7 @@ def get_bau_column(col): row += 2 # Final Note and Contact Info - instructions_worksheet.write(row, 0, "Thank you for using the REopt Output Workbook!", text_format) + instructions_worksheet.write(row, 0, "Thank you for using the REopt Results Table Workbook!", text_format) row += 1 contact_info = "For support or feedback, please contact the REopt team at reopt@nrel.gov." 
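    # Worked example (hypothetical cell positions, for illustration only): each calculations_config
    # "formula" lambda returns an Excel formula string for one scenario column. For instance,
    #   "Net Purchased Electricity Reduction (%)":
    #       lambda col, bau, headers: f'=({bau["grid_value"]}-{col}{headers["Grid Purchased Electricity (kWh)"] + 2})/{bau["grid_value"]}'
    # with col="C", bau["grid_value"] resolved to the BAU cell "B12", and
    # headers["Grid Purchased Electricity (kWh)"] == 10, produces the cell text
    #   '=(B12-C12)/B12'
    # which is what gets written into the scenario column of the 'Results Table' sheet.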
instructions_worksheet.write(row, 0, contact_info, text_format) From 03d312e09535efa1cce4cdc9d601d67675d24fa8 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Thu, 26 Sep 2024 12:15:27 -0600 Subject: [PATCH 38/44] corrected GHP outputs in the webtool config --- reoptjl/custom_table_config.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/reoptjl/custom_table_config.py b/reoptjl/custom_table_config.py index 5a97ca458..a02e206d1 100644 --- a/reoptjl/custom_table_config.py +++ b/reoptjl/custom_table_config.py @@ -225,14 +225,14 @@ { "label" : "GHP Heat Pump Capacity (ton)", "key" : "ghp_heat_pump_capacity", - "bau_value" : lambda df: safe_get(df, "outputs.GHP.size_ton_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_ton") + "bau_value" : lambda df: safe_get(df, "outputs.GHP.size_heat_pump_ton_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.size_heat_pump_ton") }, { "label" : "GHP Ground Heat Exchanger Size (ft)", "key" : "ghp_ground_heat_exchanger_size", - "bau_value" : lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.GHP.length_boreholes_ft") + "bau_value" : lambda df: safe_get(df, "outputs.GHP.ghpghx_chosen_outputs.length_boreholes_ft_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.ghpghx_chosen_outputs.length_boreholes_ft") }, # New ASHP entries { @@ -939,7 +939,7 @@ - Note: It is safe to define bau cells that are not being used. If they are not associated with an entry in the custom table, they will be safely ignored ''' -# Define bau_cells configuration for calculations that reference bau cells +# Define bau_cells configuration for calculations that reference bau cells, call these bau values within calculations bau_cells_config = { "grid_value" : "Grid Purchased Electricity (kWh)", "elec_cost_value" : "Purchased Electricity Cost ($)", From a75e513062a3625fd4bd111e2c496e89db8258a1 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Thu, 26 Sep 2024 17:12:47 -0600 Subject: [PATCH 39/44] updated total length of GHP HE size in the config --- reoptjl/custom_table_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/reoptjl/custom_table_config.py b/reoptjl/custom_table_config.py index a02e206d1..d30a7c831 100644 --- a/reoptjl/custom_table_config.py +++ b/reoptjl/custom_table_config.py @@ -231,8 +231,8 @@ { "label" : "GHP Ground Heat Exchanger Size (ft)", "key" : "ghp_ground_heat_exchanger_size", - "bau_value" : lambda df: safe_get(df, "outputs.GHP.ghpghx_chosen_outputs.length_boreholes_ft_bau"), - "scenario_value": lambda df: safe_get(df, "outputs.GHP.ghpghx_chosen_outputs.length_boreholes_ft") + "bau_value" : lambda df: safe_get(df, "outputs.GHP.ghpghx_chosen_outputs.length_boreholes_ft_bau")*safe_get(df, "outputs.GHP.ghpghx_chosen_outputs.number_of_boreholes_bau"), + "scenario_value": lambda df: safe_get(df, "outputs.GHP.ghpghx_chosen_outputs.length_boreholes_ft")*safe_get(df, "outputs.GHP.ghpghx_chosen_outputs.number_of_boreholes") }, # New ASHP entries { From 4e4e1530a20d3f98c23015f7bb73bb0a2cde3004 Mon Sep 17 00:00:00 2001 From: bill-becker Date: Thu, 26 Sep 2024 22:10:53 -0600 Subject: [PATCH 40/44] Add back newline for Docker files --- docker-compose.yml | 3 ++- julia_src/Dockerfile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 81d32040f..ac8141898 
100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -71,4 +71,5 @@ services: ports: - "8081:8081" volumes: - - ./julia_src:/opt/julia_src \ No newline at end of file + - ./julia_src:/opt/julia_src + \ No newline at end of file diff --git a/julia_src/Dockerfile b/julia_src/Dockerfile index 5dee79f50..e75b58da6 100644 --- a/julia_src/Dockerfile +++ b/julia_src/Dockerfile @@ -19,4 +19,4 @@ RUN julia --project=/opt/julia_src -e 'import Pkg; Pkg.instantiate();' RUN julia --project=/opt/julia_src precompile.jl EXPOSE 8081 -CMD ["bash"] \ No newline at end of file +CMD ["bash"] From e5e53d2a79456a280f20befa1dfb31d1ebef6ef6 Mon Sep 17 00:00:00 2001 From: bill-becker Date: Thu, 26 Sep 2024 22:12:06 -0600 Subject: [PATCH 41/44] Avoid spaces in newline --- docker-compose.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index ac8141898..a3ebd222f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -72,4 +72,3 @@ services: - "8081:8081" volumes: - ./julia_src:/opt/julia_src - \ No newline at end of file From 3d623a448d5f0161624ca512dd873e1d116aca3b Mon Sep 17 00:00:00 2001 From: bill-becker Date: Thu, 26 Sep 2024 22:19:05 -0600 Subject: [PATCH 42/44] Change endpoint name to generate_results_table --- CHANGELOG.md | 1 + reoptjl/urls.py | 2 +- reoptjl/views.py | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf855d1f9..36a410c04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ Classify the change according to the following categories: - Added new model **ASHPWaterHeaterOutputs** - Added new model **ASHPSpaceHeaterInputs** - Added new model **ASHPSpaceHeaterOutputs** +- Added /job/generate_results_table endpoint which takes a list of run_uuid's and creates a results table spreadsheet to download in response ## v3.9.4 ### Minor Updates diff --git a/reoptjl/urls.py b/reoptjl/urls.py index ab70aa413..a0ffe3536 100644 --- a/reoptjl/urls.py +++ b/reoptjl/urls.py @@ -23,7 +23,7 @@ re_path(r'^invalid_urdb/?$', reoviews.invalid_urdb), re_path(r'^schedule_stats/?$', reoviews.schedule_stats), re_path(r'^get_existing_chiller_default_cop/?$', views.get_existing_chiller_default_cop), - re_path(r'^job/generate_custom_comparison_table/?$', views.generate_custom_comparison_table), + re_path(r'^job/generate_results_table/?$', views.generate_results_table), re_path(r'^get_ashp_defaults/?$', views.get_ashp_defaults), re_path(r'^summary_by_runuuids/?$', views.summary_by_runuuids), re_path(r'^link_run_to_portfolios/?$', views.link_run_uuids_to_portfolio_uuid) diff --git a/reoptjl/views.py b/reoptjl/views.py index c600f5aa4..1f2c3ab84 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1660,7 +1660,7 @@ def process_scenarios(scenarios: List[Dict[str, Any]], reopt_data_config: List[D except Exception: log_and_raise_error('process_scenarios') -def generate_custom_comparison_table(request: Any) -> HttpResponse: +def generate_results_table(request: Any) -> HttpResponse: if request.method != 'GET': return JsonResponse({"Error": "Method not allowed. This endpoint only supports GET requests."}, status=405) @@ -1696,7 +1696,7 @@ def generate_custom_comparison_table(request: Any) -> HttpResponse: except CustomTableError as e: return JsonResponse({"Error": str(e)}, status=500) except Exception as e: - log.error(f"Unexpected error in generate_custom_comparison_table: {e}") + log.error(f"Unexpected error in generate_results_table: {e}") return JsonResponse({"Error": "An unexpected error occurred. 
Please try again later."}, status=500) def generate_excel_workbook(df: pd.DataFrame, custom_table: List[Dict[str, Any]], output: io.BytesIO) -> None: From c4f5cc2d817186147f528b0cbf37a3aa4f0d70ba Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Fri, 27 Sep 2024 10:59:39 -0600 Subject: [PATCH 43/44] reduced decimal places to 0 --- reoptjl/views.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 1f2c3ab84..4cf1a8b4b 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1719,13 +1719,13 @@ def generate_excel_workbook(df: pd.DataFrame, custom_table: List[Dict[str, Any]] # Base formats for errors, percentages, and currency values error_format = workbook.add_format({'bg_color': '#FFC7CE', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': 'white', 'bold': True, 'font_size': 10}) base_percent_format = {'num_format': '0%', 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} - base_currency_format = {'num_format': '$#,##0.00', 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} + base_currency_format = {'num_format': '$#,##0', 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} # Formula formats using dark blue background formula_color = '#F8F8FF' - formula_format = workbook.add_format({'num_format': '#,##0.00','bg_color': '#0B5E90', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) + formula_format = workbook.add_format({'num_format': '#,##0','bg_color': '#0B5E90', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) formula_percent_format = workbook.add_format({'bg_color': '#0B5E90', 'num_format': '0%', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) - formula_currency_format = workbook.add_format({'bg_color': '#0B5E90', 'num_format': '$#,##0.00', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) + formula_currency_format = workbook.add_format({'bg_color': '#0B5E90', 'num_format': '$#,##0', 'align': 'center', 'valign': 'center', 'border': 1, 'font_color': formula_color, 'font_size': 10, 'italic': True}) # Message format for formula cells (blue background with white text) formula_message_format = workbook.add_format({ @@ -1762,7 +1762,7 @@ def get_combined_format(label, row_color, is_formula=False): elif '%' in label: return formula_percent_format return formula_format - base_data_format = {'num_format': '#,##0.00','bg_color': row_color, 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} + base_data_format = {'num_format': '#,##0','bg_color': row_color, 'align': 'center', 'valign': 'center', 'border': 1, 'font_size': 10} if label: if '$' in label: return workbook.add_format({**base_currency_format, 'bg_color': row_color}) From 0941719b2717cbeafbaaa85eb4a7e0c38a9a9db4 Mon Sep 17 00:00:00 2001 From: Byron Pullutasig <115118857+bpulluta@users.noreply.github.com> Date: Fri, 27 Sep 2024 11:32:55 -0600 Subject: [PATCH 44/44] fix offgrid bug --- reoptjl/views.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/reoptjl/views.py b/reoptjl/views.py index 4cf1a8b4b..2bd5e1651 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -1631,13 +1631,19 @@ def get_bau_values(scenarios: List[Dict[str, Any]], config: List[Dict[str, 
Any]] df_gen = flatten_dict(scenario['full_data']) for entry in config: bau_func = entry.get("bau_value") + # Try to apply the BAU function, and if it fails, set value to 0 if bau_func: - bau_values_per_scenario[run_uuid][entry["label"]] = bau_func(df_gen) + try: + bau_values_per_scenario[run_uuid][entry["label"]] = bau_func(df_gen) + except Exception: + bau_values_per_scenario[run_uuid][entry["label"]] = 0 + else: + bau_values_per_scenario[run_uuid][entry["label"]] = 0 return bau_values_per_scenario except Exception: log_and_raise_error('get_bau_values') - + def process_scenarios(scenarios: List[Dict[str, Any]], reopt_data_config: List[Dict[str, Any]]) -> pd.DataFrame: try: bau_values_per_scenario = get_bau_values(scenarios, reopt_data_config)
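
For reference, a minimal client-side sketch of downloading the results table workbook from the new /job/generate_results_table endpoint added in this patch series. The endpoint only accepts GET; the base URL, API-key handling, and the exact query-parameter name used for the run UUIDs are not shown in these patches, so the names below are assumptions to verify against the view's request parsing.

import requests

def download_results_table(base_url, api_key, run_uuids, out_path="results_table.xlsx"):
    # GET /job/generate_results_table with one or more run UUIDs; the parameter name is an assumption
    params = {"api_key": api_key, "run_uuid[]": run_uuids}
    resp = requests.get(f"{base_url}/job/generate_results_table/", params=params)
    resp.raise_for_status()
    # The response body is the generated Excel workbook ('Instructions' + 'Results Table' sheets)
    with open(out_path, "wb") as f:
        f.write(resp.content)
    return out_path

# e.g. download_results_table("https://<host>/<api-version>", "<API_KEY>", ["<run_uuid_1>", "<run_uuid_2>"])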