diff --git a/.gitignore b/.gitignore index 001a15f..0d3ae26 100644 --- a/.gitignore +++ b/.gitignore @@ -111,3 +111,6 @@ mydjangosite/.idea/ # Secrets my_secrets/ secrets/ + +# Downloads from Dash App. TODO: move to different folder? +downloads/ diff --git a/CHANGELOG.md b/CHANGELOG.md index bf45edc..bd92926 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,8 +4,21 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [Unreleased]## [0.1.0.0b6] +## [Unreleased] + +## [0.1.1.1b0] + +## [0.1.1.0] - 2022-01-11 ### Changed +- Version bump to 0.1.1.0 due to many new features +- DashApp, DoDashApp `debug` property renamed to `dash_debug` +- HomePageEdit and PrepareDataPageEdit are the default main pages ### Added +- HomePage: Scenario duplicate, rename, delete +- DoDashApp.db_echo flag for DB connection debugging +- HomePage: Download scenario as Excel file +- HomePage: Download all scenarios as zip archive +- HomePage: Upload scenarios (from individual .xlsx or multiple in .zip) +- Read and Delete scenario using SQLAlchemy +- Duplicate scenario using SQLAlchemy insert-select ## [0.1.0.0b5] - 2022-01-05 ### Changed diff --git a/PyCharmProjectReadMe.md b/PyCharmProjectReadMe.md new file mode 100644 index 0000000..3691478 --- /dev/null +++ b/PyCharmProjectReadMe.md @@ -0,0 +1,16 @@ +# DSE_DO_Dashboard - PyCharm Project setup + +## Install environment and dependencies +1. File -> Settings -> Project -> Python Interpreter +Create a new virtual environment +2. Let `requirements.txt` install the dependencies + +## Test the dse-do-dashboard package +In the `test` folder, add dashboard tests. +In order for the tests to be able to do a regular import, add the `test` folder as a content root / source folder to the project. + +## Add dependency on dse-do-utils +Add the dse-do-utils root as a source folder to the project: +1. File -> Settings -> Project -> Project Structure +2. Add Content Root +This allows us to develop both packages in combination.
You can then change dse-do-utils and test the changes from within dse-do-dashboard. \ No newline at end of file diff --git a/dse_do_dashboard/dash_app.py b/dse_do_dashboard/dash_app.py index bebd561..fca6849 100644 --- a/dse_do_dashboard/dash_app.py +++ b/dse_do_dashboard/dash_app.py @@ -25,11 +25,11 @@ class DashApp(ABC): def __init__(self, logo_file_name: str = 'IBM.png', cache_config: Dict = {}, port: int = 8050, - debug: bool = False, + dash_debug: bool = False, host_env: Optional[HostEnvironment] = None): self.port = port self.host_env = host_env - self.debug = debug + self.dash_debug = dash_debug self.app = self.create_dash_app() # Margins to layout the header, sidebar and content area: @@ -83,7 +83,7 @@ def run_server(self): DA.run_server() """ - self.app.run_server(debug=self.debug, port=self.port) + self.app.run_server(debug=self.dash_debug, port=self.port) def config_cache(self): self.cache = Cache() diff --git a/dse_do_dashboard/do_dash_app.py b/dse_do_dashboard/do_dash_app.py index 2b76229..57a8bfe 100644 --- a/dse_do_dashboard/do_dash_app.py +++ b/dse_do_dashboard/do_dash_app.py @@ -4,6 +4,9 @@ from typing import Dict, List, Optional import pandas as pd + +from dse_do_dashboard.main_pages.home_page_edit import HomePageEdit +from dse_do_dashboard.main_pages.prepare_data_page_edit import PrepareDataPageEdit from dse_do_utils import DataManager from dse_do_utils.scenariodbmanager import ScenarioDbManager @@ -45,6 +48,7 @@ class DoDashApp(DashApp): """ def __init__(self, db_credentials: Dict, schema: Optional[str] = None, + db_echo: Optional[bool] = False, logo_file_name: Optional[str] = 'IBM.png', cache_config: Optional[Dict]= {}, visualization_pages: Optional[List[VisualizationPage]]= [], @@ -52,7 +56,7 @@ def __init__(self, db_credentials: Dict, data_manager_class=None, plotly_manager_class=None, port: Optional[int] = 8050, - debug: Optional[bool] = False, + dash_debug: Optional[bool] = False, host_env: Optional[HostEnvironment] = None ): """Create a Dashboard app. @@ -70,13 +74,14 @@ def __init__(self, db_credentials: Dict, :param plotly_manager_class: class of the PlotlyManager. Either specify the `data_manager_class` and the `plotly_manager_class` or override the method `get_plotly_manager` :param port: Port for DashApp. Default = 8050. - :param debug: If true, runs dash app server in debug mode. + :param dash_debug: If true, runs dash app server in debug mode. :param host_env: If HostEnvironment.CPD402, will use the ws_applications import make_link to generate a requests_pathname_prefix for the Dash app. For use with custom environment in CPD v4.0.02. The alternative (None or HostEnvironment.Local) runs the Dash app regularly. """ self.db_credentials = db_credentials self.schema = schema + self.db_echo = db_echo self.database_manager_class = database_manager_class # assert issubclass(self.database_manager_class, ScenarioDbManager) self.dbm = self.create_database_manager_instance() @@ -118,7 +123,7 @@ def __init__(self, db_credentials: Dict, self.read_scenario_table_from_db_callback = None # For Flask caching self.read_scenarios_table_from_db_callback = None # For Flask caching - super().__init__(logo_file_name=logo_file_name, cache_config=cache_config, port=port, debug=debug, host_env=host_env) + super().__init__(logo_file_name=logo_file_name, cache_config=cache_config, port=port, dash_debug=dash_debug, host_env=host_env)
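# Aside: a minimal usage sketch for DoDashApp, based on the constructor parameters
# documented above. `FruitScenarioDbManager`, `FruitDataManager`, `FruitPlotlyManager`
# and `DB2_CREDENTIALS` are hypothetical placeholders for application-specific classes
# and secrets; HomePageEdit and PrepareDataPageEdit are picked up automatically as the
# default main pages (see create_main_pages below).
from dse_do_dashboard.do_dash_app import DoDashApp

class FruitDashApp(DoDashApp):
    def __init__(self, db_credentials, schema=None, dash_debug=False, port=8050):
        super().__init__(db_credentials, schema,
                         db_echo=False,  # set to True to log the SQL that SQLAlchemy sends over this connection
                         visualization_pages=[],  # add VisualizationPage subclasses here
                         database_manager_class=FruitScenarioDbManager,
                         data_manager_class=FruitDataManager,
                         plotly_manager_class=FruitPlotlyManager,
                         dash_debug=dash_debug, port=port)

DA = FruitDashApp(DB2_CREDENTIALS, schema='FRUIT', dash_debug=True)
DA.run_server()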
def create_database_manager_instance(self) -> ScenarioDbManager: """Create an instance of a ScenarioDbManager. @@ -126,7 +131,7 @@ def create_database_manager_instance(self) -> ScenarioDbManager: Optionally, override this method.""" if self.database_manager_class is not None and self.db_credentials is not None: print(f"Connecting to DB2 at {self.db_credentials['host']}") - dbm = self.database_manager_class(credentials=self.db_credentials, schema=self.schema, echo=False) + dbm = self.database_manager_class(credentials=self.db_credentials, schema=self.schema, echo=self.db_echo) else: print("Error: either specify `database_manager_class`, `db_credentials` and `schema`, or override `create_database_manager_instance`.") return dbm @@ -136,8 +141,8 @@ def create_main_pages(self) -> List[MainPage]: Can be overridden by subclasses (not typical). """ main_pages = [ - HomePage(self), - PrepareDataPage(self), + HomePageEdit(self), + PrepareDataPageEdit(self), RunModelPage(self), ExploreSolutionPage(self), VisualizationTabsPage(self) @@ -449,6 +454,7 @@ def get_table_by_name(self, dm: DataManager, def get_table_schema(self, table_name: str) -> Optional[ScenarioTableSchema]: table_schema = None + # print(f"get_table_schema - {self.table_schemas}") if self.table_schemas is not None and table_name in self.table_schemas: table_schema = self.table_schemas[table_name] return table_schema diff --git a/dse_do_dashboard/main_pages/home_page_edit.py b/dse_do_dashboard/main_pages/home_page_edit.py new file mode 100644 index 0000000..387f39b --- /dev/null +++ b/dse_do_dashboard/main_pages/home_page_edit.py @@ -0,0 +1,614 @@ +# Copyright IBM All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import ast +import base64 +import io +import os +import pathlib +import tempfile +import zipfile + +import flask +import pandas as pd +from dash.exceptions import PreventUpdate +import dash +from dse_do_utils import ScenarioManager + +from dse_do_dashboard.main_pages.main_page import MainPage +from dash import dcc, html, Output, Input, State, ALL, MATCH +import dash_daq as daq +import dash_bootstrap_components as dbc + + +class HomePageEdit(MainPage): + """ + Includes: + - Duplicate Scenario + - Rename Scenario + - Delete Scenario + - Download Scenario(s) + - Upload Scenario(s) + """ + def __init__(self, dash_app): + super().__init__(dash_app, + page_name='Home', + page_id='home', + url='', + ) + + def get_layout(self): + scenarios_df = self.dash_app.read_scenarios_table_from_db_cached() #.reset_index() # SCDB2.get_scenarios_df().reset_index() + + layout = html.Div([ + + dbc.Card([ + dbc.CardHeader(html.Div("Reference Scenario", style={'width': '28vw'})), + dbc.CardBody([ + # dbc.CardHeader(html.Div("Reference Scenario", style={'width': '28vw'})), + dcc.Dropdown( + id='reference_scenario', + options=[ + {'label': i, 'value': i} + # for i in scenarios_df.reset_index().scenario_name + for i in scenarios_df.index + ], style = {'width': '28vw'}) + ]) + ], style = {'width': '30vw'}), + + dbc.Card([ + dbc.CardHeader(html.Div("Scenarios", style={'width': '80vw'})), + dbc.CardBody( + # id='scenario_table_card', + [ + # html.Div(id="scenario_table_div", style={'width': '78vw'}, + # ), + html.Div(children=self.get_scenario_operations_table(scenarios_df)), + ] + ), + ], # style={'width': '80vw'} + ), + + dbc.Accordion( + [ + dbc.AccordionItem( + [ + html.P("Download all scenarios in a .zip archive.
May take a long time."), + html.Hr(), + dbc.Button( + "Download all scenarios", + id="download_scenarios_button", + className="mb-3", + color="primary", + n_clicks=0, + ), + dcc.Download(id='download_scenarios_download'), + ], + title="Download All Scenarios", + ), + + dbc.AccordionItem( + [ + html.P("Select or drop one or more .xlsx or one .zip (with multiple .xlsx)"), + html.Hr(), + dcc.Upload( + id='upload_scenario', # 'upload-data', + children=html.Div([ + 'Drag and Drop or ', + html.A('Select Files') + ]), + style={ + 'width': '100%', + 'height': '60px', + 'lineHeight': '60px', + 'borderWidth': '1px', + 'borderStyle': 'dashed', + 'borderRadius': '5px', + 'textAlign': 'center', + 'margin': '10px' + }, + # Allow multiple files to be uploaded + multiple=True + ), + html.Div(id='output_data_upload'), + ], + title="Upload Scenarios", + ), + + dbc.AccordionItem( + [ + daq.StopButton( + id='stop_server_button', + label=f'Stop the Dash server. ' + f'Will release the port number {self.dash_app.port}.', + n_clicks=0 + ), + html.Div(id='stop_server_button_output') + ], + title="Stop Server", + ), + ], + start_collapsed=True, + ), + + # dbc.Card([ + # dbc.CardBody( + # [ + # dbc.Button( + # "Show 'Stop Server' Button", + # id="collapse_stop_server_button", + # className="mb-3", + # color="primary", + # n_clicks=0, + # ), + # dbc.Collapse( + # dbc.Card(dbc.CardBody([ + # # daq.StopButton( + # # id='stop_server_button', + # # label=f'Stop the Dash server. ' + # # f'Will release the port number {self.dash_app.port}.', + # # n_clicks=0 + # # ), + # html.Div(id='stop_server_button_output') + # ])), + # id="collapse_stop_server_button_state", + # is_open=False, + # ), + # ] + # ), + # ], style={'width': '80vw'}), + + ]) + return layout + + def get_scenario_operations_table(self, scenarios_df, ): + """Create a layout which allows a user to select an operation on a scenario: duplicate, rename, delete""" + layout = [] + # for scenario_name in scenarios_df.reset_index().scenario_name: + for scenario_name in scenarios_df.index: + layout.append( + dbc.Card(dbc.Row([ + dbc.Col(self.get_scenario_edit_dropdown(scenario_name, scenarios_df), width=1), + dbc.Col(scenario_name),])), + ) + # layout.extend([ + # dbc.Button( + # "Download all scenarios", + # id="download_scenarios_button", + # className="mb-3", + # color="primary", + # n_clicks=0, + # ), + # dcc.Download(id='download_scenarios_download'), + # ] + # ) + # break + return layout + + def get_scenario_edit_dropdown(self, scenario_name, scenarios_df): + dropdown = html.Div( + [ + dbc.DropdownMenu( + [ + dbc.DropdownMenuItem( + "Duplicate", + id = {'type':'duplicate_scenario_mi', 'index': scenario_name}, + n_clicks=0, + ), + self.get_scenario_rename_modal_dialog(scenario_name), + dbc.DropdownMenuItem( + "Rename", + id = {'type':'rename_scenario_mi', 'index': scenario_name}, + n_clicks=0 + ), + self.get_scenario_duplicate_modal_dialog(scenario_name, scenarios_df), + dbc.DropdownMenuItem( + "Download", + id = {'type':'download_scenario_mi', 'index': scenario_name}, + n_clicks=0 + ), + dcc.Download(id={'type':'download_scenario_download', 'index': scenario_name}), + # html.A(id={'type':'download_scenario_link', 'index': scenario_name}, + # children='Download File' + # ), + dbc.DropdownMenuItem(divider=True), + dbc.DropdownMenuItem( + "Delete", + id = {'type':'delete_scenario_mi', 'index': scenario_name}, + n_clicks=0 + ), + self.get_scenario_delete_modal_dialog(scenario_name), + ], + label="...", + size="sm", + ), + # html.P(id="item_clicks", className="mt-3"), 
+ ] + ) + return dropdown + + def get_scenario_rename_modal_dialog(self, scenario_name: str): + modal = dbc.Modal( + [ + dbc.ModalHeader("Rename Scenario"), + dbc.ModalBody(f"New scenario name for '{scenario_name}'"), + dbc.Input( + id = {'type':'rename_scenario_modal_input', 'index': scenario_name}, + value=scenario_name, type="text"), + dbc.ModalFooter([ + dbc.Button("Cancel", + id = {'type':'rename_scenario_modal_cancel', 'index': scenario_name}, + className="ml-auto"), + dbc.Button("Rename", + id = {'type':'rename_scenario_modal_rename', 'index': scenario_name}, + className="ml-auto"), + ]), + ], + id = {'type':'rename_scenario_modal', 'index': scenario_name}, + ) + return modal + + def get_scenario_duplicate_modal_dialog(self, scenario_name: str, scenarios_df=None): + new_scenario_name = self.dash_app.dbm._find_free_duplicate_scenario_name(scenario_name, scenarios_df) + modal = dbc.Modal( + [ + dbc.ModalHeader("Duplicate Scenario"), + dbc.ModalBody(f"Name for the duplicate of the scenario '{scenario_name}':"), + dbc.Input( + id = {'type':'duplicate_scenario_modal_input', 'index': scenario_name}, + value=new_scenario_name, type="text"), + dbc.ModalFooter([ + dbc.Button("Cancel", + id = {'type':'duplicate_scenario_modal_cancel', 'index': scenario_name}, + className="ml-auto"), + dbc.Button("Duplicate", + id = {'type':'duplicate_scenario_modal_rename', 'index': scenario_name}, + className="ml-auto"), + ]), + ], + id = {'type':'duplicate_scenario_modal', 'index': scenario_name}, + ) + return modal + + def get_scenario_delete_modal_dialog(self, scenario_name: str): + modal = dbc.Modal( + [ + dbc.ModalHeader("Delete Scenario"), + dbc.ModalBody(f"Delete the scenario '{scenario_name}':"), + dbc.ModalFooter([ + dbc.Button("Cancel", + id = {'type':'delete_scenario_modal_cancel', 'index': scenario_name}, + className="ml-auto"), + dbc.Button("Delete", + id = {'type':'delete_scenario_modal_rename', 'index': scenario_name}, + className="ml-auto"), + ]), + ], + id = {'type':'delete_scenario_modal', 'index': scenario_name}, + ) + return modal + + def parse_scenario_upload_contents_callback(self, contents, filename, date): + """Called for each uploaded scenario""" + content_type, content_string = contents.split(',') + print(f"Upload file. 
filename={filename}, content_type={content_type}") + decoded = base64.b64decode(content_string) + # root, file_extension = os.path.splitext(filename) + file_extension = pathlib.Path(filename).suffix + scenario_name = pathlib.Path(filename).stem + + print(f"scenario_name = {scenario_name}, extension = {file_extension}") + try: + if file_extension == '.xlsx': + # pass + # Assume that the user uploaded an excel file + # df = pd.read_excel(io.BytesIO(decoded)) + xl = pd.ExcelFile(io.BytesIO(decoded)) + # Read data from Excel + inputs, outputs = ScenarioManager.load_data_from_excel_s(xl) + # s = f"inputs = {inputs.keys()}, outputs = {outputs.keys()}" + # print(s) + print("Input tables: {}".format(", ".join(inputs.keys()))) + print("Output tables: {}".format(", ".join(outputs.keys()))) + self.dash_app.dbm.replace_scenario_in_db(scenario_name=scenario_name, inputs=inputs, outputs=outputs) + child = html.Div([ + html.P(f"Uploaded scenario: '{scenario_name}' from '{filename}'"), + html.P(f"Input tables: {', '.join(inputs.keys())}"), + html.P(f"Output tables: {', '.join(outputs.keys())}"), + ]) + return child + elif file_extension == '.zip': + zip_file = zipfile.ZipFile(io.BytesIO(decoded)) + # unzip_results = [html.P(f"Support for zip archives (of .xslx) is pending: {filename}")] + unzip_results = [] + for info in zip_file.infolist(): + scenario_name = pathlib.Path(info.filename).stem + file_extension = pathlib.Path(info.filename).suffix + if file_extension == '.xlsx': + print(f"file in zip : {info.filename}") + filecontents = zip_file.read(info) + xl = pd.ExcelFile(filecontents) + inputs, outputs = ScenarioManager.load_data_from_excel_s(xl) + print("Input tables: {}".format(", ".join(inputs.keys()))) + print("Output tables: {}".format(", ".join(outputs.keys()))) + self.dash_app.dbm.replace_scenario_in_db(scenario_name=scenario_name, inputs=inputs, outputs=outputs) # + unzip_results.append(html.P(f"Uploaded scenario: '{scenario_name}' from '{filename}'"),) + else: + unzip_results.append(html.P(f"File: '{info.filename}' is not a .xlsx. Skipped."),) + child = html.Div(unzip_results) + return child + else: + return html.P(f"Unsupported file type: {filename}") + except Exception as e: + print(e) + return html.Div([ + f'There was an error processing this file: {e}' + ]) + + return html.P(f"Uploaded scenario {filename}") + + def set_dash_callbacks(self): + app = self.dash_app.app + + ############################################################################# + # Scenario operations callbacks + ############################################################################# + @app.callback(Output('output_data_upload', 'children'), + Input('upload_scenario', 'contents'), + State('upload_scenario', 'filename'), + State('upload_scenario', 'last_modified')) + def update_output(list_of_contents, list_of_names, list_of_dates): + """Supports uploading a set of scenarios""" + if list_of_contents is not None: + children = [ + # f"{n}, {d}" for c, n, d in zip(list_of_contents, list_of_names, list_of_dates) + self.parse_scenario_upload_contents_callback(c, n, d) for c, n, d in zip(list_of_contents, list_of_names, list_of_dates) + ] + return children + + + @app.callback([ + Output('download_scenarios_button', 'n_clicks'), + Output('download_scenarios_download', 'data'), + ], + Input('download_scenarios_button', 'n_clicks'), + prevent_initial_call=True + ) + def download_scenarios_callback(n_clicks): + """Download all scenarios in a zip file. 
+ TODO: download selected set of scenarios + """ + print("Download all scenarios") + scenarios_df = self.dash_app.read_scenarios_table_from_db_cached() + data = None + with tempfile.TemporaryDirectory() as tmpdir: + zip_filepath = os.path.join(tmpdir, 'scenarios.zip') + with zipfile.ZipFile(zip_filepath, 'w') as zipMe: + for scenario_name in scenarios_df.index: + print(f"Download scenario {scenario_name}") + inputs, outputs = self.dash_app.dbm.read_scenario_from_db(scenario_name) + filename = f'{scenario_name}.xlsx' + filepath = os.path.join(tmpdir, filename) + with pd.ExcelWriter(filepath) as writer: + ScenarioManager.write_data_to_excel_s(writer, inputs=inputs, outputs=outputs) + writer.save() + zipMe.write(filepath, arcname=filename, compress_type=zipfile.ZIP_DEFLATED) + data = dcc.send_file(zip_filepath) + + return 0, data + + @app.callback([ + Output({'type': 'download_scenario_mi', 'index': MATCH}, 'n_clicks'), + Output({'type': 'download_scenario_download', 'index': MATCH}, 'data'), + ], + Input({'type': 'download_scenario_mi', 'index': MATCH}, 'n_clicks'), + State({'type': 'download_scenario_mi', 'index': MATCH}, 'id'), + prevent_initial_call=True + ) + def download_scenario_callback(n_clicks, id): + """We need the `n_clicks` as input. Only the `id` will not be triggered when a user selects the menu option. + See https://community.plotly.com/t/excel-writer-to-dcc-download/54132/5 for use of tempfile.TemporaryDirectory() + """ + scenario_name = id['index'] + print(f"Download scenario {scenario_name}") + # df = pd.DataFrame({'dropdown_value': [1, 2, 3]}) + # relative_filename = os.path.join( + # 'downloads', + # '{}-download.xlsx'.format(scenario_name) + # ) + # absolute_filename = os.path.join(os.getcwd(), relative_filename) + # writer = pd.ExcelWriter(absolute_filename) + # df.to_excel(writer, 'Sheet1') + # writer.save() + # href = './{}'.format(relative_filename) + + multi_threaded = False # Enabling multi-threading does NOT result in speedup. In fact for small scenarios it is slower! + inputs, outputs = self.dash_app.dbm.read_scenario_from_db(scenario_name, multi_threaded) + #TODO: inputs include a scenario table. Remove. 
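# Aside: a minimal sketch of the tempfile + dcc.send_file pattern used just below,
# assuming a plain DataFrame instead of a full scenario. The file is written inside the
# TemporaryDirectory context and handed to dcc.send_file *before* the context exits:
# dcc.send_file reads the bytes into the response, so the directory can safely be
# cleaned up afterwards.
import os
import tempfile
import pandas as pd
from dash import dcc

def excel_download(df: pd.DataFrame, filename: str = 'example.xlsx'):
    with tempfile.TemporaryDirectory() as tmpdir:
        filepath = os.path.join(tmpdir, filename)
        df.to_excel(filepath, index=False)  # requires an Excel engine such as openpyxl
        return dcc.send_file(filepath)  # reads the file while the temp dir still exists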
+ + data = None + with tempfile.TemporaryDirectory() as tmpdir: + filename = f'{scenario_name}.xlsx' + filepath = os.path.join(tmpdir, filename) + with pd.ExcelWriter(filepath) as writer: + ScenarioManager.write_data_to_excel_s(writer, inputs=inputs, outputs=outputs) + writer.save() + data = dcc.send_file(filepath) + + return 0, data + + # @app.server.route('/downloads/') + # def serve_static(path): + # root_dir = os.getcwd() + # return flask.send_from_directory( + # os.path.join(root_dir, 'downloads'), path + # ) + + @app.callback( + Output({'type': 'delete_scenario_modal', 'index': MATCH}, "is_open"), + [ + Input({'type': 'delete_scenario_mi', 'index': MATCH}, 'n_clicks'), + Input({'type': 'delete_scenario_modal_cancel', 'index': MATCH}, "n_clicks"), + Input({'type': 'delete_scenario_modal_rename', 'index': MATCH}, "n_clicks"), + ], + [State({'type': 'delete_scenario_modal', 'index': MATCH}, "is_open"), + # State({'type': 'delete_scenario_modal_input', 'index': MATCH}, "value") + ], + ) + def toggle_delete_modal(n1, n2, n3, is_open): + """Open or close the delete-scenario modal. + Deletes the scenario when the user confirms. + """ + ctx = dash.callback_context + if not ctx.triggered: + raise PreventUpdate + + if n1 or n2 or n3: + # print(f"Rename modal: {new_scenario_name}") + # print(f"ctx.triggered[0] = {ctx.triggered[0]}") + triggered_component_id_str = ctx.triggered[0]['prop_id'].split('.')[0] # This returns a STRING representation of the pattern-matching id + # print(f"Rename context id = {triggered_component_id}") + triggered_component_id_dict = ast.literal_eval(triggered_component_id_str) # Convert the string to a Dict to get the type. + ctx_type = triggered_component_id_dict['type'] + current_scenario_name = triggered_component_id_dict['index'] + # print(f"Rename context type = {ctx_type}") + + if ctx_type == 'delete_scenario_modal_rename': + print(f"Deleting scenario {current_scenario_name}") + self.dash_app.dbm.delete_scenario_from_db(current_scenario_name) + + return not is_open + return is_open + + @app.callback( + Output({'type': 'duplicate_scenario_modal', 'index': MATCH}, "is_open"), + [ + Input({'type': 'duplicate_scenario_mi', 'index': MATCH}, 'n_clicks'), + Input({'type': 'duplicate_scenario_modal_cancel', 'index': MATCH}, "n_clicks"), + Input({'type': 'duplicate_scenario_modal_rename', 'index': MATCH}, "n_clicks"), + ], + [State({'type': 'duplicate_scenario_modal', 'index': MATCH}, "is_open"), + State({'type': 'duplicate_scenario_modal_input', 'index': MATCH}, "value") + ], + ) + def toggle_duplicate_modal(n1, n2, n3, is_open, new_scenario_name): + """ + TODO: replace by a duplicate + delete + """ + ctx = dash.callback_context + if not ctx.triggered: + raise PreventUpdate + + if n1 or n2 or n3: + # print(f"Rename modal: {new_scenario_name}") + # print(f"ctx.triggered[0] = {ctx.triggered[0]}") + triggered_component_id_str = ctx.triggered[0]['prop_id'].split('.')[0] # This returns a STRING representation of the pattern-matching id + # print(f"Rename context id = {triggered_component_id}") + triggered_component_id_dict = ast.literal_eval(triggered_component_id_str) # Convert the string to a Dict to get the type.
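# Aside: a worked example of the prop_id parsing above. For a pattern-matching id,
# ctx.triggered[0]['prop_id'] is a string of the form '<json dict>.<property>'
# (the example value below is hypothetical). Splitting off the property suffix leaves
# a dict literal that ast.literal_eval converts back into a Python dict. Note this
# assumes the id itself contains no '.' character.
import ast
prop_id = '{"index":"Scenario 1","type":"delete_scenario_mi"}.n_clicks'
component_id = ast.literal_eval(prop_id.split('.')[0])
assert component_id['type'] == 'delete_scenario_mi'
assert component_id['index'] == 'Scenario 1'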
+ ctx_type = triggered_component_id_dict['type'] + current_scenario_name = triggered_component_id_dict['index'] + # print(f"Rename context type = {ctx_type}") + + if ctx_type == 'duplicate_scenario_modal_rename': + if new_scenario_name != current_scenario_name: + print(f"Duplicating scenario from {current_scenario_name} to {new_scenario_name}") + self.dash_app.dbm.duplicate_scenario_in_db(current_scenario_name, new_scenario_name) + + return not is_open + return is_open + + @app.callback( + Output({'type': 'rename_scenario_modal', 'index': MATCH}, "is_open"), + [ + Input({'type': 'rename_scenario_mi', 'index': MATCH}, 'n_clicks'), + Input({'type': 'rename_scenario_modal_cancel', 'index': MATCH}, "n_clicks"), + Input({'type': 'rename_scenario_modal_rename', 'index': MATCH}, "n_clicks"), + ], + [State({'type': 'rename_scenario_modal', 'index': MATCH}, "is_open"), + State({'type': 'rename_scenario_modal_input', 'index': MATCH}, "value") + ], + ) + def toggle_rename_modal(n1, n2, n3, is_open, new_scenario_name): + """ + TODO: trigger reload of scenario table and update of UI + If `rename_scenario_modal_rename` then do the rename + """ + ctx = dash.callback_context + if not ctx.triggered: + raise PreventUpdate + + if n1 or n2 or n3: + # print(f"Rename modal: {new_scenario_name}") + # print(f"ctx.triggered[0] = {ctx.triggered[0]}") + triggered_component_id_str = ctx.triggered[0]['prop_id'].split('.')[0] # This returns a STRING representation of the pattern-matching id + # print(f"Rename context id = {triggered_component_id}") + triggered_component_id_dict = ast.literal_eval(triggered_component_id_str) # Convert the string to a Dict to get the type. + ctx_type = triggered_component_id_dict['type'] + current_scenario_name = triggered_component_id_dict['index'] + # print(f"Rename context type = {ctx_type}") + + if ctx_type == 'rename_scenario_modal_rename': + if new_scenario_name != current_scenario_name: + print(f"Renaming scenario from {current_scenario_name} to {new_scenario_name}") + self.dash_app.dbm.rename_scenario_in_db(current_scenario_name, new_scenario_name) + + return not is_open + return is_open + + # @app.callback( + # Output({'type': 'duplicate_scenario_mi', 'index': MATCH}, 'n_clicks'), + # Input({'type': 'duplicate_scenario_mi', 'index': MATCH}, 'n_clicks'), + # State({'type': 'duplicate_scenario_mi', 'index': MATCH}, 'id'), + # prevent_initial_call=True + # ) + # def duplicate_scenario_callback(n_clicks, id): + # """We need the `n_clicks` as input. Only the `id` will not be triggered when a user selects the menu option.""" + # scenario_name = id['index'] + # print(f"Duplicate scenario {scenario_name}") + # raise PreventUpdate + # return 0 + # + # @app.callback( + # Output({'type': 'rename_scenario_mi', 'index': MATCH}, 'n_clicks'), + # Input({'type': 'rename_scenario_mi', 'index': MATCH}, 'n_clicks'), + # State({'type': 'rename_scenario_mi', 'index': MATCH}, 'id'), + # prevent_initial_call=True + # ) + # def rename_scenario_callback(n_clicks, id): + # """We need the `n_clicks` as input. 
Only the `id` will not be triggered when a user selects the menu option.""" + # scenario_name = id['index'] + # print(f"Rename scenario {scenario_name}") + # raise PreventUpdate + # return 0 + # + # @app.callback( + # Output({'type': 'delete_scenario_mi', 'index': MATCH}, 'n_clicks'), + # Input({'type': 'delete_scenario_mi', 'index': MATCH}, 'n_clicks'), + # State({'type': 'delete_scenario_mi', 'index': MATCH}, 'id'), + # prevent_initial_call=True + # ) + # def delete_scenario_callback(n_clicks, id): + # """We need the `n_clicks` as input. Only the `id` will not be triggered when a user selects the menu option.""" + # scenario_name = id['index'] + # print(f"Delete scenario {scenario_name}") + # raise PreventUpdate + # return 0 + + ############################################################################# + @app.callback( + Output("collapse_stop_server_button_state", "is_open"), + [Input("collapse_stop_server_button", "n_clicks")], + [State("collapse_stop_server_button_state", "is_open")], + ) + def toggle_collapse(n, is_open): + if n: + return not is_open + return is_open + + @app.callback( + Output('stop_server_button_output', 'children'), + Input('stop_server_button', 'n_clicks'), + prevent_initial_call=True + ) + def update_output(n_clicks): + self.dash_app.shutdown() + return 'The stop button has been clicked {} times.'.format(n_clicks) \ No newline at end of file diff --git a/dse_do_dashboard/main_pages/prepare_data_page.py b/dse_do_dashboard/main_pages/prepare_data_page.py index 2750872..6268f4e 100644 --- a/dse_do_dashboard/main_pages/prepare_data_page.py +++ b/dse_do_dashboard/main_pages/prepare_data_page.py @@ -14,44 +14,70 @@ class PrepareDataPage(MainPage): - def __init__(self, dash_app): + def __init__(self, dash_app, + page_name: str = 'Prepare Data', + page_id: str = 'prepare-data', + url: str = 'prepare-data'): super().__init__(dash_app, - page_name='Prepare Data', - page_id='prepare-data', - url='prepare-data', + page_name=page_name, + page_id=page_id, + url=url, ) def get_layout(self): - input_tables = self.dash_app.get_input_table_names() + # input_tables = self.dash_app.get_input_table_names() layout = html.Div([ - - dbc.Card([ - dbc.CardHeader('Input Table', style= {'fullscreen':True}), - dbc.CardBody( - dcc.Dropdown(id='input_table_drpdwn', - options=[ {'label': i, 'value': i} - for i in input_tables], - value=input_tables[0], - style = {'width': '75vw','height':'2vw'}, - ), - ), - ], style = {'width': '80vw'}), + self.get_input_table_selection_card(), + # dbc.Card([ + # dbc.CardHeader('Input Table', style= {'fullscreen':True}), + # dbc.CardBody( + # dcc.Dropdown(id='input_table_drpdwn', + # options=[ {'label': i, 'value': i} + # for i in input_tables], + # value=input_tables[0], + # style = {'width': '75vw','height':'2vw'}, + # ), + # ), + # ], style = {'width': '80vw'}), dbc.Card([ # dbc.CardHeader('Input Table'), - dbc.CardBody(id = 'input_data_table_card',style = {'width': '79vw'} ), + dbc.CardBody(id='input_data_table_card', style={'width': '79vw'} ), html.Div(id="input_data_table_div"), - ], style = {'width': '80vw'}), + ], style={'width': '80vw'}), - dbc.Card([ - - dbc.CardBody(id = 'input_pivot_table_card',style = {'width': '79vw'}), - html.Div(id="input_pivot_table_div"), - ], style = {'width': '80vw'}) + # dbc.Card([ + # + # dbc.CardBody(id = 'input_pivot_table_card',style = {'width': '79vw'}), + # html.Div(id="input_pivot_table_div"), + # ], style = {'width': '80vw'}) + self.get_pivot_table_card(), ]) return layout + def 
get_input_table_selection_card(self): + input_tables = self.dash_app.get_input_table_names() + card = dbc.Card([ + dbc.CardHeader('Input Table', style={'fullscreen': True}), + dbc.CardBody( + dcc.Dropdown(id='input_table_drpdwn', + options=[ {'label': i, 'value': i} + for i in input_tables], + value=input_tables[0], + style = {'width': '75vw', 'height': '2vw'}, + ), + ), + ], style={'width': '80vw'}) + return card + + def get_pivot_table_card(self): + card = dbc.Card([ + dbc.CardBody(id='input_pivot_table_card', style={'width': '79vw'}), + html.Div(id="input_pivot_table_div"), + ], style={'width': '80vw'}) + return card + def update_data_and_pivot_input_table_callback(self, scenario_name, table_name): """Body for the Dash callback. @@ -67,7 +93,7 @@ def update_data_and_pivot_input_table(scenario_name, table_name): return [data_table_children, pivot_table_children] """ - print(f"update_data_and_pivot_input_table for {table_name} in {scenario_name}") + # print(f"update_data_and_pivot_input_table for {table_name} in {scenario_name}") input_table_names = [table_name] pm = self.dash_app.get_plotly_manager(scenario_name, input_table_names, []) dm = pm.dm diff --git a/dse_do_dashboard/main_pages/prepare_data_page_edit.py b/dse_do_dashboard/main_pages/prepare_data_page_edit.py index 7d813f1..5821931 100644 --- a/dse_do_dashboard/main_pages/prepare_data_page_edit.py +++ b/dse_do_dashboard/main_pages/prepare_data_page_edit.py @@ -1,7 +1,7 @@ # Copyright IBM All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -from typing import List +from typing import List, Optional, Dict import pandas as pd import dash @@ -12,41 +12,37 @@ from dash import dcc, html import dash_bootstrap_components as dbc import pprint +from dash import dash_table +from dse_do_dashboard.main_pages.prepare_data_page import PrepareDataPage from dse_do_dashboard.utils.dash_common_utils import get_data_table_card_children, get_pivot_table_card_children, \ - diff_dashtable_mi, ScenarioTableSchema + diff_dashtable_mi, ScenarioTableSchema, table_type from dse_do_dashboard.utils.scenariodbmanager_update import DbCellUpdate -class PrepareDataPageEdit(MainPage): +class PrepareDataPageEdit(PrepareDataPage): + """Includes feature to edit input tables. + Use instead of `PrepareDataPage`. 
+ Do not combine both in the same app: that will cause duplicate callbacks.""" def __init__(self, dash_app): self.data_table_id = 'input_data_table' super().__init__(dash_app, - page_name='Prepare Data Edit', - page_id='prepare-data-edit', - url='prepare-data-edit', + page_name='Prepare Data', + page_id='prepare-data', + url='prepare-data', ) def get_layout(self): input_tables = self.dash_app.get_input_table_names() layout = html.Div([ dcc.Store(id="current_table_name"), - dbc.Card([ - dbc.CardHeader('Input Table', style= {'fullscreen':True}), - dbc.CardBody( - dcc.Dropdown(id='input_table_drpdwn', - options=[ {'label': i, 'value': i} - for i in input_tables], - value=input_tables[0], - style = {'width': '75vw','height':'2vw'}, - ), - ), - ], style = {'width': '80vw'}), + + self.get_input_table_selection_card(), dbc.Card([ # dbc.CardHeader('Input Table'), - dbc.CardBody(id = 'input_data_table_card',style = {'width': '79vw'}, - children=get_data_table_card_children(df=pd.DataFrame(), table_name='None', data_table_id=self.data_table_id) # We need to initialize a DataTable, otherwise issues with registering callbacks + dbc.CardBody(id='input_data_table_card', style={'width': '79vw'}, + children=get_data_table_card_children(df=pd.DataFrame(), table_name='None', editable=True, data_table_id=self.data_table_id) # We need to initialize a DataTable, otherwise there are issues registering callbacks ), html.Button("No table updates", id="commit_changes_button", disabled=True), @@ -56,21 +52,13 @@ def get_layout(self): html.Div(id="my_data_table_output") ], style = {'width': '80vw'}), - dbc.Card([ - - dbc.CardBody(id = 'input_pivot_table_card',style = {'width': '79vw'}), - html.Div(id="input_pivot_table_div"), - ], style = {'width': '80vw'}) - + self.get_pivot_table_card(), ]) return layout - - def update_data_and_pivot_input_table_callback(self, scenario_name, table_name): + def update_data_and_pivot_input_table_callback(self, scenario_name, table_name, diff_store_data=None): + """Body for the Dash callback.
- Usage:: - @app.callback([Output('input_data_table_card', 'children'), Output('input_pivot_table_card', 'children')], [Input('top_menu_scenarios_drpdwn', 'value'), @@ -79,6 +67,7 @@ def update_data_and_pivot_input_table(scenario_name, table_name): data_table_children, pivot_table_children = DA.update_data_and_pivot_input_table_callback(scenario_name, table_name) return [data_table_children, pivot_table_children] + TODO: share parts with parent """ # print(f"update_data_and_pivot_input_table for {table_name} in {scenario_name}") input_table_names = [table_name] @@ -87,10 +76,110 @@ def update_data_and_pivot_input_table(scenario_name, table_name): df = self.dash_app.get_table_by_name(dm=dm, table_name=table_name, index=False, expand=False) table_schema = self.dash_app.get_table_schema(table_name) pivot_table_config = self.dash_app.get_pivot_table_config(table_name) - data_table_children = get_data_table_card_children(df, table_name, table_schema, editable=True, data_table_id=self.data_table_id) + data_table_children = self.get_data_table_card_children(df, table_name, table_schema, editable=True, data_table_id=self.data_table_id, diff_store_data=diff_store_data) pivot_table_children = get_pivot_table_card_children(df, scenario_name, table_name, pivot_table_config) return data_table_children, pivot_table_children + def get_data_table_card_children(self, df, table_name:str, table_schema: Optional[ScenarioTableSchema] = None, + editable: bool = False, data_table_id:str=None, diff_store_data=None): + return [ + dbc.CardHeader( + table_name + # title=table_name, + # fullscreen=True + ), + self.get_data_table(df, table_schema, editable, data_table_id, diff_store_data) + ] + + def get_data_table(self, df, table_schema: Optional[ScenarioTableSchema] = None, editable: bool = False, data_table_id=None, diff_store_data=None) -> dash_table.DataTable: + """ + Generates a DataTable for a DataFrame. For use in 'Prepare Data' and 'Explore Solution' pages. 
:param df: The DataFrame to display. + :param table_schema: Optional ScenarioTableSchema. Index columns are moved to the front and shown in bold. + :param editable: If True, cells can be edited in the UI. + :param data_table_id: The id for the DataTable component. + :param diff_store_data: Contents of the diff store, used to highlight edited cells. + :return: A dash_table.DataTable. + """ + if data_table_id is None: + data_table_id = 'my_data_table' + index_columns = [] + if table_schema is not None: + df = df.set_index(table_schema.index_columns).reset_index() # ensures all index columns are first + index_columns = table_schema.index_columns + return dash_table.DataTable( + id=data_table_id, + data=df.to_dict('records'), + columns=[ + {'name': i, 'id': i, 'type': table_type(df[i])} + for i in df.columns + ], + fixed_rows={'headers': True}, + editable=editable, + # fixed_columns={'headers': False, 'data': 0}, # Does NOT create a horizontal scroll bar + filter_action="native", + sort_action="native", + sort_mode="multi", + style_cell={ + 'textOverflow': 'ellipsis', # See https://dash.plotly.com/datatable/width to control column-name width + 'maxWidth': 0, # Needs to be here for the 'ellipsis' option to work + 'overflow' : 'hidden', + 'font_family': 'sans-serif', + 'font_size': '12px', + 'textAlign': 'left'}, + style_table={ + 'maxHeight': '400px', + 'overflowY': 'scroll' + }, + style_header={ + 'if': { + 'column_id': index_columns + }, + # 'backgroundColor': 'rgb(230, 230, 230)', + 'fontWeight': 'bold' + }, + style_data_conditional=([ + { + 'if': { + 'column_id': index_columns + }, + 'fontWeight': 'bold', + # 'backgroundColor': '#0074D9', + # 'color': 'white' + }, + # { + # 'if': { + # 'row_index': 2, + # 'column_id': 'size' + # }, + # 'backgroundColor': '#d62728' + # }, + ] + self.get_table_style_data_conditional_cell_edit(diff_store_data) + )) + + def get_table_style_data_conditional_cell_edit(self, diff_store_data=None) -> List[Dict]: + """Return a list of dicts with conditional cell styles, one for each cell that has been edited. + Partly works: + - The coloring works, but + - The table needs to be refreshed/regenerated to get the color filtering active. + That refresh is not happening currently. Need to see how it can be triggered. + That refresh will reset the edits in the table, + so one would need to re-apply the changes from the store. + Seems like a lot of work for this feature. + """ + # print(f"get_table_style_data_conditional_cell_edit diff_store_data = {diff_store_data}") + if diff_store_data is None: + return [] + cell_edits = [] + for l in diff_store_data.values(): + for diff in l: + cell_edits.append({ + 'if': { + 'row_index': diff['row_idx'], + 'column_id': diff['column_name'] + }, + 'backgroundColor': '#d62728' + }) + return cell_edits + + + def get_db_cell_updates(self, diff_store_data) -> List[DbCellUpdate]: + """Get the changes in the diff store as DbCellUpdate. + Note: from Python 3.7, a Dict maintains the order in which items are added. So no need to sort by time.
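# Aside: a sketch of the diff-store shape consumed above, assuming (per the note on
# dict ordering) a dict keyed by edit time, each value a list of cell-diff dicts. Only
# the 'row_idx' and 'column_name' fields used by the styling code are shown; the
# example values are hypothetical.
diff_store_data = {
    '1641916800000': [
        {'row_idx': 3, 'column_name': 'price'},  # other DbCellUpdate-like fields omitted
    ],
}
# get_table_style_data_conditional_cell_edit() maps each diff onto one
# conditional-style dict that highlights the edited cell:
style = [{'if': {'row_index': d['row_idx'], 'column_id': d['column_name']},
          'backgroundColor': '#d62728'}
         for diffs in diff_store_data.values() for d in diffs]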
@@ -149,14 +238,17 @@ def set_dash_callbacks(self): Will be called to register any callbacks :return: """ + # print("Set dash callbacks for PrepareDataPageEdit") + # super().set_dash_callbacks() app = self.dash_app.app @app.callback([Output('input_data_table_card', 'children'), Output('input_pivot_table_card', 'children')], [Input('top_menu_scenarios_drpdwn', 'value'), - Input('input_table_drpdwn', 'value')]) - def update_data_and_pivot_input_table(scenario_name:str, table_name:str): - data_table_children, pivot_table_children = self.update_data_and_pivot_input_table_callback(scenario_name, table_name) + Input('input_table_drpdwn', 'value')], + State("my_data_table_diff_store", "data")) + def update_data_and_pivot_input_table_edit(scenario_name:str, table_name:str, diff_store_data): + data_table_children, pivot_table_children = self.update_data_and_pivot_input_table_callback(scenario_name, table_name, diff_store_data) return [data_table_children, pivot_table_children] @app.callback([Output('commit_changes_button', 'style'), @@ -188,7 +280,7 @@ def change_button_style(data): else: return white_button_style, "No changes to commit" - + # print("Set dash callbacks for PrepareDataPageEdit.capture_and_commit_edits") @app.callback( [ Output("my_data_table_diff_store", "data"), @@ -210,7 +302,16 @@ def capture_and_commit_edits(ts, n_clicks, data, data_previous, diff_store_data, """ ctx = dash.callback_context if not ctx.triggered: - raise PreventUpdate + # print(f"Not triggered. diff_store_data = {diff_store_data}") + # raise PreventUpdate + # Attempt to enable Commit button if there are changes pending in the diff-store after a 'Refresh' button click + # Note that the value of the change in the table has been lost due to the refresh, but not the Store + # So the diff can still be committed, but doesn't properly show in the table + if diff_store_data is None: + raise PreventUpdate + else: + return diff_store_data, False + triggered_component_id = ctx.triggered[0]['prop_id'].split('.')[0] if triggered_component_id == self.data_table_id: diff --git a/dse_do_dashboard/utils/scenariodbmanager_update.py b/dse_do_dashboard/utils/scenariodbmanager_update.py index 5b81cb0..8be1561 100644 --- a/dse_do_dashboard/utils/scenariodbmanager_update.py +++ b/dse_do_dashboard/utils/scenariodbmanager_update.py @@ -1,28 +1,35 @@ # Copyright IBM All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 - +from multiprocessing.pool import ThreadPool from typing import Dict, List, NamedTuple, Any, Optional -from dse_do_utils.scenariodbmanager import ScenarioDbManager, ScenarioDbTable +import pandas as pd +import sqlalchemy +from dse_do_utils.scenariodbmanager import ScenarioDbManager, ScenarioDbTable, DbCellUpdate +# Typing aliases +Inputs = Dict[str, pd.DataFrame] +Outputs = Dict[str, pd.DataFrame] ######################################### # Added to dse-do-utils v 0.5.3.2b ######################################### -class DbCellUpdate(NamedTuple): - scenario_name: str - table_name: str - row_index: List[Dict[str, Any]] # e.g. [{'column': 'col1', 'value': 1}, {'column': 'col2', 'value': 'pear'}] - column_name: str - current_value: Any - previous_value: Any # Not used for DB operation - row_idx: int # Not used for DB operation +# class DbCellUpdate(NamedTuple): +# scenario_name: str +# table_name: str +# row_index: List[Dict[str, Any]] # e.g. 
[{'column': 'col1', 'value': 1}, {'column': 'col2', 'value': 'pear'}] +# column_name: str +# current_value: Any +# previous_value: Any # Not used for DB operation +# row_idx: int # Not used for DB operation class ScenarioDbManagerUpdate(ScenarioDbManager): """ + DEPRECATED - was used to develop DB features that are now migrated to dse-do-utils To test editable tables. Supports DB updates of edits. - Changes are added to dse-do-utils v 0.5.3.2b + Supports CRUD operations on scenarios. + Changes are added to dse-do-utils v 0.5.4.0b """ def __init__(self, input_db_tables: Dict[str, ScenarioDbTable], output_db_tables: Dict[str, ScenarioDbTable], @@ -35,36 +42,40 @@ def __init__(self, input_db_tables: Dict[str, ScenarioDbTable], output_db_tables # Update scenario # Added to dse-do-utils v 0.5.3.2b ############################################################################################ - def update_cell_changes_in_db(self, db_cell_updates: List[DbCellUpdate]): - """Update a set of cells in the DB. - - :param db_cell_updates: - :return: - """ - if self.enable_transactions: - print("Update cells with transaction") - with self.engine.begin() as connection: - self._update_cell_changes_in_db(db_cell_updates, connection=connection) - else: - self._update_cell_changes_in_db(db_cell_updates) - - def _update_cell_changes_in_db(self, db_cell_updates: List[DbCellUpdate], connection=None): - """Update an ordered list of single value changes (cell) in the DB.""" - for db_cell_change in db_cell_updates: - self._update_cell_change_in_db(db_cell_change, connection) - - def _update_cell_change_in_db(self, db_cell_update: DbCellUpdate, connection=None): - """Update a single value (cell) change in the DB.""" - db_table_name = self.db_tables[db_cell_update.table_name].db_table_name - column_change = f"{db_cell_update.column_name} = '{db_cell_update.current_value}'" - scenario_condition = f"scenario_name = '{db_cell_update.scenario_name}'" - pk_conditions = ' AND '.join([f"{pk['column']} = '{pk['value']}'" for pk in db_cell_update.row_index]) - sql = f"UPDATE {db_table_name} SET {column_change} WHERE {pk_conditions} AND {scenario_condition};" - # print(f"_update_cell_change_in_db = {sql}") - if connection is None: - self.engine.execute(sql) - else: - connection.execute(sql) + # def update_cell_changes_in_db(self, db_cell_updates: List[DbCellUpdate]): + # """Update a set of cells in the DB. 
+ # + # :param db_cell_updates: + # :return: + # """ + # if self.enable_transactions: + # print("Update cells with transaction") + # with self.engine.begin() as connection: + # self._update_cell_changes_in_db(db_cell_updates, connection=connection) + # else: + # self._update_cell_changes_in_db(db_cell_updates, connection=self.engine) + + # def _update_cell_changes_in_db(self, db_cell_updates: List[DbCellUpdate], connection): + # """Update an ordered list of single value changes (cell) in the DB.""" + # for db_cell_change in db_cell_updates: + # self._update_cell_change_in_db(db_cell_change, connection) + + # def _update_cell_change_in_db(self, db_cell_update: DbCellUpdate, connection): + # """Update a single value (cell) change in the DB.""" + # # db_table_name = self.db_tables[db_cell_update.table_name].db_table_name + # # column_change = f"{db_cell_update.column_name} = '{db_cell_update.current_value}'" + # # scenario_condition = f"scenario_name = '{db_cell_update.scenario_name}'" + # # pk_conditions = ' AND '.join([f"{pk['column']} = '{pk['value']}'" for pk in db_cell_update.row_index]) + # # old_sql = f"UPDATE {db_table_name} SET {column_change} WHERE {pk_conditions} AND {scenario_condition};" + # + # db_table: ScenarioDbTable = self.db_tables[db_cell_update.table_name] + # t: sqlalchemy.Table = db_table.get_sa_table() + # pk_conditions = [(db_table.get_sa_column(pk['column']) == pk['value']) for pk in db_cell_update.row_index] + # target_col: sqlalchemy.Column = db_table.get_sa_column(db_cell_update.column_name) + # sql = t.update().where(sqlalchemy.and_((t.c.scenario_name == db_cell_update.scenario_name), *pk_conditions)).values({target_col:db_cell_update.current_value}) + # # print(f"_update_cell_change_in_db = {sql}") + # + # connection.execute(sql) ############################################################################################ # Missing CRUD operations on scenarios in DB: @@ -72,86 +83,396 @@ def _update_cell_change_in_db(self, db_cell_update: DbCellUpdate, connection=Non # - Duplicate scenario # - Rename scenario ############################################################################################ + # Migrated 2022-01-10 + # def delete_scenario_from_db(self, scenario_name: str): + # """Delete a scenario. Uses a transaction (when enabled).""" + # if self.enable_transactions: + # print("Delete scenario within a transaction") + # with self.engine.begin() as connection: + # self._delete_scenario_from_db(scenario_name=scenario_name, connection=connection) + # else: + # self._delete_scenario_from_db(scenario_name=scenario_name, connection=self.engine) + + # def duplicate_scenario_in_db(self, source_scenario_name: str, target_scenario_name: str): + # """Duplicate a scenario. 
Uses a transaction (when enabled).""" + # if self.enable_transactions: + # print("Duplicate scenario within a transaction") + # with self.engine.begin() as connection: + # self._duplicate_scenario_in_db(connection, source_scenario_name, target_scenario_name) + # else: + # self._duplicate_scenario_in_db(self.engine, source_scenario_name, target_scenario_name) + # + # def _duplicate_scenario_in_db(self, connection, source_scenario_name: str, target_scenario_name: str = None): + # """Is fully done in DB using SQL in one SQL execute statement + # :param source_scenario_name: + # :param target_scenario_name: + # :param connection: + # :return: + # """ + # if target_scenario_name is None: + # new_scenario_name = self._find_free_duplicate_scenario_name(source_scenario_name) + # elif self._check_free_scenario_name(target_scenario_name): + # new_scenario_name = target_scenario_name + # else: + # raise ValueError(f"Target name for duplicate scenario '{target_scenario_name}' already exists.") + # + # # inputs, outputs = self.read_scenario_from_db(source_scenario_name) + # # self._replace_scenario_in_db_transaction(scenario_name=new_scenario_name, inputs=inputs, outputs=outputs, + # # bulk=True, connection=connection) + # self._duplicate_scenario_in_db_sql(connection, source_scenario_name, new_scenario_name) + # + # def _duplicate_scenario_in_db_sql(self, connection, source_scenario_name: str, target_scenario_name: str = None): + # """ + # :param source_scenario_name: + # :param target_scenario_name: + # :param connection: + # :return: + # + # See https://stackoverflow.com/questions/9879830/select-modify-and-insert-into-the-same-table + # + # Problem: the table Parameter/parameters has a column 'value' (lower-case). + # Almost all of the column names in the DFs are lower-case, as are the column names in the ScenarioDbTable. + # Typically, the DB schema converts that the upper-case column names in the DB. + # But probably because 'VALUE' is a reserved word, it does NOT do this for 'value'. But that means in order to refer to this column in SQL, + # one needs to put "value" between double quotes. + # Problem is that you CANNOT do that for other columns, since these are in upper-case in the DB. + # Note that the kpis table uses upper case 'VALUE' and that seems to work fine + # + # Resolution: use SQLAlchemy to construct the SQL. Do NOT create SQL expressions by text manipulation. + # SQLAlchemy has the smarts to properly deal with these complex names. + # """ + # if target_scenario_name is None: + # new_scenario_name = self._find_free_duplicate_scenario_name(source_scenario_name) + # elif self._check_free_scenario_name(target_scenario_name): + # new_scenario_name = target_scenario_name + # else: + # raise ValueError(f"Target name for duplicate scenario '{target_scenario_name}' already exists.") + # + # batch_sql=False # BEWARE: batch = True does NOT work! + # sql_statements = [] + # + # # 1. 
Insert scenario in scenario table + # # sql_insert = f"INSERT INTO SCENARIO (scenario_name) VALUES ('{new_scenario_name}')" # Old + # # sa_scenario_table = list(self.input_db_tables.values())[0].table_metadata # Scenario table must be the first + # sa_scenario_table = list(self.input_db_tables.values())[0].get_sa_table() # Scenario table must be the first + # sql_insert = sa_scenario_table.insert().values(scenario_name = new_scenario_name) + # # print(f"_duplicate_scenario_in_db_sql - Insert SQL = {sql_insert}") + # if batch_sql: + # sql_statements.append(sql_insert) + # else: + # connection.execute(sql_insert) + # + # # 2. Do 'insert into select' to duplicate rows in each table + # for scenario_table_name, db_table in self.db_tables.items(): + # if scenario_table_name == 'Scenario': + # continue + # # else: + # # table_column_names = db_table.get_df_column_names() + # + # # print(f"#####TABLE METADATA: {type(db_table.table_metadata)}") + # # print(t1.insert().from_select(['a', 'b'], t2.select().where(t2.c.y == 5))) + # + # # print(f"Columns for {scenario_table_name}: {table_column_names}") + # # target_column_names = table_column_names.copy() + # # # target_column_names = [f"{db_table.db_table_name}.{n}" for n in target_column_names] + # # target_columns_txt = ','.join(target_column_names) + # # # target_columns_txt = ','.join([f'"{n}"' for n in target_column_names]) + # # source_column_names = table_column_names.copy() + # # source_column_names[0] = f"'{target_scenario_name}'" + # # # source_columns_txt = ','.join(source_column_names) + # # + # # # other_source_column_names = table_column_names.copy()[1:] # Drop the scenario_name column + # # # other_source_column_names = [f"{db_table.db_table_name}.{n}" for n in other_source_column_names] + # # other_source_columns_txt = ','.join(other_source_column_names) + # # # source_columns = ','.join(f'"{n}"' for n in source_column_names[1:]) + # # # source_columns_txt = f"'{target_scenario_name}', {source_columns}" + # + # t: sqlalchemy.table = db_table.table_metadata # The table at hand + # s: sqlalchemy.table = sa_scenario_table # The scenario table + # # print("+++++++++++SQLAlchemy insert-select") + # select_columns = [s.c.scenario_name if c.name == 'scenario_name' else c for c in t.columns] # Replace the t.c.scenario_name with s.c.scenario_name, so we get the new value + # # print(f"select columns = {select_columns}") + # select_sql = (sqlalchemy.select(select_columns) + # .where(sqlalchemy.and_(t.c.scenario_name == source_scenario_name, s.c.scenario_name == target_scenario_name))) + # target_columns = [c for c in t.columns] + # sql_insert = t.insert().from_select(target_columns, select_sql) + # # print(f"sql_insert = {sql_insert}") + # + # # sql_insert = f"INSERT INTO {db_table.db_table_name} ({target_columns_txt}) SELECT '{target_scenario_name}',{other_source_columns_txt} FROM {db_table.db_table_name} WHERE scenario_name = '{source_scenario_name}'" + # if batch_sql: + # sql_statements.append(sql_insert) + # else: + # connection.execute(sql_insert) + # if batch_sql: + # batch_sql = ";\n".join(sql_statements) + # print(batch_sql) + # connection.execute(batch_sql) + # + # + # def _find_free_duplicate_scenario_name(self, scenario_name: str, scenarios_df=None) -> Optional[str]: + # """Finds next free scenario name based on pattern '{scenario_name}_copy_n'. + # Will try at maximum 20 attempts. 
+ # """ + # max_num_attempts = 20 + # for i in range(1, max_num_attempts + 1): + # new_name = f"{scenario_name}({i})" + # free = self._check_free_scenario_name(new_name, scenarios_df) + # if free: + # return new_name + # raise ValueError(f"Cannot find free name for duplicate scenario. Tried {max_num_attempts}. Last attempt = {new_name}. Rename scenarios.") + # return None + # + # def _check_free_scenario_name(self, scenario_name, scenarios_df=None) -> bool: + # if scenarios_df is None: + # scenarios_df = self.get_scenarios_df() + # free = (False if scenario_name in scenarios_df.index else True) + # return free + # + # ############################################## + # def rename_scenario_in_db(self, source_scenario_name: str, target_scenario_name: str): + # """Rename a scenario. Uses a transaction (when enabled). + # TODO: get rename in SQL to work. Currently causing Integrity errors due to not being able to defer constraint checking.""" + # if self.enable_transactions: + # print("Rename scenario within a transaction") + # with self.engine.begin() as connection: + # # self._rename_scenario_in_db(source_scenario_name, target_scenario_name, connection=connection) + # self._rename_scenario_in_db_sql(connection, source_scenario_name, target_scenario_name) + # else: + # # self._rename_scenario_in_db(source_scenario_name, target_scenario_name) + # self._rename_scenario_in_db_sql(self.engine, source_scenario_name, target_scenario_name) + # + # # def _rename_scenario_in_db(self, source_scenario_name: str, target_scenario_name: str = None, connection=None): + # # """DEPRECATED: use _rename_scenario_in_db_sql: more efficient. Avoids moving data back and forth + # # TODO: do this with SQL. Avoids moving data from and too the DB. More efficient. + # # :param source_scenario_name: + # # :param target_scenario_name: + # # :param connection: + # # :return: + # # """ + # # if self._check_free_scenario_name(target_scenario_name): + # # new_scenario_name = target_scenario_name + # # else: + # # raise ValueError(f"Target name for rename scenario '{target_scenario_name}' already exists.") + # # + # # # Note that when using a transaction, the order of delete vs insert is irrelevant + # # inputs, outputs = self.read_scenario_from_db(source_scenario_name) + # # # print(f"KPI columns {outputs['kpis'].columns}") + # # outputs['kpis'].columns = outputs['kpis'].columns.str.upper() # HACK!!! TODO: fix in read_scenario + # # self._replace_scenario_in_db_transaction(scenario_name=new_scenario_name, inputs=inputs, outputs=outputs, + # # bulk=True, connection=connection) + # # self._delete_scenario_from_db(scenario_name=source_scenario_name, connection=connection) + # + # def _rename_scenario_in_db_sql(self, connection, source_scenario_name: str, target_scenario_name: str = None): + # """Rename scenario. + # Uses 2 steps: + # 1. Duplicate scenario + # 2. Delete source scenario. + # + # Problem is that we use scenario_name as a primary key. You should not change the value of primary keys in a DB. + # Instead, first copy the data using a new scenario_name, i.e. duplicate a scenario. Next, delete the original scenario. + # + # Long-term solution: use a scenario_seq sequence key as the PK. With scenario_name as a ordinary column in the scenario table. + # + # Old: + # Do a rename using an SQL UPDATE + # `UPDATE table SET scenario_name = 'target_scenario_name' WHERE scenario_name = 'source_scenario_name'` + # Bottleneck: causes Integrity errors while updating. Need to defer constraint checking! 
+ # + # Work-around 1: + # Group all sql UPDATE statements in one sql statement. + # -> No exception, but doesn't seem to do anything! + # This is anyway NOT a good way + # See https://stackoverflow.com/questions/2499246/how-to-update-primary-key + # Apparently, we still need to first insert new rows (cascading!), then delete the old rows + # + # Use of 'insert into select': https://stackoverflow.com/questions/9879830/select-modify-and-insert-into-the-same-table + # """ + # # sql_statements = [] + # # for scenario_table_name, db_table in self.db_tables.items(): + # # sql_update = f"UPDATE {db_table.db_table_name} SET SCENARIO_NAME = '{target_scenario_name}' WHERE SCENARIO_NAME = '{source_scenario_name}';" + # # sql_statements.append(sql_update) + # # sql = "\n".join(sql_statements) + # # print(sql) + # # if connection is None: + # # self.engine.execute(sql) + # # else: + # # connection.execute(sql) + # + # # 1. Duplicate scenario + # self._duplicate_scenario_in_db_sql(connection, source_scenario_name, target_scenario_name) + # # 2. Delete scenario + # self._delete_scenario_from_db(source_scenario_name, connection=connection) + + + # def _delete_scenario_from_db(self, scenario_name: str, connection): + # """Deletes all rows associated with a given scenario. + # Note that it only deletes rows from tables defined in the self.db_tables, i.e. will NOT delete rows in 'auto-inserted' tables! + # Must do a 'cascading' delete to ensure not violating FK constraints. In reverse order of how they are inserted. + # Also deletes entry in scenario table + # Uses SQLAlchemy syntax to generate SQL + # TODO: batch all sql statements in single execute. Faster? And will that do the defer integrity checks? + # """ + # batch_sql=False + # insp = sqlalchemy.inspect(connection) + # tables_in_db = insp.get_table_names(schema=self.schema) + # sql_statements = [] + # for scenario_table_name, db_table in reversed(self.db_tables.items()): # Note this INCLUDES the SCENARIO table! 
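As the docstring above spells out, `scenario_name` is the primary key, so the rename cannot safely be an UPDATE; it is a duplicate followed by a delete. Reduced to a sketch, where `duplicate_scenario` and `delete_scenario` stand in for the package's `_duplicate_scenario_in_db_sql` and `_delete_scenario_from_db`:

```python
def rename_scenario(engine, source_scenario_name: str, target_scenario_name: str):
    # A PK value should not be UPDATEd in place: child tables reference
    # scenario_name through FK constraints that cannot be deferred here.
    # Copy under the new name first, then delete the original, inside one
    # all-or-nothing transaction.
    with engine.begin() as connection:
        duplicate_scenario(connection, source_scenario_name, target_scenario_name)
        delete_scenario(connection, source_scenario_name)
```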
+ # if db_table.db_table_name in tables_in_db: + # # sql = f"DELETE FROM {db_table.db_table_name} WHERE scenario_name = '{scenario_name}'" # Old + # t = db_table.table_metadata # A Table() + # sql = t.delete().where(t.c.scenario_name == scenario_name) + # if batch_sql: + # sql_statements.append(sql) + # else: + # connection.execute(sql) + # + # # Because the scenario table has already been included in above loop, no need to do separately + # # Delete scenario entry in scenario table: + # # sql = f"DELETE FROM SCENARIO WHERE scenario_name = '{scenario_name}'" + # # sql_statements.append(sql) + # if batch_sql: + # batch_sql = ";\n".join(sql_statements) + # print(batch_sql) + # connection.execute(batch_sql) + + ############################################################################# + # for scenario_table_name, db_table in reversed(self.db_tables.items()): + # # if insp.has_table(db_table.db_table_name, schema=self.schema): # .has_table() only supported in SQLAlchemy 1.4+ + # if db_table.db_table_name in tables_in_db: + # sql = f"DELETE FROM {db_table.db_table_name} WHERE scenario_name = '{scenario_name}'" + # if connection is None: + # self.engine.execute(sql) + # else: + # connection.execute(sql) + # + # # Delete scenario entry in scenario table: + # sql = f"DELETE FROM SCENARIO WHERE scenario_name = '{scenario_name}'" + # if connection is None: + # self.engine.execute(sql) + # else: + # connection.execute(sql) + ############################################################################# + # if connection is None: + # insp = sqlalchemy.inspect(self.engine) + # print(f"inspector no transaction = {type(insp)}") + # tables_in_db = insp.get_table_names(schema=self.schema) + # print(tables_in_db) + # for scenario_table_name, db_table in reversed(self.db_tables.items()): + # if insp.has_table(db_table.db_table_name, schema=self.schema): + # if insp.has_table(db_table.db_table_name, schema=self.schema): + # sql = f"DELETE FROM {db_table.db_table_name} WHERE scenario_name = '{scenario_name}'" + # self.engine.execute(sql) + # + # # Delete scenario entry in scenario table: + # sql = f"DELETE FROM SCENARIO WHERE scenario_name = '{scenario_name}'" + # self.engine.execute(sql) + # else: + # insp = sqlalchemy.inspect(connection) + # print(f"inspector with transaction= {type(insp)}") + # print(insp.get_table_names(schema=self.schema)) + # for scenario_table_name, db_table in reversed(self.db_tables.items()): + # if insp.has_table(db_table.db_table_name, schema=self.schema): + # sql = f"DELETE FROM {db_table.db_table_name} WHERE scenario_name = '{scenario_name}'" + # connection.execute(sql) + # + # # Delete scenario entry in scenario table: + # sql = f"DELETE FROM SCENARIO WHERE scenario_name = '{scenario_name}'" + # connection.execute(sql) + ############################################# + # Migrated VT 2022-01-09: + # def read_scenario_from_db(self, scenario_name: str, multi_threaded: bool = False) -> (Inputs, Outputs): + # """Single scenario load. + # Main API to read a complete scenario. + # Reads all tables for a single scenario. + # Returns all tables in one dict + # + # Fixed: omit reading scenario table as an input. 
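The commented-out delete above leans on ordering rather than `ON DELETE CASCADE`: since child rows were inserted after their parents, deleting from the tables in reverse insertion order never leaves a foreign key dangling. A minimal sketch, assuming `db_tables` is the ordered dict of ScenarioDbTable wrappers with the Scenario table first:

```python
def delete_scenario(connection, db_tables, scenario_name: str):
    # Reverse insertion order: child tables first, the Scenario row last.
    # list() is needed because dict views only support reversed() on
    # Python 3.8+.
    for scenario_table_name, db_table in reversed(list(db_tables.items())):
        t = db_table.table_metadata  # a sqlalchemy.Table
        connection.execute(t.delete().where(t.c.scenario_name == scenario_name))
```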
+ # """ + # print(f"read_scenario_from_db.multi_threaded = {multi_threaded}") + # if multi_threaded: + # inputs, outputs = self._read_scenario_from_db_multi_threaded(scenario_name) + # else: + # if self.enable_transactions: + # with self.engine.begin() as connection: + # inputs, outputs = self._read_scenario_from_db(scenario_name, connection) + # else: + # inputs, outputs = self._read_scenario_from_db(scenario_name, self.engine) + # return inputs, outputs + + # Migrated VT 2022-01-09: + # def _read_scenario_from_db_multi_threaded(self, scenario_name) -> (Inputs, Outputs): + # """Reads all tables from a scenario using multi-threading""" + # class ReadTableFunction(object): + # def __init__(self, dbm): + # self.dbm = dbm + # def __call__(self, scenario_table_name, db_table): + # return self._read_scenario_db_table_from_db_thread(scenario_table_name, db_table) + # def _read_scenario_db_table_from_db_thread(self, scenario_table_name, db_table): + # with self.dbm.engine.begin() as connection: + # df = self.dbm._read_scenario_db_table_from_db(scenario_name, db_table, connection) + # dict = {scenario_table_name: df} + # return dict + # + # thread_number = 8 + # pool = ThreadPool(thread_number) + # thread_worker = ReadTableFunction(self) + # print("ThreadPool created") + # # input_tables = [(scenario_table_name, db_table) for scenario_table_name, db_table in self.input_db_tables.items()] + # # input_results = pool.starmap(thread_worker, input_tables) # Returns a list of Dict: [{scenario_table_name: df}] + # # inputs = {k:v for element in input_results for k,v in element.items()} # Convert list of Dict to one Dict. + # # # print(inputs) + # # output_tables = [(scenario_table_name, db_table) for scenario_table_name, db_table in self.output_db_tables.items()] + # # output_results = pool.starmap(thread_worker, output_tables) + # # outputs = {k:v for element in output_results for k,v in element.items()} + # + # all_tables = [(scenario_table_name, db_table) for scenario_table_name, db_table in self.db_tables.items() if scenario_table_name != 'Scenario'] + # # print(all_tables) + # all_results = pool.starmap(thread_worker, all_tables) + # inputs = {k:v for element in all_results for k,v in element.items() if k in self.input_db_tables.keys()} + # outputs = {k:v for element in all_results for k,v in element.items() if k in self.output_db_tables.keys()} + # + # print("All tables loaded") + # + # return inputs, outputs + + + # Migrated VT 2022-01-09: + # def _read_scenario_from_db(self, scenario_name: str, connection) -> (Inputs, Outputs): + # """Single scenario load. + # Main API to read a complete scenario. + # Reads all tables for a single scenario. + # Returns all tables in one dict + # """ + # inputs = {} + # for scenario_table_name, db_table in self.input_db_tables.items(): + # # print(f"scenario_table_name = {scenario_table_name}") + # if scenario_table_name != 'Scenario': # Skip the Scenario table as an input + # inputs[scenario_table_name] = self._read_scenario_db_table_from_db(scenario_name, db_table, connection=connection) + # + # outputs = {} + # for scenario_table_name, db_table in self.output_db_tables.items(): + # outputs[scenario_table_name] = self._read_scenario_db_table_from_db(scenario_name, db_table, connection=connection) + # # if scenario_table_name == 'kpis': + # # # print(f"kpis table columns = {outputs[scenario_table_name].columns}") + # # outputs[scenario_table_name] = outputs[scenario_table_name].rename(columns={'name': 'NAME'}) #HACK!!!!! 
+ # return inputs, outputs - def delete_scenario_from_db(self, scenario_name: str): - """Delete a scenario. Uses a transaction (when enabled).""" - if self.enable_transactions: - print("Delete scenario within a transaction") - with self.engine.begin() as connection: - self._delete_scenario_from_db(scenario_name=scenario_name, connection=connection) - else: - self._delete_scenario_from_db(scenario_name=scenario_name) - - def duplicate_scenario_in_db(self, source_scenario_name: str, target_scenario_name: str): - """Duplicate a scenario. Uses a transaction (when enabled).""" - if self.enable_transactions: - print("Duplicate scenario within a transaction") - with self.engine.begin() as connection: - self._duplicate_scenario_in_db(source_scenario_name, target_scenario_name, connection=connection) - else: - self._duplicate_scenario_in_db(source_scenario_name, target_scenario_name) - - def _duplicate_scenario_in_db(self, source_scenario_name: str, target_scenario_name: str = None, connection=None): - """ - TODO: break _replace_scenario_in_db_transaction into a delete and insert. Then use the insert here. - :param source_scenario_name: - :param target_scenario_name: - :param connection: - :return: - """ - if target_scenario_name is None: - new_scenario_name = self._find_free_duplicate_scenario_name(source_scenario_name) - elif self._check_free_scenario_name(target_scenario_name): - new_scenario_name = target_scenario_name - else: - raise ValueError(f"Target name for duplicate scenario '{target_scenario_name}' already exists.") - - inputs, outputs = self.read_scenario_from_db(source_scenario_name) - self._replace_scenario_in_db_transaction(scenario_name=new_scenario_name, inputs=inputs, outputs=outputs, - bulk=True, connection=connection) - - def _find_free_duplicate_scenario_name(self, scenario_name: str) -> Optional[str]: - """Finds next free scenario name based on pattern '{scenario_name}_copy_n'. - Will try at maximum 20 attempts. - """ - max_num_attempts = 20 - for i in range(1, max_num_attempts + 1): - new_name = f"{scenario_name}_copy_{i}" - free = self._check_free_scenario_name(new_name) - if free: - return new_name - raise ValueError(f"Cannot find free name for duplicate scenario. Tried {max_num_attempts}. Last attempt = {new_name}. Rename scenarios.") - return None - - def _check_free_scenario_name(self, scenario_name) -> bool: - free = (False if scenario_name in self.get_scenarios_df().index else True) - return free - - ############################################## - def rename_scenario_in_db(self, source_scenario_name: str, target_scenario_name: str): - """Rename a scenario. Uses a transaction (when enabled).""" - if self.enable_transactions: - print("Duplicate scenario within a transaction") - with self.engine.begin() as connection: - self._rename_scenario_in_db(source_scenario_name, target_scenario_name, connection=connection) - else: - self._rename_scenario_in_db(source_scenario_name, target_scenario_name) - - def _rename_scenario_in_db(self, source_scenario_name: str, target_scenario_name: str = None, connection=None): - """ - TODO: do this with SQL. Avoids moving data from and too the DB. More efficient. 
- :param source_scenario_name: - :param target_scenario_name: - :param connection: - :return: - """ - if self._check_free_scenario_name(target_scenario_name): - new_scenario_name = target_scenario_name - else: - raise ValueError(f"Target name for rename scenario '{target_scenario_name}' already exists.") - - # Note that when using a transaction, the order of delete vs insert is irrelevant - inputs, outputs = self.read_scenario_from_db(source_scenario_name) - self._replace_scenario_in_db_transaction(scenario_name=new_scenario_name, inputs=inputs, outputs=outputs, - bulk=True, connection=connection) - self._delete_scenario_from_db(scenario_name=source_scenario_name, connection=connection) \ No newline at end of file + # Migrated VT 2022-01-09: + # def _read_scenario_db_table_from_db(self, scenario_name: str, db_table: ScenarioDbTable, connection) -> pd.DataFrame: + # """Read one table from the DB. + # Removes the `scenario_name` column. + # + # Modification: based on SQLAlchemy syntax. If doing the plain text SQL, then some column names not properly extracted + # """ + # db_table_name = db_table.db_table_name + # # sql = f"SELECT * FROM {db_table_name} WHERE scenario_name = '{scenario_name}'" # Old + # # db_table.table_metadata is a Table() + # t = db_table.table_metadata + # sql = t.select().where(t.c.scenario_name == scenario_name) # This is NOT a simple string! + # # print(f"_read_scenario_db_table_from_db SQL = {sql}") + # # df = pd.read_sql(sql, con=self.engine) + # df = pd.read_sql(sql, con=connection) + # if db_table_name != 'scenario': + # df = df.drop(columns=['scenario_name']) + # + # return df diff --git a/dse_do_dashboard/version.py b/dse_do_dashboard/version.py index 8396d51..8528c3d 100644 --- a/dse_do_dashboard/version.py +++ b/dse_do_dashboard/version.py @@ -9,4 +9,4 @@ See https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package """ -__version__ = "0.1.0.0b5" +__version__ = "0.1.1.0" diff --git a/requirements.txt b/requirements.txt index 9d2b16b..a465453 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,14 +1,17 @@ # For DSE_DO_Dashboard: dash~=2.0.0 -# gunicorn==19.9.0 # May not be necessary +## gunicorn==19.9.0 # May not be necessary flask_caching dash_bootstrap_components dash_pivottable -sqlalchemy==1.3.23 -dse-do-utils==0.5.3.1 +dash_daq +sqlalchemy==1.3.23 # 1.3.23 is version in CPD 4.0.2. Also ibm-db-sa doesn't yet fully support 1.4 +dse-do-utils==0.5.4.0 + + pandas==1.2.4 docplex -# openpyxl # May not be necessary, only for Excel parts of the dse-do-utils? 
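For reference, the migrated `_read_scenario_db_table_from_db` shown above hinges on issuing the SELECT through SQLAlchemy Core rather than as a plain SQL string, which is what lets pandas recover the column names reliably. Its essential steps as a sketch, assuming `db_table` is a ScenarioDbTable whose `table_metadata` is a `sqlalchemy.Table`:

```python
import pandas as pd

def read_scenario_db_table(scenario_name: str, db_table, connection) -> pd.DataFrame:
    t = db_table.table_metadata  # a sqlalchemy.Table, not a SQL string
    sql = t.select().where(t.c.scenario_name == scenario_name)
    df = pd.read_sql(sql, con=connection)
    if db_table.db_table_name != 'scenario':
        # The scenario is implied by the query, so drop the constant column:
        df = df.drop(columns=['scenario_name'])
    return df
```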
+openpyxl # For scenario import and export to .xlsx # When using DB2/DB2WH ibm-db @@ -16,7 +19,6 @@ ibm-db-sa #For plots: folium==0.12.1 - plotly~=5.5.0 numpy~=1.21.5 @@ -26,6 +28,4 @@ m2r2 # wheel for building package: wheel -twine - -dash_daq \ No newline at end of file +twine \ No newline at end of file diff --git a/setup.py b/setup.py index fedcda1..af522a0 100644 --- a/setup.py +++ b/setup.py @@ -18,15 +18,16 @@ url="https://github.com/IBM/dse-do-dashboard", packages=setuptools.find_packages(), install_requires=[ - 'dse-do-utils>=0.5.3.1', + 'dse-do-utils>=0.5.4.0', 'dash>=2.0.0', 'flask_caching', 'dash_bootstrap_components', 'dash_pivottable', 'dash_daq', - 'sqlalchemy', + 'sqlalchemy>=1.3.23', 'pandas', - 'plotly' + 'plotly', + 'openpyxl', ], classifiers=[ "Development Status :: 4 - Beta", diff --git a/test/fruit_distribution/fruit/fruitdbmanager.py b/test/fruit_distribution/fruit/fruitdbmanager.py index 8affec1..89899e7 100644 --- a/test/fruit_distribution/fruit/fruitdbmanager.py +++ b/test/fruit_distribution/fruit/fruitdbmanager.py @@ -12,7 +12,7 @@ import pandas as pd -from dse_do_dashboard.utils.scenariodbmanager_update import ScenarioDbManagerUpdate +# from dse_do_dashboard.utils.scenariodbmanager_update import ScenarioDbManagerUpdate class ProductMarginTable(ScenarioDbTable): @@ -96,7 +96,7 @@ def __init__(self, db_table_name: str = 'customer_truck_output', extended_column super().__init__(db_table_name, columns_metadata, constraints_metadata) -class FruitScenarioDbManager(ScenarioDbManagerUpdate): +class FruitScenarioDbManager(ScenarioDbManager): def __init__(self, input_db_tables: Dict[str, ScenarioDbTable]=None, output_db_tables: Dict[str, ScenarioDbTable]=None, credentials=None, schema: str = None, echo=False, multi_scenario: bool = True): if input_db_tables is None: @@ -114,4 +114,7 @@ def __init__(self, input_db_tables: Dict[str, ScenarioDbTable]=None, output_db_t ('TruckOutput', CustomerTruckOutputTable()), ('kpis', KpiTable()), ]) - super().__init__(input_db_tables=input_db_tables, output_db_tables=output_db_tables, credentials=credentials, schema=schema, echo=echo, multi_scenario=multi_scenario) \ No newline at end of file + super().__init__(input_db_tables=input_db_tables, output_db_tables=output_db_tables, credentials=credentials, + schema=schema, echo=echo, multi_scenario=multi_scenario, + # enable_transactions=False # HACK!!! 
+ ) \ No newline at end of file diff --git a/test/fruit_distribution/fruit_dash_app.py b/test/fruit_distribution/fruit_dash_app.py index d4aaf3f..e300a4f 100644 --- a/test/fruit_distribution/fruit_dash_app.py +++ b/test/fruit_distribution/fruit_dash_app.py @@ -7,6 +7,7 @@ from dse_do_dashboard.do_dash_app import DoDashApp from dse_do_dashboard.main_pages.explore_solution_page import ExploreSolutionPage from dse_do_dashboard.main_pages.home_page import HomePage +from dse_do_dashboard.main_pages.home_page_edit import HomePageEdit from dse_do_dashboard.main_pages.main_page import MainPage from dse_do_dashboard.main_pages.prepare_data_page import PrepareDataPage from dse_do_dashboard.main_pages.prepare_data_page_edit import PrepareDataPageEdit @@ -42,8 +43,8 @@ """ class FruitDashApp(DoDashApp): - def __init__(self, db_credentials: Dict, schema: str = None, cache_config: Dict = None, - port: int = 8050, debug: bool = False, host_env: str = None): + def __init__(self, db_credentials: Dict, schema: str = None, db_echo: bool = False, cache_config: Dict = None, + port: int = 8050, dash_debug: bool = False, host_env: str = None): visualization_pages = [ KpiPage(self), DemandPage(self), @@ -55,28 +56,30 @@ def __init__(self, db_credentials: Dict, schema: str = None, cache_config: Dict data_manager_class = FruitDataManager plotly_manager_class = FruitPlotlyManager super().__init__(db_credentials, schema, + db_echo=db_echo, logo_file_name=logo_file_name, cache_config=cache_config, visualization_pages = visualization_pages, database_manager_class=database_manager_class, data_manager_class=data_manager_class, plotly_manager_class=plotly_manager_class, - port=port, debug=debug, host_env=host_env) + port=port, dash_debug=dash_debug, host_env=host_env) - def create_main_pages(self) -> List[MainPage]: - """Creates the ordered list of main pages for the DO app. - Can be overridden to replace by subclasses (not typical). - """ - main_pages = [ - HomePage(self), - # PrepareDataPage(self), - PrepareDataPageEdit(self), - RunModelPage(self), - ExploreSolutionPage(self), - VisualizationTabsPage(self) - ] - return main_pages + # def create_main_pages(self) -> List[MainPage]: + # """Creates the ordered list of main pages for the DO app. + # Can be overridden to replace by subclasses (not typical). 
+ # """ + # main_pages = [ + # # HomePage(self), + # HomePageEdit(self), + # # PrepareDataPage(self), + # PrepareDataPageEdit(self), + # RunModelPage(self), + # ExploreSolutionPage(self), + # VisualizationTabsPage(self) + # ] + # return main_pages # def shutdown(self): # from flask import request diff --git a/test/fruit_distribution/fruit_index.py b/test/fruit_distribution/fruit_index.py index 90fc6cb..30338b6 100644 --- a/test/fruit_distribution/fruit_index.py +++ b/test/fruit_distribution/fruit_index.py @@ -5,20 +5,22 @@ # from dashboard.my_secrets.db2wh import DB2Cloud_DO_Dashboards_credentials from fruit_dash_app import FruitDashApp +from dse_do_dashboard.dash_app import HostEnvironment if 'PROJECT_NAME' in os.environ: # This works in CP4D v4.0.2 - host_env = 'CP4D' + host_env = HostEnvironment.CPD402 #'CP4D' from ibm_watson_studio_lib import access_project_or_space wslib = access_project_or_space() DB2_credentials = wslib.get_connection("DB2Cloud_DO_Dashboards") else: - host_env = 'local' + host_env = HostEnvironment.Local # 'local' from my_secrets.db2wh import DB2Cloud_DO_Dashboards_credentials DB2_credentials = DB2Cloud_DO_Dashboards_credentials -DA = FruitDashApp(db_credentials=DB2_credentials, schema='FRUIT_V2', debug=True, host_env=host_env, -# port=8051 - ) +DA = FruitDashApp(db_credentials=DB2_credentials, schema='FRUIT_V2', dash_debug=True, host_env=host_env, + # port=8051, + # db_echo=True, + ) diff --git a/test/pharma/assets/IBM.png b/test/pharma/assets/IBM.png new file mode 100644 index 0000000..5e87de1 Binary files /dev/null and b/test/pharma/assets/IBM.png differ diff --git a/test/pharma/pharma_create_db_schema.py b/test/pharma/pharma_create_db_schema.py new file mode 100644 index 0000000..7dd2fec --- /dev/null +++ b/test/pharma/pharma_create_db_schema.py @@ -0,0 +1,20 @@ +# Creates a schema in the DB +# WARNING: will delete all existing tables +import os + +from pharma.pharma_dash_app import PharmaDashApp +from dse_do_dashboard.dash_app import HostEnvironment +from supply_chain.pharma.pharmascenariodbtables import PharmaScenarioDbManager + +if 'PROJECT_NAME' in os.environ: # This works in CP4D v4.0.2 + host_env = HostEnvironment.CPD402 #'CP4D' + from ibm_watson_studio_lib import access_project_or_space + wslib = access_project_or_space() + DB2_credentials = wslib.get_connection("DB2Cloud_DO_Dashboards") +else: + host_env = HostEnvironment.Local # 'local' + from my_secrets.db2wh import DB2Cloud_DO_Dashboards_credentials + DB2_credentials = DB2Cloud_DO_Dashboards_credentials + +scdb = PharmaScenarioDbManager(credentials = DB2_credentials, schema='PHARMA_V1', echo=True) +scdb.create_schema() \ No newline at end of file diff --git a/test/pharma/pharma_dash_app.py b/test/pharma/pharma_dash_app.py new file mode 100644 index 0000000..58ec3db --- /dev/null +++ b/test/pharma/pharma_dash_app.py @@ -0,0 +1,391 @@ +from typing import Dict, List + +from dse_do_dashboard.main_pages.explore_solution_page import ExploreSolutionPage +from dse_do_dashboard.main_pages.home_page_edit import HomePageEdit +from dse_do_dashboard.main_pages.main_page import MainPage +from dse_do_dashboard.main_pages.prepare_data_page_edit import PrepareDataPageEdit +from dse_do_dashboard.main_pages.run_model_page import RunModelPage +from dse_do_dashboard.main_pages.visualization_tabs_page import VisualizationTabsPage +from pharma.visualization_pages.capacity_page import CapacityPage +from pharma.visualization_pages.demand_fulfillment_page import DemandFulfillmentPage +from 
pharma.visualization_pages.demand_fulfillment_scroll_page import DemandFulfillmentScrollPage +from pharma.visualization_pages.demand_page import DemandPage +from pharma.visualization_pages.inventory_dos_page import InventoryDosPage +from pharma.visualization_pages.inventory_page import InventoryPage +from pharma.visualization_pages.kpi_page import KpiPage +from pharma.visualization_pages.maps_page import MapsPage +from pharma.visualization_pages.planned_production_page import PlannedProductionPage +from pharma.visualization_pages.production_page import ProductionPage +from pharma.visualization_pages.supply_page import SupplyPage +from pharma.visualization_pages.transportation_page import TransportationPage +from pharma.visualization_pages.utilization_page import UtilizationPage +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.utils.dash_common_utils import PivotTableConfig, ScenarioTableSchema, ForeignKeySchema +from supply_chain.pharma.pharmadatamanager import PharmaDataManager +from supply_chain.pharma.pharmaplotlymanager import PharmaPlotlyManager +from supply_chain.pharma.pharmascenariodbtables import PharmaScenarioDbManager + +""" +How-To create a DO Dashboard: +1. Subclass DoDashApp +2. In the `__init__()`, specify: + - visualization_pages: a list of instances of subclasses of VisualizationPage. + - logo_file_name (optional) - Needs to be located in the Dash `assets` folder + - database_manager_class (required) - Subclass of ScenarioDbManager + - data_manager_class (required) - Subclass of DataManager + - plotly_manager_class (required) - Subclass of PlotlyManager +3. Specify pivot-table configurations and table-schemas by overriding the methods: + - get_pivot_table_configs (optional) + - get_table_schemas (optional) +4. Create instance of DoDashApp-subclass and specify: + - db_credentials (required) + - schema (basically required) + - cache_config (optional) +""" + +class PharmaDashApp(DoDashApp): + def __init__(self, db_credentials: Dict, schema: str = None, db_echo: bool = False, cache_config: Dict = None, + port: int = 8050, dash_debug: bool = False, host_env: str = None): + visualization_pages = [ + KpiPage(self), + DemandPage(self), + CapacityPage(self), + ProductionPage(self), + PlannedProductionPage(self), + SupplyPage(self), + DemandFulfillmentPage(self), + DemandFulfillmentScrollPage(self), + InventoryPage(self), + InventoryDosPage(self), + UtilizationPage(self), + TransportationPage(self), + MapsPage(self), + ] + logo_file_name = "IBM.png" + + database_manager_class = PharmaScenarioDbManager + data_manager_class = PharmaDataManager + plotly_manager_class = PharmaPlotlyManager + super().__init__(db_credentials, schema, + db_echo = db_echo, + logo_file_name=logo_file_name, + cache_config=cache_config, + visualization_pages = visualization_pages, + database_manager_class=database_manager_class, + data_manager_class=data_manager_class, + plotly_manager_class=plotly_manager_class, + port=port, dash_debug=dash_debug, host_env=host_env) + + # def create_main_pages(self) -> List[MainPage]: + # """Creates the ordered list of main pages for the DO app. + # Can be overridden to replace by subclasses (not typical). 
+ # """ + # main_pages = [ + # # HomePage(self), + # HomePageEdit(self), + # # PrepareDataPage(self), + # PrepareDataPageEdit(self), + # RunModelPage(self), + # ExploreSolutionPage(self), + # VisualizationTabsPage(self) + # ] + # return main_pages + + def get_pivot_table_configs(self) -> Dict[str, PivotTableConfig]: + input_pivots: List[PivotTableConfig] = [ + PivotTableConfig( + table_name='Location', + rows=[], + cols=['state'], + vals=[], + rendererName='Stacked Column Chart', + aggregatorName='Count' + ), + PivotTableConfig( + table_name='Plant', + rows=[], + cols=[], + vals=[], + rendererName='Table', + aggregatorName='Count' + ), + PivotTableConfig( + table_name='TimePeriod', + rows=[], + cols=[], + vals=[], + rendererName='Table', + aggregatorName='Count' + ), + PivotTableConfig( + table_name='Line', + rows=['country', 'state'], + cols=[], + vals=[], + rendererName='Table', + aggregatorName='Count' + ), + PivotTableConfig( + table_name='Product', + rows=['subgroupID'], + cols=['groupID'], + vals=[], + rendererName='Stacked Column Chart', + aggregatorName='Count' + ), + PivotTableConfig( + table_name='Demand', + rows=['productName'], + cols=['timePeriodSeq'], + vals=['quantity'], + rendererName='Stacked Column Chart', + aggregatorName='Sum' + ), + ] + output_pivots = [ + PivotTableConfig( + table_name='ProductionActivity', + rows=['lineName'], + cols=['timePeriodSeq'], + vals=['line_capacity_utilization'], + rendererName='Table Heatmap', + aggregatorName='Sum' + ), + PivotTableConfig( + table_name='PlantInventory', + rows=['locationName','productName'], + cols=['timePeriodSeq'], + vals=['xPlantInvSol'], + rendererName='Table Heatmap', + aggregatorName='Sum' + ), + PivotTableConfig( + table_name='DemandInventory', + rows=['locationName','productName'], + cols=['timePeriodSeq'], + vals=['xBacklogSol'], + rendererName='Stacked Column Chart', + aggregatorName='Sum' + ), + PivotTableConfig( + table_name='LineUtilization', + rows=['lineName'], + cols=['timePeriodSeq'], + vals=['utilization'], + rendererName='Table Heatmap', + aggregatorName='Sum' + ), + ] + pivot_table_configs: Dict[str, PivotTableConfig] = {t.table_name : t for t in (input_pivots + output_pivots)} + return pivot_table_configs + + def get_table_schemas(self) -> Dict[str, ScenarioTableSchema]: + input_tables: List[ScenarioTableSchema] = [ + ScenarioTableSchema( + table_name = 'TimePeriod', + index_columns = ['timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'Demand', + index_columns = ['customerName', 'locationName', 'productName', 'timePeriodSeq'], + value_columns =[], + foreign_tables = [ + ForeignKeySchema( + table_name = 'Location', + foreign_keys = ['locationName'] + ), + ForeignKeySchema( + table_name = 'Product', + foreign_keys = ['productName'] + ), + ], + ), + ScenarioTableSchema( + table_name = 'Product', + index_columns = ['productName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'RecipeProperties', + index_columns = ['productName', 'timePeriodSeq', 'lineName', 'recipeId'], + value_columns = [], + foreign_tables= [], + ), + ScenarioTableSchema( + table_name = 'Line', + index_columns = ['lineName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'Plant', + index_columns = ['plantName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'Location', + index_columns = ['locationName'], + value_columns = [], + foreign_tables = [], + ), + 
ScenarioTableSchema( + table_name = 'WIP', + index_columns = ['productName', 'locationName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'Warehouse', + index_columns = ['warehouseName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'WarehouseProperties', + index_columns = ['warehouseName', 'productName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'ShippingMode', + index_columns = ['shippingModeName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'ShippingLane', + index_columns = ['originLocationName', 'destinationLocationName', 'shippingMode'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'ShippingLaneProperties', + index_columns = ['originLocationName', 'destinationLocationName', 'shippingMode','productName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'PlannedProductionActivity', + index_columns = ['planId', 'productName', 'timePeriodSeq', 'lineName', 'recipeId'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'StochasticScenario', + index_columns = ['stochasticScenarioId'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'Parameter', + index_columns = ['param'], + value_columns = [], + foreign_tables = [], + ), + ] + + output_tables: List[ScenarioTableSchema]= [ + ScenarioTableSchema( + table_name = 'ProductionActivity', + index_columns = ['productName', 'timePeriodSeq', 'lineName', 'recipeId'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'PlantInventory', + index_columns = ['productName', 'locationName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + 'DemandInventory', + index_columns = ['productName', 'locationName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + 'WarehouseInventory', + index_columns = ['productName', 'locationName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'TransportationActivity', + index_columns = ['originLocationName','destinationLocationName','shippingMode', 'productName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'LineUtilization', + index_columns = ['lineName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'SupplyMap', + index_columns = ['locationName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'DemandMap', + index_columns = ['locationName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'DemandSupplyMap', + index_columns = ['locationName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'BusinessKpis', + index_columns = ['kpi'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'kpis', + index_columns = ['NAME'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'ProductionActivityStochastic', + index_columns = ['stochasticScenarioId','productName','timePeriodSeq','lineName','recipeId'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + 
table_name = 'PlantInventoryStochastic', + index_columns = ['stochasticScenarioId','productName','locationName','timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'WarehouseInventoryStochastic', + index_columns = ['stochasticScenarioId','productName','locationName','timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'DemandInventoryStochastic', + index_columns = ['stochasticScenarioId','productName','locationName','timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'TransportationActivityStochastic', + index_columns = ['stochasticScenarioId','originLocationName','destinationLocationName','shippingMode','productName','timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ] + + table_schemas: Dict[str, ScenarioTableSchema] = {t.table_name : t for t in (input_tables + output_tables)} + return table_schemas \ No newline at end of file diff --git a/test/pharma/pharma_index.py b/test/pharma/pharma_index.py new file mode 100644 index 0000000..10d1df4 --- /dev/null +++ b/test/pharma/pharma_index.py @@ -0,0 +1,26 @@ +import os + +from pharma.pharma_dash_app import PharmaDashApp +from dse_do_dashboard.dash_app import HostEnvironment +from my_secrets.db2wh import DB2_Pharma_CPD_credentials + +if 'PROJECT_NAME' in os.environ: # This works in CP4D v4.0.2 + host_env = HostEnvironment.CPD402 #'CP4D' + from ibm_watson_studio_lib import access_project_or_space + wslib = access_project_or_space() + DB2_credentials = wslib.get_connection("DB2Cloud_DO_Dashboards") +else: + host_env = HostEnvironment.Local # 'local' + from my_secrets.db2wh import DB2Cloud_DO_Dashboards_credentials, DB2_Pharma_CPD_credentials + DB2_credentials = DB2Cloud_DO_Dashboards_credentials + # DB2_credentials = DB2_Pharma_CPD_credentials + +DA = PharmaDashApp(db_credentials = DB2_credentials, schema='PHARMA_V1', dash_debug=True, host_env=host_env, + # db_echo=True, + ) + + + +#################################### +if __name__ == '__main__': + DA.run_server() diff --git a/test/pharma/supply_chain/__init__.py b/test/pharma/supply_chain/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/test/pharma/supply_chain/pharma/__init__.py b/test/pharma/supply_chain/pharma/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/test/pharma/supply_chain/pharma/depharmadatamanager.py b/test/pharma/supply_chain/pharma/depharmadatamanager.py new file mode 100644 index 0000000..f3e2253 --- /dev/null +++ b/test/pharma/supply_chain/pharma/depharmadatamanager.py @@ -0,0 +1,75 @@ +from typing import Optional, Dict +import pandas as pd + +# from supply_chain.water.supply_chain_schema import SCNFO_SCHEMA +# from supply_chain.abbott.abbottdatamanager import AbbottDataManager +from supply_chain.pharma.pharmadatamanager import PharmaDataManager +from utils.dash_common_utils import ScenarioTableSchema +# from supply_chain.water.waterdatamanager import WaterDataManager + + +class DEPharmaDataManager(PharmaDataManager): + def __init__(self, inputs=None, outputs=None, table_schema:Dict[str, ScenarioTableSchema]=None): + super().__init__(inputs, outputs) + + # self.demand_index_columns = ['customerName', 'locationName', 'productName', 'timePeriodSeq'] + # self.production_activities_index_columns = ['productName', 'timePeriodSeq', 'lineName', + # 'recipeId'] # We'll be using these later on + # self.table_schema = SCNFO_SCHEMA # TODO: avoid hard-coding!? 
+ self.table_schema = table_schema + + ############################################ + # Hack: move to DataManager in dse-do-utils + ############################################ + def get_raw_table_by_name(self, table_name: str) -> Optional[pd.DataFrame]: + """Get the 'raw' (non-indexed) table from inputs or outputs.""" + if table_name in self.inputs: + df = self.inputs[table_name] + elif table_name in self.outputs: + df = self.outputs[table_name] + else: + df = None + return df + + def get_table_by_name(self, table_name:str, index:bool=False, expand:bool=False) -> Optional[pd.DataFrame]: + """Return input or output table by name. + + :param table_name: can be the name of an input or an output table + :param index: index the DataFrame + :param expand: join tables from foreign-keys + :return: + """ + df = self.get_raw_table_by_name(table_name) + if df is not None: + if expand: + if table_name in self.table_schema: + for fkt in self.table_schema[table_name].foreign_tables: + foreign_df = self.get_table_by_name(fkt.table_name, expand=True) + if foreign_df is not None: + df = pd.merge(df, foreign_df, on=fkt.foreign_keys, how='inner') + else: + print(f"Error: could not find foreign-key table {fkt.table_name}") + if index: + if table_name in self.table_schema: + df = df.set_index(self.table_schema[table_name].index_columns, verify_integrity=True) + return df + + def get_table_schema(self, table_name: str) -> Optional[ScenarioTableSchema]: + table_schema = None + if self.table_schema is not None and table_name in self.table_schema: + table_schema = self.table_schema[table_name] + return table_schema + +############################################ +# Hack: move to DataManager in dse-do-utils +############################################ +# def get_raw_table_by_name(self, table_name): +# """Get the 'raw' (non-indexed) table from inputs or outputs.""" +# if table_name in self.inputs: +# df = self.inputs[table_name] +# elif table_name in self.outputs: +# df = self.outputs[table_name] +# else: +# df = None +# return df +# DataManager.get_raw_table_by_name = get_raw_table_by_name \ No newline at end of file diff --git a/test/pharma/supply_chain/pharma/pharmadatamanager.py b/test/pharma/supply_chain/pharma/pharmadatamanager.py new file mode 100644 index 0000000..322640d --- /dev/null +++ b/test/pharma/supply_chain/pharma/pharmadatamanager.py @@ -0,0 +1,208 @@ +from typing import Optional +import pandas as pd +import types + +from supply_chain.scnfo.scnfodatamanager import ScnfoDataManager + +class PharmaDataManager(ScnfoDataManager): + def __init__(self, inputs=None, outputs=None): + super().__init__(inputs, outputs) + + def prepare_input_data_frames(self): + super().prepare_input_data_frames() + + if 'PlannedProductionActivity' in self.inputs: + self.planned_production_activity = self.inputs['PlannedProductionActivity'].set_index(["planId","productName","timePeriodSeq","lineName","recipeId"], verify_integrity=True) + + def set_parameters(self): + self.param = types.SimpleNamespace() + + self.params = self.prep_parameters() # DataManager.prep_parameters() looks for an input table called 'Parameter' or 'Parameters' with columns `param` and `value` and indexes the data into a DataFrame. 
self.params is a DataFrame + + # Get parameter(s) from input + self.param.enable_outage = ScnfoDataManager.get_parameter_value(self.params, param_name='enableOutage', param_type='bool', default_value=False) + + # Some hard-coded parameters (to be moved to parameter input table) + self.param.planning_horizon_start_time_dt = ScnfoDataManager.get_parameter_value(self.params, 'horizon_start_time', param_type='str', default_value='2021-01-01 00:00:00') + self.param.bucket_length_days = ScnfoDataManager.get_parameter_value(self.params, 'bucket_length_days', param_type='int', default_value=7) + + # Integer vs float dvars + self.param.integer_dvars = ScnfoDataManager.get_parameter_value(self.params, param_name='integerDvars', param_type='bool', default_value=True) + + # enableCapacityConstraint + self.param.enable_capacity_constraint = ScnfoDataManager.get_parameter_value(self.params, param_name='enableCapacityConstraint', param_type='bool', default_value=True) + self.param.enable_inventory = ScnfoDataManager.get_parameter_value(self.params, param_name='enableInventory', param_type='bool', default_value=True) + # self.param.enable_inventory = False # HACK! + self.param.remove_zero_quantity_output_records = ScnfoDataManager.get_parameter_value(self.params, param_name='removeZeroQuantityOutputRecords', param_type='bool', default_value=False) # Keep false for debugging + + self.param.time_limit = ScnfoDataManager.get_parameter_value(self.params, 'solveTimeLimit', param_type='int', default_value=60) # in seconds + + self.param.plannedProductionId = ScnfoDataManager.get_parameter_value(self.params, 'plannedProductionId', param_type='int', default_value=0) + + self.param.enable_dos = ScnfoDataManager.get_parameter_value(self.params, 'enableDOS', param_type='bool', default_value=False) + self.param.dos = ScnfoDataManager.get_parameter_value(self.params, 'DOS', param_type='int', default_value=10) + + # Add batch parameter + self.param.batch = ScnfoDataManager.get_parameter_value(self.params, 'batch', param_type="bool", default_value=False) + self.param.M = ScnfoDataManager.get_parameter_value(self.params, 'M', param_type="int", default_value=999999) + + try: + self.param.objective = ScnfoDataManager.get_parameter_value(self.params, 'objective', param_type="int", default_value=2) + except: + self.param.objective = 2 + + def prep_time_periods(self): + self.active_timeperiods = self.timeperiods.drop(0) + + def prep_active_demand(self): + self.active_demand = (pd.merge(self.demand.reset_index(), self.active_timeperiods.reset_index(), on=['timePeriodSeq'], how = 'inner') + .set_index(self.demand_index_columns, verify_integrity = True) + ) + # Round demand in case of integer dvars + if self.param.integer_dvars: + self.active_demand.quantity = self.active_demand.quantity.round() + + def prep_line_locations(self): + self.line_locations = (self.lines.reset_index() + .merge(self.plants.reset_index(), on = 'plantName') + .merge(self.locations.reset_index(), on = 'locationName') + .set_index('lineName', verify_integrity = True) + ) + + def prep_active_recipe_properties(self): + df = self.recipe_properties + df = df.drop(df[df.capacity == 0].index) + + # Explode properties + recipe_properties_index_columns = ['lineName','productName','recipeId','timePeriodSeq'] + ''' + if "timePeriodSeqPattern" in df.columns: + df = (explode_time_period_pattern(df.reset_index(), self.active_timeperiods, 'timePeriodSeqPattern') + .set_index(recipe_properties_index_columns, verify_integrity=True) # just for verification! 
+ ) + else: + ''' + df = df.reset_index().set_index(recipe_properties_index_columns, verify_integrity=True) + + self.active_recipe_properties = df + + def prep_production_activities(self): + self.production_activities = ( + self.active_recipe_properties.reset_index() + .merge(self.line_locations[['supplierName','locationName', 'plantName']].reset_index()) + .set_index(self.production_activities_index_columns, verify_integrity = True) + ) + + def prep_plant_inventories(self): + plant_products = (self.active_recipe_properties[['capacity']] + .join(self.line_locations[['locationName', 'plantName']]) + .groupby(['locationName', 'productName']).sum() # Groupby on locationName + .query("capacity > 0") # For exceptions where the plant cannot make the product at all + .join(self.bom_items[[]]) + ) + + + # Find all output products: + plant_output_products = plant_products.groupby(['locationName', 'productName']).sum()[[]] + + + # Find all input products: + plant_input_products = (plant_products.groupby(['locationName', 'componentName']).sum()[[]] + .rename_axis(index={'componentName':'productName'}) + ) + + + # Concat the input and output products in single df: + plant_products = (pd.concat([plant_input_products, plant_output_products]) + .groupby(['locationName', 'productName']).sum() # To merge any input and output products from same plant + ) + + + # Cross product with all time-periods + plant_inventory_index_columns = ['productName', 'locationName', 'timePeriodSeq'] + self.plant_inventories = (ScnfoDataManager.df_crossjoin_ai(plant_products, self.active_timeperiods) + .reset_index() + .set_index(plant_inventory_index_columns, verify_integrity=True) + ) + + def prep_warehouse_inventories(self): + warehouse_products = (self.warehouse_properties[[]] + .join(self.warehouses[['locationName']]) + .groupby(['productName', 'locationName']).sum() + ) + + warehouse_inventory_index_columns = ['productName', 'locationName', 'timePeriodSeq'] + self.warehouse_inventories = (ScnfoDataManager.df_crossjoin_ai(warehouse_products[[]], self.active_timeperiods)) + + def prep_demand_inventories_work_around(self): + demand_inventory_index_columns = ['productName', 'locationName', 'timePeriodSeq'] + df = pd.merge(self.active_demand.groupby(['locationName', 'productName']).sum().reset_index()[['locationName', 'productName']] + .assign(key=1),self.active_timeperiods.reset_index().assign(key=1), on="key") + df = pd.merge(df, self.active_demand, + on=demand_inventory_index_columns, how="left")[["locationName","productName","timePeriodSeq","quantity","actualQuantity"]].set_index(demand_inventory_index_columns).fillna(0) + self.demand_inventories = df + + def prep_transportation(self): + # Explode properties - df.expode() is not available in DO. 
Skip for now + transportation_index_columns = ['originLocationName','destinationLocationName','shippingMode','productName','timePeriodSeq'] + + df = (self.explode_time_period_pattern(self.shipping_lane_properties.reset_index(), 'timePeriodSeqPattern') + .set_index(transportation_index_columns, verify_integrity=True) + ) + # df = self.shipping_lane_properties + # Get transit time (or other properties (TODO)) + df = df.reset_index().merge(self.shipping_lanes['transitTime'].reset_index(), on=['originLocationName','destinationLocationName','shippingMode']).set_index(transportation_index_columns, verify_integrity=True) + self.transportation_activities = df + + def get_demand_location_dos(self, dos:int): + """Compute the quantity of product at the end of a time-period that represents the + Days-Of-Supply computed using the actual demand in the following time-periods. + The quantity can be used in a days-of-supply inventory constraint or objective. + For the last time-periods, assume demand remains constant with the value of the last time-period. + + Args: + dos (int): Days-Of-Supply. Number of days. + + Note: use dm.demand_inventories. Is has already expanded to all time-periods. + """ + # num_tps = 24 # Number of time-periods + + num_days_tp = 30 # Number of days per time-period. To keep it simple, use 30 per month. HARD-CODED for now. TODO: put in parameter, or add as column in TimePeriods + df = (self.demand_inventories[['quantity']] + .sort_index() # sort index so the shift will work right + ) + + num_tps = len(df.index.unique(level='timePeriodSeq')) + # df['numDays'] = num_days_tp + df['demandPerDay'] = df.quantity / num_days_tp #df.numDays + df['nextDemandPerDay'] = df.demandPerDay # Note we are shifting the nextDemandPerDay, so initialize once + df['dosQuantity'] = 0 # We are incrementing the dosQuantity, so initialize + + remaining_dos = dos # Remaining DOS in each iteration, initialize with all DOS + shift = 0 # Only for debuging + + # Iterate over the next time-periods until it covers all requested dos days + # Sum the DOS quantity + # Assume demand is constant throughout the time-period + while remaining_dos > 0: + shift = shift + 1 + next_dos = min(remaining_dos, num_days_tp) + # print(f"Shift = {shift}, remaining_dos = {remaining_dos}, next_dos={next_dos}") + df['nextDemandPerDay'] = df.groupby(['locationName','productName'])['nextDemandPerDay'].shift(-1) #, fill_value=0) + df.loc[pd.IndexSlice[:,:,num_tps],'nextDemandPerDay'] = df.loc[pd.IndexSlice[:,:,num_tps],'demandPerDay'] # Fill gap from the shift with last demand + df['dosQuantity'] = df.dosQuantity + df.nextDemandPerDay * next_dos + remaining_dos = remaining_dos - next_dos + # display(df.query("locationName=='NAMIBIA'").head(24)) + df = df.drop(columns=['demandPerDay', 'nextDemandPerDay']) + return df + + def prep_data_transformations(self): + self.prep_time_periods() + self.prep_active_demand() + self.prep_line_locations() + self.prep_active_recipe_properties() + self.prep_production_activities() + self.prep_plant_inventories() + self.prep_warehouse_inventories() + self.prep_demand_inventories_work_around() + self.prep_transportation() \ No newline at end of file diff --git a/test/pharma/supply_chain/pharma/pharmadedbmanager.py b/test/pharma/supply_chain/pharma/pharmadedbmanager.py new file mode 100644 index 0000000..f4974e2 --- /dev/null +++ b/test/pharma/supply_chain/pharma/pharmadedbmanager.py @@ -0,0 +1,97 @@ +####################################################### +# Table specific SQL 
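The days-of-supply logic in `get_demand_location_dos` above is easiest to verify with a small worked example: with 30-day time periods and DOS = 45, the target end-of-period inventory is all of next period's demand plus half of the demand of the period after. A sketch of that arithmetic with illustrative numbers (not project data):

```python
# Worked check of the DOS loop above (illustrative numbers only).
num_days_tp = 30      # days per time period, as hard-coded above
dos = 45              # requested days of supply
demand_next = 600.0   # demand in time period t+1
demand_after = 900.0  # demand in time period t+2

# Iteration 1 covers min(45, 30) = 30 days at period t+1's daily rate;
# iteration 2 covers the remaining 15 days at period t+2's daily rate:
dos_quantity = (demand_next / num_days_tp) * 30 + (demand_after / num_days_tp) * 15
assert dos_quantity == 600.0 + 450.0  # next period in full, plus half the one after
```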
+####################################################### +from typing import List, Dict + +# from supply_chain.folium_supply_chain import SCMapManager, MappingSCDM +# from supply_chain.plotly_supply_chain import PlotlyManager, WaterPlotlyManager +from supply_chain.pharma.pharmaplotlymanager import PharmaPlotlyManager +from supply_chain.pharma.depharmadatamanager import DEPharmaDataManager +from supply_chain.scnfo.scnfoplotlymanager import PlotlyManager +from supply_chain.pharma.supply_chain_schema import SCNFO_SCHEMA, SCNFO_PIVOT_CONFIG + +from supply_chain.water.watermapmanager import MappingSCDM, SCMapManager # TODO!!! + +# from supply_chain.water.waterplotlymanager import WaterPlotlyManager +# from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_dashboard import VisualizationPage +from dse_do_utils.scenariodbmanager import ScenarioDbManager +# from supply_chain.water.dewaterdatamanager import DEWaterDataManager + +import pandas as pd + +############################################################################# +# Dash Enterprise +# +# Note: we're subclassing a specific use-case DbManager +# Issues to resolve: +# * Have to re-define the inputs and outputs tables +# * Redefine generic methods necessary for Dash Enterpise use +# * Probably need to make a container object instead of a sub/super class +############################################################################# +class PharmaDashEnterpriseDbManager(): + """Experimental container object that wraps around a custom ScenarioDbManager. + Avoids having to subclass a CPD ScenarioDbManager + Defines some generic methods. + TODO: avoid hard-coding some class names + """ + def __init__(self, dbm: ScenarioDbManager, visualization_pages_spec=[]): + self.dbm = dbm + self.plotly_manager_class = None + self.folium_data_manager_class = None + self.folium_map_manager_class = None + self.visualization_pages_spec = visualization_pages_spec + + def set_table_read_callback(self, table_read_callback=None): + # print(f"Set callback to {table_read_callback}") + # self.table_read_callback = table_read_callback + # self.dbm.table_read_callback = table_read_callback # Quick Hack to fix error. TODO: + self.dbm.set_table_read_callback(table_read_callback) + + # Convenience method to quickly get a DM from the ScenarioDbManager for + def get_plotly_manager(self, scenario_name: str, input_table_names: List[str] = None, output_table_names: List[str] = None) -> PlotlyManager: + """Loads data for selected input and output tables. + TODO: avoid hard-coding PlotlySupplyChainDataManager + """ + inputs, outputs = self.dbm.read_scenario_tables_from_db_cached(scenario_name, input_table_names, output_table_names) + # dm = DEWaterDataManager(inputs, outputs, table_schema=SCNFO_SCHEMA) + dm = DEPharmaDataManager(inputs, outputs, table_schema=SCNFO_SCHEMA) + pm = PharmaPlotlyManager(dm) + dm.prepare_data_frames() + return pm + + def get_folium_map_manager(self, scenario_name: str, input_table_names:List[str] = None, output_table_names: List[str] = None) -> SCMapManager: + """ + TODO: avoid hard-coding MappingSCDM and SCMapManager + VT: not sure this is actually used + """ + inputs, outputs = self.dbm.read_scenario_tables_from_db_cached(scenario_name, input_table_names, output_table_names) + dm = MappingSCDM(inputs=inputs,outputs=outputs) + dm.prepare_data_frames() + mm = SCMapManager(data_manager=dm, width='100%', height='100%') # width/height=None ensures the iFrame drives the width/height and avoids scroll bars. 
+ return mm
+
+ def get_input_table_names(self) -> List[str]:
+ """Return list of valid table names based on self.input_db_tables"""
+ names = list(self.dbm.input_db_tables.keys())
+ if 'Scenario' in names: names.remove('Scenario')
+ return names
+
+ def get_output_table_names(self) -> List[str]:
+ """Return list of valid table names based on self.output_db_tables"""
+ names = list(self.dbm.output_db_tables.keys())
+ return names
+
+ def get_scenarios_df(self):
+ return self.dbm.get_scenarios_df()
+
+ def get_pivot_table_config(self, table_name):
+ pivot_config = (SCNFO_PIVOT_CONFIG[table_name] if table_name in SCNFO_PIVOT_CONFIG else None)
+ return pivot_config
+
+ def get_visualization_pages_dict(self):
+ visualization_pages_dict:Dict[str, VisualizationPage] = {vp.page_id:vp for vp in self.visualization_pages_spec}
+ return visualization_pages_dict
+
+ def get_visualization_pages_spec(self):
+ return self.visualization_pages_spec
diff --git a/test/pharma/supply_chain/pharma/pharmaplotlymanager.py b/test/pharma/supply_chain/pharma/pharmaplotlymanager.py
new file mode 100644
index 0000000..126fdbf
--- /dev/null
+++ b/test/pharma/supply_chain/pharma/pharmaplotlymanager.py
@@ -0,0 +1,2923 @@
+from typing import List, Dict, Tuple, Optional
+import pandas as pd
+
+from supply_chain.pharma.pharmadatamanager import PharmaDataManager
+from supply_chain.scnfo.scnfoplotlymanager import ScnfoPlotlyManager
+
+import plotly.express as px
+import plotly.graph_objs as go
+import numpy as np
+
+from dse_do_dashboard.utils.dash_common_utils import plotly_figure_exception_handler
+
+#######################################################################################
+# Pharma
+#######################################################################################
+
+
+class PharmaPlotlyManager(ScnfoPlotlyManager):
+ def __init__(self, dm:PharmaDataManager):
+ super().__init__(dm)
+ # self.line_name_category_orders = ['Abbott_Weesp_Line','Abbott_Olst_Granulate_Line',
+ # 'Abbott_Olst_Packaging_Line_5','Abbott_Olst_Packaging_Line_6']
+ # self.plant_name_category_orders = ['Abbott_Weesp_Plant', 'Abbott_Olst_Plant']
+
+ self.line_name_category_orders = ['API_Line','Granulate_Line', 'Packaging_Line_1','Packaging_Line_2']
+ self.plant_name_category_orders = ['API_Plant', 'Packaging_Plant']
+
+ def describe_demand(self):
+ """Print summary of demand statistics."""
+ super().describe_demand()
+ df = (self.dm.demand
+ .join(self.dm.products[['productGroup']])
+ .reset_index())
+ print(f"Num product types = {len(df.productGroup.unique()):,}")
+
+ # def plotly_demand_bars(self):
+ # """Product demand over time. 
Colored by productGroup.""" + # product_aggregation_column = 'productGroup' + # df = (self.dm.demandplotly_production_activities_bars + # .join(self.dm.products[['productGroup']]) + # ).groupby(['timePeriodSeq', product_aggregation_column]).sum() + # # display(df.head()) + # + # labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', + # 'productGroup': 'Product Group'} + # fig = px.bar(df.reset_index(), x="timePeriodSeq", y="quantity", color=product_aggregation_column, + # title='Total Product Demand', labels=labels) + # fig.update_layout( + # # title={ + # # 'text': f"Total product demand", + # # # 'y': 0.9, + # # # 'x': 0.5, + # # 'xanchor': 'center', + # # 'yanchor': 'top'}, + # legend={'orientation': 'v'}, + # # legend_title_text=product_aggregation_column, + # ) + # + # return fig + + def gen_color_col(self, catSeries = None): + '''Converts a series into a set of color codes + NEEDS TO BE CALLED ON ENTIRE SERIES, NOT SUBSETTED VERSION + ''' + + cmap = ["#004172", "#08539d", "#2e64c7", "#be35a0", "#e32433", "#eb6007", + "#fb8b00", "#c19f00", "#5c9c00", "#897500", "#cb0049", "#7746ba", "#0080d1", + "#3192d2", "#ac6ac0", "#e34862", "#c57e00", "#71a500", "#ad6e00", "#b82e2e",] + + color_dict = { + 'Other': "#7FB3D5", + 'API': "#B03A2E", + ' - API': "#B03A2E", + 'Granulate': "#1F618D", + 'Tablet': "#117A65", + 'Package': "#B7950B", + } + + if catSeries is not None: + catSeries = catSeries.dropna() # some NAs get introduced for some reason + labels = list(catSeries.unique()) + + if ' - API' not in labels or 'API' not in labels: + labels.append(' - API') + + + labels = sorted(labels) + + cmap_ix = 0 + + for ix in range(len(labels)): + if cmap_ix == len(cmap): + cmap_ix = 0 + else: + if 'Granulate' in labels[ix]: + color_dict[labels[ix]] = "#1F618D" + elif 'Tablet' in labels[ix]: + color_dict[labels[ix]] = "#117A65" + elif 'Package' in labels[ix]: + color_dict[labels[ix]] = "#B7950B" + elif 'API' in labels[ix]: + color_dict[labels[ix]] = "#B03A2E" + + if labels[ix] not in color_dict: + color_dict[labels[ix]] = cmap[cmap_ix] + cmap_ix += 1 + + return color_dict + + def plotly_demand_bars(self, query=None, title='Total Product Demand', view = "All"): + """Product demand over time. 
Colored by productGroup.""" + product_aggregation_column = 'productName' + + df = (self.dm.demand + .join(self.dm.products[['productGroup', 'productCountry']]) + ) + + # df = (self.dm.demand # will return two dfs + # .join(self.dm.products[['productGroup', 'productCountry']]) + # ) + + df = df.reset_index() + + df['productCountry'] = np.where(pd.isnull(df.productCountry), '', df.productCountry) + + df['location_product'] = df.productCountry + " - " + df.productName + + color_discrete_map = self.gen_color_col(df.location_product) + + if query is not None: + df = df.query(query).copy() + + # Set location_product name + df = df.reset_index() + + df = (df + .groupby(['timePeriodSeq', 'location_product']).sum() + .sort_values('quantity', ascending=False) + ) + + df['demand_proportion'] = df.groupby(['timePeriodSeq'])['quantity'].apply(lambda x: x/x.sum()) + + df = df.reset_index() + + df['new_labels'] = np.where(df['demand_proportion'] < 0.015, 'Other', df['location_product']) + + # cmap = px.colors.qualitative.Light24 + + new_labels = df['new_labels'].unique() + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', + 'productGroup': 'Product Group'} + + if view == "All": + color = "location_product" + elif view == "Compact": + color = "new_labels" + + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="quantity", + color= color, + title=title, labels=labels, + # color_discrete_sequence=px.colors.qualitative.Light24, + color_discrete_map=color_discrete_map, + height=600, + hover_name="location_product", + # hover_data=["quantity"] + ) + + fig.update_layout( + legend={ + 'title': f"Total Product Demand", + 'bgcolor':'rgba(0,0,0,0)', # transparent background? not sure if this works + 'x': 1, + 'orientation': 'v'}, + margin = {'l':80,'t':50}, + hovermode="closest", + ) + return fig + + def plotly_utilization_multi_facet_bars(self): + """Line utilization colored by groupID. + Shows which groupIDs claim how much capacity on which lines. + Could be used to analyze why certain lines cannot produce enough of a given product, + i.e. that they are busy with other products.""" + product_aggregation_column = 'productGroup' + + df = (self.dm.production_activities[['line_capacity_utilization']] + .join(self.dm.products[['productGroup']]) + ).groupby(['timePeriodSeq', 'lineName', product_aggregation_column]).sum() + + labels = {'timePeriodSeq': 'Time Period', 'var_name': 'Utilization Type', 'lineName': 'Line Name', + 'line_capacity_utilization': 'Line Capacity Utilization'} + + fig = px.bar(df.reset_index(), x="lineName", y="line_capacity_utilization", color=product_aggregation_column, + title='Line Utilization', labels=labels, + facet_col="timePeriodSeq", + ) + # get rid of duplicated X-axis labels + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.XAxis: + fig.layout[axis].title.text = '' + + # fig.for_each_trace(lambda t: t.update(name=t.name.split()[-1])) + fig.for_each_annotation(lambda a: a.update(text=a.text.split()[-1])) + + fig.update_layout(yaxis=dict(tickformat="%", )) + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. 
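+        # Facet-cleanup idiom used throughout this class (a sketch of the intent):
+        #   1. blank the repeated per-facet X-axis titles (loop above);
+        #   2. shorten facet annotations like 'timePeriodSeq=3' to '3' via
+        #      fig.for_each_annotation(lambda a: a.update(text=a.text.split()[-1]));
+        #   3. render the utilization fractions as percentages with tickformat='%'.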
Setting 'closest' explicitly is a work-around + + fig.update_layout( + legend= + dict( # change legend location + title = "Product Group", + orientation="h", + yanchor="top", + y=1.3, + xanchor="right", + x=0.95), + + # legend_title_text=None # this doesn't align the legend still + ) + + return fig + + def plotly_excess_utilization_line_time_bars(self): + """Line utilization bar per line over time, clustered by time-period. + Excess utilization over 100% is clearly colored as red. + Good initial view of utilization and excess utilization. + """ + df = (self.dm.line_utilization.copy() + ) + df['Regular Capacity'] = df.utilization.clip(0, 1) + df['Over Capacity'] = (df.utilization - 1).clip(0) + df = df[['Regular Capacity', 'Over Capacity']] + df = (df.stack() + .rename_axis(index={None: 'var_name'}) + .to_frame(name='Utilization') + .reset_index() + ) + + labels = {'timePeriodSeq': 'Time Period', 'var_name': 'Utilization Type', 'lineName': 'Line Name'} + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="Utilization", color='var_name', title='Line Utilization', + labels=labels, + facet_row="lineName", + # width = 2000 + color_discrete_map = {'Regular Capacity':'green', 'Over Capacity':'red'}, + height = 800, + ) + + fig.update_layout( + legend= + dict( # change legend location + title = "Utilization Type", + orientation="h", + yanchor="top", + y=1.05, + xanchor="right", + x=0.95), + ) + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + + ### This gets rid of the duplicated Y Axis labels caused by the facet_row argument + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.YAxis: + fig.layout[axis].title.text = '' + fig.layout[axis].tickformat = '%' + + fig.for_each_annotation(lambda a: a.update(text=a.text.split("Line Name=")[-1])) + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("_", " "))) + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("Olst", "Olst
"))) + fig.for_each_annotation(lambda a: a.update(x = a.x-1.07, textangle = 270)) + + fig.update_layout( + legend= + dict( # change legend location + title = "Product Group", + orientation="v", + x=1.05, + yanchor="top" + ), + margin = {'l' : 130, 't':80} + ) + return fig + + def plotly_utilization_line_time_bars(self): + """Line utilization colored by groupID. + Shows which groupIDs claim how much capacity on which lines. + Could be used to analyze why certain lines cannot produce enough of a given product, + i.e. that they are busy with other products.""" + product_aggregation_column = 'productGroup' + df = (self.dm.production_activities[['line_capacity_utilization']] + .join(self.dm.products[['productGroup']]) + ).groupby(['timePeriodSeq', 'lineName', product_aggregation_column]).sum() + + color_discrete_map = self.gen_color_col() + + labels = {'timePeriodSeq': 'Time Period', 'var_name': 'Utilization Type', 'lineName': 'Line Name', + 'line_capacity_utilization':'Line Capacity Utilization'} + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="line_capacity_utilization", color=product_aggregation_column, + title='Line Utilization', labels=labels, facet_row = 'lineName', + color_discrete_map=color_discrete_map, + category_orders={ + product_aggregation_column: ['API', 'Granulate', 'Tablet', 'Package'], + # 'lineName': ['Abbott_Weesp_Line', 'Abbott_Olst_Granulate_Line', + # 'Abbott_Olst_Packaging_Line_5', 'Abbott_Olst_Packaging_Line_6' ], + 'lineName' : self.line_name_category_orders, + 'timePeriodSeq': df.reset_index().timePeriodSeq.sort_values().unique() }, + height=800, + ) + + fig.update_layout( + legend= + dict( # change legend location + title = "Product Group", + orientation="v", + yanchor="top", + y=1.1, + xanchor="right", + x=1.05), + margin = {'l': 130,'t':80} + ) + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + + ### This gets rid of the duplicated Y Axis labels caused by the facet_row argument + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.YAxis: + fig.layout[axis].title.text = '' + fig.layout[axis].tickformat = '%' + + fig.for_each_annotation(lambda a: a.update(x = a.x -1.07, textangle = 270)) + fig.for_each_annotation(lambda a: a.update(text=a.text.split("Line Name=")[-1])) + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("_", " "))) + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("Olst", "Olst
"))) + return fig + + def plotly_line_utilization_heatmap_v2(self): + """ + Trying multiple traces to see if we can get a more clear color difference for utilization > 100% + Can't get the hover to work with multiple traces + """ + + # product_aggregation_column = 'groupID' + df = ((self.dm.production_activities + ) + ) + + df = df.pivot_table(values='line_capacity_utilization', index=['lineName'], columns=['timePeriodSeq'], aggfunc=np.sum) + + hovertemplate ='Utilization: %{z:.1%}
Line: %{y}
Time Period: %{x} ' + trace = go.Heatmap(z=df.values, x=df.columns, y=df.index, colorscale='Portland', zmin = 0, zmid =1, hovertemplate=hovertemplate) #colorscale='rdbu', + fig = go.Figure(data=[trace], layout=go.Layout(width=1000, height=600)) + + return fig + + def plotly_demand_fullfilment(self, mode=None): + """Demand, Fulfilled, Unfulfilled, Backlog, BacklogResupply and Inventory over time, grouped by time-period. + Colored by groupID. + Very useful graph since it contains all critical variables at the demand locations. Good for explanation. + """ + + # Collect transportation activities into a destination location. + # (later we'll do a left join to only select trnasportation into a demand location and ignore all other transportation activities) + df0 = (self.dm.transportation_activities[['xTransportationSol']] + .groupby(['productName', 'destinationLocationName', 'timePeriodSeq']).sum() + .rename_axis(index={'destinationLocationName': 'locationName'}) + .rename(columns={'xTransportationSol':'Transportation'}) + ) + # display(df0.head()) + + product_aggregation_column = 'productGroup' + df = (self.dm.demand_inventories[['quantity','xFulfilledDemandSol','xUnfulfilledDemandSol','xBacklogSol','xBacklogResupplySol','xInvSol']] + .join(self.dm.products[['productGroup']]) + # .join(self.dm.locations) + .join(df0, how='left') + ).groupby(['timePeriodSeq', product_aggregation_column]).sum() + if 'relative_week' in df.columns: # TODO: remove if not relevant anymore + df = df.drop(columns=['relative_week']) + # display(df.head()) + df = (df + # .drop(columns=['relative_week']) + .rename( + columns={'quantity': 'Demand', 'xFulfilledDemandSol': 'Fulfilled', 'xUnfulfilledDemandSol': 'Unfulfilled', + 'xBacklogSol': 'Backlog', 'xBacklogResupplySol': 'Backlog Resupply', 'xInvSol': 'Inventory'}) + ) + + df = (df.stack() + .rename_axis(index={None: 'var_name'}) + .to_frame(name='quantity') + .reset_index() + ) + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', 'productGroup':'Product Group', + 'var_name': 'Var'} + + if mode is None: #'bar_subplot_by_time' + fig = px.bar(df, x="var_name", y="quantity", color=product_aggregation_column, title="Demand", labels=labels, + facet_col="timePeriodSeq", + category_orders={ + 'var_name': ['Demand', 'Transportation', 'Fulfilled', 'Unfulfilled', 'Backlog', 'Backlog Resupply', + 'Inventory']}, + height=700 + ) + elif mode == 'multi_line': + fig = px.line(df, x="timePeriodSeq", y="quantity", color='var_name', title="Demand", labels=labels, + facet_row=product_aggregation_column, + height=700 + ) + elif mode == 'animated_horizontal_bars': + fig = px.bar(df, y="var_name", x="quantity", color=product_aggregation_column, title="Demand", labels=labels, + # facet_col="timePeriodSeq", + animation_frame="timePeriodSeq", + category_orders={ + 'var_name': ['Demand', 'Transportation', 'Fulfilled', 'Unfulfilled', 'Backlog', 'Backlog Resupply', + 'Inventory']}, + height=700 + ) + elif mode == 'animated_vertical_bars': + fig = px.bar(df, x="timePeriodSeq", y="quantity", color=product_aggregation_column, title="Demand", labels=labels, + # facet_col="timePeriodSeq", + animation_frame="timePeriodSeq", + facet_row = 'var_name', + category_orders={ + 'var_name': ['Demand', 'Transportation', 'Fulfilled', 'Unfulfilled', 'Backlog', 'Backlog Resupply', + 'Inventory']}, + height=700 + ) + + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. 
Setting 'closest' explicitly is a work-around + + return fig + + def plotly_demand_fullfilment_multi_plot(self, mode=None, var_names=None): + """Demand, Fulfilled, Unfulfilled, Backlog, BacklogResupply and Inventory over time, grouped by time-period. + Colored by groupID. + Very useful graph since it contains all critical variables at the demand locations. Good for explanation. + """ + + # Collect transportation activities into a destination location. + # (later we'll do a left join to only select trnasportation into a demand location and ignore all other transportation activities) + df0 = (self.dm.transportation_activities[['xTransportationSol']] + .groupby(['productName', 'destinationLocationName', 'timePeriodSeq']).sum() + .rename_axis(index={'destinationLocationName': 'locationName'}) + .rename(columns={'xTransportationSol':'Transportation'}) + ) + # display(df0.head()) + + # print(f"products in demand = {self.dm.demand_inventories.index.get_level_values('productName').unique()}") + # print(f"products = {self.dm.products[['productGroup', 'productCountry']]}") + + product_aggregation_column = 'productName' + df = (self.dm.demand_inventories[['quantity','xFulfilledDemandSol','xUnfulfilledDemandSol','xBacklogSol','xBacklogResupplySol','xInvSol']] + .join(self.dm.products[['productGroup', 'productCountry']], how='left') + # .join(self.dm.locations) + .join(df0, how='left') + ).groupby(['timePeriodSeq', product_aggregation_column, "productCountry"]).sum() + # print(f"products = {df.index.get_level_values('productName').unique()}") + + if 'relative_week' in df.columns: # TODO: remove if not relevant anymore + df = df.drop(columns=['relative_week']) + + df = (df + .rename( + columns={'quantity': 'Demand', 'xFulfilledDemandSol': 'Fulfilled', 'xUnfulfilledDemandSol': 'Unfulfilled', + 'xBacklogSol': 'Backlog', 'xBacklogResupplySol': 'Backlog Resupply', 'xInvSol': 'Inventory'}) + ) + + df = (df.stack() + .rename_axis(index={None: 'var_name'}) + .to_frame(name='quantity') + .reset_index() + ) + + var_name_category_order = ['Demand', 'Transportation', 'Fulfilled', 'Unfulfilled', 'Backlog', 'Backlog Resupply', 'Inventory'] + + num_vars = 6 + if var_names is not None: + df = df.query("var_name in @var_names").copy() + num_vars = len(var_names) + var_name_category_order = var_names + + df['location_product'] = df.productCountry + " - " + df.productName + + color_discrete_map = self.gen_color_col(df['location_product']) + print(f"color_discrete_map={color_discrete_map}") + print(f"location_product = {df['location_product'].unique()}") + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Quantity', 'productName': 'Product Name', 'productGroup':'Product Group', + 'var_name': 'Var', 'location_product': 'Product Country'} + + active_var_names = [] + + if mode == 'columns': + fig = px.bar(df, x="timePeriodSeq", y="quantity", + # color=product_aggregation_column, + title="Fulfillment", + labels=labels, + facet_col="var_name", + color = "location_product", + color_discrete_map= color_discrete_map, + category_orders={ + # 'var_name': ['Demand', 'Transportation', 'Fulfilled', 'Unfulfilled', 'Backlog', 'Backlog Resupply', + # 'Inventory'], + 'var_name': var_name_category_order + }, + height=400 + ) + + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.XAxis: + fig.layout[axis].title.text = '' + + + + fig.update_layout( + # keep the original annotations and add a list of new annotations: + annotations = list(fig.layout.annotations) + + [go.layout.Annotation( + x=0.55, + y=-0.15, + font=dict( 
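+                        # This go.layout.Annotation in 'paper' coordinates acts as one
+                        # shared X-axis title ("Time Period") under all facet columns,
+                        # replacing the per-facet titles blanked out above. The offsets
+                        # x=0.55 / y=-0.15 were presumably tuned by eye for this layout.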
+ size=14 + ), + showarrow=False, + text="Time Period", + textangle=0, + xref="paper", + yref="paper" + ) + ] + ) + + + else: # e.g. None + fig = px.bar(df, x="timePeriodSeq", y="quantity", + # color=product_aggregation_column, + title="Fulfillment", labels=labels, + facet_row="var_name", + color = "location_product", + color_discrete_map= color_discrete_map, + category_orders={ + # 'var_name': ['Demand', 'Transportation', 'Fulfilled', 'Unfulfilled', 'Backlog', 'Backlog Resupply', + # 'Inventory'], + 'var_name': var_name_category_order + }, + height=250*num_vars + ) + + fig.for_each_annotation(lambda a: a.update(x = a.x -1.045, textangle = 270)) + + # get rid of duplicated Y-axis labels + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.YAxis: + fig.layout[axis].title.text = '' + + + fig.update_layout(hovermode="closest",legend = {'orientation': 'v'}) # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + fig.for_each_annotation(lambda a: a.update(text=a.text.split("Var=")[-1])) + + fig.update_layout(legend = + {'orientation': 'v', + 'x': 1, + }, + margin = {'l': 75} + ) + + # fig.layout.yaxis2.update(matches=None) + # fig.layout.yaxis3.update(matches=None) + fig.layout.yaxis4.update(matches=None) + fig.update_yaxes(showticklabels=True, col=4) #, col=2 + + fig.update_layout( + margin={'l': 80, 't': 50, 'r': 20, 'b': 60}) + + return fig + + # def plotly_inventory_days_of_supply_line(self, mode:str='line', query=None): + # """Demand inventory, normalized by days-of-supply.""" + # num_days = 2 * 365 # For now assume 2 years. TODO: get from number of time-periods and bucket length + # df1 = (self.dm.demand[['quantity']] + # .join(self.dm.products['productGroup']) + # .groupby(['productGroup','productName','locationName']).sum() + # ) + # df1['demand_per_day'] = df1.quantity / num_days + # df1 = df1.drop(columns=['quantity']) + # # display(df1.head()) + + # df = (self.dm.demand_inventories[['xInvSol']] + # .join(df1) + # .reset_index() + # .set_index(['locationName','productGroup','productName']) + # .sort_index() + # ) + # if query is not None: + # df = df.query(query).copy() + + # df['days_of_supply'] = df.xInvSol / df.demand_per_day + + # df = df.reset_index() + # df['product_location'] = df.locationName + " - " + df.productName + + # labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Inventory', 'productName': 'Product Name', + # 'productGroup': 'Product Group', 'days_of_supply': 'Days of Supply'} + # if mode == 'bar': + # fig = px.bar(df, x="timePeriodSeq", y="days_of_supply", + # color='product_location', + # height=600, + # title='Demand Inventory (days-of-supply)', labels=labels) + # else: + # fig = px.line(df, x="timePeriodSeq", y="days_of_supply", + # color='product_location', + # height=600, + # title='Demand Inventory (days-of-supply)', labels=labels) + # fig.update_layout( + # hovermode="closest", + # # title={ + # # 'text': f"Total product demand", + # # # 'y': 0.9, + # # # 'x': 0.5, + # # 'xanchor': 'center', + # # 'yanchor': 'top'}, + # legend={'orientation': 'v'}, + # # legend_title_text=product_aggregation_column, + # ) + + # return fig + + def plotly_wh_inventory(self, mode:str='bar', query=None): + """Warehouse inventory stacked bar chart by productName. 
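+        Supported modes (per the branches below): 'bar' gives a stacked bar chart,
+        'area' a stacked area chart, and any other value falls back to a line chart.
+        A usage sketch, assuming `pm` is a PharmaPlotlyManager instance:
+
+            fig = pm.plotly_wh_inventory(mode='area', query="productGroup == 'Package'")
+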
+ TODO: remove products that have no inventory over the whole time-line.""" + df = (self.dm.warehouse_inventories[['xInvSol']] + # .query("xInvSol > 0") + .join(self.dm.products[['productGroup', 'productCountry']]) + .sort_index() + .sort_values(['xInvSol'], ascending=False) + ) + if query is not None: + df = df.query(query) + + df = df.reset_index() + + df['productCountry'] = df['productCountry'].fillna("") + df['location_product'] = df['productCountry'] + " - " + df['productName'] + df['location_product'] = df['location_product'].fillna('API') + + color_discrete_map = self.gen_color_col(df['location_product']) + + labels = {'timePeriodSeq': 'Time Period', 'days_of_supply':'Days of Supply', 'quantity': 'Inventory', 'productName': 'Product Name', + 'productGroup': 'Product Group', + 'location_product': 'Product Location', + "xInvSol": "Inventory"} + + + if mode == 'bar': + fig = px.bar(df, x="timePeriodSeq", y="xInvSol", + color='location_product', + color_discrete_map = color_discrete_map, + height=600, + title='Warehouse Inventory', labels=labels) + elif mode == 'area': + fig = px.area(df, x="timePeriodSeq", y="xInvSol", + color='location_product', + color_discrete_map = color_discrete_map, + height=600, + title='Warehouse Inventory', labels=labels) + else: + fig = px.line(df, x="timePeriodSeq", y="xInvSol", + color='location_product', + color_discrete_map = color_discrete_map, + height=600, + title='Warehouse Inventory', labels=labels) + fig.update_layout( + hovermode="closest", + legend={'orientation': 'v', + # 'yanchor': 'middle', + 'x': 1.05, + }, + margin = {'l': 80,'t':80} + # legend_title_text=product_aggregation_column, + ) + + return fig + + def plotly_plant_inventory(self, mode:str='bar', query=None): + """Plant inventory stacked bar chart by productName. 
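+        Faceted by plant (locationName) in the 'bar' and 'area' modes, with facet
+        order fixed by self.plant_name_category_orders; any other mode falls back
+        to a line chart. Sketch, assuming `pm` is a PharmaPlotlyManager instance:
+
+            fig = pm.plotly_plant_inventory(mode='area', query="productGroup == 'Granulate'")
+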
+ TODO: remove products that have no inventory over the whole time-line.""" + df = (self.dm.plant_inventories[['xInvSol']] + # .query("xInvSol > 0") # Doesn't work well: will reduce the number of entries in the horizon + .join(self.dm.products[['productGroup', 'productCountry']]) + .sort_index() + .sort_values(['xInvSol'], ascending=False) + ) + if query is not None: + df = df.query(query) + + df = df.reset_index() + + # df = df[df.xInvSol > 0] + + df.productCountry = df['productCountry'].fillna("") + + df['location_product'] = df['productCountry'] + " - " + df['productName'] + + # df['location_product'] = df['location_product'].fillna('API') + + color_discrete_map = self.gen_color_col(df['location_product']) + + labels = {'timePeriodSeq': 'Time Period', 'days_of_supply':'Days of Supply', 'quantity': 'Inventory', 'productName': 'Product Name', + 'productGroup': 'Product Group', + 'location_product': 'Product Location'} + + category_orders = { + # 'locationName': ['Abbott_Weesp_Plant', 'Abbott_Olst_Plant'], + 'locationName': self.plant_name_category_orders + } + + if mode == 'bar': + fig = px.bar(df, x="timePeriodSeq", y="xInvSol", + facet_row='locationName', + color='location_product', + color_discrete_map = color_discrete_map, + category_orders=category_orders, + height=600, + title='Plant Inventory', labels=labels) + fig.for_each_annotation(lambda a: a.update(x = a.x-1.04, textangle = 270)) + elif mode == 'area': + fig = px.area(df, x="timePeriodSeq", y="xInvSol", + facet_row='locationName', + color='location_product', + # color='productName', + color_discrete_map = color_discrete_map, + category_orders=category_orders, + height=600, + title='Plant Inventory', labels=labels) + fig.for_each_annotation(lambda a: a.update(x = a.x-1.08, textangle = 270)) + else: + fig = px.line(df, x="timePeriodSeq", y="xInvSol", + color='location_product', + color_discrete_map = color_discrete_map, + category_orders=category_orders, + height=600, + title='Plant Inventory', labels=labels) + + fig.update_layout( + hovermode="closest", + legend={'orientation': 'v', + 'x': 1.05,}, + margin = {'l': 80, 't':80} + ) + + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.YAxis: + fig.layout[axis].title.text = '' + + fig.for_each_annotation(lambda a: a.update(text=a.text.split("locationName=")[-1])) + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("_", " "))) + return fig + + def plotly_demand_inventory(self, mode:str='bar', query=None): + """Plant inventory stacked bar chart by productName. 
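+        Note: despite the summary line, this charts inventory at *demand* locations
+        (self.dm.demand_inventories), colored by country-product combination.
+        Sketch: pm.plotly_demand_inventory(mode='bar', query="productGroup == 'Package'")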
+ TODO: remove products that have no inventory over the whole time-line.""" + df = (self.dm.demand_inventories[['xInvSol']] + # .query("xInvSol > 0") # Doesn't work well: will reduce the number of entries in the horizon + .join(self.dm.products[['productGroup', 'productCountry']]) + .sort_index() + .sort_values(['xInvSol'], ascending=False) + ) + if query is not None: + df = df.query(query) + + df = df.reset_index() + df['productCountry'] = df['productCountry'].fillna('') + df['location_product'] = df.productCountry + " - " + df.productName + + + color_discrete_map = self.gen_color_col(df['location_product']) + + labels = {'timePeriodSeq': 'Time Period', 'days_of_supply':'Days of Supply', 'quantity': 'Inventory', 'productName': 'Product Name', + 'productGroup': 'Product Group', 'location_product': 'Product Location', 'xInvSol': 'Inventory'} + + if mode == 'bar': + fig = px.bar(df, x="timePeriodSeq", y="xInvSol", + color_discrete_map=color_discrete_map, + color='location_product', + height=600, + title='Demand Inventory', labels=labels) + else: + fig = px.line(df, x="timePeriodSeq", y="xInvSol", + color_discrete_map=color_discrete_map, + color='location_product', + height=600, + title='Demand Inventory', labels=labels) + fig.update_layout( + hovermode="closest", + legend={'orientation': 'v', + 'x': 1.05}, + margin={'l':80,'t':80}, + # legend_title_text=product_aggregation_column, + ) + + return fig + + def plotly_line_product_capacity_heatmap(self): + """Heatmap of capacity as line vs product. Good insight on line specialization/recipe-properties. + Input tables: ['RecipeProperties', 'Line', 'Product'] + Output tables: [] + """ + df = (self.dm.recipe_properties[['capacity']] + .join(self.dm.lines) + .join(self.dm.products[['productGroup']]) + # .join(self.dm.plants.rename(columns={'locationDescr':'plantDescr'}), on='plantName') + # .join(self.dm.locations, on='locationName') + ) # .groupby(['lineName','productType']).max() + df = df.reset_index() + # display(df.head()) + # df = df.pivot_table(values='capacity', index=['lineDescr'], columns=['productType'], aggfunc=np.max) + df = df.pivot_table(values='capacity', index=['lineName'], columns=['productGroup'], aggfunc=np.max) + + df = df.reset_index() + + cols = ["API", "Granulate", "Tablet", "Package"] + df= df[cols] + + labels = {'lineName': 'Line', 'productGroup': 'Product Group', 'productName': 'Product Name'} + labels = dict(x="Product Group", y="Line", color="Capacity") + + # labels = dict(x=["1","2","3","4"], y="Line", color="Capacity") + + fig = px.imshow(df, labels=labels, width=1000, + color_continuous_scale='YlOrRd', + # labels = { + # 'x':["1","2","3","4"] + # }, + # y = ["Abbott Olst
Granulate Line", "Abbott Olst
Packaging Line 5", "Abbott Olst
Packaging Line 6", "Abbott
Weesp Line"], + y = ["Granulate Line", "Packaging Line 1", "Packaging Line 2", "API Line"], + # y = ["API Line", "Granulate Line", "Packaging Line 1", "Packaging Line 2"], + # x = ["API", "Granulate", "Tablet", "Package"], + # template="ggplot2", + ) + + # for i, label in enumerate(['orignal', 'clean', '3', '4']): + # fig.layout.annotations[i]['text'] = label + + # fig.update_xaxes(showticklabels=False).update_yaxes(showticklabels=False) + + + fig.update_layout( + hovermode="closest", + title={ + 'text': "Maximum Line Capacity by Product Type", + # 'y': 0.92, + 'x': 0.5, + 'xanchor': 'center', + 'yanchor': 'top'}, + margin = {'l': 60,'t':80,'b':60}) + + return fig + + def plotly_line_package_capacity_heatmap(self): + """Heatmap of capacity as line vs product. Good insight on line specialization/recipe-properties. + Input tables: ['RecipeProperties', 'Line', 'Product'] + Output tables: [] + """ + df = (self.dm.recipe_properties[['capacity']] + .join(self.dm.lines) + .join(self.dm.products[['productGroup', 'productCountry']]) + # .join(self.dm.plants.rename(columns={'locationDescr':'plantDescr'}), on='plantName') + # .join(self.dm.locations, on='locationName') + .query("productGroup == 'Package'") + ) # .groupby(['lineName','productType']).max() + df = df.reset_index() + + df.productName = df.productName.astype(str) + + df['location_product'] = df['productCountry'] + ' - ' + df['productName'] + df['location_product'] = df['location_product'].fillna('API') + + # df = df.pivot_table(values='capacity', index=['lineDescr'], columns=['productType'], aggfunc=np.max) + df = df.pivot_table(values='capacity', index= ['lineName'], + columns=['location_product'] , aggfunc=np.max) + + labels = {'lineName': 'Line', 'productGroup': 'Product Group', 'productName': 'Product Name', + } + labels = dict(x="Line", y="Product" , color="Max Capacity") + fig = px.imshow(df, + aspect = 'auto', + labels=labels, + # height = 800, + # width=1000, + color_continuous_scale='YlOrRd', + # y = ["Abbott Olst
Packaging Line 5", "Abbott Olst
Packaging Line 6"], + # y = ["Packaging Line 1", "Packaging Line 2"], + ) + + fig.update_layout( + title={ + 'text': "Maximum Packaging Line Capacity by Product", + # 'y': 0.92, + 'x': 0.5, + 'xanchor': 'center', + 'yanchor': 'top'}, + margin = {'l': 140,'t':40,'b':100}) + + fig.update_xaxes(tickfont={'size':11}) + + + return fig + + def plotly_time_product_group_capacity_heatmap(self): + """Heatmap of capacity over time. + Good to detect and time-variation in capacity. + Input tables: ['RecipeProperties', 'Line', 'Product'] + Output tables: [] + """ + + df = (self.dm.recipe_properties[['capacity']] + .join(self.dm.lines) + .join(self.dm.products[['productGroup']])) + df = df.reset_index() + + + cols = ["API", "Granulate", "Tablet", "Package"] + # df= df[cols] + + df = df.pivot_table(values='capacity', index=['productGroup'], columns=['timePeriodSeq'], aggfunc=np.max) + + df= df.reindex(cols) + # print(df.index) + + labels = {'lineName': 'Line', 'productGroup': 'Product Group', 'productName': 'Product Name'} + labels = dict(x="Time Period", y="Product Group", color="Capacity") + fig = px.imshow(df, labels=labels, + color_continuous_scale='YlOrRd', + # y = ["API", "Granulate", "Tablet", "Package"] + ) + fig.update_layout( + title={ + 'text': "Maximum Line Capacity by Product Group and Time Period", + 'y': 0.95, + 'x': 0.5, + 'xanchor': 'center', + 'yanchor': 'top'}, + margin = {'l': 90,'t':80,'b':60}) + + return fig + + def plotly_time_package_capacity_heatmap(self): + """Heatmap of capacity over time. + Good to detect and time-variation in capacity. + Input tables: ['RecipeProperties', 'Line', 'Product'] + Output tables: [] + """ + + df = (self.dm.recipe_properties[['capacity']] + .join(self.dm.lines) + .join(self.dm.products[['productGroup']])) + df = df.reset_index() + df = df.query("productGroup == 'Package'") + # display(df.head()) + df = df.pivot_table(values='capacity', index=['productName'], columns=['timePeriodSeq'], aggfunc=np.max) + + labels = {'lineName': 'Line', 'productGroup': 'Product Group', 'productName': 'Product Name'} + labels = dict(x="Time Period", y="Product Name", color="Capacity") + fig = px.imshow(df, labels=labels, + # color_discrete_sequence=px.colors.qualitative.G10 # Doesn't work! + # color_continuous_scale='Turbo', + # color_continuous_scale='YlOrBr', + color_continuous_scale='YlOrRd', + height = 1000, + ) + fig.update_layout( + title={ + 'text': "Maximum Line Capacity by Product and Time Period", +# 'y': 0.95, + 'x': 0.5, + 'xanchor': 'center', + 'yanchor': 'top'}, + margin = {'l': 90,'t':80,'b':60}) + + return fig + + # def plotly_time_product_capacity_bars(self): + # """Heatmap of capacity over time. + # Good to detect and time-variation in capacity. 
+ # Input tables: ['RecipeProperties', 'Line', 'Product'] + # Output tables: [] + # """ + # df = (self.dm.recipe_properties[['capacity']] + # .join(self.dm.lines) + # .join(self.dm.products[['productGroup']])) + # # display(df.head()) + + # df = df[['capacity','productGroup']].groupby(['lineName','timePeriodSeq','productName']).max() + # # display(df.head()) + + # labels = {'lineName': 'Line', 'productGroup': 'Product Group', 'productName': 'Product Name', 'timePeriodSeq':'Time Period', 'capacity':'Capacity'} + # # labels = dict(x="Time Period", y="Product Group", color="Capacity") + # fig = px.bar(df.reset_index(), x='timePeriodSeq', y='capacity', color='productName',labels=labels, + # facet_col='productGroup', + # category_orders={ + # "productGroup": ["API", "Granulate", "Tablet", "Package"] + # }, + + # # facet_row = 'lineName', + # ) + + # fig.update_layout( + # hovermode="closest", + # title={ + # 'text': "Maximum Line Capacity by Product and Time Period", + # 'y': 0.95, + # 'x': 0.5, + # 'xanchor': 'center', + # 'yanchor': 'top'}) + + # fig.update_layout(legend=dict( + # yanchor="top", + # y=0.99, + # xanchor="right", + # x=1.15, + # orientation="v" + # )) + # fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1])) + + # return fig + + def plotly_time_product_group_capacity_bars(self): + """Heatmap of capacity over time. + Good to detect and time-variation in capacity. + Input tables: ['RecipeProperties', 'Line', 'Product'] + Output tables: [] + """ + df = (self.dm.recipe_properties[['capacity']] + .join(self.dm.lines) + .join(self.dm.products[['productGroup']])) + # display(df.head()) + + df = df[['capacity','productGroup']].groupby(['lineName','timePeriodSeq','productGroup']).max() + # display(df.head()) + + color_discrete_map = self.gen_color_col() + + labels = {'lineName': 'Line', 'productGroup': 'Product Group', 'productName': 'Product Name', 'timePeriodSeq':'Time Period', 'capacity':'Capacity'} + # labels = dict(x="Time Period", y="Product Group", color="Capacity") + fig = px.bar(df.reset_index(), x='timePeriodSeq', y='capacity', color='productGroup',labels=labels, + facet_col='productGroup', + category_orders={ + "productGroup": ["API", "Granulate", "Tablet", "Package"] + }, + color_discrete_map= color_discrete_map + ) + + fig.update_layout( + hovermode="closest", + title={ + 'text': "Maximum Line Capacity by Product Group and Time Period", + 'y': 0.95, + 'x': 0.5, + 'xanchor': 'center', + 'yanchor': 'top'} + ) + + fig.update_layout( + legend=dict( + yanchor="top", + y=0.99, + xanchor="right", + x=1.15, + orientation="v" + ), + margin = {'l': 60,'t':80,'b':60}, + ) + fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1])) + + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.XAxis: + fig.layout[axis].title.text = '' + + return fig + + + + def plotly_demand_inventory_bar_subplot_by_time(self): + """Demand, Fulfilled, Unfulfilled, Backlog, BacklogResupply and Inventory over time, grouped by time-period. + Colored by groupID. + Very useful graph since it contains all critical variables at the demand locations. Good for explanation. 
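+        Implementation note: the figure is built trace-by-trace with
+        go.Bar(x=[df.timePeriodSeq, df.var_name], ...); passing a two-element list
+        as x yields a two-level categorical axis (time period, then variable),
+        which plotly.express bar charts do not support directly.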
+ """ + # product_aggregation_column = 'groupID' + # df1 = (self.dm.demand_inventories + # .join(self.dm.products[['productType', 'subgroupID', 'groupID']]) + # # .join(self.dm.locations) + # ).groupby(['timePeriodSeq', product_aggregation_column]).sum() + # if 'relative_week' in df1.columns: # TODO: remove if not relevant anymore + # df1 = df1.drop(columns=['relative_week']) + # df1 = (df1 + # # .drop(columns=['relative_week']) + # .rename( + # columns={'quantity': 'Demand', 'xFulfilledDemandSol': 'Fulfilled', 'xUnfulfilledDemandSol': 'Unfulfilled', + # 'xBacklogSol': 'Backlog', 'xBacklogResupplySol': 'Backlog Resupply', 'xDemandInvSol': 'Inventory'}) + # ) + # df1 = (df1.stack() + # .rename_axis(index={None: 'var_name'}) + # .to_frame(name='quantity') + # .reset_index() + # ) + + # # Inflows from plants: + # df2 = (self.dm.plant_to_demand_transportation[['xTransportationSol']] + # .join(self.dm.products[['productType', 'subgroupID', 'groupID']]) + # .groupby(['timePeriodSeq', product_aggregation_column]).sum() + # .rename(columns={'xTransportationSol': 'Production'}) + # ) + # df2 = (df2.stack() + # .rename_axis(index={None: 'var_name'}) + # .to_frame(name='quantity') + # .reset_index() + # ) + + # df = pd.concat([df1, df2]) + + # # print(df.head()) + + # labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', + # 'var_name': 'Var'} + # fig = px.bar(df, x="var_name", y="quantity", color=product_aggregation_column, title="Demand", labels=labels, + # facet_col="timePeriodSeq", + # category_orders={ + # 'var_name': ['Demand', 'Production', 'Fulfilled', 'Unfulfilled', 'Backlog', 'Backlog Resupply', + # 'Inventory']}, + # height=700 + # ) + # fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. 
Setting 'closest' explicitly is a work-around + # return fig + + product_aggregation_column = 'groupID' + df1 = (self.dm.demand_inventories + .join(self.dm.products[['productType', 'subgroupID', 'groupID']]) + # .join(self.locations) + ).groupby(['timePeriodSeq', product_aggregation_column]).sum() + if 'relative_week' in df1.columns: # TODO: remove if not relevant anymore + df1 = df1.drop(columns=['relative_week']) + df1 = (df1 + # .drop(columns=['relative_week']) + .rename( + columns={'quantity': 'Demand', 'xFulfilledDemandSol': 'Fulfilled', 'xUnfulfilledDemandSol': 'Unfulfilled', + 'xBacklogSol': 'Backlog', 'xBacklogResupplySol': 'Backlog Resupply', 'xDemandInvSol': 'Inventory'}) + ) + df1 = (df1.stack() + .rename_axis(index={None: 'var_name'}) + .to_frame(name='quantity') + .reset_index() + ) + + # Inflows from plants: + df2 = (self.dm.plant_to_demand_transportation[['xTransportationSol']] + .join(self.dm.products[['productType', 'subgroupID', 'groupID']]) + .groupby(['timePeriodSeq', product_aggregation_column]).sum() + .rename(columns={'xTransportationSol': 'Production'}) + ) + df2 = (df2.stack() + .rename_axis(index={None: 'var_name'}) + .to_frame(name='quantity') + .reset_index() + ) + + df = pd.concat([df1, df2]) + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', + 'var_name': 'Var'} + + import plotly.graph_objects as go + + fig = go.Figure() + + fig.update_layout( + template="simple_white", + xaxis=dict(title_text="Time"), + yaxis=dict(title_text="Quantity"), + barmode="stack", + ) + + colors = ["#6495ED", "#FFBF00", "#FF7F50", "#DE3163", "#9FE2BF"] + + for p, c in zip(df.groupID.unique(), colors): + plot_df = df[df.groupID == p] + fig.add_trace( + go.Bar(x=[plot_df.timePeriodSeq, plot_df.var_name], y=plot_df.quantity, name=p, marker_color=c), + ) + + fig.update_xaxes( + rangeslider_visible=True, + rangeselector=dict( + buttons=list([ + dict(count=1, label="1m", step="month", stepmode="backward"), + dict(count=6, label="6m", step="month", stepmode="backward"), + dict(count=1, label="YTD", step="year", stepmode="todate"), + dict(count=1, label="1y", step="year", stepmode="backward"), + dict(step="all") + ]) + ) + ) + + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + return fig + + ####################################################################################### + # Inventory + ####################################################################################### + def plotly_inventory_days_of_supply_line(self, mode:str='line', query=None): + """Demand inventory, normalized by days-of-supply. + Args: + mode (str): line (default) or bar. Bar will result in a stacked bar. + + Input tables: ['Demand', 'Product'] + Output tables: ['DemandInventory] + """ + # num_days = 2 * 365 # For now assume 2 years. 
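+        # Days-of-supply, as computed below (a sketch of the logic):
+        #     demand_per_day  = total_demand_over_horizon / num_days
+        #     days_of_supply  = xInvSol / demand_per_day
+        # where num_days = number_of_time_periods * 30, i.e. assuming 30-day buckets.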
TODO: get from number of time-periods and bucket length + num_days = len(self.dm.demand.index.unique(level='timePeriodSeq')) * 30 + df1 = (self.dm.demand[['quantity']] + .join(self.dm.products['productGroup']) + .groupby(['productGroup','productName','locationName']).sum() + ) + df1['demand_per_day'] = df1.quantity / num_days + + df1 = df1.drop(columns=['quantity']) + + temp = self.dm.demand_inventories[['xInvSol']].reset_index() + + temp = temp[temp.locationName == 'PERU'] + + df = (self.dm.demand_inventories[['xInvSol']] + .join(df1) + .reset_index() + .set_index(['locationName','productGroup','productName']) + .sort_index() + ) + if query is not None: + df = df.query(query).copy() + + df['days_of_supply'] = df.xInvSol / df.demand_per_day + + tdf = df.reset_index() + tdf = tdf[tdf.locationName == 'PERU'] + + df = df.reset_index() + + df['location_product'] = df.locationName + " - " + df.productName + + color_discrete_map = self.gen_color_col(df['location_product']) + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Inventory', 'productName': 'Product Name', + 'productGroup': 'Product Group', "days_of_supply": "Days of Supply", 'days_of_supply_smoothed': 'Days of Supply'} + + df['days_of_supply'] = df['days_of_supply'].clip(upper = 100) + + df = df.sort_values('timePeriodSeq') + + df['days_of_supply_smoothed'] = df['days_of_supply'].rolling(window=5).mean() + + if mode == 'bar': + fig = px.bar(df, x="timePeriodSeq", y="days_of_supply", + color='location_product', + color_discrete_map=color_discrete_map, + height=600, + title='Demand Inventory (days-of-supply)', labels=labels) + else: + fig = px.line(df, x="timePeriodSeq", y="days_of_supply", + color='location_product', + color_discrete_map=color_discrete_map, + height=600, + title='Demand Inventory (days-of-supply)', labels=labels) + fig.update_layout( + hovermode="closest", + legend={'orientation': 'v', + "title": 'Product Location', + 'x': 1.05}, + margin={'l':80,'t':60, 'r':0}, + ) + + return fig + + def plotly_inventory_days_of_supply_slack_line(self, mode:str='line', query=None): + """Demand inventory, days-of-supply slack. + Args: + mode (str): line (default) or bar. Bar will result in a stacked bar. + + Input tables: ['Demand', 'Product'] + Output tables: ['DemandInventory] + """ + # num_days = 2 * 365 # For now assume 2 years. 
TODO: get from number of time-periods and bucket length + num_days = len(self.dm.demand.index.unique(level='timePeriodSeq')) * 30 + df1 = (self.dm.demand[['quantity']] + .join(self.dm.products['productGroup']) + .groupby(['productGroup','productName','locationName']).sum() + ) + df1['demand_per_day'] = df1.quantity / num_days + df1 = df1.drop(columns=['quantity']) + + df = (self.dm.demand_inventories[['xDOSSlackSol']] + .join(df1) + .reset_index() + .set_index(['locationName','productGroup','productName']) + .sort_index() + ) + if query is not None: + df = df.query(query).copy() + + df['dosSlack'] = df.xDOSSlackSol / df.demand_per_day + + df = df.reset_index() + + df['location_product'] = df.locationName + " - " + df.productName + + color_discrete_map = self.gen_color_col(df['location_product']) + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Inventory', 'productName': 'Product Name', + 'productGroup': 'Product Group', "days_of_supply": "Days of Supply"} + + df['dosSlack'] = df['dosSlack'].clip(upper = 100) + + if mode == 'bar': + fig = px.bar(df, x="timePeriodSeq", y="dosSlack", + color='location_product', + color_discrete_map=color_discrete_map, + height=600, + title='Demand Inventory Slack (days-of-supply)', labels=labels) + else: + fig = px.line(df, x="timePeriodSeq", y="dosSlack", + color='location_product', + color_discrete_map=color_discrete_map, + height=600, + title='Demand Inventory Slack (days-of-supply)', labels=labels) + + fig.update_layout( + hovermode="closest", + legend={'orientation': 'v', + "title": 'Product Location', + 'x': 1.05}, + margin={'l': 80, 't': 60, 'r': 0}, + ) + + return fig + + def plotly_wh_inventory_days_of_supply_line(self, mode:str='line', query=None): + """Warehouse inventory, normalized by days-of-supply.""" + # num_days = 2 * 365 # For now assume 2 years. 
TODO: get from number of time-periods and bucket length + num_days = len(self.dm.demand.index.unique(level='timePeriodSeq')) * 30 + df1 = (self.dm.demand[['quantity']] + .join(self.dm.products[['productGroup', 'productCountry']]) + .groupby(['productGroup','productName', 'productCountry']).sum() + ) + + df1['demand_per_day'] = df1.quantity / num_days + df1 = df1.drop(columns=['quantity']) + + df = (self.dm.warehouse_inventories[['xInvSol']] + .join(df1) + .reset_index() + .set_index(['locationName','productGroup','productName', 'productCountry']) + .sort_index() + ) + if query is not None: + df = df.query(query).copy() + + df['days_of_supply'] = (df.xInvSol / df.demand_per_day) + + df = df.reset_index() + + df.productCountry = df.productCountry.fillna("") + + df['location_product'] = df.productCountry + " - " + df.productName + + df.days_of_supply = df.days_of_supply.clip(upper = 100) + + color_discrete_map = self.gen_color_col(df['location_product']) + + labels = {'timePeriodSeq': 'Time Period', 'days_of_supply':'Days of Supply', 'quantity': 'Inventory', 'productName': 'Product Name', + 'productGroup': 'Product Group', 'location_product': 'Product Location', 'xInvSol': 'Inventory'} + + if mode == 'bar': + fig = px.bar(df, x="timePeriodSeq", y="days_of_supply", + color='location_product', + color_discrete_map=color_discrete_map, + height=600, + title='Warehouse Inventory (days-of-supply)', labels=labels) + elif mode == 'area': + fig = px.area(df, x="timePeriodSeq", y="xInvSol", + color='productName', + color_discrete_map=color_discrete_map, + height=600, + title='Warehouse Inventory', labels=labels) + else: + fig = px.line(df, x="timePeriodSeq", y="days_of_supply", + color='location_product', + color_discrete_map=color_discrete_map, + height=600, + title='Warehouse Inventory (days-of-supply)', + labels=labels) + + fig.update_layout( + hovermode="closest", + legend={'orientation': 'v', + "x": 1.05}, + margin={'l': 80, 't': 60, 'r': 0}, + ) + + return fig + + def plotly_package_demand_bars(self, query=None): + """Product demand over time. Colored by productGroup. + + Input tables: ['Demand', 'Product'] + Output tables: [] + """ + df = (self.dm.demand + .join(self.dm.products[['productGroup']]) + .query("productGroup == 'Package'") + ) + if query is not None: + df = df.query(query) + + aggregation_column = 'locationName' + df = df.groupby(['timePeriodSeq', aggregation_column]).sum() + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', + 'productGroup': 'Product Group'} + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="quantity", color=aggregation_column, + title='Total Package Demand', labels=labels) + fig.update_layout( + hovermode="closest", + legend={'orientation': 'v'}, + ) + + return fig + + def plotly_package_demand_lines(self, query=None): + """Product demand over time. Colored by productGroup. 
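+        Line-chart variant of plotly_package_demand_bars: same filter
+        (productGroup == 'Package') and aggregation by locationName.
+        Sketch: pm.plotly_package_demand_lines(query="locationName == 'PERU'"),
+        with `pm` a PharmaPlotlyManager instance.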
+ Input tables: ['Demand', 'Product'] + Output tables: [] + """ + df = (self.dm.demand + .join(self.dm.products[['productGroup']]) + .query("productGroup == 'Package'") + ) + if query is not None: + df = df.query(query) + + aggregation_column = 'locationName' + df = df.groupby(['timePeriodSeq', aggregation_column]).sum() + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', + 'productGroup': 'Product Group'} + + fig = px.line(df.reset_index(), x="timePeriodSeq", y="quantity", color=aggregation_column, + title='Total Package Demand', labels=labels) + + fig.update_layout( + hovermode="closest", + legend={'orientation': 'v'}, + ) + + return fig + + def plotly_demand_fullfilment_scroll(self): + """Demand, Fulfilled, Unfulfilled, Backlog, BacklogResupply and Inventory over time, grouped by time-period. + Colored by groupID. + Very useful graph since it contains all critical variables at the demand locations. Good for explanation. + """ + + # Collect transportation activities into a destination location. + # (later we'll do a left join to only select trnasportation into a demand location and ignore all other transportation activities) + df0 = self.dm.transportation_activities + df0['destinationTimePeriodSeq'] = df0.index.get_level_values('timePeriodSeq') + df0.transitTime + df0 = (df0[['xTransportationSol', 'destinationTimePeriodSeq']] + .groupby(['productName', 'destinationLocationName', 'destinationTimePeriodSeq']).sum() + .rename_axis(index={'destinationLocationName': 'locationName', 'destinationTimePeriodSeq':'timePeriodSeq'}) + .rename(columns={'xTransportationSol':'Transportation'}) + ) + # display(df0.head()) + + product_aggregation_column = 'productGroup' + df = (self.dm.demand_inventories[['quantity','xFulfilledDemandSol','xUnfulfilledDemandSol','xBacklogSol','xBacklogResupplySol','xInvSol']] + .join(self.dm.products[['productGroup']]) + # .join(self.dm.locations) + .join(df0, how='left') + ).groupby(['timePeriodSeq', product_aggregation_column]).sum() + if 'relative_week' in df.columns: # TODO: remove if not relevant anymore + df = df.drop(columns=['relative_week']) + # display(df.head()) + df = (df + # .drop(columns=['relative_week']) + .rename( + columns={'quantity': 'Demand', 'xFulfilledDemandSol': 'Fulfilled', 'xUnfulfilledDemandSol': 'Unfulfilled', + 'xBacklogSol': 'Backlog', 'xBacklogResupplySol': 'Backlog Resupply', 'xInvSol': 'Inventory'}) + ) + # display(df.head()) + df = (df.stack() + .rename_axis(index={None: 'var_name'}) + .to_frame(name='quantity') + .reset_index() + ) + # display(df.head()) + + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', 'productGroup':'Product Group', + 'var_name': 'Var'} + + fig = go.Figure() + fig.update_layout( + template="simple_white", + xaxis=dict(title_text="Time"), + yaxis=dict(title_text="Quantity"), + barmode="stack", + height=700 + ) + + colors = self.gen_color_col() + + # Default colors: + for p in df.productGroup.unique(): + plot_df = df[df.productGroup == p] + fig.add_trace(go.Bar(x=[plot_df.timePeriodSeq, plot_df.var_name], y=plot_df.quantity, name=p, marker_color = colors[p])) + + fig.update_xaxes( + rangeslider_visible=True, + rangeselector=dict( + buttons=list([ + dict(count=1, label="1m", step="month", stepmode="backward"), + dict(count=6, label="6m", step="month", stepmode="backward"), + dict(count=1, label="YTD", step="year", stepmode="todate"), + dict(count=1, label="1y", step="year", stepmode="backward"), + dict(step="all") + ]) + ) + 
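+            # NOTE: the 1m/6m/YTD/1y rangeselector buttons assume a datetime x-axis;
+            # with an integer timePeriodSeq axis, only the range slider and the
+            # 'all' button are expected to behave as intended.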
) + + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + + fig.update_layout( + xaxis = dict( + tickfont = dict(size=9))) + + fig.update_layout( + margin={'l': 10, 't': 10, 'r': 0, 'b':10}) + + return fig + + def plotly_demand_fullfilment_scroll_product(self): + """Demand, Fulfilled, Unfulfilled, Backlog, BacklogResupply and Inventory over time, grouped by time-period. + Colored by groupID. + Very useful graph since it contains all critical variables at the demand locations. Good for explanation. + """ + + # Collect transportation activities into a destination location. + # (later we'll do a left join to only select trnasportation into a demand location and ignore all other transportation activities) + # df0 = (self.dm.transportation_activities[['xTransportationSol']] + # .groupby(['productName', 'destinationLocationName', 'timePeriodSeq']).sum() + # .rename_axis(index={'destinationLocationName': 'locationName'}) + # .rename(columns={'xTransportationSol':'Transportation'}) + # ) + df0 = self.dm.transportation_activities + df0['destinationTimePeriodSeq'] = df0.index.get_level_values('timePeriodSeq') + df0.transitTime + df0 = (df0[['xTransportationSol', 'destinationTimePeriodSeq']] + .groupby(['productName', 'destinationLocationName', 'destinationTimePeriodSeq']).sum() + .rename_axis(index={'destinationLocationName': 'locationName', 'destinationTimePeriodSeq':'timePeriodSeq'}) + .rename(columns={'xTransportationSol':'Transportation'}) + ) + # display(df0.head()) + + # product_aggregation_column = 'productGroup' + product_aggregation_column = 'productName' + df = (self.dm.demand_inventories[['quantity','xFulfilledDemandSol','xUnfulfilledDemandSol','xBacklogSol','xBacklogResupplySol','xInvSol']] + .join(self.dm.products[['productGroup', 'productCountry']]) + # .join(self.dm.locations) + .join(df0, how='left') + ).groupby(['timePeriodSeq', product_aggregation_column, 'productCountry']).sum() + # print(df.head()) + if 'relative_week' in df.columns: # TODO: remove if not relevant anymore + df = df.drop(columns=['relative_week']) + # display(df.head()) + df = (df + # .drop(columns=['relative_week']) + .rename( + columns={'quantity': 'Demand', 'xFulfilledDemandSol': 'Fulfilled', 'xUnfulfilledDemandSol': 'Unfulfilled', + 'xBacklogSol': 'Backlog', 'xBacklogResupplySol': 'Backlog Resupply', 'xInvSol': 'Inventory'}) + ) + + df = (df.stack() + .rename_axis(index={None: 'var_name'}) + .to_frame(name='quantity') + .reset_index() + ) + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', 'productGroup':'Product Group', + 'var_name': 'Var'} + + fig = go.Figure() + fig.update_layout( + template="simple_white", + xaxis=dict(title_text="Time"), + yaxis=dict(title_text="Quantity"), + barmode="stack", + height=900, + # width = 2000 + ) + + df = df.reset_index() + df['location_product'] = df['productCountry'] + ' - ' + df['productName'] + df['location_product'] = df['location_product'].fillna('API') + + colors = self.gen_color_col(df['location_product']) + + # Default colors: + # for p in df[product_aggregation_column].unique(): + # print(f"p = {p}") + + # print(df[product_aggregation_column].unique()) + # print(colors) + + # Default colors: + for p in df['location_product'].unique(): + # print(f"p = {p}") + + plot_df = df[df['location_product'] == p] + try: + fig.add_trace(go.Bar(x=[plot_df.timePeriodSeq, plot_df.var_name], y=plot_df.quantity, name=p, + marker_color = 
colors[p] + )) + except: + pass + + + fig.update_xaxes( + rangeslider_visible=True, + rangeselector=dict( + buttons=list([ + dict(count=1, label="1m", step="month", stepmode="backward"), + dict(count=6, label="6m", step="month", stepmode="backward"), + dict(count=1, label="YTD", step="year", stepmode="todate"), + dict(count=1, label="1y", step="year", stepmode="backward"), + dict(step="all") + ]) + ) + ) + + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + + fig.update_layout( + xaxis = dict( + tickfont = dict(size=9)), + legend = {'orientation': 'v', 'x': 1}) + + fig.update_layout( + margin={'l': 10, 't': 10, 'r': 0, 'b': 30}) + + + return fig + + def plotly_production_activities_bars(self, query=None, title='Production'): + """Production activity over time, colored by productGroup. + Input tables: ['Product', 'Location'] + Output tables: ['ProductionActivity'] + """ + product_aggregation_column = 'productGroup' + product_aggregation_column = 'productName' + + df = (self.dm.production_activities + .join(self.dm.products[['productGroup', 'productCountry']])) + + df = df.reset_index() + + df.productCountry = df.productCountry.fillna('') + df['location_product'] = df.productCountry + " - " + df.productName + + color_discrete_map = self.gen_color_col(df['location_product']) + + if query is not None: + df = df.query(query) + + df = (df + .reset_index() + .merge(self.dm.locations.reset_index(), on='locationName') + ).groupby(['timePeriodSeq', product_aggregation_column, 'lineName', 'location_product']).sum() + + active_line_name_category_orders = [l for l in self.line_name_category_orders if l in df.index.unique(level='lineName')] # Avoids empty spaces in Plotly chart + labels = {'timePeriodSeq': 'Time Period', 'xProdSol': 'Production', 'productName': 'Product Name', 'location_product': 'Product Location'} + category_orders = { + # 'lineName' : ['Abbott_Weesp_Line','Abbott_Olst_Granulate_Line', 'Abbott_Olst_Packaging_Line_5','Abbott_Olst_Packaging_Line_6'], + # 'lineName' : self.line_name_category_orders, + 'lineName' : active_line_name_category_orders, + # 'timePeriodSeq': df.reset_index().timePeriodSeq.sort_values().unique(), + 'timePeriodSeq': df.index.unique(level='timePeriodSeq').sort_values() + } + + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="xProdSol", color='location_product', + color_discrete_map= color_discrete_map, + title=title, labels=labels, + facet_row = 'lineName', + category_orders=category_orders, + height=800, + ) + + fig.update_layout(legend = + {'orientation': 'v', + 'x': 1.05, + } + ) + + fig.update_layout(margin = {'l': 85, 't':80}) + + fig.for_each_annotation(lambda a: a.update(x = a.x-1.04, textangle = 270)) + fig.for_each_annotation(lambda a: a.update(text=a.text.split("lineName=")[-1])) + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("_", " "))) + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("Olst", "Olst
"))) + + # get rid of duplicated X-axis labels + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.YAxis: + fig.layout[axis].title.text = '' + + fig.update_xaxes(type='category') + fig.update_layout(hovermode="closest",legend = {'orientation': 'v'}) # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + return fig + + def plotly_planned_production_activities_bars(self, query=None, title='Production'): + """Production activity over time, colored by productGroup. + Input tables: ['Product', 'Location'] + Output tables: ['ProductionActivity'] + """ + + product_aggregation_column = 'productName' + + df = (self.dm.planned_production_activity + .join(self.dm.products[['productGroup', 'productCountry']]) + # .sort_index() + ) + + + df = df.reset_index() + df['productCountry'] = np.where(pd.isnull(df.productCountry), '', df.productCountry) + df['location_product'] = df.productCountry + " - " + df.productName + + color_discrete_map = self.gen_color_col(df['location_product']) + + if query is not None: + df = df.query(query) + + df = (df + ).groupby(['timePeriodSeq', product_aggregation_column, 'lineName', 'productCountry', 'location_product']).sum() + + active_line_name_category_orders = [l for l in self.line_name_category_orders if l in df.index.unique(level='lineName')] # Avoids empty spaces in Plotly chart + labels = {'timePeriodSeq': 'Time Period', 'xProdSol': 'Production', 'productName': 'Product Name', + 'location_product': 'Product Location'} + + # df = (df.reset_index()) + + category_orders = { + # 'lineName' : ['Abbott_Weesp_Line','Abbott_Olst_Granulate_Line', 'Abbott_Olst_Packaging_Line_5','Abbott_Olst_Packaging_Line_6'], + # 'lineName' : self.line_name_category_orders, + 'lineName' : active_line_name_category_orders, + # 'timePeriodSeq': df.reset_index().timePeriodSeq.sort_values().unique() + # 'timePeriodSeq': df.timePeriodSeq.sort_values().unique() + 'timePeriodSeq': df.index.unique(level='timePeriodSeq').sort_values() + } + + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="quantity", color='location_product', + color_discrete_map=color_discrete_map, + title=title, labels=labels, + facet_row = 'lineName', + category_orders = category_orders, + height=800, + ) + + fig.update_layout(legend = + {'orientation': 'v', + 'x': 1.05, + } + ) + + fig.update_layout(margin = {'l': 90,'t':60}) + # fig.for_each_annotation(lambda a: a.update(x = a.x -1., y = a.y-0.15, textangle = 0, + # font = {'size':16} + # )) + + fig.for_each_annotation(lambda a: a.update(x = a.x-1.055, textangle = 270)) + fig.for_each_annotation(lambda a: a.update(text=a.text.split("lineName=")[-1])) + + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("_", " "))) + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("Olst", "Olst
"))) + + # get rid of duplicated X-axis labels + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.YAxis: + fig.layout[axis].title.text = '' + + fig.update_xaxes(type='category') + + fig.update_layout(hovermode="closest",legend = {'orientation': 'v'}) + + return fig + + def plotly_production_slack_bars(self, query=None, title='Production Slack'): + """Production activity slack over time, colored by productName. + Input tables: ['Product'] + Output tables: ['ProductionActivity'] + """ + product_aggregation_column = 'productName' + + df = (self.dm.production_activities + .join(self.dm.products[['productGroup', 'productCountry']])) + + df = df.reset_index() + df['productCountry'] = np.where(pd.isnull(df.productCountry), '', df.productCountry) + df['location_product'] = df.productCountry + " - " + df.productName + + color_discrete_map = self.gen_color_col(df['location_product']) + + if query is not None: + df = df.query(query) + + df = (df + .reset_index() + # .merge(self.dm.locations.reset_index(), on='locationName') + ).groupby(['timePeriodSeq', product_aggregation_column, 'lineName', 'productCountry', 'location_product']).sum() + + labels = {'timePeriodSeq': 'Time Period', 'xProdSol': 'Production', 'productName': 'Product Name', + 'location_product': 'Product Location'} + active_line_name_category_orders = [l for l in self.line_name_category_orders if l in df.index.unique(level='lineName')] # Avoids empty spaces in Plotly chart + + category_orders = { + # 'lineName' : ['Abbott_Weesp_Line','Abbott_Olst_Granulate_Line', + # 'Abbott_Olst_Packaging_Line_5','Abbott_Olst_Packaging_Line_6'], + # 'lineName' : self.line_name_category_orders, + 'lineName' : active_line_name_category_orders, + # 'timePeriodSeq': df.reset_index().timePeriodSeq.sort_values().unique() + 'timePeriodSeq': df.index.unique(level='timePeriodSeq').sort_values(), + } + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="xProdSlackSol", color='location_product', + color_discrete_map=color_discrete_map, + title=title, labels=labels, + facet_row = 'lineName', + category_orders=category_orders, + height=800, + ) + + fig.update_layout(legend = + {'orientation': 'v', + 'x': 1.05, + } + ) + + fig.update_layout(margin = {'l': 85, 't':60}) + + fig.for_each_annotation(lambda a: a.update(x = a.x-1.05, textangle = 270)) + fig.for_each_annotation(lambda a: a.update(text=a.text.split("lineName=")[-1])) + + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("_", " "))) + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("Olst", "Olst
"))) + + # get rid of duplicated X-axis labels + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.YAxis: + fig.layout[axis].title.text = '' + + fig.update_xaxes(type='category') + fig.update_layout(hovermode="closest",legend = {'orientation': 'v'}) + + return fig + + def plotly_production_excess_bars(self, query=None, title='Production Plan Difference', mode = None): + """Production activity excess (compared to plan) over time, colored by productName. + Default mode returns excess as a substraction, percentage returns as percentage + Input tables: ['Product', 'PlannedproductionActivity'] + Output tables: ['ProductionActivity'] + """ + product_aggregation_column = 'productName' + + planned_production = (self.dm.planned_production_activity + .reset_index() + # .astype({'planId': int}) + # .query("planId == 1") # HACK!!!! Need to filter on planId + # .reset_index() + .set_index(['productName','lineName','timePeriodSeq','recipeId'], verify_integrity = True) + ) + + df = (self.dm.production_activities + .join(self.dm.products[['productGroup', 'productCountry']]) + .join(planned_production, how = 'left') + .rename(columns={'quantity':'plannedProductionQuantity'}) + ) + df.plannedProductionQuantity = df.plannedProductionQuantity.fillna(0) + + if mode == 'percentage': + df['planExcessQuantity'] = ((df.xProdSol - df.plannedProductionQuantity) / df.plannedProductionQuantity) + + else: + df['planExcessQuantity'] = df.xProdSol - df.plannedProductionQuantity + + df = df.reset_index() + df['productCountry'] = np.where(pd.isnull(df.productCountry), '', df.productCountry) + df['location_product'] = df.productCountry + " - " + df.productName + + color_discrete_map = self.gen_color_col(df['location_product']) + + if query is not None: + df = df.query(query) + + df = (df + .reset_index() + ).groupby(['timePeriodSeq', product_aggregation_column, 'lineName', 'productCountry', 'location_product']).sum() + + labels = {'timePeriodSeq': 'Time Period', 'xProdSol': 'Production', 'productName': 'Product Name', + 'location_product': 'Product Location', 'planExcessQuantity':'Plan Difference'} + active_line_name_category_orders = [l for l in self.line_name_category_orders if l in df.index.unique(level='lineName')] # Avoids empty spaces in Plotly chart + + # df = df.reset_index() + + category_orders = { + # 'lineName' : ['Abbott_Weesp_Line','Abbott_Olst_Granulate_Line', + # 'Abbott_Olst_Packaging_Line_5','Abbott_Olst_Packaging_Line_6'], + # 'lineName' : self.line_name_category_orders, + 'lineName' : active_line_name_category_orders, + # 'timePeriodSeq': [df.timePeriodSeq.sort_values().unique()] + 'timePeriodSeq': df.index.unique(level='timePeriodSeq').sort_values() + } + + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="planExcessQuantity", color='location_product', + color_discrete_map=color_discrete_map, + title=title, labels=labels, + facet_row = 'lineName', + category_orders=category_orders, + height=800, + ) + + fig.update_layout(legend = + {'orientation': 'v', + 'x': 1.05, + } + ) + + if mode is not None: + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.YAxis: + fig.layout[axis].title.text = '' + fig.layout[axis].tickformat = '%' + + fig.update_layout(margin = {'l': 85}) + + fig.for_each_annotation(lambda a: a.update(x = a.x-1.05, textangle = 270)) + fig.for_each_annotation(lambda a: a.update(text=a.text.split("lineName=")[-1])) + + fig.for_each_annotation(lambda a: a.update(text=a.text.replace("_", " "))) + fig.for_each_annotation(lambda a: 
a.update(text=a.text.replace("Olst", "Olst<br>
"))) + + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + + # get rid of duplicated X-axis labels + for axis in fig.layout: + if type(fig.layout[axis]) == go.layout.YAxis: + fig.layout[axis].title.text = '' + + fig.update_xaxes(type='category') + fig.update_layout(hovermode="closest",legend = {'orientation': 'v'}, + margin= {'l':85,'t':60}) + + return fig + + def plotly_inventory_flow_sankey_test(self, include_wip=True): + """Sankey diagram of transportation activities. + See https://stackoverflow.com/questions/50486767/plotly-how-to-draw-a-sankey-diagram-from-a-dataframe + """ + + aggregation_column = 'productName' + + # Collect inventories (location-product): + + # for these groupby productGroup instead of productName + df1 = self.dm.plant_inventories[[]].groupby(['locationName',aggregation_column]).sum().copy() + df1['type'] = 'plant' + df2 = self.dm.warehouse_inventories[[]].groupby(['locationName',aggregation_column]).sum().copy() + df2['type'] = 'warehouse' + df3 = self.dm.demand_inventories[[]].groupby(['locationName',aggregation_column]).sum().copy() + df3['type'] = 'demand' + df4 = pd.DataFrame([{'locationName':'External',aggregation_column:'None', 'type':'external'}]).set_index(['locationName',aggregation_column]) + df5 = self.dm.WIP[[]].groupby(['locationName',aggregation_column]).sum().copy() + df5 = df5.reset_index() + df5['locationName'] = df5.locationName + "_wip" + df5 = df5.set_index(['locationName',aggregation_column]) + df5['type'] = 'wip' + df6 = self.dm.plant_inventories[[]].groupby(['locationName']).sum().copy() + df6[aggregation_column] = 'None' + df6 = df6.reset_index().set_index(['locationName',aggregation_column]) + df6['type'] = 'source' + product_locations = pd.concat([df5, df4, df1, df2, df3, df6]) # should be same dataframes with same keys + + product_locations = product_locations.reset_index() + product_locations = product_locations.merge(self.dm.products[['productGroup']], on = 'productName') + + # Create locationName vs id + inventory_labels_df = (product_locations.reset_index() + .reset_index().rename(columns={'index': 'id'}) + ) + inventory_labels_df['label'] = inventory_labels_df.locationName + " - " +inventory_labels_df['productGroup'] + + #Collect inventory flows - transportation + df1 = (self.dm.transportation_activities[['xTransportationSol']].join(self.dm.products[['productGroup']]) + .query("xTransportationSol > 0") + .groupby(['originLocationName', 'destinationLocationName','shippingMode','productGroup']).sum() + .rename(columns={'xTransportationSol':'quantity'}) + ) + + df1 = df1.reset_index() + + df1 = (df1.merge(inventory_labels_df[['locationName','productGroup','id']], left_on=['originLocationName','productGroup'], right_on=['locationName','productGroup']) + .rename(columns={'id': 'source'}) + .drop(columns=['locationName']) + ) + + df1 = (df1.merge(inventory_labels_df[['locationName','productGroup','id']], left_on=['destinationLocationName','productGroup'], right_on=['locationName','productGroup']) + .rename(columns={'id': 'target'}) + .drop(columns=['locationName']) + ) + df1['label'] = df1.shippingMode + " - " + df1['productGroup'] + " from " + df1.originLocationName + " to " + df1.destinationLocationName + df1 = df1.drop(columns=['originLocationName','destinationLocationName','shippingMode']) + df1['color'] = 'rosybrown' + + aggregation_column = 'productGroup' + #Collect inventory flows - Production + df2 = 
(self.dm.production_activities[['xProdSol']].join(self.dm.products[['productGroup']]) + .join(self.dm.bom_items[['quantity']].rename(columns={'quantity':'component_bom_quantity'}), how='left') + .join(self.dm.lines[['plantName']]) + .join(self.dm.plants[['locationName']], on='plantName') + .query("xProdSol > 0") + .reset_index() + ) + + df2.componentName.fillna('None',inplace=True) # For any product without components + df2['component_quantity'] = df2.xProdSol * df2.component_bom_quantity + df2 = (df2 + .drop(columns=['component_bom_quantity','recipeId','timePeriodSeq']) + .groupby(['componentName', aggregation_column,'lineName','plantName','locationName']).sum() + .rename(columns={'xProdSol':'quantity'}) + ) + df2 = df2.reset_index() + + df2 = (df2.merge(inventory_labels_df[['locationName',aggregation_column,'id','type']], left_on=['locationName',aggregation_column], right_on=['locationName',aggregation_column]) + .rename(columns={'id': 'target'}) + ) + df2 = (df2.merge(inventory_labels_df[['locationName',aggregation_column,'id','type']], left_on=['locationName','componentName'], right_on=['locationName',aggregation_column], suffixes=[None,'_y']) + .rename(columns={'id': 'source'}) + .drop(columns=[aggregation_column+'_y']) + ) + df2['label'] = df2.type + " - " + df2.componentName + " to " + df2[aggregation_column] + df2 = df2[[aggregation_column, 'quantity', 'source', 'target', 'label']] + + df2['color'] = 'olive' + + # Collect inventory flows - WIP + df3 = (self.dm.WIP[['wipQuantity']].join(self.dm.products[['productGroup']]) + .query("wipQuantity > 0") + .rename(columns={'wipQuantity':'quantity'}) + ) + df3 = df3.reset_index() + df3['locationNameWip'] = df3.locationName + '_wip' + # display(df3.head()) + df3 = (df3.merge(inventory_labels_df[['locationName',aggregation_column,'id']], left_on=['locationName',aggregation_column], right_on=['locationName',aggregation_column]) + .rename(columns={'id': 'target'}) + # .drop(columns=['locationName']) + ) + df3 = (df3.merge(inventory_labels_df[['locationName',aggregation_column,'id']], left_on=['locationNameWip',aggregation_column], right_on=['locationName',aggregation_column], suffixes=[None,'_y']) + .rename(columns={'id': 'source'}) + .drop(columns=['locationName_y']) + ) + # display(df3.head()) + df3['label'] = "wip - " + df3[aggregation_column] + " to " + df3.locationName + # df1 = df1.drop(columns=['locationNameWip','locationName','shippingMode']) + # display(df3.head()) + df3['color'] = 'lightsalmon' + + + if include_wip: + df = pd.concat([df1, df2, df3]) + else: + df = pd.concat([df1, df2]) + + # df = df.merge(self.dm.products[['productGroup']], on = 'productName') + + # Set pop-up text + + + # df['color'] = 'aquamarine' + fig = go.Figure(data=[go.Sankey( + # valueformat = ".0f", + # valuesuffix = "TWh", + # Define nodes + node=dict( + pad=15, + thickness=15, + line=dict(color="black", width=0.5), + label=inventory_labels_df.label.array, + ), + # Add links + link=dict( + source=df.source.array, + target=df.target.array, + value=df.quantity.array, + label=df.label.array, + color = df.color.array, + ))]) + + fig.update_layout(title_text="", + font_size=10, + height=1000) + return fig + + def plotly_inventory_flow_sankey(self, include_wip=True): + """Sankey diagram of transportation activities. 
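+ A minimal usage sketch (hedged: `pm` is a hypothetical instance of this
+ PlotlyManager subclass whose data manager `self.dm` is fully loaded):
+
+ fig = pm.plotly_inventory_flow_sankey(include_wip=False)
+ fig.show()
+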
+ See https://stackoverflow.com/questions/50486767/plotly-how-to-draw-a-sankey-diagram-from-a-dataframe + """ + # Collect inventories (location-product): + df1 = self.dm.plant_inventories[[]].groupby(['locationName','productName']).sum().copy() + df1['type'] = 'plant' + df2 = self.dm.warehouse_inventories[[]].groupby(['locationName','productName']).sum().copy() + df2['type'] = 'warehouse' + df3 = self.dm.demand_inventories[[]].groupby(['locationName','productName']).sum().copy() + df3['type'] = 'demand' + df4 = pd.DataFrame([{'locationName':'External','productName':'None', 'type':'external'}]).set_index(['locationName','productName']) + df5 = self.dm.WIP[[]].groupby(['locationName','productName']).sum().copy() + df5 = df5.reset_index() + df5['locationName'] = df5.locationName + "_wip" + df5 = df5.set_index(['locationName','productName']) + df5['type'] = 'wip' + df6 = self.dm.plant_inventories[[]].groupby(['locationName']).sum().copy() + df6['productName'] = 'None' + df6 = df6.reset_index().set_index(['locationName','productName']) + df6['type'] = 'source' + product_locations = pd.concat([df5, df4, df1, df2, df3, df6]) + # display(product_locations.head()) + # Create locationName vs id + inventory_labels_df = (product_locations.reset_index() + .reset_index().rename(columns={'index': 'id'}) + ) + inventory_labels_df['label'] = inventory_labels_df.locationName + " - " +inventory_labels_df.productName + # display(inventory_labels_df.head()) + + #Collect inventory flows - transportation + df1 = (self.dm.transportation_activities[['xTransportationSol']] + .query("xTransportationSol > 0") + .groupby(['originLocationName', 'destinationLocationName','shippingMode','productName']).sum() + .rename(columns={'xTransportationSol':'quantity'}) + ) + df1 = df1.reset_index() + # display(df1.head()) + df1 = (df1.merge(inventory_labels_df[['locationName','productName','id']], left_on=['originLocationName','productName'], right_on=['locationName','productName']) + .rename(columns={'id': 'source'}) + .drop(columns=['locationName']) + ) + # display(df1.head()) + df1 = (df1.merge(inventory_labels_df[['locationName','productName','id']], left_on=['destinationLocationName','productName'], right_on=['locationName','productName']) + .rename(columns={'id': 'target'}) + .drop(columns=['locationName']) + ) + df1['label'] = df1.shippingMode + " - " + df1.productName + " from " + df1.originLocationName + " to " + df1.destinationLocationName + df1 = df1.drop(columns=['originLocationName','destinationLocationName','shippingMode']) + df1['color'] = 'rosybrown' + # display(df1.head()) + + #Collect inventory flows - Production + df2 = (self.dm.production_activities[['xProdSol']] + .join(self.dm.bom_items[['quantity']].rename(columns={'quantity':'component_bom_quantity'}), how='left') + .join(self.dm.lines[['plantName']]) + .join(self.dm.plants[['locationName']], on='plantName') + .query("xProdSol > 0") + .reset_index() + # .groupby(['locationName', 'plantName','lineName', 'productName']).sum() + ) + df2.componentName.fillna('None',inplace=True) # For any product without components + df2['component_quantity'] = df2.xProdSol * df2.component_bom_quantity + df2 = (df2 + .drop(columns=['component_bom_quantity','recipeId','timePeriodSeq']) + .groupby(['componentName', 'productName','lineName','plantName','locationName']).sum() + .rename(columns={'xProdSol':'quantity'}) + ) + df2 = df2.reset_index() + # display(df2.head()) + df2 = (df2.merge(inventory_labels_df[['locationName','productName','id','type']], 
left_on=['locationName','productName'], right_on=['locationName','productName']) + .rename(columns={'id': 'target'}) + ) + df2 = (df2.merge(inventory_labels_df[['locationName','productName','id','type']], left_on=['locationName','componentName'], right_on=['locationName','productName'], suffixes=[None,'_y']) + .rename(columns={'id': 'source'}) + .drop(columns=['productName_y']) + ) + df2['label'] = df2.type + " - " + df2.componentName + " to " + df2.productName + df2 = df2[['productName', 'quantity', 'source', 'target', 'label']] + + df2['color'] = 'olive' + # display(df2.head()) + + # Collect inventory flows - WIP + df3 = (self.dm.WIP[['wipQuantity']] + .query("wipQuantity > 0") + .rename(columns={'wipQuantity':'quantity'}) + ) + df3 = df3.reset_index() + df3['locationNameWip'] = df3.locationName + '_wip' + # display(df3.head()) + df3 = (df3.merge(inventory_labels_df[['locationName','productName','id']], left_on=['locationName','productName'], right_on=['locationName','productName']) + .rename(columns={'id': 'target'}) + # .drop(columns=['locationName']) + ) + df3 = (df3.merge(inventory_labels_df[['locationName','productName','id']], left_on=['locationNameWip','productName'], right_on=['locationName','productName'], suffixes=[None,'_y']) + .rename(columns={'id': 'source'}) + .drop(columns=['locationName_y']) + ) + # display(df3.head()) + df3['label'] = "wip - " + df3.productName + " to " + df3.locationName + # df1 = df1.drop(columns=['locationNameWip','locationName','shippingMode']) + # display(df3.head()) + df3['color'] = 'lightsalmon' + + if include_wip: + df = pd.concat([df1, df2, df3]) + else: + df = pd.concat([df1, df2]) + + # Set pop-up text + + # display(df.head()) + + # df['color'] = 'aquamarine' + fig = go.Figure(data=[go.Sankey( + # valueformat = ".0f", + # valuesuffix = "TWh", + # Define nodes + node=dict( + pad=15, + thickness=15, + line=dict(color="black", width=0.5), + label=inventory_labels_df.label.array, + ), + # Add links + link=dict( + source=df.source.array, + target=df.target.array, + value=df.quantity.array, + label=df.label.array, + color = df.color.array, + ))]) + + fig.update_layout(title_text="", + font_size=10, + height=1000, + margin={'l':40, 'r':40, 't':40}) + return fig + + + + def plotly_line_product_group_capacity_heatmap(self): + """Heatmap of capacity as line vs product. Good insight on line specialization/recipe-properties. + Input tables: ['RecipeProperties', 'Line', 'Product'] + Output tables: [] + """ + df = (self.dm.recipe_properties[['capacity']] + .join(self.dm.lines) + .join(self.dm.products[['productGroup']]) + # .join(self.dm.plants.rename(columns={'locationDescr':'plantDescr'}), on='plantName') + # .join(self.dm.locations, on='locationName') + ) # .groupby(['lineName','productType']).max() + df = df.reset_index() + # df = df.pivot_table(values='capacity', index=['lineDescr'], columns=['productType'], aggfunc=np.max) + df = df.pivot_table(values='capacity', index=['lineName'], columns=['productGroup'], aggfunc=np.max) + + labels = {'lineName': 'Line', 'productGroup': 'Product Group', 'productName': 'Product Name'} + labels = dict(x="Product Group", y="Line", color="Capacity") + fig = px.imshow(df, labels=labels, width=1000, + color_continuous_scale='YlOrRd', + # y = ["Abbott Olst
Granulate Line", "Abbott Olst
Packaging Line 5", + # "Abbott Olst
Packaging Line 6", "Abbott
Weesp Line"], + y = ["Granulate Line", "Packaging Line 1", "Packaging Line 2", "API Line"], + x = ["API", "Granulate", "Tablet", "Package"], + ) + + fig.update_layout( + title={ + 'text': "Maximum Line Capacity by Product Type", + # 'y': 0.92, + 'x': 0.5, + 'xanchor': 'center', + 'yanchor': 'top'}) + + return fig + + + + @plotly_figure_exception_handler + def plotly_transportation_bar(self, query = None, title = 'Transportation Activity'): + """ + """ + df = self.dm.transportation_activities[['xTransportationSol']].query("xTransportationSol > 0")\ + .join(self.dm.products[['productGroup', 'productCountry']])\ + .groupby(['timePeriodSeq', 'originLocationName', 'destinationLocationName','shippingMode','productName']).\ + sum().rename(columns={'xTransportationSol':'quantity'}) + + if query is not None: + df = df.query(query).copy() + # title = "Departing From: " + query.split("originLocationName == ")[-1].replace("_", " ").replace("'","") + else: + pass + # title = "Transportation Activity" + + df = df.join(self.dm.products[['productGroup', 'productCountry']]) + + df = df.reset_index() + + df.productCountry = df.productCountry.fillna("") + df['location_product'] = df['productCountry'] + " - " + df['productName'] + df['location_product'] = df['location_product'].fillna('API') + + color_discrete_map = self.gen_color_col(df['location_product']) + + labels = {'location_product': 'Product Location', 'timePeriodSeq': 'Time Period', "quantity": 'Quantity'} + + if len(df.shippingMode.unique()) < 2: + fct = None + else: + fct = "shippingMode" + + category_orders = {'shippingMode': ['Air', 'Sea', 'Truck', 'Rail']} + active_shipping_mode_category_orders = [sm for sm in ['Air', 'Sea', 'Truck', 'Rail'] if sm in df.shippingMode.unique()] + + fig = px.bar(data_frame = df, x = "timePeriodSeq", y = "quantity", color = "location_product", + labels = labels, + facet_col = fct, + # category_orders = category_orders, + category_orders = {'shippingMode': active_shipping_mode_category_orders}, + color_discrete_map=color_discrete_map) + + fig.update_layout(title = title, legend = {'orientation': 'v', 'x': 1.05}, + margin = {'l':80, 't':80}) + + if len(df.shippingMode.unique()) > 1: + fig.for_each_annotation(lambda a: a.update(text=a.text.split("shippingMode=")[-1].capitalize())) + + fig.update_layout(hovermode="closest") + + return fig + + def demand_choropleth_map(self): + """""" + df = (self.dm.demand + .join(self.dm.products[['productGroup', 'productCountry']])) + + + # Set location_product name + df = df.reset_index() + df['location_product'] = df.locationName + " - " + df.productName + + df = (df + .groupby(['timePeriodSeq', 'location_product', 'productCountry']).sum() + .sort_values('quantity', ascending=False)) + + # locs = pd.read_csv('/workspace/geocode_abbott_locations_fixed.csv') + + # print(self.dm.locations.head()) + # print(locs.head()) + locs = self.dm.locations.reset_index() + + df = df.reset_index() + df = df.merge(locs[["locationName", "latitude", "longitude", "countryIso"]], left_on = "productCountry", right_on = "locationName") + + df_gby = df.groupby("countryIso")['quantity'].mean().reset_index() + + fig = px.choropleth(df_gby, + locations = "countryIso", + color = "quantity", + width = 1200, + title = "Demand Choropleth Map") + + fig.update_layout(paper_bgcolor='#edf3f4', + geo=dict(bgcolor= '#edf3f4', showframe = False), + margin = {'b': 0, 't':50}, + title = {'y': 0.95}, + coloraxis_colorbar=dict(title="Quantity") + ) + return fig + + def unfulfilled_demand_choropleth_map(self, animation_col 
= None): + """ + """ + df0 = (self.dm.transportation_activities[['xTransportationSol']] + .groupby(['productName', 'destinationLocationName', 'timePeriodSeq']).sum() + .rename_axis(index={'destinationLocationName': 'locationName'}) + .rename(columns={'xTransportationSol':'Transportation'}) + ) + + product_aggregation_column = 'productName' + df = (self.dm.demand_inventories[['quantity','xFulfilledDemandSol','xUnfulfilledDemandSol','xBacklogSol','xBacklogResupplySol','xInvSol']] + .join(self.dm.products[['productGroup', 'productCountry']]) + # .join(self.dm.locations) + .join(df0, how='left') + ).groupby(['timePeriodSeq', 'productCountry', product_aggregation_column]).sum() + + df = df.reset_index() + + # locs = pd.read_csv('/workspace/geocode_abbott_locations_fixed.csv') + locs = self.dm.locations.reset_index() + + df = df.merge(locs[["locationName", "latitude", "longitude", "countryIso"]], left_on = "productCountry", right_on = "locationName") + + if animation_col is not None: + df_gby = df.groupby(["countryIso", animation_col])['xUnfulfilledDemandSol'].mean().reset_index() + title = "Animated Unfulfilled Demand Choropleth Map" + width = 2000 + else: + df_gby = df.groupby("countryIso")['xUnfulfilledDemandSol'].mean().reset_index() + title = "Unfulfilled Demand Choropleth Map" + width = 1000 + + fig = px.choropleth(df_gby, + locations = "countryIso", + color = "xUnfulfilledDemandSol", + animation_frame=animation_col, + animation_group = animation_col, +# facet_col = "productGroup", + width = width, + # height = 1000, + title = title) + + fig.update_layout(legend = {'title': 'Quantity'}, + paper_bgcolor='#edf3f4', + geo=dict(bgcolor= '#edf3f4', showframe = False), + margin = {'b': 0, 't':50}, + title = {'y': 0.95}, + width = width, + coloraxis_colorbar=dict(title="Quantity") + ) + + return fig + + def line_map(self): + """ + """ + import math + import random + + aggregation_column = 'productName' + #Collect inventory flows - transportation + df1 = (self.dm.transportation_activities[['xTransportationSol']].join(self.dm.products[['productGroup']]) + .query("xTransportationSol > 0") + .groupby(['originLocationName', 'destinationLocationName','shippingMode','productGroup']).sum() + .rename(columns={'xTransportationSol':'quantity'}) + ) + df1 = df1.reset_index() + + # locs = pd.read_csv('/workspace/geocode_abbott_locations_fixed.csv') + locs = self.dm.locations.reset_index() + + map_locs = df1.drop_duplicates(['originLocationName', 'destinationLocationName', 'shippingMode', 'productGroup', 'quantity']) + + df6 = map_locs.merge(locs[["locationName", "latitude", "longitude", "countryIso"]], left_on = "originLocationName", right_on = "locationName") + df6 = df6.rename({'latitude': 'origin_lat', 'longitude':'origin_lon', 'countryIso':'origin_iso3'}, axis = 1) + df6 = df6.merge(locs[["locationName", "latitude", "longitude", "countryIso"]], left_on = "destinationLocationName", right_on = "locationName") + df6 = df6.rename({'latitude': 'destination_lat', 'longitude':'destination_lon', 'countryIso':'destination_iso3'}, axis = 1) + + fig = go.Figure() + + fig = fig.add_trace(go.Scattergeo( + # locationmode = 'USA-states', + lon = df6['origin_lon'], + lat = df6['origin_lat'], + hoverinfo = 'text', + text = df6['originLocationName'], + name = "Supply Chain Origin", + # showlegend = False, + mode = 'markers', + marker = dict( + size = 8, + color = 'rgb(255, 0, 0)', + ))) + + df6 = df6.reset_index().copy() + # add some jitter to prevent overlays + random.seed(42) + df6['destination_lat'] = 
df6['destination_lat'].apply(lambda x : x + random.uniform(-0.75, 0.75)) + df6['destination_lon'] = df6['destination_lon'].apply(lambda x : x + random.uniform(-0.75, 0.75)) + + fig = fig.add_trace(go.Scattergeo( + # locationmode = 'USA-states', + lon = df6['destination_lon'], + lat = df6['destination_lat'], + hoverinfo = 'text', + text = df6['destinationLocationName'], + name = "Supply Chain Destination", + mode = 'markers', + marker = dict( + size = df6.quantity.apply(math.log).clip(lower = 2)*2, + color = "blue", + ))) + + color_dict = {'sea': 'darkblue', 'truck': 'darkgreen', 'air': 'darkred', 'Sea': 'darkblue', 'Truck': 'darkgreen', 'Air':'darkred' + } + + df6['showlegend'] = False + + df6['linetype'] = "solid" + + ix = df6.groupby('shippingMode').first()['index'].values + for i in ix: + df6['showlegend'].iloc[i] = True + + for i in range(len(df6)): + fig.add_trace( + go.Scattergeo( + lon = [df6['origin_lon'][i], df6['destination_lon'][i]], + lat = [df6['origin_lat'][i], df6['destination_lat'][i]], + mode = 'lines', + name = df6['shippingMode'][i], + showlegend = bool(df6['showlegend'][i]), + # showlegend = False, + line_dash= df6['linetype'][i], + line = dict(width = 1,color = color_dict[df6['shippingMode'][i]]), + # opacity = float(df_flight_paths['cnt'][i]) / float(df_flight_paths['cnt'].max()), + ) + ) + # adding a choropleth on top + df = (self.dm.demand + .join(self.dm.products[['productGroup', 'productCountry']]) + ) + + # Set location_product name + df = df.reset_index() + df['location_product'] = df.locationName + " - " + df.productName + + df = (df + .groupby(['timePeriodSeq', 'location_product', 'productCountry']).sum() + .sort_values('quantity', ascending=False)) + + df = df.reset_index() + + df = df.merge(locs[["locationName", "latitude", "longitude", "countryIso"]], left_on = "productCountry", right_on = "locationName") + + df = df.sort_values('timePeriodSeq') + + df_gby = df.groupby("countryIso")['quantity'].mean().reset_index() + + fig = fig.add_trace( + go.Choropleth( + locations = df_gby['countryIso'], + z = df_gby['quantity'], + colorscale = "Reds", + colorbar_title = "Quantity" + ) + ) + + fig.update_layout(coloraxis_colorbar_x=-1) + + fig.update_layout( + width = 1000, + # height = 1000, + legend = { + 'title': 'Transportation Type', + 'orientation': 'v', + 'x': 0.85, + 'y': 0.9, + }, + title = {'text': "Supply Chain Overview", 'y': 0.95}, + margin = { + 't': 50, + 'b': 0, + }, + paper_bgcolor='#edf3f4', + geo=dict(bgcolor= '#edf3f4', showframe = False), + ) + + + return fig + + def percent_unfullfilleddemand(self): + # product_aggregation_column = 'productGroup' # potentially for further unpacking + df = (self.dm.demand_inventories[['quantity','xUnfulfilledDemandSol']]) + # .join(self.dm.products[['productGroup']]) + # ).groupby(['timePeriodSeq']).sum() + + unfulfilled_demand = df.xUnfulfilledDemandSol.groupby(['timePeriodSeq']).sum() + num_tp = len(self.dm.demand.index.unique(level='timePeriodSeq')) + average_monthly_demand = df.quantity.sum()/num_tp + + final_df = (unfulfilled_demand/average_monthly_demand).replace([np.inf, -np.inf], np.nan).fillna(0).round(4)*100 + + # final_df = final_df.groupby('timePeriodSeq').mean() + + return final_df + + def percent_backlog(self): + # product_aggregation_column = 'productGroup' + df = (self.dm.demand_inventories[['quantity','xBacklogSol']]) + # .join(self.dm.products[['productGroup']]) + # ).groupby(['timePeriodSeq']).sum() + + backlog = df.xBacklogSol.groupby('timePeriodSeq').sum() + + num_tp = 
len(self.dm.demand.index.unique(level='timePeriodSeq')) + + average_monthly_demand = df.quantity.sum()/num_tp + + # final_df = (df.xBacklogSol/df.quantity).replace([np.inf, -np.inf], np.nan).fillna(0).round(4)*100 + + final_df = (backlog / average_monthly_demand).replace([np.inf, -np.inf], np.nan).fillna(0).round(4)*100 + + # final_df = final_df.groupby('timePeriodSeq').mean() + + return final_df + + def dos_inv(self): + # product_aggregation_column = 'productGroup' + + # print(self.dm.products) + + # can feed it plant or warehouse inventories + + df_demand = (self.dm.demand_inventories[['quantity','xFulfilledDemandSol','xUnfulfilledDemandSol','xBacklogSol','xBacklogResupplySol','xInvSol']]) + # .join(self.dm.products[['productGroup']]) + # ).groupby(['timePeriodSeq']).sum() + + df_inv = (self.dm.demand_inventories[['quantity','xFulfilledDemandSol','xUnfulfilledDemandSol','xBacklogSol','xBacklogResupplySol','xInvSol']]) + # .join(self.dm.products[['productGroup']]) + # ).groupby(['timePeriodSeq']).sum() + + + final_df = (df_demand.groupby(["productName", "timePeriodSeq"]).xInvSol.sum()/df_inv.groupby(["productName", "timePeriodSeq"]).\ + quantity.sum()).replace([np.inf, -np.inf], np.nan).fillna(0).round(4) + + final_df = pd.Series(final_df.groupby('timePeriodSeq').mean().values) + + + t = self.get_demand_location_dos(30).groupby(['timePeriodSeq']).agg({'dosQuantity': 'sum'}) + t = pd.Series(t.reset_index()['dosQuantity']) + + final_dos = (final_df/t) + + return final_dos + + def average_inv(self): + # product_aggregation_column = 'productGroup' + # num_timeperiods = self.dm.active_timeperiods.max() + num_timeperiods = 30 + + df_inv = (self.dm.demand_inventories[['xInvSol']] + # .join(self.dm.products[['productGroup']]) + ).groupby(['timePeriodSeq']).sum() + + final_df = (df_inv.xInvSol/num_timeperiods).round(4) + # final_df = final_df.groupby('timePeriodSeq').mean() + + return final_df + + def get_demand_location_dos(self, dos:int): + """Compute the quantity of product at the end of a time-period that represents the + Days-Of-Supply computed using the actual demand in the following time-periods. + The quantity can be used in a days-of-supply inventory constraint or objective. + For the last time-periods, assume demand remains constant with the value of the last time-period. + + Args: + dos (int): Days-Of-Supply. Number of days. + + Note: use dm.demand_inventories. Is has already expanded to all time-periods. + """ + # num_tps = 24 # Number of time-periods + + # num_days_tp = 30 # Number of days per time-period. To keep it simple, use 30 per month. HARD-CODED for now. 
TODO: put in parameter, or add as column in TimePeriods + num_days_tp = len(self.dm.demand.index.unique(level='timePeriodSeq')) * 30 + # print(self.dm.demand_inventories.head()) + df = (self.dm.demand_inventories[['quantity']] + .sort_index() # sort index so the shift will work right + ).fillna(0) + + num_tps = len(df.index.unique(level='timePeriodSeq'))-1 + # df['numDays'] = num_days_tp + df['demandPerDay'] = df.quantity / num_days_tp #df.numDays + df['nextDemandPerDay'] = df.demandPerDay # Note we are shifting the nextDemandPerDay, so initialize once + df['dosQuantity'] = 0 # We are incrementing the dosQuantity, so initialize + + remaining_dos = dos # Remaining DOS in each iteration, initialize with all DOS + shift = 0 # Only for debuging + + # Iterate over the next time-periods until it covers all requested dos days + # Sum the DOS quantity + # Assume demand is constant throughout the time-period + while remaining_dos > 0: + # print(remaining_dos) + shift = shift + 1 + # print(shift) + next_dos = min(remaining_dos, num_days_tp) + # print(f"Shift = {shift}, remaining_dos = {remaining_dos}, next_dos={next_dos}") + df['nextDemandPerDay'] = df.groupby(['locationName','productName'])['nextDemandPerDay'].shift(-1) #, fill_value=0) + # print(df.head()) + # print(num_tps) + # print(df.loc[pd.IndexSlice[:,:,num_tps],'demandPerDay']) + df.loc[pd.IndexSlice[:,:,num_tps],'nextDemandPerDay'] = df.loc[pd.IndexSlice[:,:,num_tps],'demandPerDay'] # Fill gap from the shift with last demand + # print("test") + df['dosQuantity'] = df.dosQuantity + df.nextDemandPerDay * next_dos + + remaining_dos = remaining_dos - next_dos + # print("test") + # display(df.query("locationName=='NAMIBIA'").head(24)) + df = df.drop(columns=['demandPerDay', 'nextDemandPerDay']) + + # print(df) + return df + + def kpi_heatmap(self): + ''' + ''' + + cols = [self.percent_unfullfilleddemand(), self.percent_backlog(), self.dos_kpi(as_time = True), + self.calc_air_pct(as_time = True), self.utilization_kpi(as_time = True)] + + final_df = pd.DataFrame(data= cols).\ + rename({'xUnfulfilledDemandSol': 'unfulfilled_demand', 'xBacklogSol': 'backlog', 'xInvSol': 'dos_inv', + 'Unnamed 0': 'air_sea_ratio', 'line_capacity_utilization': 'utilization'}, + axis = 0) + + heatmap_df = final_df.copy() + + # make a green zone around 30 days and orange if between 10-20 and 40-50, then red between 0-10 and 50-60 + heatmap_df.loc['dos_inv'] = np.where((heatmap_df.loc['dos_inv'] <= 10) | (heatmap_df.loc['dos_inv'] >= 60), 2, + np.where((heatmap_df.loc['dos_inv'] >= 40) & (heatmap_df.loc['dos_inv'] < 60), 1, + np.where((heatmap_df.loc['dos_inv'] >= 20) & (heatmap_df.loc['dos_inv'] < 40), 0, np.nan))) + + + heatmap_df.loc['unfulfilled_demand'] = np.where(heatmap_df.loc['unfulfilled_demand'] > 5, 2, + np.where(heatmap_df.loc['unfulfilled_demand'] < 2, 0, 1)) + + heatmap_df.loc['backlog'] = np.where(heatmap_df.loc['backlog'] > 10, 2, + np.where(heatmap_df.loc['backlog'] < 5, 0, 1)) + + heatmap_df.loc['air_sea_ratio'] = np.where(heatmap_df.loc['air_sea_ratio'] > 50, 2, + np.where(heatmap_df.loc['air_sea_ratio'] < 20, 0, 1)) + + heatmap_df.loc['utilization'] = np.where(heatmap_df.loc['utilization'] > 95, 2, + np.where(heatmap_df.loc['utilization'] < 85, 0, 1)) + + # final_df = final_df.apply(lambda x:(x - x.min())/(x.max() - x.min()), axis = 0) + + fig = px.imshow(heatmap_df, + color_continuous_scale =["green", "orange", "red"], + y = ["Unfulfilled Demand %", "Backlog %", "Inventory
Days of Supply", "Air Shipping %", "Utilization %"] + ) + # customdata allows to add an "invisible" dataset that is not being plotted but whose values can be used for reference + fig.update_traces(customdata= final_df, + hovertemplate = + "%{y}: %{customdata: .3f}"+ + "
Time Period %{x}"+ + '') + + fig.update(layout_coloraxis_showscale=False) + fig.update_layout(margin = {'b':40, 'l':140, 'r':10, 't':20}) # hide colorbar + + return fig + + def make_gauge(self, value: float, title: str, orange_threshold: float, red_threshold: float, max_val: float): + """ + """ + steps = [ + {'range': [0, orange_threshold], 'color': 'green'}, + {'range': [orange_threshold, red_threshold], 'color': 'orange'}, + {'range': [red_threshold, max_val], 'color': 'red'}, + ] + + fig = go.Figure(go.Indicator( + mode = "gauge+number", + value = value, + domain = {'x': [0, 1], 'y': [0, .75]}, + title = {'text': title, 'font': {'color': 'black', 'size': 18}}, + gauge = {'axis': {'range': [None, max_val], 'tickfont': {'color': 'black'}}, + 'threshold' : {'line': {'color': "darkred", 'width': 4}, 'thickness': 0.75, 'value': red_threshold}, + 'steps': steps, + 'bar': {'color': "darkblue"},}, + ) + ) + + fig.update_layout(font = {'color': 'green' if value < orange_threshold else 'orange' if value > orange_threshold and value < red_threshold else 'red', 'family': "Arial"}, + margin={'t':10,'b':30}, + ) + + return fig + + def make_gauge_dos(self, value: float, title: str, max_val: float, type = None): + ''' Standalone function for the DOS gauge + ''' + + steps = [ + {'range': [0, 10], 'color': 'red'}, + {'range': [60, max_val], 'color': 'red'}, + {'range': [10, 20], 'color': 'orange'}, + {'range': [40, 60], 'color': 'orange'}, + {'range': [20, 40], 'color': 'green'}, + ] + + fig = go.Figure(go.Indicator( + mode = "gauge+number", + value = value, + domain = {'x': [0, 1], 'y': [0, .75]}, + title = {'text': title, 'font': {'color': 'black', 'size': 18}}, + gauge = {'axis': {'range': [None, max_val], 'tickfont': {'color': 'black'}}, + 'threshold' : {'line': {'color': "darkred", 'width': 4}, 'thickness': 0.75, 'value': 60}, + 'steps': steps, + 'bar': {'color': "darkblue"},}, + ) + ) + + fig.update_layout(font = {'color': 'green' if value < 40 and value > 20 else 'orange' if ((value > 40 and value < 60) or (value > 10 and value < 20)) else 'red', 'family': "Arial"}, + margin={'t': 10, 'b': 30}) + + return fig + + + def calc_air_pct(self, as_time = False): + """ + When setting as_time = True, returns a vector with a value at each time index. + The issue is that not all time indices have a value for air or sea shipping. + A hacky solution: create a df initialized to 0 with all combinations of timePeriodSeq and shippingMode (i.e. 
21 time periods * 3 shippingModes) + then iterate over the original df that was grouped by timePeriodSeq and shippingMode, + check if the grouped data has a value for that time/shippingMode combination, + if yes then copy/paste that value + if no, then keep 0 as the value + TODO: Probably a better way to write that code + """ + import warnings + warnings.filterwarnings("ignore") + + print(pd.__version__) + + df = self.dm.transportation_activities[['xTransportationSol']].query("xTransportationSol > 0")\ + .join(self.dm.products[['productGroup', 'productCountry']])\ + .groupby(['timePeriodSeq', 'originLocationName', 'destinationLocationName','shippingMode','productName']).\ + sum().rename(columns={'xTransportationSol':'quantity'}) + + if not 'Air' in df.index.get_level_values('shippingMode') and as_time: + num_tp = len(self.dm.demand.index.unique(level='timePeriodSeq')) + return pd.Series(index = range(num_tp+1), data = 0) + elif not 'Air' in df.index.get_level_values('shippingMode') and not as_time: + return 0 + + if as_time: + df = df.reset_index() + from itertools import product + df_gby = df.groupby(['shippingMode', 'timePeriodSeq']).sum().reset_index() + + dft = pd.DataFrame(product(df['shippingMode'].unique(), + df['timePeriodSeq'].unique()), columns = ['shippingMode', 'timePeriodSeq']) + + dft['quantity'] = 0 + + ### HACK ### probably a better way to write this code + for i in range(len(dft)): + sm = dft['shippingMode'].iloc[i] + ts = dft['timePeriodSeq'].iloc[i] + if len(df_gby.loc[(df_gby.shippingMode == sm) & (df_gby.timePeriodSeq == ts)]['quantity']) != 0: + dft['quantity'].iloc[i] = df_gby.loc[(df_gby.shippingMode == sm) & (df_gby.timePeriodSeq == ts)]['quantity'] + else: + continue + + air = dft.loc[dft.shippingMode == 'Air'].quantity.values + sea = dft.loc[dft.shippingMode == 'Sea'].quantity.values + + ratio = pd.Series(air/(air+sea)).replace([np.inf, -np.inf], np.nan).fillna(0).round(3) + + else: + df_gby = df.groupby('shippingMode').sum() + + air = df_gby.loc[df_gby.index == 'Air'].quantity.values + sea = df_gby.loc[df_gby.index == 'Sea'].quantity.values + + ratio = air/(sea+air) + + ratio = np.round(ratio, 3) + + return ratio*100 + + def utilization_kpi(self, as_time = False): + """ + """ + + product_aggregation_column = 'productGroup' + df = (self.dm.production_activities[['line_capacity_utilization']] + .join(self.dm.products[['productGroup']]) + ).groupby(['timePeriodSeq', 'lineName', product_aggregation_column]).sum().reset_index() + + # df = df[df['lineName'] == 'Abbott_Olst_Packaging_Line_5'] + df = df[df['lineName'].isin(['Abbott_Olst_Packaging_Line_5', 'Packaging_Line_1'])] # works both for Client and Pharma + # df = df[df['lineName'] == 'Packaging_Line_1'] # Ony for Pharma + + df['line_capacity_utilization'] = (df['line_capacity_utilization'].replace(0, np.nan)*100) + # VT notes 20211122: why the replace 0 with Nan? Probably to force the mean() to ignore months that have zero utilization? + # TODO: why not filter? 
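+ # A filter-based alternative (untested sketch, answering the TODO above):
+ # df = df[df['line_capacity_utilization'] > 0]
+ # df['line_capacity_utilization'] = df['line_capacity_utilization'] * 100
+ # Note this changes semantics: filtering drops zero-utilization time periods
+ # from the `as_time` output entirely, whereas replace(0, np.nan) keeps the
+ # index complete and only excludes the zeros from mean().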
+ # df['line_capacity_utilization'] = (df['line_capacity_utilization']*100) + + if as_time: + return df.set_index('timePeriodSeq')['line_capacity_utilization'].sort_index() + else: + return float(df.groupby('lineName')['line_capacity_utilization'].mean()) + + def dos_kpi(self, as_time = False): + ''' + ''' + df = self.dm.demand_inventories[['quantity', 'xInvSol']] + + num_days = len(self.dm.demand.index.unique(level='timePeriodSeq')) * 30 + + demand_inv = df.groupby('timePeriodSeq')['xInvSol'].sum() + + total_demand = df['quantity'].sum() + + demand_dos = demand_inv / (total_demand / num_days) + + if as_time: + return demand_dos + else: + return float(demand_dos.mean()) + + + + + diff --git a/test/pharma/supply_chain/pharma/pharmascenariodbtables.py b/test/pharma/supply_chain/pharma/pharmascenariodbtables.py new file mode 100644 index 0000000..cf1ad56 --- /dev/null +++ b/test/pharma/supply_chain/pharma/pharmascenariodbtables.py @@ -0,0 +1,250 @@ +####################################################### +# Table specific SQL +####################################################### +from typing import List, Dict +from sqlalchemy import Table, Column, String, Integer, Float, ForeignKey, ForeignKeyConstraint +from collections import OrderedDict + +# from supply_chain.folium_supply_chain import SCMapManager, MappingSCDM +# from supply_chain.plotly_supply_chain import PlotlyManager, SupplyChainPlotlyManager, WaterPlotlyManager #, PlotlySupplyChainDataManager + +from supply_chain.scnfo.scnfoscenariodbtables import ProductTable, ScnfoScenarioDbManager, ScenarioTable, LocationTable, \ + PlantTable, LineTable, TimePeriodTable, DemandTable, RecipeTable, RecipePropertiesTable, BomItemTable, \ + ParameterTable, ProductionActivityTable, LineUtilizationTable, DemandMapTable, SupplyMapTable, KpiTable, \ + PlantInventoryTable, DemandInventoryTable, WIPTable, WarehouseTable, WarehousePropertiesTable, ShippingModeTable, \ + ShippingLaneTable, ShippingLanePropertiesTable, WarehouseInventoryTable, TransportationActivityTable,\ + PlannedProductionActivityTable, BusinessKpiTable +from dse_do_utils.scenariodbmanager import ScenarioDbTable, AutoScenarioDbTable #, ScenarioDbManager +# from supply_chain.water.dewaterdatamanager import DEWaterDataManager #ScenarioDbTable, ScenarioDbManager + + + +import pandas as pd + +# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# Pharma use-case +# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +# class ProductTable(ScenarioDbTable): +# def __init__(self, db_table_name: str = 'product', extended_columns_metadata: List[Column] = []): +# columns_metadata = [ +# Column('productName', String(256), primary_key=True), +# Column('inventoryVolume', Float(), primary_key=False), +# Column('transportationVolume', Float(), primary_key=False), +# Column('transportationWeight', Float(), primary_key=False), +# # Column('turnOverRatio', Float(), primary_key=False), +# ] +# columns_metadata.extend(extended_columns_metadata) +# super().__init__(db_table_name, columns_metadata) + +class PharmaProductTable(ProductTable): + def __init__(self, db_table_name: str = 'product', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('productGroup', String(256), primary_key=False), + Column('productCountry', String(256), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + +class PharmaLocationTable(LocationTable): + def __init__(self, db_table_name: str = 'location', 
extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('countryIso', String(3), primary_key=False), + Column('region', String(256), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + +class PharmaDemandTable(DemandTable): + def __init__(self, db_table_name: str = 'demand', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('actualQuantity', Float(), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + +class PharmaShippingLanePropertiesTable(ShippingLanePropertiesTable): + def __init__(self, db_table_name: str = 'shipping_lane_properties', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('transitCost', Float(), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + +class StochasticScenarioTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'stochastic_scenario', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('stochasticScenarioId', String(256), primary_key=True), + Column('stage1id', String(256), primary_key=False), + Column('stage2id', String(256), primary_key=False), + Column('replication', Integer(), primary_key=False), + Column('stochasticDemandId', String(256), primary_key=False), + Column('minDemandIncrease', Float(), primary_key=False), + Column('maxDemandIncrease', Float(), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + +class ProductionActivityStochasticTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'production_activity_stocastic'): + columns_metadata = [ + Column('stochasticScenarioId', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('lineName', String(256), primary_key=True), + Column('recipeId', Integer(), primary_key=True), + Column('xProdSol_mean', Float()), + Column('xProdSol_std', Float()), + Column('xProdSlackSol_mean', Float()), + Column('xProdSlackSol_std', Float()), + ] + constraints_metadata = [ + ForeignKeyConstraint(['stochasticScenarioId'], ['stochastic_scenario.stochasticScenarioId']), + ForeignKeyConstraint(['productName', 'recipeId'], ['recipe.productName', 'recipe.recipeId']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ForeignKeyConstraint(['lineName'], ['line.lineName']), + ] + super().__init__(db_table_name, columns_metadata, constraints_metadata) + +class PlantInventoryStochasticTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'plant_inventory_stochastic', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('stochasticScenarioId', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('locationName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('xInvSol_mean', Float()), + Column('xInvSol_std', Float()), + ] + constraints_metadata = [ + ForeignKeyConstraint(['stochasticScenarioId'], ['stochastic_scenario.stochasticScenarioId']), + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['locationName'], ['location.locationName']), + ForeignKeyConstraint(['timePeriodSeq'], 
['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + +class WarehouseInventoryStochsticTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'warehouse_inventory_stochastic', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('stochasticScenarioId', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('locationName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('xInvSol_mean', Float()), + Column('xInvSol_std', Float()), + ] + constraints_metadata = [ + ForeignKeyConstraint(['stochasticScenarioId'], ['stochastic_scenario.stochasticScenarioId']), + ForeignKeyConstraint(['locationName'], ['location.locationName']), + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + +class DemandInventoryStochasticTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'demand_inventory_stochastic', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('stochasticScenarioId', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('locationName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('xInvSol_mean', Float()), + Column('xInvSol_std', Float()), + Column('xBacklogSol_mean', Float(), primary_key=False), + Column('xBacklogSol_std', Float(), primary_key=False), + Column('xBacklogResupplySol_mean', Float(), primary_key=False), + Column('xBacklogResupplySol_std', Float(), primary_key=False), + Column('xFulfilledDemandSol_mean', Float(), primary_key=False), + Column('xFulfilledDemandSol_std', Float(), primary_key=False), + Column('xUnfulfilledDemandSol_mean', Float(), primary_key=False), + Column('xUnfulfilledDemandSol_std', Float(), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['stochasticScenarioId'], ['stochastic_scenario.stochasticScenarioId']), + ForeignKeyConstraint(['locationName'], ['location.locationName']), + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + +class TransportationActivityStochasticTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'transportation_activity_stochastic', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('stochasticScenarioId', String(256), primary_key=True), + Column('originLocationName', String(256), primary_key=True), + Column('destinationLocationName', String(256), primary_key=True), + Column('shippingMode', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('xTransportationSol_mean', Float(), primary_key=False), + Column('xTransportationSol_std', Float(), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['stochasticScenarioId'], ['stochastic_scenario.stochasticScenarioId']), + ForeignKeyConstraint(['originLocationName'], ['location.locationName']), + 
ForeignKeyConstraint(['destinationLocationName'], ['location.locationName']), + ForeignKeyConstraint(['shippingMode'], ['shipping_mode.shippingModeName']), + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class PharmaDemandInventoryTable(DemandInventoryTable): + def __init__(self, db_table_name: str = 'demand_inventory', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('xDOSSlackSol', Float(), primary_key=False), + Column('dosTargetDays', Integer(), primary_key=False), + Column('dosTargetQuantity', Float(), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + +class PharmaScenarioDbManager(ScnfoScenarioDbManager): + def __init__(self, credentials=None, schema: str = None, echo=False, multi_scenario: bool = True): + input_db_tables = OrderedDict([ + ('Scenario', ScenarioTable()), + ('Location', PharmaLocationTable()), + ('Plant', PlantTable()), + ('Line', LineTable()), + ('TimePeriod', TimePeriodTable()), + ('Product', PharmaProductTable()), + ('Demand', PharmaDemandTable()), + ('Recipe', RecipeTable()), + ('RecipeProperties', RecipePropertiesTable()), + ('BomItem', BomItemTable()), + ('WIP', WIPTable()), + ('Warehouse', WarehouseTable()), + ('WarehouseProperties', WarehousePropertiesTable()), + ('ShippingMode', ShippingModeTable()), + ('ShippingLane', ShippingLaneTable()), + ('ShippingLaneProperties', PharmaShippingLanePropertiesTable()), + ('Parameter', ParameterTable()), + ('PlannedProductionActivity', PlannedProductionActivityTable()), + ('StochasticScenario', StochasticScenarioTable()), + ]) + output_db_tables = OrderedDict([ + ('ProductionActivity', ProductionActivityTable()), + ('PlantInventory', PlantInventoryTable()), + ('WarehouseInventory', WarehouseInventoryTable()), + ('DemandInventory', PharmaDemandInventoryTable()), + ('LineUtilization', LineUtilizationTable()), + ('TransportationActivity', TransportationActivityTable()), + ('DemandMap', DemandMapTable()), + ('SupplyMap', SupplyMapTable()), + ('BusinessKPIs', BusinessKpiTable()), + ('kpis', KpiTable()), + ('ProductionActivityStochastic', ProductionActivityStochasticTable()), + ('PlantInventoryStochastic', PlantInventoryStochasticTable()), + ('WarehouseInventoryStochastic', WarehouseInventoryStochsticTable()), + ('DemandInventoryStochastic', DemandInventoryStochasticTable()), + ('TransportationActivityStochastic', TransportationActivityStochasticTable()), + + ]) + super().__init__(input_db_tables=input_db_tables, output_db_tables=output_db_tables, credentials=credentials, schema=schema, echo=echo, + multi_scenario=multi_scenario) diff --git a/test/pharma/supply_chain/pharma/supply_chain_schema.py b/test/pharma/supply_chain/pharma/supply_chain_schema.py new file mode 100644 index 0000000..2dc88b2 --- /dev/null +++ b/test/pharma/supply_chain/pharma/supply_chain_schema.py @@ -0,0 +1,318 @@ +""" +Experiment to define keys, columns, foreign keys in Named Tuples +""" + +from typing import Dict, List + +from utils.dash_common_utils import ForeignKeySchema, ScenarioTableSchema, PivotTableConfig + +""" +Defines the 'schema' for the dataframes in a DataManager. +Is used for: +* Automatically set the index of a table +* 'Expand' a table by recursively joining tables from the foreign keys. 
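+(For example, per the schema definitions below: expanding 'Demand' joins
+'Location' on ['locationName'] and 'Product' on ['productName'], so the
+expanded frame also carries columns such as 'countryIso' and 'productGroup'.)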
+This is used so that a PivotTable in the UI has all columns to work with (similar as in DOC). + +TODO: +* Migrate to scnfo, water, etc. +""" + +""" +TODO: Generate this same info from an instance of a ScnfoScenarioDbManager. +Avoid defining the same info again. +But this can be used when there is no ScenarioDbManager. +""" +scnfo_input_tables:List[ScenarioTableSchema]= [ + ScenarioTableSchema( + table_name = 'TimePeriod', + index_columns = ['timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'Demand', + index_columns = ['customerName', 'locationName', 'productName', 'timePeriodSeq'], + value_columns =[], + foreign_tables = [ + ForeignKeySchema( + table_name = 'Location', + foreign_keys = ['locationName'] + ), + ForeignKeySchema( + table_name = 'Product', + foreign_keys = ['productName'] + ), + ], + ), + ScenarioTableSchema( + table_name = 'Product', + index_columns = ['productName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'RecipeProperties', + index_columns = ['productName', 'timePeriodSeq', 'lineName', 'recipeId'], + value_columns = [], + foreign_tables= [], + ), + ScenarioTableSchema( + table_name = 'Line', + index_columns = ['lineName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'Plant', + index_columns = ['plantName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'Location', + index_columns = ['locationName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'WIP', + index_columns = ['productName', 'locationName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'Warehouse', + index_columns = ['warehouseName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'WarehouseProperties', + index_columns = ['warehouseName', 'productName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'ShippingMode', + index_columns = ['shippingModeName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'ShippingLane', + index_columns = ['originLocationName', 'destinationLocationName', 'shippingMode'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'ShippingLaneProperties', + index_columns = ['originLocationName', 'destinationLocationName', 'shippingMode','productName'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'PlannedProductionActivity', + index_columns = ['planId', 'productName', 'timePeriodSeq', 'lineName', 'recipeId'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'StochasticScenario', + index_columns = ['stochasticScenarioId'], + value_columns = [], + foreign_tables = [], + ), +] + +scnfo_output_tables: List[ScenarioTableSchema]= [ + ScenarioTableSchema( + table_name = 'ProductionActivity', + index_columns = ['productName', 'timePeriodSeq', 'lineName', 'recipeId'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + table_name = 'PlantInventory', + index_columns = ['productName', 'locationName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + ScenarioTableSchema( + 'DemandInventory', + index_columns = ['productName', 'locationName', 'timePeriodSeq'], + value_columns = [], + foreign_tables = [], + ), + 
ScenarioTableSchema(
+        table_name = 'WarehouseInventory',
+        index_columns = ['productName', 'locationName', 'timePeriodSeq'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'TransportationActivity',
+        index_columns = ['originLocationName','destinationLocationName','shippingMode', 'productName', 'timePeriodSeq'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'LineUtilization',
+        index_columns = ['lineName', 'timePeriodSeq'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'SupplyMap',
+        index_columns = ['locationName'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'DemandMap',
+        index_columns = ['locationName'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'DemandSupplyMap',
+        index_columns = ['locationName'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'BusinessKPIs',
+        index_columns = ['kpi'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'kpis',
+        index_columns = ['NAME'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'ProductionActivityStochastic',
+        index_columns = ['stochasticScenarioId','productName','timePeriodSeq','lineName','recipeId'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'PlantInventoryStochastic',
+        index_columns = ['stochasticScenarioId','productName','locationName','timePeriodSeq'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'WarehouseInventoryStochastic',
+        index_columns = ['stochasticScenarioId','productName','locationName','timePeriodSeq'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'DemandInventoryStochastic',
+        index_columns = ['stochasticScenarioId','productName','locationName','timePeriodSeq'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+    ScenarioTableSchema(
+        table_name = 'TransportationActivityStochastic',
+        index_columns = ['stochasticScenarioId','originLocationName','destinationLocationName','shippingMode','productName','timePeriodSeq'],
+        value_columns = [],
+        foreign_tables = [],
+    ),
+]
+
+SCNFO_SCHEMA: Dict[str, ScenarioTableSchema] = {t.table_name: t for t in (scnfo_input_tables + scnfo_output_tables)}
+
+# print(SCNFO_SCHEMA)
+# print(SCNFO_SCHEMA['Demand'].index_columns)
+
+scnfo_input_pivots: List[PivotTableConfig] = [
+    PivotTableConfig(
+        table_name='Location',
+        rows=[],
+        cols=['state'],
+        vals=[],
+        rendererName='Stacked Column Chart',
+        aggregatorName='Count'
+    ),
+    PivotTableConfig(
+        table_name='Plant',
+        rows=[],
+        cols=[],
+        vals=[],
+        rendererName='Table',
+        aggregatorName='Count'
+    ),
+    PivotTableConfig(
+        table_name='TimePeriod',
+        rows=[],
+        cols=[],
+        vals=[],
+        rendererName='Table',
+        aggregatorName='Count'
+    ),
+    PivotTableConfig(
+        table_name='Line',
+        rows=['country', 'state'],
+        cols=[],
+        vals=[],
+        rendererName='Table',
+        aggregatorName='Count'
+    ),
+    PivotTableConfig(
+        table_name='Product',
+        rows=['subgroupID'],
+        cols=['groupID'],
+        vals=[],
+        rendererName='Stacked Column Chart',
+        aggregatorName='Count'
+    ),
+    PivotTableConfig(
+        table_name='Demand',
+        rows=['productName'],
+        cols=['timePeriodSeq'],
+        vals=['quantity'],
+        rendererName='Stacked Column Chart',
+        aggregatorName='Sum'
+    ),
+]
+
+scnfo_output_pivots: List[PivotTableConfig] = [
+    PivotTableConfig(
+        table_name='ProductionActivity',
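+        # rows/cols/vals map onto the pivot UI's row, column and value axes;
+        # rendererName and aggregatorName must be names the pivot component
+        # recognizes (e.g. 'Table Heatmap', 'Sum').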
+ rows=['lineName'], + cols=['timePeriodSeq'], + vals=['line_capacity_utilization'], + rendererName='Table Heatmap', + aggregatorName='Sum' + ), + PivotTableConfig( + table_name='PlantInventory', + rows=['locationName','productName'], + cols=['timePeriodSeq'], + vals=['xPlantInvSol'], + rendererName='Table Heatmap', + aggregatorName='Sum' + ), + PivotTableConfig( + table_name='DemandInventory', + rows=['locationName','productName'], + cols=['timePeriodSeq'], + vals=['xBacklogSol'], + rendererName='Stacked Column Chart', + aggregatorName='Sum' + ), + PivotTableConfig( + table_name='LineUtilization', + rows=['lineName'], + cols=['timePeriodSeq'], + vals=['utilization'], + rendererName='Table Heatmap', + aggregatorName='Sum' + ), + +] + +SCNFO_PIVOT_CONFIG:Dict[str, PivotTableConfig] = {t.table_name : t for t in (scnfo_input_pivots + scnfo_output_pivots)} diff --git a/test/pharma/supply_chain/scnfo/__init__.py b/test/pharma/supply_chain/scnfo/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/test/pharma/supply_chain/scnfo/scnfodatamanager.py b/test/pharma/supply_chain/scnfo/scnfodatamanager.py new file mode 100644 index 0000000..215a9f6 --- /dev/null +++ b/test/pharma/supply_chain/scnfo/scnfodatamanager.py @@ -0,0 +1,160 @@ +from typing import Optional + +from dse_do_utils.datamanager import DataManager +import pandas as pd + +class ScnfoDataManager(DataManager): + def __init__(self, inputs=None, outputs=None): + super().__init__(inputs, outputs) + self.demand_index_columns = ['customerName', 'locationName', 'productName', 'timePeriodSeq'] + self.production_activities_index_columns = ['productName', 'timePeriodSeq', 'lineName', + 'recipeId'] # We'll be using these later on + self.recipe_properties_index_columns = self.production_activities_index_columns + + #TODO: move generic parts to DataManager + def prepare_data_frames(self): + if (self.inputs is not None) and (len(self.inputs) > 0): + self.prepare_input_data_frames() + if (self.outputs is not None) and (len(self.outputs) > 0): + self.prepare_output_data_frames() + + def prepare_input_data_frames(self): + if 'TimePeriod' in self.inputs: + self.timeperiods = (self.inputs['TimePeriod'].set_index('timePeriodSeq', verify_integrity=True)) + + self.demand_index_columns = ['customerName', 'locationName', 'productName', 'timePeriodSeq'] + if 'Demand' in self.inputs: + self.demand = (self.inputs['Demand'].astype({'productName': str}).set_index(self.demand_index_columns, verify_integrity=True)) + + # if 'ActualDemand' in self.inputs: + # self.actual_demand = (self.inputs['ActualDemand'].set_index(self.demand_index_columns, verify_integrity=True)) + + if 'Product' in self.inputs: + self.products = self.inputs['Product'].astype({'productName': str}).set_index('productName', verify_integrity=True) + + if 'BomItem' in self.inputs: + self.bom_items = self.inputs['BomItem'].astype({'productName': str}).set_index(['componentName', 'productName', 'recipeId'], verify_integrity = True) + else: + self.bom_items = pd.DataFrame(columns=['componentName', 'productName', 'recipeId', 'quantity']).astype({'productName': str}).set_index(['componentName', 'productName', 'recipeId']) + + # Recipe properties natural keys: + self.production_activities_index_columns = ['productName', 'timePeriodSeq', 'lineName', + 'recipeId'] # We'll be using these later on + self.recipe_properties_index_columns = self.production_activities_index_columns + if 'RecipeProperties' in self.inputs: + self.recipe_properties = 
(self.inputs['RecipeProperties'].astype({'productName': str}) + .set_index(self.recipe_properties_index_columns, verify_integrity=True) + ) + if 'recipePropertiesId' in self.recipe_properties.columns: + self.recipe_properties = self.recipe_properties.drop(['recipePropertiesId'], axis=1) # If we use the natural keys, we don't need the recipePropertiesId + + if 'Line' in self.inputs: + self.lines = self.inputs['Line'].set_index('lineName', verify_integrity=True) + + if 'Plant' in self.inputs: + self.plants = self.inputs['Plant'].set_index('plantName', verify_integrity=True) + + if 'Location' in self.inputs: + self.locations = self.inputs['Location'].set_index('locationName', verify_integrity=True) + + if 'ShippingLane' in self.inputs: + self.shipping_lanes = self.inputs['ShippingLane'].set_index(['originLocationName', 'destinationLocationName', 'shippingMode'], verify_integrity=True) + + if 'ShippingLaneProperties' in self.inputs: + self.shipping_lane_properties = self.inputs['ShippingLaneProperties'].set_index(['originLocationName', 'destinationLocationName', 'shippingMode', 'productName', 'timePeriodSeq'], verify_integrity=True) + + if 'Warehouse' in self.inputs: + self.warehouses = self.inputs['Warehouse'].set_index('warehouseName', verify_integrity=True) + + if 'WarehouseProperties' in self.inputs: + self.warehouse_properties = self.inputs['WarehouseProperties'].astype({'productName': str}).set_index(['warehouseName','productName','timePeriodSeqPattern'], verify_integrity=True) + + if 'ControlScenario' in self.inputs: + self.control_scenarios = self.inputs['ControlScenario'].set_index('controlScenarioId', verify_integrity=True) + + if 'StochasticScenario' in self.inputs: + self.stochastic_scenarios = self.inputs['StochasticScenario'].set_index('stochasticScenarioId', verify_integrity=True) + + if 'WIP' in self.inputs: + self.WIP = self.inputs['WIP'].astype({'productName': str}).set_index(['productName','locationName','timePeriodSeq'], verify_integrity=True) + + def prepare_output_data_frames(self): + #Beware: self.production_activities_index_columns defined in prepare_input_data_frames + if 'ProductionActivity' in self.outputs: + self.production_activities = (self.outputs['ProductionActivity'].astype({'productName': str}) + .set_index(self.production_activities_index_columns, verify_integrity = True)) + + if 'PlantInventory' in self.outputs: + self.plant_inventories = (self.outputs['PlantInventory'].astype({'productName': str}) + .set_index(['productName','locationName','timePeriodSeq'], verify_integrity = True) + ) + if 'WarehouseInventory' in self.outputs: + self.warehouse_inventories = (self.outputs['WarehouseInventory'].astype({'productName': str}) + .set_index(['productName','locationName','timePeriodSeq'], verify_integrity = True) + ) + if 'DemandInventory' in self.outputs: + self.demand_inventories = (self.outputs['DemandInventory'].astype({'productName': str}) + .set_index(['productName','locationName','timePeriodSeq'], verify_integrity = True) + ) + if 'TransportationActivity' in self.outputs: + self.transportation_activities = (self.outputs['TransportationActivity'] + .set_index(['originLocationName','destinationLocationName','shippingMode','productName','timePeriodSeq'], verify_integrity = True)) + + if 'LineUtilization' in self.outputs: + self.line_utilization = (self.outputs['LineUtilization'] + .set_index(['lineName','timePeriodSeq'], verify_integrity = True) + ) + # if 'SupplyMap' in self.outputs: + # self.supply_map = (self.outputs['SupplyMap'] + # 
.set_index(['locationName'], verify_integrity = True) + # ) + # if 'DemandMap' in self.outputs: + # self.demand_map = (self.outputs['DemandMap'] + # .set_index(['locationName'], verify_integrity = True) + # ) + if 'DemandSupplyMap' in self.outputs: + self.demand_supply_map = (self.outputs['DemandSupplyMap'] + .set_index(['locationName'], verify_integrity = True) + ) + + if 'BusinessKPIs' in self.outputs: + self.business_kpis = (self.outputs['BusinessKPIs'] + .set_index(['kpi'], verify_integrity = True) + ) + + if 'kpis' in self.outputs and self.outputs['kpis'].shape[0] > 0: + """Note: for some reason an imported scenario uses 'Name' and 'Value' as column names!""" + df = self.outputs['kpis'] + df.columns= df.columns.str.upper() +# if 'Name' in df.columns: +# df = df.rename(columns={'Name':'NAME'}) +# if 'Value' in df.columns: +# df = df.rename(columns={'Value':'VALUE'}) + self.kpis = (df + .set_index(['NAME'], verify_integrity = True) + ) + + def select_time_periods(self, pattern, time_periods): + if isinstance(pattern, int): + df = time_periods.query("timePeriodSeq == @pattern") + elif pattern == '*': + df = time_periods.query("timePeriodSeq >= 1") # Exclude the period 0 + elif isinstance(pattern, str) and 't' in pattern: + p2 = pattern.replace('t', 'timePeriodSeq', 1) + df = time_periods.query(p2) + else: + df = pd.DataFrame({'timePeriodSeq': []}).set_index('timePeriodSeq') + + # tps = df.timePeriodSeq.to_list() + tps = df.index.get_level_values('timePeriodSeq') + return tps + + def explode_time_period_pattern(self, df, pattern_column:str='timePeriodSeqPattern'): + """Explode rows based on pattern for timePeriodSeq + Assumes column named by pattern_column with the pattern + Replaces/add `timePeriodSeq`. Drops pattern_column. + """ + df['timePeriodSeq'] = df[pattern_column].apply(lambda x: self.select_time_periods(x, self.active_timeperiods)) + df = df.explode('timePeriodSeq') + df = df.drop(columns=[pattern_column]) + return df diff --git a/test/pharma/supply_chain/scnfo/scnfoplotlymanager.py b/test/pharma/supply_chain/scnfo/scnfoplotlymanager.py new file mode 100644 index 0000000..f9af0c3 --- /dev/null +++ b/test/pharma/supply_chain/scnfo/scnfoplotlymanager.py @@ -0,0 +1,467 @@ +from typing import List, Dict, Tuple, Optional +import pandas as pd +import plotly.express as px +import plotly.graph_objs as go +import numpy as np + +from dse_do_utils.datamanager import DataManager +from supply_chain.scnfo.scnfodatamanager import ScnfoDataManager + + +class PlotlyManager(): + """Holds method that create Plotly charts. + Pass-in the DM as an input in the constructor. + """ + def __init__(self, dm:DataManager): + self.dm = dm + + def get_plotly_fig_m(self, id): + """On the instance `self`, call the method named by id['index'] + For use with pattern-matching callbacks. Assumes the id['index'] is the name of a method of this class and returns a fig. + """ + return getattr(self, id['index'])() + + def get_dash_tab_layout_m(self, page_id): + """On the instance `self`, call the method named by get_tab_layout_{page_id} + """ + return getattr(self, f"get_tab_layout_{page_id}")() + + +class ScnfoPlotlyManager(PlotlyManager): + """Holds method that create Plotly charts. + Pass-in the DM as an input in the constructor. + """ + def __init__(self, dm: ScnfoDataManager): + super().__init__(dm) + + def plotly_production_plant_time_bar(self): + """Bar chart of production quantities per Plant over TimePeriod. 
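+        Merges ProductionActivity with Location on locationName, then sums
+        xProdSol per (timePeriodSeq, plantName) before plotting.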
+ Input tables: ['ProductionActivity', 'Location'] + Output tables: [] + """ + df = self.dm.production_activities.reset_index().merge(self.dm.locations.reset_index(), on='locationName').groupby( + ['timePeriodSeq', 'plantName']).sum() + labels = {'timePeriodSeq': 'Time Period', 'xProdSol': 'Production', 'productName': 'Product Name'} + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="xProdSol", color="plantName", title="Production", + labels=labels) # , facet_row="timePeriodSeq") + fig.update_xaxes(type='category') + return fig + + def plotly_production_product_time_bar(self): + """Bar chart of production quantities per Product over TimePeriod. + Input tables: ['ProductionActivity', 'Location'] + Output tables: [] + """ + df = self.dm.production_activities.reset_index().merge(self.dm.locations.reset_index(), on='locationName').groupby( + ['timePeriodSeq', 'productName']).sum() + labels = {'timePeriodSeq': 'Time Period', 'xProdSol': 'Production', 'productName': 'Product Name'} + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="xProdSol", color="productName", title="Production", + labels=labels) # , facet_row="timePeriodSeq") + fig.update_xaxes(type='category') + return fig + + def plotly_animated_demand_map(self, query=None): + """Plotly map of US with demand locations. + Input tables: ['Location'] + Output tables: ['DemandInventory'] + """ + df = self.dm.demand_inventories.groupby(['locationName', 'timePeriodSeq']).agg( + quantity=pd.NamedAgg(column='quantity', aggfunc='sum'), + ) + df = df.join(self.dm.locations).reset_index() + fig = px.scatter_geo(df, lat="latitude", lon="longitude", color="quantity", + hover_name="city", size="quantity", + animation_frame="timePeriodSeq", + projection="equirectangular", width=1200, height=800, + # text='city' + # locationmode = 'USA-states', + size_max=40, + ) + fig.update_layout( + title='Demand', + # geo_scope='north america', + ) + return fig + + def plotly_animated_supply_map(self, query=None): + """Plotly map of US with production locations. Animated + """ + df = self.dm.production_activities.groupby(['locationName', 'timePeriodSeq']).agg( + quantity=pd.NamedAgg(column='xProdSol', aggfunc='sum'), + cost=pd.NamedAgg(column='production_cost', aggfunc='sum'), + ) + df = df.join(self.dm.locations).reset_index() + fig = px.scatter_geo(df, lat=df.latitude, lon=df.longitude, color="quantity", + hover_name="city", size="quantity", + animation_frame="timePeriodSeq", + # projection="equirectangular", + # locationmode = 'USA-states', + width=1200, height=800, + size_max=50, + # hover_data = ['timePeriodSeq','quantity'] # 'locationName', + hover_data={'longitude': False, 'latitude': False, 'quantity': ':,'}, + text='city' + ) + fig.update_layout( + title='Production', + # geo_scope='usa', + ) + + # Note: override hovertemplate does NOT work in combination with animation + # print("plotly express hovertemplate:", fig.data[0].hovertemplate) + # fig.update_traces(hovertemplate='%{hovertext}
<br>
quantityy=%{marker.color:,}') # + # print("plotly express hovertemplate:", fig.data[0].hovertemplate) + # show_fig(fig) + return fig + + + + + + + + ############################################### + # Demand + ############################################### + def describe_demand(self): + """Print summary of demand statistics. + TODO: ensure there is always a 'productGroup'? + Input tables: ['Demand'] + Output tables: [] + """ + df = (self.dm.demand +# .join(self.dm.products[['productGroup']]) + .reset_index()) + print(f"Demand entries = {df.shape[0]:,}") + print(f"Num products = {len(df.productName.unique()):,}") +# print(f"Num productGroup types = {len(df.productGroup.unique()):,}") + print(f"Num locations = {len(df.locationName.unique()):,}") + print(f"Num time-periods = {len(df.timePeriodSeq.unique()):,}") + + def plotly_demand_bars(self): + """Product demand over time. Colored by productGroup. + Input tables: ['Product', 'Demand'] + Output tables: [] + """ + product_aggregation_column = 'productGroup' + df = (self.dm.demand + .join(self.dm.products[['productGroup']]) + ).groupby(['timePeriodSeq', product_aggregation_column]).sum() + # display(df.head()) + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'Demand', 'productName': 'Product Name', + 'productGroup': 'Product Group'} + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="quantity", color=product_aggregation_column, + title='Total Product Demand', labels=labels) + fig.update_layout( + # title={ + # 'text': f"Total product demand", + # # 'y': 0.9, + # # 'x': 0.5, + # 'xanchor': 'center', + # 'yanchor': 'top'}, + legend={'orientation': 'v'}, + # legend_title_text=product_aggregation_column, + ) + + return fig + + + + + + ############################################### + # Production Capacity + ############################################### + def describe_production_assets(self): + """Print statistics on production capacity. 
+ Input tables: ['RecipeProperties', 'Line', 'Plant','Location'] + Output tables: [] + """ + df = (self.dm.recipe_properties[['capacity', 'cost']] + .join(self.dm.lines) + .join(self.dm.plants.rename(columns={'locationDescr': 'plantDescr'}), on='plantName') + .join(self.dm.locations, on='locationName') + .reset_index() + ) + # display(df.head()) + print(f"Total Recipe Properties = {df.shape[0]:,}") + print(f"Total Time Periods = {len(df.timePeriodSeq.unique()):,}") + print(f"Total Products = {len(df.productName.unique()):,}") + print(f"Total Lines = {len(df.lineName.unique()):,}") + print(f"Total Plants = {len(df.plantName.unique()):,}") + print(f"Max Capacity = {df.capacity.max():,}") + print(f"Min Capacity = {df.capacity.min():,}") + print(f"Max Cost = {df.cost.max():0,.4f}") + print(f"Min Cost = {df.cost.min():0,.4f}") + + def plotly_capacity_sunburst(self): + """Sunburst of production capacity: + State->Plant->Line + Input tables: ['RecipeProperties', 'Line', 'Plant','Location'] + Output tables: [] + """ + df = (self.dm.recipe_properties[['capacity']] + .join(self.dm.lines) + .join(self.dm.plants.rename(columns={'locationDescr': 'plantDescr'}), on='plantName') + .join(self.dm.locations, on='locationName') + ).groupby(['lineName', 'plantName', 'city', 'state', 'country']).max() + # display(df.head()) + + labels = {'timePeriodSeq': 'Time Period', 'quantity': 'demand', 'productName': 'Product Name'} + fig = px.sunburst(df.reset_index(), path=['state', 'plantName', 'lineName'], values='capacity', labels=labels, + height=500) + + fig.update_layout( + title={ + 'text': "Maximum Line Capacity", + 'y': 0.95, + 'x': 0.5, + 'xanchor': 'center', + 'yanchor': 'top'}) + return fig + + + + + ############################################### + # Production Activities + ############################################### + def plotly_excess_utilization_multi_facet_bars(self): + """Line utilization bar per line over time, clustered by time-period. + Excess utilization over 100% is clearly colored as red. + Good initial view of utilization and excess utilization. + Input tables: [] + Output tables: ['LineUtilization] + """ + df = (self.dm.line_utilization.copy() + ) + df['Regular Capacity'] = df.utilization.clip(0, 1) + df['Over Capacity'] = (df.utilization - 1).clip(0) + df = df[['Regular Capacity', 'Over Capacity']] + df = (df.stack() + .rename_axis(index={None: 'var_name'}) + .to_frame(name='Utilization') + .reset_index() + ) + # display(df.head()) + + labels = {'timePeriodSeq': 'Time Period', 'var_name': 'Utilization Type', 'lineName': 'Line Name'} + fig = px.bar(df.reset_index(), x="lineName", y="Utilization", color='var_name', title='Line Utilization', + labels=labels, + facet_col="timePeriodSeq", + # width = 2000 + color_discrete_map = {'Regular Capacity':'green', 'Over Capacity':'red'}, + ) + fig.update_layout( + legend= + dict( # change legend location + title = "Utilization Type", + orientation="h", + yanchor="top", + y=1.20, + xanchor="right", + x=0.95), + # legend_title_text=None # this doesn't align the legend still + ) + fig.update_layout(yaxis=dict(tickformat="%", )) + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + return fig + + def plotly_excess_utilization_line_time_bars(self): + """Line utilization bar per line over time, clustered by time-period. + Excess utilization over 100% is clearly colored as red. + Good initial view of utilization and excess utilization. 
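+        'Regular Capacity' is utilization clipped at 100%; 'Over Capacity' is
+        whatever exceeds 100% (computed below with .clip()).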
+ Input tables: [] + Output tables: ['LineUtilization] + """ + df = (self.dm.line_utilization.copy() + ) + df['Regular Capacity'] = df.utilization.clip(0, 1) + df['Over Capacity'] = (df.utilization - 1).clip(0) + df = df[['Regular Capacity', 'Over Capacity']] + df = (df.stack() + .rename_axis(index={None: 'var_name'}) + .to_frame(name='Utilization') + .reset_index() + ) + # display(df.head()) + + labels = {'timePeriodSeq': 'Time Period', 'var_name': 'Utilization Type', 'lineName': 'Line Name'} + fig = px.bar(df.reset_index(), x="timePeriodSeq", y="Utilization", color='var_name', title='Line Utilization', + labels=labels, + facet_row="lineName", + # width = 2000 + color_discrete_map = {'Regular Capacity':'green', 'Over Capacity':'red'}, + ) + fig.update_layout( + legend= + dict( # change legend location + title = "Utilization Type", + orientation="h", + yanchor="top", + y=1.20, + xanchor="right", + x=0.95), + # legend_title_text=None # this doesn't align the legend still + ) + fig.update_layout(yaxis=dict(tickformat="%", )) + fig.update_layout(hovermode="closest") # Is supposed to be the default, but in DE we get multiple. Setting 'closest' explicitly is a work-around + return fig + + + + ############################################### + # Transportation Activities + ############################################### + + + def plotly_production_activities_sankey(self): + """Sankey diagram of production activities. + See https://stackoverflow.com/questions/50486767/plotly-how-to-draw-a-sankey-diagram-from-a-dataframe + """ + # Create productName vs id + product_labels_df = (self.dm.products[[]].reset_index() + .reset_index().rename(columns={'index': 'id'}) + ) + # Sankey based on production_activities + df = (self.dm.production_activities[['xProdSol']] + .query("xProdSol > 0") + # .join(dm.products[['stage']]) + .join(self.dm.bom_items[['quantity']].rename(columns={'quantity':'component_bom_quantity'})) + ) + df['component_quantity'] = df.xProdSol * df.component_bom_quantity + df = (df + .drop(columns=['component_bom_quantity']) + .groupby(['componentName', 'productName','lineName']).sum() + ) + + df = df.reset_index() + # display(df.head()) + + # Set pop-up text + df['label'] = df.productName + " - " + df.lineName + + df = (df.merge(product_labels_df, left_on='productName', right_on='productName') + .rename(columns={'id': 'target'}) + ) + # display(df.head()) + df = (df.merge(product_labels_df, left_on='componentName', right_on='productName', suffixes=[None,'_y']) + .drop(columns=['productName_y']) + .rename(columns={'id': 'source'}) + ) + # display(df.head()) + + product_labels_df['color_nodes'] = np.where(product_labels_df.productName == 'Tablet', 'rgba(63, 191, 63, 0.9)', + np.where(product_labels_df.productName == 'Granulate', "rgba(185, 2, 103, 0.95)", + np.where(product_labels_df.productName == 'API', "rgba(251, 33, 44, 0.9)", "rgba(43, 155, 247, 0.92)"))) + + df['color_links'] = np.where(df.componentName == 'Tablet', 'rgba(63, 191, 63, 0.45)', + # np.where(df.componentName == 'Granulate', "rgba(63, 191, 191, 0.45)", + np.where(df.componentName == 'API', "rgba(251, 33, 44, 0.45)", "rgba(43, 155, 247, 0.45)")) + #) + + fig = go.Figure(data=[go.Sankey( + arrangement ='snap', + # valueformat = ".0f", + # valuesuffix = "TWh", + # Define nodes + node=dict( + pad=15, + thickness=15, + line=dict(color="black", width=0.5), + label=product_labels_df.productName.array, + color=product_labels_df.color_nodes.array, + # color = data['data'][0]['node']['color'] + ), + # Add links + link=dict( + 
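+            # Each link carries the integer node ids built from product_labels_df
+            # (source = component, target = product), the production flow
+            # (xProdSol) as its value, and a precomputed rgba color.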
source=df.source.array, + target=df.target.array, + value=df.xProdSol.array, + label=df.label.array, + color=df.color_links.array, + line=dict(color="black", width=0.25), + # label = data['data'][0]['link']['label'], + # color = data['data'][0]['link']['color'] + ))]) + + fig.update_layout( + # title_text="Production", + font_size=10, + ) + # height=800) + return fig + + def plotly_transportation_activities_sankey(self): + """Sankey diagram of transportation activities. + See https://stackoverflow.com/questions/50486767/plotly-how-to-draw-a-sankey-diagram-from-a-dataframe + Input tables: ['Location'] + Output tables: ['TransportationActivity'] + """ + # Create locationName vs id + location_labels_df = (self.dm.locations[[]].reset_index() + .reset_index().rename(columns={'index': 'id'}) + ) + # Sankey based on transportation_activities + df = (self.dm.transportation_activities[['xTransportationSol']] + .query("xTransportationSol > 0") + .groupby(['originLocationName', 'destinationLocationName','shippingMode','productName']).sum() + ) + df = df.reset_index() + # display(df.head()) + + df = (df.merge(location_labels_df, left_on='originLocationName', right_on='locationName') + .rename(columns={'id': 'source'}) + .drop(columns=['locationName']) + ) + # display(df.head()) + df = (df.merge(location_labels_df, left_on='destinationLocationName', right_on='locationName') + .rename(columns={'id': 'target'}) + .drop(columns=['locationName']) + ) + # display(df.head()) + + # Set pop-up text + df['label'] = df.productName + " - " + df.shippingMode + + location_labels_df['color_nodes'] = np.where(location_labels_df.locationName == 'Abbott_Olst_Plant', 'rgba(63, 191, 63, 0.9)', + np.where(location_labels_df.locationName == 'Abbott_WH_NL', "rgba(63, 191, 191, 0.9)", + np.where(location_labels_df.locationName == 'Abbott_Weesp_Plant', "rgba(251, 33, 44, 0.9)", "rgba(43, 155, 247, 0.92)"))) + + df['color_links'] = np.where(df.originLocationName == 'Abbott_Olst_Plant', 'rgba(63, 191, 63, 0.45)', + np.where(df.originLocationName == 'Abbott_WH_NL', "rgba(63, 191, 191, 0.45)", + np.where(df.originLocationName == 'Abbott_Weesp_Plant', "rgba(251, 33, 44, 0.45)", "rgba(43, 155, 247, 0.45)"))) + + # print(df.head()) + fig = go.Figure(data=[go.Sankey( + arrangement ='snap', + # valueformat = ".0f", + # valuesuffix = "TWh", + # Define nodes + node=dict( + pad=15, + thickness=15, + line=dict(color="black", width=0.5), + label=location_labels_df.locationName.array, + color = location_labels_df.color_nodes.array, + # color = data['data'][0]['node']['color'] + ), + # Add links + link=dict( + source=df.source.array, + target=df.target.array, + value=df.xTransportationSol.array, + line=dict(color="black", width=0.25), + label=df.label.array, + color=df.color_links.array, + # label = data['data'][0]['link']['label'], + # color = data['data'][0]['link']['color'] + ))]) + + fig.update_layout( + # title_text="Transportation", + font_size=10, + ) + # height=800) + return fig diff --git a/test/pharma/supply_chain/scnfo/scnfoscenariodbtables.py b/test/pharma/supply_chain/scnfo/scnfoscenariodbtables.py new file mode 100644 index 0000000..eb239f4 --- /dev/null +++ b/test/pharma/supply_chain/scnfo/scnfoscenariodbtables.py @@ -0,0 +1,561 @@ +####################################################### +# Table specific SQL +####################################################### +from typing import List, Dict +from sqlalchemy import Table, Column, String, Integer, Float, ForeignKey, ForeignKeyConstraint +from collections import OrderedDict 
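+
+# Usage sketch (hypothetical credentials; assumes the ScenarioDbManager API from
+# dse-do-utils as used elsewhere in this repo):
+#
+#   dbm = ScnfoScenarioDbManager(credentials=db2_credentials, schema='SCNFO')
+#   dbm.create_schema()                          # (re)create all tables
+#   dbm.replace_scenario_in_db('Baseline', inputs=inputs, outputs=outputs)
+#   inputs, outputs = dbm.read_scenario_from_db('Baseline')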
+ +# from supply_chain.folium_supply_chain import SCMapManager, MappingSCDM +# from supply_chain.plotly_supply_chain import PlotlyManager, SupplyChainPlotlyManager, WaterPlotlyManager #, PlotlySupplyChainDataManager +from dse_do_utils.scenariodbmanager import ScenarioDbTable, ScenarioDbManager +# from supply_chain.supply_chain import DEWaterDataManager #ScenarioDbTable, ScenarioDbManager + +import pandas as pd +from dse_do_dashboard.utils.scenariodbmanager_update import ScenarioDbManagerUpdate + +class ScenarioTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'scenario'): + columns_metadata = [ + Column('scenario_name', String(256), primary_key=True), + ] + super().__init__(db_table_name, columns_metadata) + + +class LocationTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'location', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + # Column('scenario_name', String(256), ForeignKey("scenario.scenario_name"), primary_key=True), + Column('locationName', String(256), primary_key=True), + Column('city', String(256), primary_key=False), + Column('state', String(2), primary_key=False), + Column('zip', String(64), primary_key=False), + Column('country', String(256), primary_key=False), + Column('latitude', Float(), primary_key=False), + Column('longitude', Float(), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + + +class PlantTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'plant', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('plantName', String(256), primary_key=True), + Column('supplierName', String(256), primary_key=False), + # Column('locationName', String(256), ForeignKey("location.locationName"), primary_key=False, nullable=False), + Column('locationName', String(256), primary_key=False, nullable=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['locationName'], ['location.locationName']) + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class LineTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'line', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('lineName', String(256), primary_key=True), + # Column('plantName', String(256), ForeignKey("plant.plantName"), primary_key=False), + Column('plantName', String(256), primary_key=False), + Column('stageName', String(256), primary_key=False, nullable=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['plantName'], ['plant.plantName']) + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class TimePeriodTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'time_period', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('timePeriodSeq', Integer(), primary_key=True), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + + +class ProductTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'product', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('productName', String(256), primary_key=True), + Column('inventoryVolume', Float(), primary_key=False), + Column('transportationVolume', Float(), primary_key=False), + Column('transportationWeight', Float(), primary_key=False), +# 
Column('turnOverRatio', Float(), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + +# @staticmethod +# def extend_metadata(self, default_columns_metadata, columns_metadata: List[Column] = None, extend_metadata:bool=True): +# """For use in subsubclass of a ScenarioDbTable. If applicable, replaces or extends the default_columns_metadata of a class. +# Options: +# 1. Keep default: column_metadata = None +# 2. Extend: column_metadata is not None, extend_metadata is True +# 3. Replace: column_metadata is not None, extend_metadata is False +# """ +# if extend_metadata: +# md = default_columns_metadata.extend(columns_metadata) +# elif columns_metadata is not None: +# md = columns_metadata +# else +# md = default_columns_metadata + + +class RecipeTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'recipe', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('recipeId', Integer(), primary_key=True), + # Column('productName', String(256), ForeignKey("product.productName") , primary_key=True), # If also PK: we do not need unique recipeId + Column('productName', String(256), primary_key=True), # If also PK: we do not need unique recipeId + ] + constraints_metadata = [ + ForeignKeyConstraint(['productName'], ['product.productName']) + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class RecipePropertiesTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'recipe_properties', extended_columns_metadata: List[Column] = []): + columns_metadata = [ +# Column('recipePropertiesId', String(256), primary_key=True), + # Do we use the recipePropertiesId, or the 'natural' keys (productName, recipeId, lineName and timePeriodSeq)? 
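+            # Natural keys used here: (recipeId, productName, lineName, timePeriodSeq)
+            # together form the composite primary key, so no surrogate id is needed.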
+ Column('recipeId', Integer(), primary_key=True), + Column('productName', String(256), primary_key=True), + # Column('recipeId', Integer(), ForeignKey("recipe.recipeId"), primary_key=False), + # Column('productName', String(256), ForeignKey("product.productName") , primary_key=False), + # Column('productName', String(256), ForeignKey("recipe.productName") , primary_key=False), + # Column('lineName', String(256), ForeignKey("line.lineName") , primary_key=False), + # Column('timePeriodSeq', Integer(), ForeignKey("time_period.timePeriodSeq"), primary_key=False), + Column('lineName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('timePeriodSeqPattern', String(256), primary_key=False), + Column('capacity', Float(), primary_key=False), + Column('yield', Float(), primary_key=False), + Column('cost', Float(), primary_key=False), + Column('cycleTime', Float(), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['lineName'], ['line.lineName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ForeignKeyConstraint(['productName', 'recipeId'], ['recipe.productName', 'recipe.recipeId']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class BomItemTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'bom_item', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + # Column('componentName',String(256), ForeignKey("product.productName"), primary_key=True), + # Column('productName', String(256), ForeignKey("product.productName") , primary_key=True), + # Column('recipeId', Integer(), ForeignKey("recipe.recipeId"), primary_key=True), + Column('componentName', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('recipeId', Integer(), primary_key=True), + Column('quantity', Float(), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['componentName'], ['product.productName']), + # ForeignKeyConstraint(['productName'],['product.productName']), + ForeignKeyConstraint(['productName', 'recipeId'], ['recipe.productName', 'recipe.recipeId']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class DemandTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'demand', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('customerName', String(256), primary_key=True), + # Column('locationName', String(256), ForeignKey("location.locationName"), primary_key=True), + # Column('productName', String(256), ForeignKey("product.productName") , primary_key=True), + # Column('timePeriodSeq', Integer(), ForeignKey("time_period.timePeriodSeq"), primary_key=True), + Column('locationName', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('quantity', Float(), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['locationName'], ['location.locationName']), + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class WIPTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 
'wip', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('productName', String(256), primary_key=True), + Column('locationName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('wipQuantity', Float()), + ] + constraints_metadata = [ + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['locationName'], ['location.locationName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class WarehouseTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'warehouse', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('warehouseName', String(256), primary_key=True), + Column('locationName', String(256), primary_key=False, nullable=False), + ] + constraints_metadata = [ + # TODO: enable FK constraint! Disabled to test Pharma use-case + # ForeignKeyConstraint(['locationName'], ['location.locationName']) # HACK!!!!!!!!! + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class WarehousePropertiesTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'warehouse_properties', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('warehouseName', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('timePeriodSeqPattern', String(256), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['warehouseName'], ['warehouse.warehouseName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ForeignKeyConstraint(['productName'], ['product.productName']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class ShippingModeTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'shipping_mode', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('shippingModeName', String(256), primary_key=True), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + + +class ShippingLaneTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'shipping_lane', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('originLocationName', String(256), primary_key=True), + Column('destinationLocationName', String(256), primary_key=True), + Column('shippingMode', String(256), primary_key=True), # TODO: switch to shippingModeName +# Column('shippingModeName', String(256), primary_key=True), + ] + constraints_metadata = [ + ForeignKeyConstraint(['originLocationName'], ['location.locationName']), + ForeignKeyConstraint(['destinationLocationName'], ['location.locationName']), +# ForeignKeyConstraint(['shippingMode'], ['shipping_mode.shippingModeName']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class ShippingLanePropertiesTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'shipping_lane_properties', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('originLocationName', String(256), primary_key=True), + 
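+            # The 3-part composite foreign key below ties each properties row to an
+            # existing (origin, destination, mode) entry in shipping_lane.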
Column('destinationLocationName', String(256), primary_key=True), + Column('shippingMode', String(256), primary_key=True), # TODO: switch to shippingModeName +# Column('shippingModeName', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('timePeriodSeqPattern', String(256), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['originLocationName', 'destinationLocationName', 'shippingMode'], + ['shipping_lane.originLocationName', 'shipping_lane.destinationLocationName', 'shipping_lane.shippingMode']), +# ForeignKeyConstraint(['originLocationName'], ['location.locationName']), +# ForeignKeyConstraint(['destinationLocationName'], ['location.locationName']), +# ForeignKeyConstraint(['shippingMode'], ['shipping_mode.shippingModeName']), + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + +class PlannedProductionActivityTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'planned_production_activity', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('planId', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + # Column('timePeriodSeq', Integer(), ForeignKey("time_period.timePeriodSeq"), primary_key=True), + # Column('lineName', String(256), ForeignKey("line.lineName"), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('lineName', String(256), primary_key=True), + Column('recipeId', Integer(), primary_key=True), + Column('quantity', Float()), + ] + constraints_metadata = [ + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ForeignKeyConstraint(['lineName'], ['line.lineName']), + ForeignKeyConstraint(['productName', 'recipeId'], ['recipe.productName', 'recipe.recipeId']) + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + +# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# Output Tables +# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +class ProductionActivityTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'production_activity', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('productName', String(256), primary_key=True), + # Column('timePeriodSeq', Integer(), ForeignKey("time_period.timePeriodSeq"), primary_key=True), + # Column('lineName', String(256), ForeignKey("line.lineName"), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('lineName', String(256), primary_key=True), + Column('recipeId', Integer(), primary_key=True), + Column('capacity', Float()), + Column('yield', Float()), + Column('cost', Float()), + Column('cycleTime', Integer()), + Column('supplierName', String(256)), + # Column('locationName', String(256), ForeignKey("location.locationName"), primary_key=False, nullable=False), + # Column('plantName', String(256), ForeignKey("plant.plantName"), primary_key=False), + Column('locationName', String(256), primary_key=False, nullable=False), + Column('plantName', String(256), primary_key=False), + Column('xProdSol', Float()), + Column('xProdSlackSol', Float()), + Column('line_capacity_utilization', Float()), + 
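+            # xProdSol / xProdSlackSol above hold the solver's solution values;
+            # line_capacity_utilization and production_cost are derived reporting columns.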
Column('production_cost', Float()), + # ForeignKeyConstraint(['productName','recipeId'],['recipe.productName','recipe.recipeId']) + ] + constraints_metadata = [ + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ForeignKeyConstraint(['lineName'], ['line.lineName']), + ForeignKeyConstraint(['locationName'], ['location.locationName']), + ForeignKeyConstraint(['plantName'], ['plant.plantName']), + ForeignKeyConstraint(['productName', 'recipeId'], ['recipe.productName', 'recipe.recipeId']) + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class PlantInventoryTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'plant_inventory', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + # Column('productName', String(256), ForeignKey("product.productName") , primary_key=True), + # Column('plantName', String(256), ForeignKey("plant.plantName"), primary_key=True), + # Column('timePeriodSeq', Integer(), ForeignKey("time_period.timePeriodSeq"), primary_key=True), + Column('productName', String(256), primary_key=True), +# Column('plantName', String(256), primary_key=True), + Column('locationName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('xInvSol', Float()), + ] + constraints_metadata = [ + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['locationName'], ['location.locationName']), +# ForeignKeyConstraint(['plantName'], ['plant.plantName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + +class WarehouseInventoryTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'warehouse_inventory', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('productName', String(256), primary_key=True), + Column('locationName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), +# Column('quantity', Float(), primary_key=False), + Column('xInvSol', Float(), primary_key=False), + ] + constraints_metadata = [ + # TODO: re-enable FK constraint! + # ForeignKeyConstraint(['locationName'], ['location.locationName']), # HACK!!!!! 
TODO: re-enable after fixing Pharma scenario data + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class DemandInventoryTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'demand_inventory', extended_columns_metadata: List[Column] = []): + columns_metadata = [ +# Column('customerName', String(256), primary_key=True), + # Column('locationName', String(256), ForeignKey("location.locationName"), primary_key=True), + # Column('productName', String(256), ForeignKey("product.productName") , primary_key=True), + # Column('timePeriodSeq', Integer(), ForeignKey("time_period.timePeriodSeq"), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('locationName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('quantity', Float(), primary_key=False), + Column('xInvSol', Float(), primary_key=False), + Column('xBacklogSol', Float(), primary_key=False), + Column('xBacklogResupplySol', Float(), primary_key=False), + Column('xFulfilledDemandSol', Float(), primary_key=False), + Column('xUnfulfilledDemandSol', Float(), primary_key=False), +# Column('xDOSSlackSol', Float(), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['locationName'], ['location.locationName']), + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class TransportationActivityTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'transportation_activity', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('originLocationName', String(256), primary_key=True), + Column('destinationLocationName', String(256), primary_key=True), + Column('shippingMode', String(256), primary_key=True), + Column('productName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), primary_key=True), + Column('transitTime', Float(), primary_key=False), + Column('xTransportationSol', Float(), primary_key=False), + # Other columns are derived: Cognos should be able to figure these out + ] + constraints_metadata = [ + ForeignKeyConstraint(['originLocationName'], ['location.locationName']), + ForeignKeyConstraint(['destinationLocationName'], ['location.locationName']), + ForeignKeyConstraint(['shippingMode'], ['shipping_mode.shippingModeName']), + ForeignKeyConstraint(['productName'], ['product.productName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class LineUtilizationTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'line_utilization', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + # Column('lineName', String(256), ForeignKey("line.lineName"), primary_key=True), + # Column('timePeriodSeq', Integer(), ForeignKey("time_period.timePeriodSeq"), primary_key=True), + # Column('plantName', String(256), ForeignKey("plant.plantName"), primary_key=True), + Column('lineName', String(256), primary_key=True), + Column('timePeriodSeq', Integer(), 
primary_key=True), + Column('plantName', String(256), primary_key=True), + Column('utilization', Float(), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['lineName'], ['line.lineName']), + ForeignKeyConstraint(['timePeriodSeq'], ['time_period.timePeriodSeq']), + ForeignKeyConstraint(['plantName'], ['plant.plantName']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class ParameterTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'parameters', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('param', String(256), primary_key=True), + Column('value', String(256), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata) + + +class DemandMapTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'demand_map', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('locationName', String(256), primary_key=True), + Column('quantity', Float(), primary_key=False), + Column('cost', Float(), primary_key=False), + Column('type', String(256), primary_key=False), +# Column('locationDescr', String(256), primary_key=False), +# Column('locationType', String(256), primary_key=False), + Column('city', String(256), primary_key=False), + Column('state', String(256), primary_key=False), + Column('zip', String(256), primary_key=False), + Column('country', String(256), primary_key=False), + Column('latitude', Float(), primary_key=False), + Column('longitude', Float(), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['locationName'], ['location.locationName']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class SupplyMapTable(ScenarioDbTable): + """Same as DemandMapTable""" + + def __init__(self, db_table_name: str = 'supply_map', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('locationName', String(256), primary_key=True), + Column('quantity', Float(), primary_key=False), + Column('cost', Float(), primary_key=False), + Column('type', String(256), primary_key=False), +# Column('locationDescr', String(256), primary_key=False), +# Column('locationType', String(256), primary_key=False), + Column('city', String(256), primary_key=False), + Column('state', String(256), primary_key=False), + Column('zip', String(256), primary_key=False), + Column('country', String(256), primary_key=False), + Column('latitude', Float(), primary_key=False), + Column('longitude', Float(), primary_key=False), + ] + constraints_metadata = [ + ForeignKeyConstraint(['locationName'], ['location.locationName']), + ] + columns_metadata.extend(extended_columns_metadata) + super().__init__(db_table_name, columns_metadata, constraints_metadata) + + +class KpiTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'kpis'): + columns_metadata = [ + Column('NAME', String(256), primary_key=True), + Column('VALUE', Float(), primary_key=False), + ] + super().__init__(db_table_name, columns_metadata) + +class BusinessKpiTable(ScenarioDbTable): + def __init__(self, db_table_name: str = 'business_kpi', extended_columns_metadata: List[Column] = []): + columns_metadata = [ + Column('kpi', String(256), primary_key=True), + Column('value', Float(), primary_key=False), + ] + columns_metadata.extend(extended_columns_metadata) + 
super().__init__(db_table_name, columns_metadata) + +class ScnfoScenarioDbManager(ScenarioDbManager): + def __init__(self, input_db_tables: Dict[str, ScenarioDbTable]=None, output_db_tables: Dict[str, ScenarioDbTable]=None, + credentials=None, schema: str = None, echo=False, multi_scenario: bool = True): + if input_db_tables is None: + input_db_tables = OrderedDict([ + ('Scenario', ScenarioTable()), + ('Location', LocationTable()), + ('Plant', PlantTable()), + ('Line', LineTable()), + ('TimePeriod', TimePeriodTable()), + ('Product', ProductTable()), + ('Recipe', RecipeTable()), + ('RecipeProperties', RecipePropertiesTable()), + ('BomItem', BomItemTable()), + ('Demand', DemandTable()), + ('Parameter', ParameterTable()), + ]) + if output_db_tables is None: + output_db_tables = OrderedDict([ + ('ProductionActivity', ProductionActivityTable()), + ('PlantInventory', PlantInventoryTable()), + ('WarehouseInventory', WarehouseInventoryTable()), + ('DemandInventory', DemandInventoryTable()), + ('LineUtilization', LineUtilizationTable()), + ('TransportationActivity', TransportationActivityTable()), +# ('PlantToDemandTransportation', PlantToDemandTransportationTable()), +# ('DemandMap', DemandMapTable()), +# ('SupplyMap', SupplyMapTable()), + ('kpis', KpiTable()), + ]) + super().__init__(input_db_tables=input_db_tables, output_db_tables=output_db_tables, credentials=credentials, schema=schema, echo=echo, multi_scenario=multi_scenario) diff --git a/test/pharma/visualization_pages/capacity_page.py b/test/pharma/visualization_pages/capacity_page.py new file mode 100644 index 0000000..508554c --- /dev/null +++ b/test/pharma/visualization_pages/capacity_page.py @@ -0,0 +1,96 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class CapacityPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Capacity', + page_id='capacity_tab', + url='capacity', + input_table_names = ['RecipeProperties', 'Line', 'Product'], + output_table_names = [], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + # dbc.Row(dbc.Col(html.Div("A single, half-width column"), width=6)), + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_time_product_group_capacity_bars(), + style={'height': '55vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row([ + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_line_product_capacity_heatmap(), + style={'height': '55vh', 'width': '36vw'}, + ), + + ) + ]) + ), + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_time_product_group_capacity_heatmap(), + style={'height': '55vh', 'width': '36vw'}, + ), + ) + ]) + ), + ]), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_line_package_capacity_heatmap(), + style={'height': '55vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_time_package_capacity_heatmap(), + style={'height': '55vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + ] + + return layout_children diff --git 
a/test/pharma/visualization_pages/demand_fulfillment_page.py b/test/pharma/visualization_pages/demand_fulfillment_page.py new file mode 100644 index 0000000..012ee5e --- /dev/null +++ b/test/pharma/visualization_pages/demand_fulfillment_page.py @@ -0,0 +1,52 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class DemandFulfillmentPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Demand Fulfillment', + page_id='demand_fulfillment_tab', + url='demand_fulfillment', + input_table_names = ['Product'], + output_table_names = ['DemandInventory','TransportationActivity'], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_demand_fullfilment_multi_plot(mode='columns',var_names=['Unfulfilled','Backlog','Backlog Resupply','Inventory']), + # style={'height': '55vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_demand_fullfilment_multi_plot(), + # style={'height': '200vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + ] + return layout_children diff --git a/test/pharma/visualization_pages/demand_fulfillment_scroll_page.py b/test/pharma/visualization_pages/demand_fulfillment_scroll_page.py new file mode 100644 index 0000000..1d398f8 --- /dev/null +++ b/test/pharma/visualization_pages/demand_fulfillment_scroll_page.py @@ -0,0 +1,52 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class DemandFulfillmentScrollPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Demand Fulfillment Scroll', + page_id='demand_fulfillment_scroll_tab', + url='demand_fulfillment_scroll', + input_table_names = ['Product'], + output_table_names = ['DemandInventory','TransportationActivity'], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_demand_fullfilment_scroll(), + # style={'height': '75vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_demand_fullfilment_scroll_product(), + # style={'height': '75vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + ] + return layout_children diff --git a/test/pharma/visualization_pages/demand_page.py b/test/pharma/visualization_pages/demand_page.py new file mode 100644 index 0000000..1721d44 --- /dev/null +++ b/test/pharma/visualization_pages/demand_page.py @@ -0,0 +1,89 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class DemandPage(VisualizationPage): + def __init__(self, dash_app: 
DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Demand', + page_id='demand_tab', + url='demand', + input_table_names=['Demand', 'Product'], + output_table_names=[], + ) + + def get_layout_children(self, pm: PlotlyManager): + view = "All" + layout_children = [ + # html.H1("My demand tab 4"), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + id='plotly_demand_bars', + # id={ + # 'type': chart_type, + # 'index': 'plotly_demand_bars' # Method on PlotlySupplyChainDataManager! + # }, + figure=pm.plotly_demand_bars(view=view), + style={'height': '75vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + id='plotly_demand_bars_v2', + # id={ + # 'type': chart_type, + # 'index': 'plotly_demand_pie' # Method on PlotlySupplyChainDataManager! + # }, + figure=pm.plotly_demand_bars(query="productGroup == 'Package'", + title="Total Package Demand"), + # style={'height': '800px'}, + style={'height': '55vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + id='plotly_demand_bars_v3', + # id={ + # 'type': chart_type, + # 'index': 'plotly_products_sunburst' # Method on PlotlySupplyChainDataManager! + # }, + figure=pm.plotly_demand_bars(query="productGroup != 'Package'", + title="Total Granulate Demand"), + # style={'height': '400px'}, + style={'height': '55vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + ] + return layout_children diff --git a/test/pharma/visualization_pages/inventory_dos_page.py b/test/pharma/visualization_pages/inventory_dos_page.py new file mode 100644 index 0000000..6c0e9ea --- /dev/null +++ b/test/pharma/visualization_pages/inventory_dos_page.py @@ -0,0 +1,82 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class InventoryDosPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Inventory Days of Supply', + page_id='inventory_dos_tab', + url='inventorydos', + input_table_names = ['RecipeProperties', 'Line', 'Product', 'Location', 'Plant', 'Demand'], + output_table_names = ['PlantInventory', 'WarehouseInventory', 'DemandInventory'], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_inventory_days_of_supply_line(), + # style={'height': '85vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_inventory_days_of_supply_line(mode='bar'), + # style={'height': '85vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_wh_inventory_days_of_supply_line(mode = 'bar'), + # style={'height': '85vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_inventory_days_of_supply_slack_line(mode='bar'), + # style={'height': '85vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + ] + 
return layout_children diff --git a/test/pharma/visualization_pages/inventory_page.py b/test/pharma/visualization_pages/inventory_page.py new file mode 100644 index 0000000..4d2ab05 --- /dev/null +++ b/test/pharma/visualization_pages/inventory_page.py @@ -0,0 +1,92 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class InventoryPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Inventory', + page_id='inventory_tab', + url='inventory', + input_table_names = ['RecipeProperties', 'Line', 'Product', 'Location', 'Plant', 'Demand'], + output_table_names = ['PlantInventory', 'WarehouseInventory', 'DemandInventory'], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + dbc.Row([ + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_wh_inventory(mode='area'), + # style={'height': '75vh', 'width': '36vw'}, + ), + ) + ]) + ), + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_plant_inventory(mode='area'), + # style={'height': '75vh', 'width': '36vw'}, + ), + ) + ]) + ), + ]), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_plant_inventory(mode='bar'), + # style={'height': '85vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_wh_inventory(mode='bar'), + # style={'height': '85vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_demand_inventory(mode = 'bar'), + # style={'height': '85vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + ] + return layout_children diff --git a/test/pharma/visualization_pages/kpi_page.py b/test/pharma/visualization_pages/kpi_page.py new file mode 100644 index 0000000..d40685c --- /dev/null +++ b/test/pharma/visualization_pages/kpi_page.py @@ -0,0 +1,161 @@ +from dash import dcc, dash_table +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class KpiPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='KPIs', + page_id='kpi_tab', + url='kpi', + input_table_names=['Line', 'Product', 'Location', 'Plant', 'Demand', 'BomItem'], + output_table_names=['DemandInventory','TransportationActivity', 'ProductionActivity', 'PlantInventory', 'WarehouseInventory', 'kpis', 'BusinessKPIs'], + ) + + def get_layout_children(self, pm: PlotlyManager): + kpis = pm.dm.kpis.reset_index() + b_kpis = pm.dm.business_kpis.reset_index() + + print(f"Utilization KPI = {pm.utilization_kpi()}") + + + # gauge_style = {'height': '30vh', 'width': '41vw', 'margin-left':'auto','margin-right':'auto', 'display':'block'} + gauge_style = {'height': '20vh', 'width': '20vw', 'margin-left':'auto','margin-right':'auto', 'display':'block'} + # gauge_style = {'height': '20vh'} + layout_children = [ + # ddk.Header([ddk.Title('Business KPI Gauges')]), + + dbc.Row([ + dbc.Col( 
+ dbc.Card([ + dbc.CardBody( + # dbc.Block(width=25), + dcc.Graph( + id="stat1", + # style={'height': '30vh', 'width': '41vw', 'margin-left':'auto','margin-right':'auto', 'display':'block'}, + style=gauge_style, + # style={'height': '20vh'}, + figure=pm.make_gauge(value=round(b_kpis['value'].iloc[0] * 100, 3), + title="Unfulfilled Demand %", max_val=100, + orange_threshold=2, red_threshold=5) + ),) + ], + # style={"border": "none", "outline": "solid green"}, + ) + ), + dbc.Col( + dbc.Card([ + dbc.CardBody( + # dbc.Block(width=25), + dcc.Graph( + id="stat2", + # style={'height': '30vh', 'width': '41vw', 'margin-left':'auto','margin-right':'auto', 'display':'block'}, + style=gauge_style, + figure=pm.make_gauge(value=round(b_kpis['value'].iloc[1] * 100, 3), + title="Backlog %", max_val=100, + orange_threshold=5, red_threshold=10) + ), ) + ], + )) + ], + # style={'padding':'15px'} + ), + + dbc.Row([ + dbc.Col( + dbc.Card([ + dbc.CardBody( + # dbc.Block(width=25), + dcc.Graph( + id="stat3", + # style={'height': '30vh', 'width': '26.5vw', 'margin-left':'auto','margin-right':'auto', 'display':'block'}, + style=gauge_style, + figure=pm.make_gauge_dos(value=round(pm.dos_kpi(), 3), + title="Inventory Days-of-Supply", + max_val=80) + ), )])), + dbc.Col( + dbc.Card([ + dbc.CardBody( + # dbc.Block(width=25), + dcc.Graph( + id="stat4", + # style={'height': '30vh', 'width': '26.5vw', 'margin-left':'auto','margin-right':'auto', 'display':'block'}, + style=gauge_style, + figure=pm.make_gauge(value=float(pm.calc_air_pct()), + title="Air Shipping %", + orange_threshold=10, red_threshold=30, + max_val=100) + ), )])), + dbc.Col( + dbc.Card([ + dbc.CardBody( + # dbc.Block(width=25), + dcc.Graph( + id="stat5", + # style={'height': '30vh', 'width': '26.5vw', 'margin-left':'auto','margin-right':'auto', 'display':'block'}, + style=gauge_style, + figure=pm.make_gauge(value=round(pm.utilization_kpi(), 3), + title="Utilization %", + orange_threshold=85, red_threshold=95, + max_val=100) + ), )])) + + ]), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + # id='plotly_transportation_bar', + # id={ + # 'type': chart_type, + # 'index': 'plotly_demand_bars' # Method on PlotlySupplyChainDataManager! 
+ # }, + figure=pm.kpi_heatmap(), + style={'height': '55vh', 'width': '79vw'}, + # style={'height': '35vh', 'width': '39vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + + # ddk.Header([ddk.Title('KPI Table')]), + + dbc.Row(style = {'height': '5px'}), + dbc.Row( + dbc.Col( + dbc.Card([ + dbc.CardBody( + dash_table.DataTable( + # style = {'width': 50}, + columns=[{"name": i, "id": i} for i in b_kpis.columns], + data=b_kpis.to_dict("records"), + editable=True, + style_cell={ + 'font_family': 'sans-serif', + 'font_size': '12px', + 'textAlign': 'left'}, + style_table={ + 'maxHeight': '300px', + 'width': '79vw', + # 'width': '500px', + 'overflowY': 'scroll' + }, + # style={'height': '35vh', 'width': '85vw'}, + ))], style = {'width':'80.5vw'}))), + + dbc.Row(style={'height': '50px'}), + + ] + + return layout_children diff --git a/test/pharma/visualization_pages/maps_page.py b/test/pharma/visualization_pages/maps_page.py new file mode 100644 index 0000000..47abf2e --- /dev/null +++ b/test/pharma/visualization_pages/maps_page.py @@ -0,0 +1,76 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class MapsPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Maps', + page_id='maps_tab', + url='maps', + input_table_names = ['Line', 'Product', 'Location', 'Plant', 'Demand', 'BomItem'], + output_table_names = ['DemandInventory','TransportationActivity', 'ProductionActivity', 'PlantInventory', 'WarehouseInventory'], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + # ddk.Block(), + + dbc.Card( + dcc.Graph( + figure=pm.line_map(), + # style={'height': '65vh', 'width': '79vw'}, + # style={'height': '800px', 'width': '1000px'}, + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.demand_choropleth_map(), + # style={'height': '65vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.unfulfilled_demand_choropleth_map(), + # style={'height': '65vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.unfulfilled_demand_choropleth_map(animation_col = "timePeriodSeq"), + # style={'height': '65vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + ] + return layout_children diff --git a/test/pharma/visualization_pages/planned_production_page.py b/test/pharma/visualization_pages/planned_production_page.py new file mode 100644 index 0000000..176eaf7 --- /dev/null +++ b/test/pharma/visualization_pages/planned_production_page.py @@ -0,0 +1,116 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class PlannedProductionPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Planned Production', + page_id='planned_production_tab', + url='planned_production', + input_table_names = ['Product', 'PlannedProductionActivity'], + output_table_names = ['ProductionActivity'], + ) + + 
def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_planned_production_activities_bars(title="Total Planned Production"), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_planned_production_activities_bars(query="productGroup == 'Package'",title="Planned Package Production"), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_production_slack_bars(title="Planned Production Slack"), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_production_excess_bars(title="Planned Production Difference"), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_production_excess_bars( + title="Planned Production Difference - Percentage", mode='percentage'), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_production_excess_bars(query="productGroup == 'Package'", + title="Planned Package Production Difference"), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + ] + + return layout_children diff --git a/test/pharma/visualization_pages/production_page.py b/test/pharma/visualization_pages/production_page.py new file mode 100644 index 0000000..fc01dc9 --- /dev/null +++ b/test/pharma/visualization_pages/production_page.py @@ -0,0 +1,54 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class ProductionPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Production', + page_id='production_tab', + url='production', + input_table_names = ['Product', 'Location'], + output_table_names = ['ProductionActivity'], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_production_activities_bars(title="Total Production"), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_production_activities_bars(query="productGroup == 'Package'",title="Package Production"), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + ] + + return layout_children diff --git a/test/pharma/visualization_pages/supply_page.py b/test/pharma/visualization_pages/supply_page.py new file mode 100644 index 0000000..8f4f113 --- /dev/null +++ b/test/pharma/visualization_pages/supply_page.py @@ -0,0 +1,71 @@ +from dash import dcc, html +import dash_bootstrap_components as dbc + +from 
dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class SupplyPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Supply Flow', + page_id='supply_tab', + url='supply', + input_table_names = ['RecipeProperties', 'Line', 'Product', 'Location', 'Plant', 'BomItem', 'WIP'], + output_table_names = ['PlantInventory', 'WarehouseInventory', 'DemandInventory', 'ProductionActivity', 'TransportationActivity'], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + dbc.Row( + dbc.Col( + dbc.Card([ + # dbc.CardHeader(title='Production'), + dbc.CardBody( + [dbc.CardHeader(html.Div("Inventory Flow", style={'width': '80vw'})), + dcc.Graph( + figure=pm.plotly_inventory_flow_sankey(), + # style={'height': '85vh', 'width': '79vw'}, + ) + ]) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + # dbc.CardHeader(title='Production'), + dbc.CardBody( + [dbc.CardHeader(html.Div("Production", style={'width': '80vw'})), + dcc.Graph( + figure=pm.plotly_production_activities_sankey(), + # style={'height': '45vh', 'width': '79vw'}, + ) + ]) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + # dbc.CardHeader(title='Production'), + dbc.CardBody( + [dbc.CardHeader(html.Div('Transportation', style={'width': '80vw'})), + dcc.Graph( + figure=pm.plotly_transportation_activities_sankey(), + # style={'height': '45vh', 'width': '79vw'}, + ) + ]) + ]) + # , width=12 + ) + ), + ] + + return layout_children diff --git a/test/pharma/visualization_pages/transportation_page.py b/test/pharma/visualization_pages/transportation_page.py new file mode 100644 index 0000000..abdf08a --- /dev/null +++ b/test/pharma/visualization_pages/transportation_page.py @@ -0,0 +1,108 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class TransportationPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Transportation', + page_id='transportation_tab', + url='transportation', + input_table_names = ['Line', 'Product', 'Location', 'Plant', 'Demand', 'BomItem'], + output_table_names = ['DemandInventory','TransportationActivity', 'ProductionActivity', 'PlantInventory', 'WarehouseInventory'], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + id='plotly_transportation_bar', + # id={ + # 'type': chart_type, + # 'index': 'plotly_demand_bars' # Method on PlotlySupplyChainDataManager! + # }, + figure=pm.plotly_transportation_bar( + query="originLocationName in ['Central_Warehouse', 'Abbott_WH_NL']", + title='Departing from Central Warehouse'), # Central_Warehouse + style={'height': '75vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + id='plotly_transportation_bar_v2', + # id={ + # 'type': chart_type, + # 'index': 'plotly_demand_bars' # Method on PlotlySupplyChainDataManager! 
+ # }, + figure=pm.plotly_transportation_bar(), + style={'height': '75vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + id='plotly_transportation_bar_v3', + # id={ + # 'type': chart_type, + # 'index': 'plotly_demand_bars' # Method on PlotlySupplyChainDataManager! + # }, + figure=pm.plotly_transportation_bar( + query="originLocationName in ['API_Plant', 'Abbott_Weesp_Plant']", + title='Departing from API Plant'), + style={'height': '75vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + id='plotly_transportation_bar_v4', + # id={ + # 'type': chart_type, + # 'index': 'plotly_demand_bars' # Method on PlotlySupplyChainDataManager! + # }, + figure=pm.plotly_transportation_bar( + query="originLocationName in ['Abbott_Olst_Plant','Packaging_Plant']", + title='Departing from Packaging Plant'), + style={'height': '75vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + ] + return layout_children diff --git a/test/pharma/visualization_pages/utilization_page.py b/test/pharma/visualization_pages/utilization_page.py new file mode 100644 index 0000000..c42ff85 --- /dev/null +++ b/test/pharma/visualization_pages/utilization_page.py @@ -0,0 +1,52 @@ +from dash import dcc +import dash_bootstrap_components as dbc + +from dse_do_dashboard.do_dash_app import DoDashApp +from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage +from dse_do_utils.plotlymanager import PlotlyManager + + +class UtilizationPage(VisualizationPage): + def __init__(self, dash_app: DoDashApp): + super().__init__(dash_app=dash_app, + page_name='Utilization', + page_id='utilization_tab', + url='utilization', + input_table_names = ['Product'], + output_table_names = ['LineUtilization', 'ProductionActivity'], + ) + + def get_layout_children(self, pm: PlotlyManager): + layout_children = [ + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_excess_utilization_line_time_bars(), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + + dbc.Row( + dbc.Col( + dbc.Card([ + ## dbc.CardHeader(""), + dbc.CardBody( + dcc.Graph( + figure=pm.plotly_utilization_line_time_bars(), + # style={'height': '100vh', 'width': '79vw'}, + ) + ) + ]) + # , width=12 + ) + ), + ] + return layout_children
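
A note on the schema pattern in this patch: every ScenarioDbTable subclass does the same four things: declare SQLAlchemy Column metadata (marking primary keys), optionally declare ForeignKeyConstraint metadata, append the caller-supplied extended_columns_metadata, and delegate to the base constructor. A minimal sketch of a new table in that style, assuming the ScenarioDbTable base class from dse_do_utils.scenariodbmanager; the table name 'plant_utilization' and its columns are illustrative only:

from typing import List
from sqlalchemy import Column, String, Float, ForeignKeyConstraint
from dse_do_utils.scenariodbmanager import ScenarioDbTable

class PlantUtilizationTable(ScenarioDbTable):
    """Hypothetical output table, mirroring the pattern of the tables in this patch."""
    def __init__(self, db_table_name: str = 'plant_utilization', extended_columns_metadata: List[Column] = []):
        columns_metadata = [
            Column('plantName', String(256), primary_key=True),   # primary-key column
            Column('utilization', Float(), primary_key=False),    # plain value column
        ]
        constraints_metadata = [
            # Foreign key into the 'plant' table, as in the other tables
            ForeignKeyConstraint(['plantName'], ['plant.plantName']),
        ]
        columns_metadata.extend(extended_columns_metadata)  # splice in caller-supplied extra columns
        super().__init__(db_table_name, columns_metadata, constraints_metadata)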
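ScnfoScenarioDbManager only supplies defaults for input_db_tables and output_db_tables, so callers can pass their own OrderedDicts instead; note that a passed-in dict replaces, rather than extends, the corresponding default set. A usage sketch under two assumptions: the credentials dict shape depends on your Db2 service, and wiring in BusinessKpiTable under the key 'BusinessKPIs' (which kpi_page.py lists in output_table_names but the defaults omit) is illustrative:

from collections import OrderedDict

# Placeholder credentials; in this repo they would come from the git-ignored my_secrets/ package.
db_credentials = {'username': '...', 'password': '...', 'host': '...', 'port': '50000', 'database': 'BLUDB'}

# Default table set:
dbm = ScnfoScenarioDbManager(credentials=db_credentials, schema='PHARMA', echo=False, multi_scenario=True)

# Custom output set (replaces the default output tables entirely):
dbm = ScnfoScenarioDbManager(
    output_db_tables=OrderedDict([
        ('ProductionActivity', ProductionActivityTable()),
        ('kpis', KpiTable()),
        ('BusinessKPIs', BusinessKpiTable()),  # key chosen to match kpi_page.py's output_table_names
    ]),
    credentials=db_credentials,
    schema='PHARMA',
)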
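All of the test/pharma pages in this patch follow one recipe: subclass VisualizationPage, declare which input and output scenario tables the page reads (so the framework loads only those), and assemble dbc Rows/Cols/Cards around dcc.Graph figures produced by the PlotlyManager. A minimal sketch of a new page in that style; MyChartPage, its url, and plotly_my_chart are placeholders, not part of this patch:

from dash import dcc
import dash_bootstrap_components as dbc

from dse_do_dashboard.do_dash_app import DoDashApp
from dse_do_dashboard.visualization_pages.visualization_page import VisualizationPage
from dse_do_utils.plotlymanager import PlotlyManager


class MyChartPage(VisualizationPage):
    def __init__(self, dash_app: DoDashApp):
        super().__init__(dash_app=dash_app,
                         page_name='My Chart',          # label shown in the sidebar
                         page_id='my_chart_tab',        # must be unique across pages
                         url='my_chart',                # URL path segment
                         input_table_names=['Demand'],  # input scenario tables this page needs
                         output_table_names=[],         # output scenario tables this page needs
                         )

    def get_layout_children(self, pm: PlotlyManager):
        # One figure per Row/Col/Card, mirroring the pages in this patch.
        return [
            dbc.Row(
                dbc.Col(
                    dbc.Card([
                        dbc.CardBody(
                            dcc.Graph(figure=pm.plotly_my_chart())  # hypothetical PlotlyManager method
                        )
                    ])
                )
            ),
        ]

When a page assigns explicit ids to several dcc.Graph components, the ids must be distinct: Dash rejects duplicate component ids in a layout, which is why the transportation page's graphs need the _v2/_v3/_v4 suffixes rather than all sharing 'plotly_transportation_bar'.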