From 667bd1f25eca24b6c6d1458ca7039070043115bc Mon Sep 17 00:00:00 2001 From: sebastianherreramonterrosa Date: Mon, 28 Oct 2024 22:22:13 -0500 Subject: [PATCH] Write readme module fit and simulation --- README.md | 258 +++++++++++++++++- SIMULATION.md | 197 ++++++------- .../process_simulation/process_simulation.py | 128 ++------- 3 files changed, 365 insertions(+), 218 deletions(-) diff --git a/README.md b/README.md index de2e60e..d5e8d82 100644 --- a/README.md +++ b/README.md @@ -23,10 +23,12 @@

Phitter analyzes datasets and determines the best analytical probability distributions that represent them. Phitter studies over 80 probability distributions, both continuous and discrete, 3 goodness-of-fit tests, and interactive visualizations. For each selected probability distribution, a standard modeling guide is provided along with spreadsheets that detail the methodology for using the chosen distribution in data science, operations research, and artificial intelligence. +

+

In addition, Phitter offers the capability to perform process simulations, allowing users to graph and observe minimum times for specific observations. It also supports queue simulations with flexibility to configure various parameters, such as the number of servers, maximum population size, system capacity, and different queue disciplines, including First-In-First-Out (FIFO), Last-In-First-Out (LIFO), and priority-based service (PBS). -

+

This repository contains the implementation of the Python library and the kernel of Phitter Web.

@@ -45,9 +47,11 @@ python: >=3.9 pip install phitter ``` + + ## Usage -### Notebook's Tutorials +### ***1. Fit Notebook's Tutorials*** | Tutorial | Notebooks | | :------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | @@ -57,7 +61,28 @@ pip install phitter | **Fit Specific Disribution** | Open In Colab | | **Working Distribution** | Open In Colab | -### General +### ***2. Simulation Notebook's Tutorials*** +pending + +## Documentation + + + + + + + + + + + +
+ +Documentation Fit Module + + + +### General Fit ```python import phitter @@ -466,6 +491,233 @@ distribution.mode # -> 733.3333333333333 | poisson | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | | uniform | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | +
+ + + + + + + + + +
+Documentation Simulation Module + + + +## Process Simulation + +This will help you to understand your processes. To use it, run the following line + +```python +from phitter import simulation + +# Create a simulation process instance +simulation = simulation.ProcessSimulation() + +``` + +### Add processes to your simulation instance + +There are two ways to add processes to your simulation instance: + +- Adding a **process _without_ preceding process (new branch)** +- Adding a **process _with_ preceding process (with previous ids)** + +#### Process _without_ preceding process (new branch) + +```python +# Add a new process without preceding process +simulation.add_process( + prob_distribution="normal", + parameters={"mu": 5, "sigma": 2}, + process_id="first_process", + number_of_products=10, + number_of_servers=3, + new_branch=True, +) + +``` + +#### Process _with_ preceding process (with previous ids) + +```python +# Add a new process with preceding process +simulation.add_process( + prob_distribution="exponential", + parameters={"lambda": 4}, + process_id="second_process", + previous_ids=["first_process"], +) + +``` + +#### All together and adding some new process + +The order in which you add each process **_matters_**. You can add as many processes as you need. 
+ +```python +# Add a new process without preceding process +simulation.add_process( + prob_distribution="normal", + parameters={"mu": 5, "sigma": 2}, + process_id="first_process", + number_of_products=10, + number_of_servers=3, + new_branch=True, +) + +# Add a new process with preceding process +simulation.add_process( + prob_distribution="exponential", + parameters={"lambda": 4}, + process_id="second_process", + previous_ids=["first_process"], +) + +# Add a new process with preceding process +simulation.add_process( + prob_distribution="gamma", + parameters={"alpha": 15, "beta": 3}, + process_id="third_process", + previous_ids=["first_process"], +) + +# Add a new process without preceding process +simulation.add_process( + prob_distribution="exponential", + parameters={"lambda": 4.3}, + process_id="fourth_process", + new_branch=True, +) + + +# Add a new process with preceding process +simulation.add_process( + prob_distribution="beta", + parameters={"alpha": 1, "beta": 1, "A": 2, "B": 3}, + process_id="fifth_process", + previous_ids=["second_process", "fourth_process"], +) + +# Add a new process with preceding process +simulation.add_process( + prob_distribution="normal", + parameters={"mu": 15, "sigma": 2}, + process_id="sixth_process", + previous_ids=["third_process", "fifth_process"], +) +``` + +### Visualize your processes + +You can visualize your processes to see if what you're trying to simulate is your actual process. 
+ +```python +# Graph your process +simulation.process_graph() +``` + +![Simulation](./multimedia/simulation_process_graph.png) + +### Start Simulation + +You can simulate and have different simulation time values or you can create a confidence interval for your process + +#### Run Simulation + +Simulate several scenarios of your complete process + +```python +# Run Simulation +simulation.run(number_of_simulations=100) + +# After run +simulation: pandas.Dataframe +``` + +### Review Simulation Metrics by Stage + +If you want to review average time and standard deviation by stage run this line of code + +```python +# Review simulation metrics +simulation.simulation_metrics() -> pandas.Dataframe +``` + +#### Run confidence interval + +If you want to have a confidence interval for the simulation metrics, run the following line of code + +```python +# Confidence interval for Simulation metrics +simulation.run_confidence_interval( + confidence_level=0.99, + number_of_simulations=100, + replications=10, +) -> pandas.Dataframe +``` + +## Queue Simulation + +If you need to simulate queues run the following code: + +```python +from phitter import simulation + +# Create a simulation process instance +simulation = simulation.QueueingSimulation( + a="exponential", + a_paramters={"lambda": 5}, + s="exponential", + s_parameters={"lambda": 20}, + c=3, +) +``` + +In this case we are going to simulate **a** (arrivals) with _exponential distribution_ and **s** (service) as _exponential distribution_ with **c** equals to 3 different servers. + +By default Maximum Capacity **k** is _infinity_, total population **n** is _infinity_ and the queue discipline **d** is _FIFO_. 
As we are not selecting **d** equals to "PBS" we don't have any information to add for **pbs_distribution** nor **pbs_parameters** + +### Run the simulation + +If you want to have the simulation results + +```python +# Run simulation +simulation = simulation.run(simulation_time = 2000) +simulation: pandas.Dataframe +``` + +If you want to see some metrics and probabilities from this simulation you should use:: + +```python +# Calculate metrics +simulation.metrics_summary() -> pandas.Dataframe + +# Calculate probabilities +number_probability_summary() -> pandas.Dataframe +``` + +### Run Confidence Interval for metrics and probabilities + +If you want to have a confidence interval for your metrics and probabilities you should run the following line + +```python +# Calculate confidence interval for metrics and probabilities +probabilities, metrics = simulation.confidence_interval_metrics( + simulation_time=2000, + confidence_level=0.99, + replications=10, +) + +probabilities -> pandas.Dataframe +metrics -> pandas.Dataframe +``` + +
+ ## Contribution If you would like to contribute to the Phitter project, please create a pull request with your proposed changes or enhancements. All contributions are welcome! diff --git a/SIMULATION.md b/SIMULATION.md index 8504199..b403c91 100644 --- a/SIMULATION.md +++ b/SIMULATION.md @@ -1,50 +1,3 @@ -

- - - - phitter-dark-logo - -

- -

- - Downloads - - - License - - - Supported Python versions - - - Tests - -

- -

- Phitter analyzes datasets and determines the best analytical probability distributions that represent them. Phitter studies over 80 probability distributions, both continuous and discrete, 3 goodness-of-fit tests, and interactive visualizations. For each selected probability distribution, a standard modeling guide is provided along with spreadsheets that detail the methodology for using the chosen distribution in data science, operations research, and artificial intelligence. - - In addition, Phitter offers the capability to perform process simulations, allowing users to graph and observe minimum times for specific observations. It also supports queue simulations with flexibility to configure various parameters, such as the number of servers, maximum population size, system capacity, and different queue disciplines, including First-In-First-Out (FIFO), Last-In-First-Out (LIFO), and priority-based service (PBS). - -

-

- This repository contains the implementation of the python library and the kernel of Phitter Web -

- -## Installation - -### Requirements - -```console -python: >=3.9 -``` - -### PyPI - -```console -pip install phitter -``` - # Simulation ## Process Simulation @@ -63,19 +16,21 @@ simulation = simulation.ProcessSimulation() There are two ways to add processes to your simulation instance: -- Adding a **process _without_ preceding process (new branch)** -- Adding a **process _with_ preceding process (with previous ids)** +- Adding a **process _without_ preceding process (new branch)** +- Adding a **process _with_ preceding process (with previous ids)** #### Process _without_ preceding process (new branch) ```python # Add a new process without preceding process -simulation.add_process(prob_distribution = "normal", # Probability Distribution - parameters = {"mu": 5, "sigma": 2}, # Parameters - process_id = "first_process", # Process name - number_of_products = 10, # Number of products to be simulated in this stage - number_of_servers = 3, # Number of servers in that process - new_branch=True) # New branch +simulation.add_process( + prob_distribution="normal", + parameters={"mu": 5, "sigma": 2}, + process_id="first_process", + number_of_products=10, + number_of_servers=3, + new_branch=True, +) ``` @@ -83,10 +38,12 @@ simulation.add_process(prob_distribution = "normal", # Probability Distribution ```python # Add a new process with preceding process -simulation.add_process(prob_distribution = "exponential", # Probability Distribution - parameters = {"lambda": 4}, # Parameters - process_id = "second_process", # Process name - previous_ids = ["first_process"]) # Previous Process +simulation.add_process( + prob_distribution="exponential", + parameters={"lambda": 4}, + process_id="second_process", + previous_ids=["first_process"], +) ``` @@ -96,44 +53,55 @@ The order in which you add each process **_matters_**. 
You can add as many proce ```python # Add a new process without preceding process -simulation.add_process(prob_distribution = "normal", # Probability Distribution - parameters = {"mu": 5, "sigma": 2}, # Parameters - process_id = "first_process", # Process name - number_of_products = 10, # Number of products to be simulated in this stage - number_of_servers = 3, # Number of servers in that process - new_branch=True) # New branch +simulation.add_process( + prob_distribution="normal", + parameters={"mu": 5, "sigma": 2}, + process_id="first_process", + number_of_products=10, + number_of_servers=3, + new_branch=True, +) # Add a new process with preceding process -simulation.add_process(prob_distribution = "exponential", # Probability Distribution - parameters = {"lambda": 4}, # Parameters - process_id = "second_process", # Process name - previous_ids = ["first_process"]) # Previous Process +simulation.add_process( + prob_distribution="exponential", + parameters={"lambda": 4}, + process_id="second_process", + previous_ids=["first_process"], +) # Add a new process with preceding process -simulation.add_process(prob_distribution = "gamma", # Probability Distribution - parameters = {"alpha": 15, "beta": 3}, # Parameters - process_id = "third_process", # Process name - previous_ids = ["first_process"]) # Previous Process +simulation.add_process( + prob_distribution="gamma", + parameters={"alpha": 15, "beta": 3}, + process_id="third_process", + previous_ids=["first_process"], +) # Add a new process without preceding process -simulation.add_process(prob_distribution = "exponential", # Probability Distribution - parameters = {"lambda": 4.3}, # Parameters - process_id = "fourth_process", # Process name - new_branch=True) # New branch +simulation.add_process( + prob_distribution="exponential", + parameters={"lambda": 4.3}, + process_id="fourth_process", + new_branch=True, +) # Add a new process with preceding process -simulation.add_process(prob_distribution = "beta", # 
Probability Distribution - parameters = {"alpha": 1, "beta": 1, "A": 2, "B": 3}, # Parameters - process_id = "fifth_process", # Process name - previous_ids = ["second_process", "fourth_process"]) # Previous Process - You can add several previous processes +simulation.add_process( + prob_distribution="beta", + parameters={"alpha": 1, "beta": 1, "A": 2, "B": 3}, + process_id="fifth_process", + previous_ids=["second_process", "fourth_process"], +) # Add a new process with preceding process -simulation.add_process(prob_distribution = "normal", # Probability Distribution - parameters = {"mu": 15, "sigma": 2}, # Parameters - process_id = "sixth_process", # Process name - previous_ids = ["third_process", "fifth_process"]) # Previous Process - You can add several previous processes - +simulation.add_process( + prob_distribution="normal", + parameters={"mu": 15, "sigma": 2}, + process_id="sixth_process", + previous_ids=["third_process", "fifth_process"], +) ``` ### Visualize your processes @@ -157,10 +125,10 @@ Simulate several scenarios of your complete process ```python # Run Simulation -simulation.run(number_of_simulations = 100) -simulation +simulation.run(number_of_simulations=100) -# -> df +# After run +simulation: pandas.Dataframe ``` ### Review Simulation Metrics by Stage @@ -169,9 +137,7 @@ If you want to review average time and standard deviation by stage run this line ```python # Review simulation metrics -simulation.simulation_metrics() - -# -> df +simulation.simulation_metrics() -> pandas.Dataframe ``` #### Run confidence interval @@ -180,10 +146,11 @@ If you want to have a confidence interval for the simulation metrics, run the fo ```python # Confidence interval for Simulation metrics -simulation.run_confidence_interval(confidence_level = 0.99, - number_of_simulations = 100, - replications = 10) -# -> df +simulation.run_confidence_interval( + confidence_level=0.99, + number_of_simulations=100, + replications=10, +) -> pandas.Dataframe ``` ## Queue Simulation 
@@ -194,12 +161,13 @@ If you need to simulate queues run the following code: from phitter import simulation # Create a simulation process instance -simulation = simulation.QueueingSimulation(a = "exponential", - a_paramters = {"lambda": 5}, - s = "exponential", - s_parameters = {"lambda": 20}, - c = 3) - +simulation = simulation.QueueingSimulation( + a="exponential", + a_paramters={"lambda": 5}, + s="exponential", + s_parameters={"lambda": 20}, + c=3, +) ``` In this case we are going to simulate **a** (arrivals) with _exponential distribution_ and **s** (service) as _exponential distribution_ with **c** equals to 3 different servers. @@ -213,20 +181,17 @@ If you want to have the simulation results ```python # Run simulation simulation = simulation.run(simulation_time = 2000) -simulation -# -> df result +simulation: pandas.Dataframe ``` If you want to see some metrics and probabilities from this simulation you should use:: ```python # Calculate metrics -simulation.metrics_summary() -# -> df result +simulation.metrics_summary() -> pandas.Dataframe # Calculate probabilities -number_probability_summary() -# -> df result +number_probability_summary() -> pandas.Dataframe ``` ### Run Confidence Interval for metrics and probabilities @@ -235,12 +200,12 @@ If you want to have a confidence interval for your metrics and probabilities you ```python # Calculate confidence interval for metrics and probabilities -probabilities, metrics = simulation.confidence_interval_metrics(simulation_time = 2000, - confidence_level = 0.99, - replications = 10) -probabilities -# -> df result - -metrics -# -> df result +probabilities, metrics = simulation.confidence_interval_metrics( + simulation_time=2000, + confidence_level=0.99, + replications=10, +) + +probabilities -> pandas.Dataframe +metrics -> pandas.Dataframe ``` diff --git a/phitter/simulation/process_simulation/process_simulation.py b/phitter/simulation/process_simulation/process_simulation.py index 6f0de41..9b3639c 100644 --- 
a/phitter/simulation/process_simulation/process_simulation.py +++ b/phitter/simulation/process_simulation/process_simulation.py @@ -1,7 +1,6 @@ import math import random -import numpy as np import pandas as pd from graphviz import Digraph from IPython.display import display @@ -19,10 +18,7 @@ def __init__(self) -> None: self.number_of_products = dict() self.process_positions = dict() self.next_process = dict() - self.probability_distribution = ( - phitter.continuous.CONTINUOUS_DISTRIBUTIONS - | phitter.discrete.DISCRETE_DISTRIBUTIONS - ) + self.probability_distribution = phitter.continuous.CONTINUOUS_DISTRIBUTIONS | phitter.discrete.DISCRETE_DISTRIBUTIONS self.servers = dict() self.simulation_result = dict() @@ -72,9 +68,7 @@ def add_process( """ # Verify if the probability is created in phitter if prob_distribution not in self.probability_distribution.keys(): - raise ValueError( - f"""Please select one of the following probability distributions: '{"', '".join(self.probability_distribution.keys())}'.""" - ) + raise ValueError(f"""Please select one of the following probability distributions: '{"', '".join(self.probability_distribution.keys())}'.""") else: # Verify unique id name for each process if process_id not in self.order.keys(): @@ -84,9 +78,7 @@ def add_process( if number_of_servers >= 1: # Verify that if you create a new branch, it's impossible to have a previous id (or preceding process). One of those is incorrect if new_branch == True and previous_ids != None: - raise ValueError( - f"""You cannot select 'new_branch' is equals to True if 'previous_id' is not empty. OR you cannot add 'previous_ids' if 'new_branch' is equals to True.""" - ) + raise ValueError(f"""You cannot select 'new_branch' is equals to True if 'previous_id' is not empty. 
OR you cannot add 'previous_ids' if 'new_branch' is equals to True.""") else: # If it is a new branch then initialize all the needed paramters if new_branch == True: @@ -95,27 +87,17 @@ def add_process( self.order[process_id] = branch_id self.number_of_products[process_id] = number_of_products self.servers[process_id] = number_of_servers - self.process_prob_distr[process_id] = ( - self.probability_distribution[prob_distribution]( - parameters - ) - ) + self.process_prob_distr[process_id] = self.probability_distribution[prob_distribution](parameters) self.next_process[process_id] = 0 # Create id of that process in the simulation result self.simulation_result[process_id] = [] # If it is NOT a new branch then initialize all the needed paramters - elif previous_ids != None and all( - id in self.order.keys() for id in previous_ids - ): + elif previous_ids != None and all(id in self.order.keys() for id in previous_ids): self.order[process_id] = previous_ids self.number_of_products[process_id] = number_of_products self.servers[process_id] = number_of_servers - self.process_prob_distr[process_id] = ( - self.probability_distribution[prob_distribution]( - parameters - ) - ) + self.process_prob_distr[process_id] = self.probability_distribution[prob_distribution](parameters) self.next_process[process_id] = 0 # Create id of that process in the simulation result self.simulation_result[process_id] = [] @@ -127,17 +109,11 @@ def add_process( f"""Please create a new_brach == True if you need a new process or specify the previous process/processes (previous_ids) that are before this one. 
Processes that have been added: '{"', '".join(self.order.keys())}'.""" ) else: - raise ValueError( - f"""You must add number_of_servers grater or equals than 1.""" - ) + raise ValueError(f"""You must add number_of_servers grater or equals than 1.""") else: - raise ValueError( - f"""You must add number_of_products grater or equals than 1.""" - ) + raise ValueError(f"""You must add number_of_products grater or equals than 1.""") else: - raise ValueError( - f"""You need to create diferent process_id for each process, '{process_id}' already exists.""" - ) + raise ValueError(f"""You need to create diferent process_id for each process, '{process_id}' already exists.""") def run(self, number_of_simulations: int = 1) -> list[float]: """Simulation of the described process @@ -166,38 +142,25 @@ def run(self, number_of_simulations: int = 1) -> list[float]: if self.servers[self.branches[key]] == 1: # Simulate the time it took to create each product needed for _ in range(self.number_of_products[self.branches[key]]): - partial_result += self.process_prob_distr[ - self.branches[key] - ].ppf(random.random()) + partial_result += self.process_prob_distr[self.branches[key]].ppf(random.random()) # Add all simulation time according to the time it took to create all products in that stage simulation_partial_result[self.branches[key]] = partial_result # Add this partial result to see the average time of this specific process - self.simulation_result[self.branches[key]].append( - simulation_partial_result[self.branches[key]] - ) + self.simulation_result[self.branches[key]].append(simulation_partial_result[self.branches[key]]) # Because we are simulating the "new branch" or first processes, accumulative it's the same as partial result - simulation_accumulative_result[self.branches[key]] = ( - simulation_partial_result[self.branches[key]] - ) + simulation_accumulative_result[self.branches[key]] = simulation_partial_result[self.branches[key]] # If there are more than one servers in that 
process else: # Simulate the time it took to create each product needed - products_times = [ - self.process_prob_distr[self.branches[key]].ppf(random.random()) - for _ in range(self.number_of_products[self.branches[key]]) - ] + products_times = [self.process_prob_distr[self.branches[key]].ppf(random.random()) for _ in range(self.number_of_products[self.branches[key]])] # Initialize dictionary - servers_dictionary = { - server: 0 for server in range(self.servers[self.branches[key]]) - } + servers_dictionary = {server: 0 for server in range(self.servers[self.branches[key]])} # Organize times according to the number of machines you have for product in products_times: # Identify server with the shortest time of all - min_server_time = min( - servers_dictionary, key=servers_dictionary.get - ) + min_server_time = min(servers_dictionary, key=servers_dictionary.get) # Add product time to that server servers_dictionary[min_server_time] += product @@ -207,13 +170,9 @@ def run(self, number_of_simulations: int = 1) -> list[float]: # Add all simulation time according to the time it took to create all products in that stage simulation_partial_result[self.branches[key]] = partial_result # Add this partial result to see the average time of this specific process - self.simulation_result[self.branches[key]].append( - simulation_partial_result[self.branches[key]] - ) + self.simulation_result[self.branches[key]].append(simulation_partial_result[self.branches[key]]) # Because we are simulating the "new branch" or first processes, accumulative it's the same as partial result - simulation_accumulative_result[self.branches[key]] = ( - simulation_partial_result[self.branches[key]] - ) + simulation_accumulative_result[self.branches[key]] = simulation_partial_result[self.branches[key]] # For every process for key in self.process_prob_distr.keys(): @@ -224,15 +183,11 @@ def run(self, number_of_simulations: int = 1) -> list[float]: if self.servers[key] == 1: # Simulate all products time for _ 
in range(self.number_of_products[key]): - partial_result += self.process_prob_distr[key].ppf( - random.random() - ) + partial_result += self.process_prob_distr[key].ppf(random.random()) # Save partial result simulation_partial_result[key] = partial_result # Add this partial result to see the average time of this specific process - self.simulation_result[key].append( - simulation_partial_result[key] - ) + self.simulation_result[key].append(simulation_partial_result[key]) # Accumulate this partial result plus the previous processes of this process simulation_accumulative_result[key] = ( simulation_partial_result[key] @@ -246,22 +201,15 @@ def run(self, number_of_simulations: int = 1) -> list[float]: # If there are more than one servers in that process else: # Simulate the time it took to create each product needed - products_times = [ - self.process_prob_distr[key].ppf(random.random()) - for _ in range(self.number_of_products[key]) - ] + products_times = [self.process_prob_distr[key].ppf(random.random()) for _ in range(self.number_of_products[key])] # Initialize dictionary - servers_dictionary = { - server: 0 for server in range(self.servers[key]) - } + servers_dictionary = {server: 0 for server in range(self.servers[key])} # Organize times according to the number of machines you have for product in products_times: # Identify server with the shortest time of all - min_server_time = min( - servers_dictionary, key=servers_dictionary.get - ) + min_server_time = min(servers_dictionary, key=servers_dictionary.get) # Add product time to that server servers_dictionary[min_server_time] += product @@ -271,9 +219,7 @@ def run(self, number_of_simulations: int = 1) -> list[float]: # Save partial result simulation_partial_result[key] = partial_result # Add this partial result to see the average time of this specific process - self.simulation_result[key].append( - simulation_partial_result[key] - ) + self.simulation_result[key].append(simulation_partial_result[key]) # Accumulate 
this partial result plus the previous processes of this process simulation_accumulative_result[key] = ( simulation_partial_result[key] @@ -311,15 +257,11 @@ def simulation_metrics(self) -> pd.DataFrame: # Calculate all metrics metrics_dict_1 = {f"Avg. {column}": df[column].mean() for column in df.columns} - metrics_dict_2 = { - f"Std. Dev. {column}": df[column].std() for column in df.columns - } + metrics_dict_2 = {f"Std. Dev. {column}": df[column].std() for column in df.columns} metrics_dict = metrics_dict_1 | metrics_dict_2 # Create result dataframe - metrics = pd.DataFrame.from_dict(metrics_dict, orient="index").rename( - columns={0: "Value"} - ) + metrics = pd.DataFrame.from_dict(metrics_dict, orient="index").rename(columns={0: "Value"}) metrics.index.name = "Metrics" @@ -360,27 +302,15 @@ def run_confidence_interval( z = normal_standard.ppf((1 + confidence_level) / 2) ## Confidence Interval avg__2 = mean__2.copy() - lower_bound = ( - (mean__2 - (z * standard_error)) - .copy() - .rename(columns={"Value": "LB - Value"}) - ) - upper_bound = ( - (mean__2 + (z * standard_error)) - .copy() - .rename(columns={"Value": "UB - Value"}) - ) + lower_bound = (mean__2 - (z * standard_error)).copy().rename(columns={"Value": "LB - Value"}) + upper_bound = (mean__2 + (z * standard_error)).copy().rename(columns={"Value": "UB - Value"}) avg__2 = avg__2.rename(columns={"Value": "AVG - Value"}) tot_metrics_interval = pd.concat([lower_bound, avg__2, upper_bound], axis=1) - tot_metrics_interval = tot_metrics_interval[ - ["LB - Value", "AVG - Value", "UB - Value"] - ] + tot_metrics_interval = tot_metrics_interval[["LB - Value", "AVG - Value", "UB - Value"]] # Return confidence interval return tot_metrics_interval.reset_index() - def process_graph( - self, graph_direction: str = "LR", save_graph_pdf: bool = False - ) -> None: + def process_graph(self, graph_direction: str = "LR", save_graph_pdf: bool = False) -> None: """Generates the graph of the process Args: