From a478a8caa76974c58659e46770966a17c741ebd5 Mon Sep 17 00:00:00 2001
From: VipinDevelops
Date: Sat, 14 Sep 2024 23:49:59 +0530
Subject: [PATCH 1/8] add error message

---
 .../mhq/service/ai/ai_analytics_service.py | 59 ++++++++++++-------
 .../pages/api/internal/ai/dora_metrics.ts  | 34 ++++++++++-
 2 files changed, 69 insertions(+), 24 deletions(-)

diff --git a/backend/analytics_server/mhq/service/ai/ai_analytics_service.py b/backend/analytics_server/mhq/service/ai/ai_analytics_service.py
index 14a3d0dc..8d75fe71 100644
--- a/backend/analytics_server/mhq/service/ai/ai_analytics_service.py
+++ b/backend/analytics_server/mhq/service/ai/ai_analytics_service.py
@@ -2,7 +2,7 @@
 import requests
 from http import HTTPStatus
 from enum import Enum
-from typing import Dict, List
+from typing import Dict, List, Union
 
 
 class AIProvider(Enum):
@@ -44,7 +44,27 @@ def __init__(self, llm: LLM, access_token: str):
     def _get_message(self, message: str, role: str = "user"):
         return {"role": role, "content": message}
 
+    def _handle_api_response(self, response) -> Union[Dict[str, str], Dict[str, Union[str, int]]]:
+        """
+        Handles the API response, returning a success or error structure that the frontend can use.
+        """
+        if response.status_code == HTTPStatus.OK:
+            return {"status": "success", "data": response.json()["choices"][0]["message"]['content']}
+        elif response.status_code == HTTPStatus.UNAUTHORIZED:
+            return {
+                "status": "error",
+                "message": "Unauthorized. Please check your access token.",
+            }
+        else:
+            return {
+                "status": "error",
+                "message": f"Unexpected error: {response.text}",
+            }
+
     def _open_ai_fetch_completion_open_ai(self, messages: List[Dict[str, str]]):
+        """
+        Handles the request to OpenAI API for fetching completions.
+        """
         payload = {
             "model": self.LLM_NAME_TO_MODEL_MAP[self._llm],
             "temperature": 0.6,
@@ -53,13 +73,12 @@ def _open_ai_fetch_completion_open_ai(self, messages: List[Dict[str, str]]):
         api_url = "https://api.openai.com/v1/chat/completions"
 
         response = requests.post(api_url, headers=self._headers, json=payload)
-        print(payload, api_url, response)
-        if response.status_code != HTTPStatus.OK:
-            raise Exception(response.json())
-
-        return response.json()
+        return self._handle_api_response(response)
 
     def _fireworks_ai_fetch_completions(self, messages: List[Dict[str, str]]):
+        """
+        Handles the request to Fireworks AI API for fetching completions.
+        """
         payload = {
             "model": self.LLM_NAME_TO_MODEL_MAP[self._llm],
             "temperature": 0.6,
@@ -73,28 +92,24 @@ def _fireworks_ai_fetch_completions(self, messages: List[Dict[str, str]]):
         api_url = "https://api.fireworks.ai/inference/v1/chat/completions"
 
         response = requests.post(api_url, headers=self._headers, json=payload)
-        if response.status_code != HTTPStatus.OK:
-            raise Exception(response.json())
-
-        return response.json()
-
-    def _fetch_completion(self, messages: List[Dict[str, str]]):
+        return self._handle_api_response(response)
 
+    def _fetch_completion(self, messages: List[Dict[str, str]]) -> Union[Dict[str, str], Dict[str, Union[str, int]]]:
+        """
+        Fetches the completion using the appropriate AI provider based on the LLM.
+        """
         if self._ai_provider == AIProvider.FIREWORKS_AI:
-            return self._fireworks_ai_fetch_completions(messages)["choices"][0][
-                "message"
-            ]["content"]
+            return self._fireworks_ai_fetch_completions(messages)
 
         if self._ai_provider == AIProvider.OPEN_AI:
-            return self._open_ai_fetch_completion_open_ai(messages)["choices"][0][
-                "message"
-            ]["content"]
+            return self._open_ai_fetch_completion_open_ai(messages)
 
-        raise Exception(f"Invalid AI provider {self._ai_provider}")
+        return {
+            "status": "error",
+            "message": f"Invalid AI provider {self._ai_provider}",
+        }
 
-    def get_dora_metrics_score(
-        self, four_keys_data: Dict[str, float]
-    ) -> Dict[str, str]:
+    def get_dora_metrics_score(self, four_keys_data: Dict[str, float]) -> Union[Dict[str, str], Dict[str, Union[str, int]]]:
         """
         Calculate the DORA metrics score using input data and an LLM (Language Learning Model).
diff --git a/web-server/pages/api/internal/ai/dora_metrics.ts b/web-server/pages/api/internal/ai/dora_metrics.ts
index 551ff75f..0e4e09ff 100644
--- a/web-server/pages/api/internal/ai/dora_metrics.ts
+++ b/web-server/pages/api/internal/ai/dora_metrics.ts
@@ -103,10 +103,40 @@ endpoint.handle.POST(postSchema, async (req, res) => {
     access_token
   );
 
-  res.send({
+  const responses = {
     ...aggregated_dora_data,
     ...dora_compiled_summary
-  });
+  };
+
+  let status = 'success';
+  let message = '';
+  // let data = {};
+
+  for (let [key, value] of Object.entries(responses) as [string, any][]) {
+    // console.log(key, value);
+    if (value.status === 'error') {
+      status = 'error';
+      message = value.message;
+    }
+  }
+
+  if (status === 'error') {
+    res.status(400).send({
+      message
+    });
+  } else {
+    const simplifiedData = Object.fromEntries(
+      Object.entries(responses).map(([key, value]: [string, any]) => [
+        key,
+        value.data as any
+      ])
+    );
+    console.log(simplifiedData)
+    res.status(200).send({
+      ...simplifiedData
+    });
+  }
+
 });
 
 const getDoraMetricsScore = (

From 2d59b189db8ab95c82f0847550322d74dc003016 Mon Sep 17 00:00:00 2001
From: VipinDevelops
Date: Sun, 15 Sep 2024 11:45:57 +0530
Subject: [PATCH 2/8] refactor: improve dora_metrics api

---
 .../pages/api/internal/ai/dora_metrics.ts | 120 +++++++++---------
 1 file changed, 61 insertions(+), 59 deletions(-)

diff --git a/web-server/pages/api/internal/ai/dora_metrics.ts b/web-server/pages/api/internal/ai/dora_metrics.ts
index 0e4e09ff..b0c51191 100644
--- a/web-server/pages/api/internal/ai/dora_metrics.ts
+++ b/web-server/pages/api/internal/ai/dora_metrics.ts
@@ -64,80 +64,82 @@ const postSchema = yup.object().shape({
 });
 
 const endpoint = new Endpoint(nullSchema);
-
 endpoint.handle.POST(postSchema, async (req, res) => {
   const { data, model, access_token } = req.payload;
+  const dora_data = data as TeamDoraMetricsApiResponseType;
+
+  try {
+    const [
+      doraMetricsScore,
+      leadTimeSummary,
+      CFRSummary,
+      MTTRSummary,
+      deploymentFrequencySummary,
+      doraTrendSummary
+    ] = await Promise.all(
+      [
+        getDoraMetricsScore,
+        getLeadTimeSummary,
+        getCFRSummary,
+        getMTTRSummary,
+        getDeploymentFrequencySummary,
+        getDoraTrendsCorrelationSummary
+      ].map((fn) => fn(dora_data, model, access_token))
+    );
 
-  const dora_data = data as unknown as TeamDoraMetricsApiResponseType;
-
-  const [
-    dora_metrics_score,
-    lead_time_trends_summary,
-    change_failure_rate_trends_summary,
-    mean_time_to_recovery_trends_summary,
-    deployment_frequency_trends_summary,
-    dora_trend_summary
-  ] = await Promise.all(
-    [
-      getDoraMetricsScore,
-      getLeadTimeSummary,
-      getCFRSummary,
-      getMTTRSummary,
-      getDeploymentFrequencySummary,
-      getDoraTrendsCorrelationSummary
-    ].map((f) => f(dora_data, model, access_token))
-  );
+    const aggregatedData = {
+      ...doraMetricsScore,
+      ...leadTimeSummary,
+      ...CFRSummary,
+      ...MTTRSummary,
+      ...deploymentFrequencySummary,
+      ...doraTrendSummary
+    };
 
-  const aggregated_dora_data = {
-    ...dora_metrics_score,
-    ...lead_time_trends_summary,
-    ...change_failure_rate_trends_summary,
-    ...mean_time_to_recovery_trends_summary,
-    ...deployment_frequency_trends_summary,
-    ...dora_trend_summary
-  } as AggregatedDORAData;
-
-  const dora_compiled_summary = await getDORACompiledSummary(
-    aggregated_dora_data,
-    model,
-    access_token
-  );
+    const compiledSummary = await getDORACompiledSummary(
+      aggregatedData,
+      model,
+      access_token
+    );
 
-  const responses = {
-    ...aggregated_dora_data,
-    ...dora_compiled_summary
-  };
+    const responses = {
+      ...aggregatedData,
+      ...compiledSummary
+    };
+
+    const { status, message } = checkForErrors(responses);
+
+    if (status === 'error') {
+      return res.status(400).send({ message });
+    }
 
+    const simplifiedData = Object.fromEntries(
+      Object.entries(responses).map(([key, value]) => [key, value.data])
+    );
+
+    return res.status(200).send(simplifiedData);
+  } catch (error) {
+    return res.status(500).send({
+      message: 'Internal Server Error',
+      error: error.message
+    });
+  }
+});
+
+function checkForErrors(responses: any): { status: string; message: string } {
   let status = 'success';
   let message = '';
-  // let data = {};
 
-  for (let [key, value] of Object.entries(responses) as [string, any][]) {
-    // console.log(key, value);
+  for (const value of Object.values(responses)) {
     if (value.status === 'error') {
       status = 'error';
       message = value.message;
+      break;
     }
   }
 
-  if (status === 'error') {
-    res.status(400).send({
-      message
-    });
-  } else {
-    const simplifiedData = Object.fromEntries(
-      Object.entries(responses).map(([key, value]: [string, any]) => [
-        key,
-        value.data as any
-      ])
-    );
-    console.log(simplifiedData)
-    res.status(200).send({
-      ...simplifiedData
-    });
-  }
-
-});
+  return { status, message };
+}
 
 const getDoraMetricsScore = (
   dora_data: TeamDoraMetricsApiResponseType,

From 65b085724e4ef7ceee5117ca5863553435d08d1e Mon Sep 17 00:00:00 2001
From: VipinDevelops
Date: Sun, 15 Sep 2024 11:58:44 +0530
Subject: [PATCH 3/8] improve error message

---
 backend/analytics_server/mhq/service/ai/ai_analytics_service.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backend/analytics_server/mhq/service/ai/ai_analytics_service.py b/backend/analytics_server/mhq/service/ai/ai_analytics_service.py
index 8d75fe71..d2c6a2cd 100644
--- a/backend/analytics_server/mhq/service/ai/ai_analytics_service.py
+++ b/backend/analytics_server/mhq/service/ai/ai_analytics_service.py
@@ -53,7 +53,7 @@ def _handle_api_response(self, response) -> Union[Dict[str, str], Dict[str, Unio
         elif response.status_code == HTTPStatus.UNAUTHORIZED:
             return {
                 "status": "error",
-                "message": "Unauthorized. Please check your access token.",
+                "message":"Unauthorized Access: Your access token is either missing, expired, or invalid. Please ensure that you are providing a valid token. ",
", } else: return { From ddf4f5d4e238eb5432ece5f0dc3a73ebccac732c Mon Sep 17 00:00:00 2001 From: VipinDevelops Date: Sun, 15 Sep 2024 12:04:16 +0530 Subject: [PATCH 4/8] lint fix --- .../mhq/service/ai/ai_analytics_service.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/backend/analytics_server/mhq/service/ai/ai_analytics_service.py b/backend/analytics_server/mhq/service/ai/ai_analytics_service.py index d2c6a2cd..48cf7b09 100644 --- a/backend/analytics_server/mhq/service/ai/ai_analytics_service.py +++ b/backend/analytics_server/mhq/service/ai/ai_analytics_service.py @@ -44,16 +44,21 @@ def __init__(self, llm: LLM, access_token: str): def _get_message(self, message: str, role: str = "user"): return {"role": role, "content": message} - def _handle_api_response(self, response) -> Union[Dict[str, str], Dict[str, Union[str, int]]]: + def _handle_api_response( + self, response + ) -> Union[Dict[str, str], Dict[str, Union[str, int]]]: """ Handles the API response, returning a success or error structure that the frontend can use. """ if response.status_code == HTTPStatus.OK: - return {"status": "success", "data": response.json()["choices"][0]["message"]['content']} + return { + "status": "success", + "data": response.json()["choices"][0]["message"]["content"], + } elif response.status_code == HTTPStatus.UNAUTHORIZED: return { "status": "error", - "message":"Unauthorized Access: Your access token is either missing, expired, or invalid. Please ensure that you are providing a valid token. ", + "message": "Unauthorized Access: Your access token is either missing, expired, or invalid. Please ensure that you are providing a valid token. ", } else: return { @@ -94,7 +99,9 @@ def _fireworks_ai_fetch_completions(self, messages: List[Dict[str, str]]): return self._handle_api_response(response) - def _fetch_completion(self, messages: List[Dict[str, str]]) -> Union[Dict[str, str], Dict[str, Union[str, int]]]: + def _fetch_completion( + self, messages: List[Dict[str, str]] + ) -> Union[Dict[str, str], Dict[str, Union[str, int]]]: """ Fetches the completion using the appropriate AI provider based on the LLM. """ @@ -109,7 +116,9 @@ def _fetch_completion(self, messages: List[Dict[str, str]]) -> Union[Dict[str, s "message": f"Invalid AI provider {self._ai_provider}", } - def get_dora_metrics_score(self, four_keys_data: Dict[str, float]) -> Union[Dict[str, str], Dict[str, Union[str, int]]]: + def get_dora_metrics_score( + self, four_keys_data: Dict[str, float] + ) -> Union[Dict[str, str], Dict[str, Union[str, int]]]: """ Calculate the DORA metrics score using input data and an LLM (Language Learning Model). 
From e89931ace0c8b42b9f264a536acf237a10d81701 Mon Sep 17 00:00:00 2001
From: VipinDevelops
Date: Mon, 16 Sep 2024 17:12:01 +0530
Subject: [PATCH 5/8] fix: check error func

---
 .../pages/api/internal/ai/dora_metrics.ts | 18 +++++-------------
 1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/web-server/pages/api/internal/ai/dora_metrics.ts b/web-server/pages/api/internal/ai/dora_metrics.ts
index b0c51191..3413f32b 100644
--- a/web-server/pages/api/internal/ai/dora_metrics.ts
+++ b/web-server/pages/api/internal/ai/dora_metrics.ts
@@ -126,20 +126,12 @@ endpoint.handle.POST(postSchema, async (req, res) => {
   }
 });
 
-function checkForErrors(responses: any): { status: string; message: string } {
-  let status = 'success';
-  let message = '';
-
-  for (const value of Object.values(responses)) {
-    if (value.status === 'error') {
-      status = 'error';
-      message = value.message;
-      break;
-    }
-  }
+const checkForErrors = (responses: any): { status: string; message: string } => {
+  const errorResponse = Object.values(responses).find(value => value.status === 'error');
+
+  return errorResponse ? { status: 'error', message: errorResponse.message } : { status: 'success', message: '' };
+};
 
-  return { status, message };
-}
 
 const getDoraMetricsScore = (
   dora_data: TeamDoraMetricsApiResponseType,

From 967bb3787e6f65e7b51b9294610c2f47e82aaf78 Mon Sep 17 00:00:00 2001
From: VipinDevelops
Date: Mon, 16 Sep 2024 17:17:15 +0530
Subject: [PATCH 6/8] fix: types

---
 web-server/pages/api/internal/ai/dora_metrics.ts | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/web-server/pages/api/internal/ai/dora_metrics.ts b/web-server/pages/api/internal/ai/dora_metrics.ts
index 3413f32b..46a79c8b 100644
--- a/web-server/pages/api/internal/ai/dora_metrics.ts
+++ b/web-server/pages/api/internal/ai/dora_metrics.ts
@@ -125,14 +125,12 @@ endpoint.handle.POST(postSchema, async (req, res) => {
     });
   }
 });
-
-const checkForErrors = (responses: any): { status: string; message: string } => {
+const checkForErrors = (responses: { [key: string]: { status: string; message: string } }): { status: string; message: string } => {
   const errorResponse = Object.values(responses).find(value => value.status === 'error');
 
   return errorResponse ? { status: 'error', message: errorResponse.message } : { status: 'success', message: '' };
 };
-
 const getDoraMetricsScore = (
   dora_data: TeamDoraMetricsApiResponseType,
   model: string,

From aa34a558f450ba3d446a9cec95da2e3f074190ff Mon Sep 17 00:00:00 2001
From: VipinDevelops
Date: Tue, 17 Sep 2024 11:56:02 +0530
Subject: [PATCH 7/8] use record

---
 web-server/pages/api/internal/ai/dora_metrics.ts | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/web-server/pages/api/internal/ai/dora_metrics.ts b/web-server/pages/api/internal/ai/dora_metrics.ts
index 46a79c8b..d14145aa 100644
--- a/web-server/pages/api/internal/ai/dora_metrics.ts
+++ b/web-server/pages/api/internal/ai/dora_metrics.ts
@@ -125,10 +125,16 @@ endpoint.handle.POST(postSchema, async (req, res) => {
     });
   }
 });
-const checkForErrors = (responses: { [key: string]: { status: string; message: string } }): { status: string; message: string } => {
-  const errorResponse = Object.values(responses).find(value => value.status === 'error');
+const checkForErrors = (
+  responses: Record<string, { status: string; message: string }>
+): { status: string; message: string } => {
+  const errorResponse = Object.values(responses).find(
+    (value) => value.status === 'error'
+  );
 
-  return errorResponse ? { status: 'error', message: errorResponse.message } : { status: 'success', message: '' };
+  return errorResponse
+    ? { status: 'error', message: errorResponse.message }
+    : { status: 'success', message: '' };
 };
 
 const getDoraMetricsScore = (

From 6c0bd764e718cf7b6ca00c8c6cd766657e6a04e1 Mon Sep 17 00:00:00 2001
From: VipinDevelops
Date: Tue, 17 Sep 2024 16:01:00 +0530
Subject: [PATCH 8/8] fix ai api types

---
 .../mhq/service/ai/ai_analytics_service.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/backend/analytics_server/mhq/service/ai/ai_analytics_service.py b/backend/analytics_server/mhq/service/ai/ai_analytics_service.py
index 48cf7b09..e7b01b74 100644
--- a/backend/analytics_server/mhq/service/ai/ai_analytics_service.py
+++ b/backend/analytics_server/mhq/service/ai/ai_analytics_service.py
@@ -44,9 +44,7 @@ def __init__(self, llm: LLM, access_token: str):
     def _get_message(self, message: str, role: str = "user"):
         return {"role": role, "content": message}
 
-    def _handle_api_response(
-        self, response
-    ) -> Union[Dict[str, str], Dict[str, Union[str, int]]]:
+    def _handle_api_response(self, response) -> Dict[str, Union[str, int]]:
         """
         Handles the API response, returning a success or error structure that the frontend can use.
         """
@@ -101,7 +99,7 @@ def _fireworks_ai_fetch_completions(self, messages: List[Dict[str, str]]):
 
     def _fetch_completion(
         self, messages: List[Dict[str, str]]
-    ) -> Union[Dict[str, str], Dict[str, Union[str, int]]]:
+    ) -> Dict[str, Union[str, int]]:
         """
         Fetches the completion using the appropriate AI provider based on the LLM.
         """
@@ -116,7 +116,7 @@ def _fetch_completion(
 
     def get_dora_metrics_score(
         self, four_keys_data: Dict[str, float]
-    ) -> Union[Dict[str, str], Dict[str, Union[str, int]]]:
+    ) -> Dict[str, Union[str, int]]:
         """
         Calculate the DORA metrics score using input data and an LLM (Language Learning Model).