Created a class for the whatsapp module #106

Merged · 1 commit · Jan 9, 2025
app/routers/tasks.py: 124 changes (62 additions, 62 deletions)
@@ -761,54 +761,54 @@ async def verify_webhook(mode: str, token: str, challenge: str):
# # Step 5: Initialize the Runpod endpoint for transcription
# endpoint = runpod.Endpoint(RUNPOD_ENDPOINT_ID)

logging.info("Audio data found for langauge detection")
data = {
"input": {
"task": "auto_detect_audio_language",
"audio_file": blob_name,
}
}

start_time = time.time()

try:
logging.info("Audio file ready for langauge detection")
audio_lang_response = call_endpoint_with_retry(endpoint, data)
except TimeoutError as e:

logging.error("Job timed out %s", str(e))
raise HTTPException(
status_code=503, detail="Service unavailable due to timeout."
) from e

except ConnectionError as e:

logging.error("Connection lost: %s", str(e))
raise HTTPException(
status_code=503, detail="Service unavailable due to connection error."
) from e

end_time = time.time()
logging.info(
"Audio language auto detection response: %s ",
audio_lang_response.get("detected_language"),
)

# Calculate the elapsed time
elapsed_time = end_time - start_time
logging.info(
"Audio language auto detection elapsed time: %s seconds", elapsed_time
)

audio_language = audio_lang_response.get("detected_language")
request_response = {}

if audio_language in language_mapping:
# Language is in the mapping
logging.info("Language detected in audio is %s", audio_language)
else:
# Language is not in our scope
return "Audio Language not detected"
+ # logging.info("Audio data found for language detection")
+ # data = {
+ #     "input": {
+ #         "task": "auto_detect_audio_language",
+ #         "audio_file": blob_name,
+ #     }
+ # }
+
+ # start_time = time.time()
+
+ # try:
+ #     logging.info("Audio file ready for language detection")
+ #     audio_lang_response = call_endpoint_with_retry(endpoint, data)
+ # except TimeoutError as e:
+ #     logging.error("Job timed out %s", str(e))
+ #     raise HTTPException(
+ #         status_code=503, detail="Service unavailable due to timeout."
+ #     ) from e
+ # except ConnectionError as e:
+ #     logging.error("Connection lost: %s", str(e))
+ #     raise HTTPException(
+ #         status_code=503, detail="Service unavailable due to connection error."
+ #     ) from e
+
+ # end_time = time.time()
+ # logging.info(
+ #     "Audio language auto detection response: %s",
+ #     audio_lang_response.get("detected_language"),
+ # )
+
+ # # Calculate the elapsed time
+ # elapsed_time = end_time - start_time
+ # logging.info(
+ #     "Audio language auto detection elapsed time: %s seconds", elapsed_time
+ # )
+
+ # audio_language = audio_lang_response.get("detected_language")
+ # request_response = {}
+
+ # if audio_language in language_mapping:
+ #     # Language is in the mapping
+ #     logging.info("Language detected in audio is %s", audio_language)
+ # else:
+ #     # Language is not in our scope
+ #     return "Audio Language not detected"

# try:

@@ -822,20 +822,20 @@ async def verify_webhook(mode: str, token: str, challenge: str):
# phone_number_id,
# )

- try:
-     # Step 7: Call the transcription service with the correct parameters
-     request_response = endpoint.run_sync(
-         {
-             "input": {
-                 "task": "transcribe",
-                 "target_lang": audio_language,
-                 "adapter": audio_language,
-                 "audio_file": blob_name,  # Corrected to pass local file path
-                 "recognise_speakers": False,
-             }
-         },
-         timeout=150,  # Set a timeout for the transcription job.
-     )
+ # try:
+ #     # Step 7: Call the transcription service with the correct parameters
+ #     request_response = endpoint.run_sync(
+ #         {
+ #             "input": {
+ #                 "task": "transcribe",
+ #                 "target_lang": audio_language,
+ #                 "adapter": audio_language,
+ #                 "audio_file": blob_name,  # Corrected to pass local file path
+ #                 "recognise_speakers": False,
+ #             }
+ #         },
+ #         timeout=150,  # Set a timeout for the transcription job.
+ #     )

# # Step 8: Notify the user that transcription is in progress
# send_message(
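
Note: the block being removed relies on a `call_endpoint_with_retry` helper that is defined elsewhere in the repo and does not appear in this diff. A minimal sketch of what such a wrapper could look like, assuming it wraps RunPod's `endpoint.run_sync` and retries transient failures before letting `TimeoutError` / `ConnectionError` propagate to the route (the retry policy and parameters here are illustrative, not the actual implementation):

```python
import logging
import time


def call_endpoint_with_retry(endpoint, data, max_retries=3, timeout=150, backoff=2.0):
    """Hypothetical reconstruction of the helper used in the removed code.

    Only its name and the exceptions it surfaces appear in this diff; the
    retry loop below is an assumption, not the repo's implementation.
    """
    last_error = None
    for attempt in range(1, max_retries + 1):
        try:
            # run_sync blocks until the RunPod job finishes or the
            # timeout (in seconds) expires.
            return endpoint.run_sync(data, timeout=timeout)
        except (TimeoutError, ConnectionError) as err:
            last_error = err
            logging.warning("Attempt %d/%d failed: %s", attempt, max_retries, err)
            time.sleep(backoff * attempt)  # simple linear backoff between tries
    # All retries exhausted: re-raise so the route can map it to an HTTP 503.
    raise last_error
```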
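Likewise, the detect-then-transcribe sequence being commented out could be kept callable as a standalone helper. A sketch under the same assumptions (the payload shapes are copied from the diff; `language_mapping` and `blob_name` come from the surrounding handler, and `call_endpoint_with_retry` is the sketch above):

```python
def detect_and_transcribe(endpoint, blob_name, language_mapping):
    """Sketch of the flow commented out above: detect the audio language,
    then transcribe with the matching adapter. Payloads mirror the diff;
    retry and error handling are delegated to call_endpoint_with_retry."""
    detection = call_endpoint_with_retry(
        endpoint,
        {"input": {"task": "auto_detect_audio_language", "audio_file": blob_name}},
    )
    audio_language = detection.get("detected_language")
    if audio_language not in language_mapping:
        return None  # language outside the supported set, as in the removed code

    return endpoint.run_sync(
        {
            "input": {
                "task": "transcribe",
                "target_lang": audio_language,
                "adapter": audio_language,
                "audio_file": blob_name,
                "recognise_speakers": False,
            }
        },
        timeout=150,  # same timeout the removed code used
    )
```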