Skip to content

Commit

Permalink
Do not use async for the stream endpoint; the response is already a plain generator
Browse files Browse the repository at this point in the history
  • Loading branch information
gorkemgoknar committed Oct 30, 2023
1 parent 2910760 commit cea0550
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions server/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
from TTS.utils.generic_utils import get_user_data_dir
from TTS.utils.manage import ModelManager

torch.set_num_threads(int(os.environ.get("NUM_THREADS", "8")))
torch.set_num_threads(int(os.environ.get("NUM_THREADS", "2")))
device = torch.device("cuda")

model_name = "tts_models/multilingual/multi-dataset/xtts_v1.1"
Expand Down Expand Up @@ -49,7 +49,7 @@


@app.post("/clone_speaker")
async def predict_speaker(wav_file: UploadFile):
def predict_speaker(wav_file: UploadFile):
"""Compute conditioning inputs from reference audio file."""
temp_audio_name = next(tempfile._get_candidate_names())
with open(temp_audio_name, "wb") as temp, torch.inference_mode():
Expand Down Expand Up @@ -118,7 +118,7 @@ class StreamingInputs(BaseModel):
decoder: str = "ne_hifigan"


async def predict_streaming_generator(parsed_input: dict = Body(...)):
def predict_streaming_generator(parsed_input: dict = Body(...)):
speaker_embedding = (
torch.tensor(parsed_input.speaker_embedding).unsqueeze(0).unsqueeze(-1)
)
Expand Down

0 comments on commit cea0550

Please sign in to comment.