forked from DrewThomasson/VoxNovel
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathgui_run.py
2106 lines (1526 loc) · 83.6 KB
/
gui_run.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Stage 1: GUI entry point -- pick an ebook, convert it to plain text, run BookNLP.
import os
import subprocess
import tkinter as tk
from tkinter import filedialog, messagebox
from epub2txt import epub2txt
from booknlp.booknlp import BookNLP
import nltk

# POS-tagger model; used further down for pronoun detection via nltk.pos_tag.
nltk.download('averaged_perceptron_tagger')
def calibre_installed():
    """Return True if Calibre's ``ebook-convert`` CLI tool is available.

    Probes the tool by running ``ebook-convert --version`` with all output
    captured.  If the executable is not on PATH, prints installation
    guidance (the previous message was garbled/truncated mid-sentence)
    and returns False so callers can fall back to epub2txt.
    """
    try:
        subprocess.run(['ebook-convert', '--version'],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return True
    except FileNotFoundError:
        print("""ERROR NO CALIBRE: running epub2txt convert version...
It appears you don't have the Calibre command-line tools installed on your system.
They allow you to convert from any ebook file format:
Calibre supports the following input formats: CBZ, CBR, CBC, CHM, EPUB, FB2, HTML, LIT, LRF, MOBI, ODT, PDF, PRC, PDB, PML, RB, RTF, SNB, TCR, TXT.
If you want this feature please follow the online instructions for installing the Calibre command-line tools.
For Linux it's:
sudo apt update && sudo apt upgrade
sudo apt install calibre
""")
        return False
def convert_with_calibre(file_path, output_format="txt"):
    """Convert *file_path* with Calibre's ebook-convert; return the new path.

    Uses os.path.splitext rather than rsplit('.', 1) on the whole path, so a
    dot in a parent directory name (e.g. "books.v1/story") or a file with no
    extension can no longer produce a corrupted output path.
    """
    base, _ext = os.path.splitext(file_path)
    output_path = base + '.' + output_format
    subprocess.run(['ebook-convert', file_path, output_path])
    return output_path
def process_file():
    """Ask the user for an ebook, convert it to TXT, and run BookNLP on it.

    Conversion strategy:
      * Calibre-supported format + Calibre installed -> ebook-convert
      * .epub without Calibre                        -> epub2txt fallback
      * .txt                                         -> used as-is
      * anything else without Calibre                -> error dialog, abort

    BookNLP output lands in Working_files/Book; the Tk window is destroyed
    once processing completes.
    """
    file_path = filedialog.askopenfilename(
        title='Select File',
        filetypes=[('Supported Files',
                    ('*.cbz', '*.cbr', '*.cbc', '*.chm', '*.epub', '*.fb2', '*.html', '*.lit', '*.lrf',
                     '*.mobi', '*.odt', '*.pdf', '*.prc', '*.pdb', '*.pml', '*.rb', '*.rtf', '*.snb',
                     '*.tcr', '*.txt'))]
    )
    if not file_path:
        # User cancelled the file dialog.
        return

    if file_path.lower().endswith(('.cbz', '.cbr', '.cbc', '.chm', '.epub', '.fb2', '.html', '.lit', '.lrf',
                                   '.mobi', '.odt', '.pdf', '.prc', '.pdb', '.pml', '.rb', '.rtf', '.snb', '.tcr')) and calibre_installed():
        file_path = convert_with_calibre(file_path)
    elif file_path.lower().endswith('.epub') and not calibre_installed():
        # No Calibre: extract the epub's text and save it as Working_files/Book.txt.
        content = epub2txt(file_path)
        if not os.path.exists('Working_files'):
            os.makedirs('Working_files')
        file_path = os.path.join('Working_files', 'Book.txt')
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(content)
    elif not file_path.lower().endswith('.txt'):
        messagebox.showerror("Error", "Selected file format is not supported or Calibre is not installed.")
        return

    # Now process the TXT file with BookNLP
    book_id = "Book"
    output_directory = os.path.join('Working_files', book_id)
    model_params = {
        "pipeline": "entity,quote,supersense,event,coref",
        "model": "big"
    }
    booknlp = BookNLP("en", model_params)
    booknlp.process(file_path, output_directory, book_id)
    print("Success, File processed successfully!")
    # Close the GUI
    root.destroy()
# Minimal Tk window: one "Process File" button that triggers the conversion +
# BookNLP pipeline above.  mainloop() blocks here; everything after this point
# in the file only runs once this window is closed.
root = tk.Tk()
root.title("BookNLP Processor")
frame = tk.Frame(root, padx=20, pady=20)
frame.pack(padx=10, pady=10)
process_button = tk.Button(frame, text="Process File", command=process_file)
process_button.pack()
root.mainloop()
import pandas as pd
def filter_and_correct_quotes(file_path):
    """Rewrite *file_path* in place, dropping lines with unbalanced quotes.

    BookNLP's .quotes output occasionally contains rows whose double-quote
    count is odd, which breaks downstream TSV parsing; only lines with an
    even number of '"' characters are kept.
    """
    with open(file_path, 'r', encoding='utf-8') as fh:
        original = fh.readlines()

    kept = [ln for ln in original if ln.count('"') % 2 == 0]

    with open(file_path, 'w', encoding='utf-8') as fh:
        fh.writelines(kept)

    print(f"Processed {len(original)} lines.")
    print(f"Removed {len(original) - len(kept)} problematic lines.")
    print(f"Wrote {len(kept)} lines back to the file.")
if __name__ == "__main__":
    # Clean the quotes file produced by the BookNLP run above before the
    # tab-separated parsing steps that follow.
    file_path = "Working_files/Book/Book.quotes"
    filter_and_correct_quotes(file_path)
import pandas as pd
import re
import glob
import os
def process_files(quotes_file, tokens_file):
    """Extract narrator text between quotes and write non_quotes.csv.

    Rows of *quotes_file* that pandas cannot parse are skipped one at a
    time (the offending row index is recovered from the ParserError
    message) until the file loads cleanly.
    """
    bad_rows = []
    df_quotes = None
    while df_quotes is None:
        try:
            df_quotes = pd.read_csv(quotes_file, delimiter="\t", skiprows=bad_rows)
        except pd.errors.ParserError as e:
            found = re.search(r'at row (\d+)', str(e))
            if not found:
                print(f"Error reading {quotes_file}: {e}")
                return
            row_no = int(found.group(1))
            print(f"Skipping problematic row {row_no} in {quotes_file}")
            bad_rows.append(row_no)

    df_tokens = pd.read_csv(tokens_file, delimiter="\t", on_bad_lines='skip', quoting=3)

    prev_end = 0
    narrator_rows = []
    for _, quote_row in df_quotes.iterrows():
        q_start = quote_row['quote_start']
        q_end = quote_row['quote_end']

        # Tokens strictly between the previous quote and this one.
        between = df_tokens[(df_tokens['token_ID_within_document'] > prev_end) &
                            (df_tokens['token_ID_within_document'] < q_start)]
        chunk = ' '.join(str(w) for w in between['word'])
        # Undo common detokenization artifacts, then drop any space left
        # before punctuation.
        chunk = (chunk.replace(" n't", "n't").replace(" n’", "n’")
                      .replace("( ", "(").replace(" ,", ",")
                      .replace("gon na", "gonna").replace(" n’t", "n’t"))
        chunk = re.sub(r' (?=[^a-zA-Z0-9\s])', '', chunk)

        if chunk:
            narrator_rows.append([chunk, prev_end, q_start, "False", "Narrator"])
        prev_end = q_end

    nonquotes_df = pd.DataFrame(narrator_rows,
                                columns=["Text", "Start Location", "End Location", "Is Quote", "Speaker"])
    output_filename = os.path.join(os.path.dirname(quotes_file), "non_quotes.csv")
    nonquotes_df.to_csv(output_filename, index=False)
    print(f"Saved nonquotes.csv to {output_filename}")
def main():
    """Pair each .quotes file under Working_files with its .tokens twin and process it."""
    token_paths = glob.glob('Working_files/**/*.tokens', recursive=True)
    for quotes_path in glob.glob('Working_files/**/*.quotes', recursive=True):
        stem = os.path.splitext(os.path.basename(quotes_path))[0]
        partners = [p for p in token_paths
                    if os.path.splitext(os.path.basename(p))[0] == stem]
        if partners:
            process_files(quotes_path, partners[0])
    print("All processing complete!")

if __name__ == "__main__":
    main()
import pandas as pd
import re
import glob
import os
import nltk
def process_files(quotes_file, entities_file):
    """Build quotes.csv with a formatted speaker label for every quote.

    Aggregates name/pronoun mentions per character from the .quotes and
    .entities BookNLP outputs, infers a display name and gender for each
    character, then writes one CSV row per quote.

    Fixes vs. the original:
      * the ``char_id in character_info`` membership test now happens
        *before* the quote-count lookup (previously the lookup ran first
        and could KeyError before the guard);
      * rows are accumulated in a list and turned into a DataFrame once,
        replacing a quadratic pd.concat inside the loop;
      * the redundant open() wrapped around DataFrame.to_csv is removed
        (it truncated the file only for to_csv to rewrite it).
    """
    df_quotes = pd.read_csv(quotes_file, delimiter="\t")
    df_entities = pd.read_csv(entities_file, delimiter="\t")

    character_info = {}

    def is_pronoun(word):
        # POS-tag the single word; PRP / PRP$ mark (possessive) personal pronouns.
        tagged_word = nltk.pos_tag([word])
        return 'PRP' in tagged_word[0][1] or 'PRP$' in tagged_word[0][1]

    def get_gender(pronoun):
        # Crude pronoun-based gender guess; anything unrecognized is Unknown.
        male_pronouns = ['he', 'him', 'his']
        female_pronouns = ['she', 'her', 'hers']
        if pronoun in male_pronouns:
            return 'Male'
        elif pronoun in female_pronouns:
            return 'Female'
        return 'Unknown'

    # Tally the mention phrases attached to each quote.
    for _, row in df_quotes.iterrows():
        char_id = row['char_id']
        mention = row['mention_phrase']
        if char_id not in character_info:
            character_info[char_id] = {"names": {}, "pronouns": {}, "quote_count": 0}
        if is_pronoun(mention):
            character_info[char_id]["pronouns"].setdefault(mention.lower(), 0)
            character_info[char_id]["pronouns"][mention.lower()] += 1
        else:
            character_info[char_id]["names"].setdefault(mention, 0)
            character_info[char_id]["names"][mention] += 1
        character_info[char_id]["quote_count"] += 1

    # Fold in coreference mentions from the entities file (known characters only).
    for _, row in df_entities.iterrows():
        coref = row['COREF']
        name = row['text']
        if coref in character_info:
            if is_pronoun(name):
                character_info[coref]["pronouns"].setdefault(name.lower(), 0)
                character_info[coref]["pronouns"][name.lower()] += 1
            else:
                character_info[coref]["names"].setdefault(name, 0)
                character_info[coref]["names"][name] += 1

    # Derive each character's most frequent name + pronoun and build a label
    # of the form "<id>:<name>.M" / ".F" / ".?".
    for char_id, info in character_info.items():
        most_likely_name = max(info["names"].items(), key=lambda x: x[1])[0] if info["names"] else "Unknown"
        most_common_pronoun = max(info["pronouns"].items(), key=lambda x: x[1])[0] if info["pronouns"] else None
        gender = get_gender(most_common_pronoun) if most_common_pronoun else 'Unknown'
        gender_suffix = ".M" if gender == 'Male' else ".F" if gender == 'Female' else ".?"
        info["formatted_speaker"] = f"{char_id}:{most_likely_name}{gender_suffix}"
        info["most_likely_name"] = most_likely_name
        info["gender"] = gender

    output_filename = os.path.join(os.path.dirname(quotes_file), "quotes.csv")
    rows = []
    for _, row in df_quotes.iterrows():
        char_id = row['char_id']
        # Skip quotes with no alphanumeric content at all.
        if not re.search('[a-zA-Z0-9]', row['quote']):
            print(f"Removing row with text: {row['quote']}")
            continue
        if char_id not in character_info:
            formatted_speaker = "Unknown"
        elif character_info[char_id]["quote_count"] == 1:
            # A character with a single attributed quote is treated as narration.
            formatted_speaker = "Narrator"
        else:
            formatted_speaker = character_info[char_id]["formatted_speaker"]
        rows.append({"Text": row['quote'],
                     "Start Location": row['quote_start'],
                     "End Location": row['quote_end'],
                     "Is Quote": "True",
                     "Speaker": formatted_speaker})

    writer = pd.DataFrame(rows, columns=["Text", "Start Location", "End Location", "Is Quote", "Speaker"])
    writer.to_csv(output_filename, index=False)
    print(f"Saved quotes.csv to {output_filename}")
def main():
    """Match .quotes with same-named .entities files under Working_files and process each pair."""
    entity_paths = glob.glob('Working_files/**/*.entities', recursive=True)
    for quotes_path in glob.glob('Working_files/**/*.quotes', recursive=True):
        stem = os.path.splitext(os.path.basename(quotes_path))[0]
        partners = [p for p in entity_paths
                    if os.path.splitext(os.path.basename(p))[0] == stem]
        if partners:
            process_files(quotes_path, partners[0])
    print("All processing complete!")

if __name__ == "__main__":
    main()
import pandas as pd
import re
import glob
import os
def process_files(quotes_file, tokens_file):
    """Collect the narration between consecutive quotes into non_quotes.csv."""
    df_quotes = pd.read_csv(quotes_file, delimiter="\t")
    df_tokens = pd.read_csv(tokens_file, delimiter="\t", on_bad_lines='skip', quoting=3)

    prev_end = 0      # document token id at which the previous quote ended
    narration = []    # rows destined for non_quotes.csv

    for _, quote in df_quotes.iterrows():
        q_start = quote['quote_start']
        q_end = quote['quote_end']

        # Tokens strictly between the previous quote and this one.
        gap = df_tokens[(df_tokens['token_ID_within_document'] > prev_end) &
                        (df_tokens['token_ID_within_document'] < q_start)]

        text = ' '.join(str(w) for w in gap['word'])
        # Undo common detokenization artifacts.
        text = (text.replace(" n't", "n't").replace(" n’", "n’")
                    .replace(" ’", "’").replace(" ,", ",")
                    .replace(" .", ".").replace(" n’t", "n’t"))
        # Drop any remaining space that directly precedes punctuation.
        text = re.sub(r' (?=[^a-zA-Z0-9\s])', '', text)

        if text:
            narration.append([text, prev_end, q_start, "False", "Narrator"])
        prev_end = q_end

    frame = pd.DataFrame(narration,
                         columns=["Text", "Start Location", "End Location", "Is Quote", "Speaker"])
    output_filename = os.path.join(os.path.dirname(quotes_file), "non_quotes.csv")
    frame.to_csv(output_filename, index=False)
    print(f"Saved nonquotes.csv to {output_filename}")
def main():
    """Walk Working_files, pairing every .quotes file with its .tokens file by stem."""
    all_tokens = glob.glob('Working_files/**/*.tokens', recursive=True)
    all_quotes = glob.glob('Working_files/**/*.quotes', recursive=True)
    for quotes_path in all_quotes:
        stem = os.path.splitext(os.path.basename(quotes_path))[0]
        for tokens_path in all_tokens:
            if os.path.splitext(os.path.basename(tokens_path))[0] == stem:
                process_files(quotes_path, tokens_path)
                break
    print("All processing complete!")

if __name__ == "__main__":
    main()
# Merge quotes.csv and non_quotes.csv into a single book.csv ordered by token
# position, so narration and dialogue interleave in reading order.
import pandas as pd
import numpy as np

# Read the CSV files
quotes_df = pd.read_csv("Working_files/Book/quotes.csv")
non_quotes_df = pd.read_csv("Working_files/Book/non_quotes.csv")

# Concatenate the dataframes
combined_df = pd.concat([quotes_df, non_quotes_df], ignore_index=True)

# Convert the string 'None' to NaN so those rows can be dropped below
combined_df.replace('None', np.nan, inplace=True)

# Drop rows with NaN in 'Start Location'
combined_df.dropna(subset=['Start Location'], inplace=True)

# Convert the 'Start Location' column to integers for a numeric (not lexical) sort
combined_df["Start Location"] = combined_df["Start Location"].astype(int)

# Sort by 'Start Location'
sorted_df = combined_df.sort_values(by="Start Location")

# Save to 'book.csv'
sorted_df.to_csv("Working_files/Book/book.csv", index=False)
#this is a cleanup script that tries to clean up the quotes.csv and non_quotes.csv files of any typos formed by booknlp
import pandas as pd
import os
import re
def process_text(text):
    """Normalize a text cell: tighten spacing before punctuation and repair
    common BookNLP detokenization artifacts ("gon na", bracket style, etc.)."""
    # First remove any space immediately preceding punctuation / symbols.
    cleaned = re.sub(r' (?=[^a-zA-Z0-9\s])', '', text)
    # Then apply the fixed sequence of literal repairs.
    for old, new in ((" n’t", "n’t"), ("[", "("), ("]", ")"),
                     ("gon na", "gonna"), ("—————–", "")):
        cleaned = cleaned.replace(old, new)
    return cleaned
def process_file(filename):
    """Run process_text over the "Text" column of *filename* and save in place.

    Fix: the status messages previously printed a literal placeholder
    ("(unknown)") instead of the file name; the f-strings now interpolate
    *filename*.
    """
    df = pd.read_csv(filename)
    if "Text" in df.columns:
        # Coerce to str first: NaN cells arrive as floats.
        df['Text'] = df['Text'].apply(lambda x: process_text(str(x)))
        df.to_csv(filename, index=False)
        print(f"Processed and saved {filename}")
    else:
        print(f"Column 'Text' not found in {filename}")
def main():
    """Clean every known CSV in Working_files/Book/ that exists on disk.

    Fix: the "not found" message previously printed a literal placeholder
    ("(unknown)") instead of the file name; it now interpolates *filename*.
    """
    folder_path = "Working_files/Book/"
    files = ["non_quotes.csv", "quotes.csv", "book.csv"]
    for filename in files:
        full_path = os.path.join(folder_path, filename)
        if os.path.exists(full_path):
            process_file(full_path)
        else:
            print(f"File {filename} not found in {folder_path}")

if __name__ == "__main__":
    main()
#this will wipe the computer of any current audio clips from a previous session
#but it'll ask the user first
import os
import tkinter as tk
from tkinter import messagebox
def check_and_wipe_folder(directory_path):
    """Offer to delete leftover .wav clips in *directory_path* from a prior run.

    If the folder holds any .wav files, a yes/no Tk dialog asks the user
    whether to wipe them; individual deletion failures are reported but
    non-fatal.  A missing directory just logs a message.
    """
    if not os.path.exists(directory_path):
        print(f"The directory {directory_path} does not exist!")
        return

    leftover = [name for name in os.listdir(directory_path) if name.endswith('.wav')]
    if not leftover:
        print("No audio clips from a previous session were found.")
        return

    # Pop a confirmation dialog without showing a main window.
    prompt_root = tk.Tk()
    prompt_root.withdraw()
    response = messagebox.askyesno(
        "Confirm Deletion",
        "Audio clips from a previous session have been found. Do you want to wipe them?")
    prompt_root.destroy()

    if not response:
        print("Wipe operation cancelled by the user.")
        return

    for name in leftover:
        target = os.path.join(directory_path, name)
        try:
            os.remove(target)
            print(f"Deleted: {target}")
        except Exception as e:
            print(f"Failed to delete {target}. Reason: {e}")
# Usage: sweep any clips left by a previous session before generating new audio.
check_and_wipe_folder("Working_files/generated_audio_clips/")
import torch
from TTS.api import TTS
import tkinter as tk
from tkinter import ttk, scrolledtext, messagebox, simpledialog, filedialog
import threading
import pandas as pd
import random
import os
import time
import os
import pandas as pd
import random
import shutil
import torch
import torchaudio
import time
import pygame
import nltk
from nltk.tokenize import sent_tokenize
# Ensure the NLTK "punkt" sentence tokenizer is available.  A single quiet
# download replaces the previous duplicate calls (one verbose, one quiet);
# nltk.download is a no-op when the resource is already present.
nltk.download('punkt', quiet=True)
# ----- Shared configuration / mutable state for the TTS GUI below -----

# Sample sentence for voice previews (usage is outside this chunk -- TODO confirm).
demo_text = "Imagine a world where endless possibilities await around every corner."

# Load the merged book CSV produced by the earlier BookNLP + merge steps.
csv_file="Working_files/Book/book.csv"
data = pd.read_csv(csv_file)

# Folder of reference voice samples; entries suffixed ".M"/".F" encode gender.
voice_actors_folder ="tortoise/voices/"
# Get the list of voice actors ("cond_latent_example" is not a voice).
voice_actors = [va for va in os.listdir(voice_actors_folder) if va != "cond_latent_example"]
male_voice_actors = [va for va in voice_actors if va.endswith(".M")]
female_voice_actors = [va for va in voice_actors if va.endswith(".F")]

# Dictionary to hold each character's selected language
character_languages = {}

# Default TTS model: first entry of the live Coqui model catalogue.
models = TTS().list_models()
#selected_tts_model = 'tts_models/multilingual/multi-dataset/xtts_v2'
selected_tts_model = models[0]

# Map for speaker to voice actor
speaker_voice_map = {}

CHAPTER_KEYWORD = "CHAPTER"

# Multi-speaker models handled specially: their individual speakers are
# offered as voices instead of the model entries themselves.
multi_voice_model1 ="tts_models/en/vctk/vits"
multi_voice_model2 ="tts_models/en/vctk/fast_pitch"
multi_voice_model3 ="tts_models/ca/custom/vits"

#multi_voice_model_voice_list1 =speakers_list = TTS(multi_voice_model1).speakers
#multi_voice_model_voice_list2 =speakers_list = TTS(multi_voice_model2).speakers
#multi_voice_model_voice_list3 =speakers_list = TTS(multi_voice_model3).speakers

# Speaker lists start empty and are populated lazily in
# update_voice_comboboxes, avoiding three slow model loads at startup.
multi_voice_model_voice_list1 = []
multi_voice_model_voice_list2 = []
multi_voice_model_voice_list3 = []

# Dictionary to hold the comboboxes references
voice_comboboxes = {}
def add_languages_to_csv():
    """Add a per-row 'language' column to the book CSV (default 'en').

    Each row's language comes from the global character_languages map keyed
    by speaker.  Uses the shared csv_file constant instead of repeating the
    literal path, matching add_voice_actors_to_csv.  Does nothing if the
    column already exists.
    """
    df = pd.read_csv(csv_file)
    if 'language' not in df.columns:
        # Speakers without an explicit choice fall back to English.
        df['language'] = df['Speaker'].apply(lambda speaker: character_languages.get(speaker, 'en'))
        df.to_csv(csv_file, index=False)
        print("Added language data to the CSV file.")
def add_voice_actors_to_csv():
    """Persist the current speaker -> voice-actor assignments into the book CSV."""
    frame = pd.read_csv(csv_file)
    if 'voice_actor' in frame.columns:
        return  # already annotated; leave the file untouched
    frame['voice_actor'] = frame['Speaker'].map(speaker_voice_map)
    frame.to_csv(csv_file, index=False)
    print(f"Added voice actor data to {csv_file}")
def get_random_voice_for_speaker(speaker):
    """Pick a random reference voice, preferring one whose gender matches the
    speaker's ".M"/".F" suffix; falls back to the full voice-actor pool."""
    if speaker.endswith(".M") and male_voice_actors:
        pool = male_voice_actors
    elif speaker.endswith(".F") and female_voice_actors:
        pool = female_voice_actors
    else:
        pool = voice_actors
    if not pool:  # empty gendered pool: fall back to everyone
        pool = voice_actors
    return random.choice(pool)
def get_random_voice_for_speaker_fast(speaker):
    """Pick a random VCTK speaker id matching *speaker*'s gender suffix.

    ".M" -> male ids, ".F" -> female ids, ".?" -> either; any other suffix
    falls back to the folder-based voice_actors list.
    """
    pool = voice_actors  # default: the reference-clip voice actors
    males = {"p226", "p228","p229","p230","p231","p232","p233","p234","p236","p238","p239","p241","p251","p252","p253","p254","p255","p256","p258","p262","p264","p265","p266","p267","p269","p272","p279","p281","p282","p285","p286","p287","p292","p298","p299","p301","p302","p307","p312","p313","p317","p318","p326","p340"}
    females = {"p225","p227","p237","p240","p243","p244","p245","p246","p247","p248","p249","p250","p257","p259","p260","p261","p263","p268","p270","p271","p273","p274","p275","p276","p277","p280","p283","p284","p288","p293","p294","p295","p297","p300","p303","p304","p305","p306","p308","p310","p311","p314","p316","p323","p329","p341","p343","p345","p347","p351","p360","p361","p362","p363","p364","p374"}
    if speaker.endswith(".M") and males:
        pool = males
    elif speaker.endswith(".F") and females:
        pool = females
    elif speaker.endswith(".?") and females:
        pool = males.union(females)
    if not pool:  # safety net: never choose from an empty pool
        pool = males.union(females)
    # random.choice needs a sequence, so materialize the set.
    return random.choice(list(pool))
def ensure_output_folder():
    """Create Working_files/generated_audio_clips, including missing parents.

    os.makedirs(..., exist_ok=True) replaces the exists()+mkdir pair: it is
    race-free and also succeeds when Working_files itself doesn't exist yet
    (os.mkdir would raise FileNotFoundError in that case).
    """
    os.makedirs("Working_files/generated_audio_clips", exist_ok=True)
def ensure_temp_folder():
    """Create Working_files/temp, including missing parents.

    os.makedirs(..., exist_ok=True) replaces the exists()+mkdir pair: it is
    race-free and also succeeds when Working_files itself doesn't exist yet
    (os.mkdir would raise FileNotFoundError in that case).
    """
    os.makedirs("Working_files/temp", exist_ok=True)
def select_voices():
    """Randomly assign a (gender-matched) voice actor to every speaker in `data`.

    Fixes vs. the original:
      * the unused total_rows variable is removed;
      * each GUI combobox is set to the *same* voice stored in
        speaker_voice_map -- previously a second random draw was made for
        the combobox, so the displayed voice could differ from the one
        actually used for generation.
    """
    random.seed(int(time.time()))
    ensure_output_folder()
    for speaker in data['Speaker'].unique():
        speaker_voice_map[speaker] = get_random_voice_for_speaker(speaker)
    for speaker, voice in speaker_voice_map.items():
        print(f"Selected voice for {speaker}: {voice}")
        # Keep the dropdown (if it has been built yet) in sync with the map.
        if speaker in voice_comboboxes:
            voice_comboboxes[speaker].set(voice)
    print("Voices have been selected randomly.")
def select_voices_fast():
    """Randomly assign a fast (VCTK speaker id) voice to every speaker in `data`.

    Fixes vs. the original:
      * the unused total_rows variable is removed;
      * each GUI combobox is set to the *same* voice stored in
        speaker_voice_map -- previously a second random draw was made for
        the combobox, so the displayed voice could differ from the one
        actually used for generation.
    """
    random.seed(int(time.time()))
    ensure_output_folder()
    for speaker in data['Speaker'].unique():
        speaker_voice_map[speaker] = get_random_voice_for_speaker_fast(speaker)
    for speaker, voice in speaker_voice_map.items():
        print(f"Selected voice for {speaker}: {voice}")
        # Keep the dropdown (if it has been built yet) in sync with the map.
        if speaker in voice_comboboxes:
            voice_comboboxes[speaker].set(voice)
    print("Voices have been selected randomly.")
# Pre-select the voices before starting the GUI
select_voices()

# Main application window for the TTS stage.
root = tk.Tk()
root.title("coqui TTS GUI")
root.geometry("1200x800")

# Text marker used to split the book into chapters (editable in the GUI).
chapter_delimiter_var = tk.StringVar(value="CHAPTER")
def disable_chapter_delimiter_entry():
    # Grey out the chapter-delimiter textbox.
    # NOTE(review): chapter_delimiter_entry is created later in the file.
    chapter_delimiter_entry.config(state='disabled')

def enable_chapter_delimiter_entry():
    # Re-enable the chapter-delimiter textbox.
    chapter_delimiter_entry.config(state='normal')
# Initialize the pygame mixer used for voice-sample previews.
try:
    pygame.mixer.init()
    print("mixer module initialized successfully.")
except pygame.error as e:
    # Fix: previously printed the pygame.error *class* instead of the actual
    # exception instance (and misspelled "module").
    print("mixer module initialization failed")
    print(e)
# Callback fired when a voice actor is chosen from a speaker's dropdown.
def update_voice_actor(speaker):
    """Record the combobox selection for *speaker* and play a random sample of it."""
    chosen = voice_comboboxes[speaker].get()
    speaker_voice_map[speaker] = chosen
    print(f"Updated voice for {speaker}: {chosen}")

    # Preview: play one random reference clip for the chosen voice, if any exist.
    samples = list_reference_files(chosen)
    if not samples:
        return

    sample = random.choice(samples)
    try:
        # Stop anything currently playing before starting the preview.
        pygame.mixer.music.stop()
        pygame.mixer.stop()
        if sample.endswith('.mp3'):
            # mp3 must go through the streaming music channel.
            pygame.mixer.music.load(sample)
            pygame.mixer.music.play()
        else:
            # wav files go through the Sound API.
            sound = pygame.mixer.Sound(sample)
            sound.play()
    except Exception as e:
        print(f"Could not play the audio file: {e}")
# Chunker used to keep TTS inputs under the model's character limit.
def split_long_string(text, limit=250):
    """Split *text* into chunks no longer than *limit* characters.

    Short inputs come back as a single-element list.  Longer inputs are
    first split on commas; any piece still over the limit is cut at the
    last space before the limit (or hard-cut at the limit if it contains
    no space).  Note the commas themselves are not preserved.
    """
    if len(text) <= limit:
        return [text]

    chunks = []
    for piece in text.split(','):
        while len(piece) > limit:
            cut = piece.rfind(' ', 0, limit)
            if cut == -1:
                cut = limit  # no space available: hard split at the limit
            chunks.append(piece[:cut].strip())
            piece = piece[cut:].strip()
        chunks.append(piece)
    return chunks
def combine_wav_files(input_directory, output_directory, file_name):
    """Concatenate every .wav in *input_directory* into one file at
    *output_directory*/*file_name*, in numeric filename order."""
    wav_paths = [os.path.join(input_directory, entry)
                 for entry in os.listdir(input_directory) if entry.endswith(".wav")]
    # Numeric sort: all digits embedded in each path, read as one integer.
    wav_paths.sort(key=lambda p: int(''.join(ch for ch in p if ch.isdigit())))

    # Load every clip; the sample rate of the last file loaded is reused
    # for the combined output (all clips are expected to share one rate --
    # TODO confirm upstream guarantees this).
    segments = []
    sample_rate = None
    for path in wav_paths:
        waveform, sample_rate = torchaudio.load(path)
        segments.append(waveform)

    # Join along the time axis (dim 1).
    merged = torch.cat(segments, dim=1)

    os.makedirs(output_directory, exist_ok=True)
    destination = os.path.join(output_directory, file_name)
    torchaudio.save(destination, merged, sample_rate)
    print(f"Combined audio saved to {destination}")
def wipe_folder(directory_path):
    """Delete every regular file directly inside *directory_path*.

    Subdirectories are left alone; a missing directory just logs a message.
    Individual deletion failures are reported and skipped.
    """
    if not os.path.exists(directory_path):
        print(f"The directory {directory_path} does not exist!")
        return

    for entry in os.listdir(directory_path):
        entry_path = os.path.join(directory_path, entry)
        if not os.path.isfile(entry_path):
            continue  # skip subdirectories and other non-files
        try:
            os.remove(entry_path)
            print(f"Deleted: {entry_path}")
        except Exception as e:
            print(f"Failed to delete {entry_path}. Reason: {e}")
# List of available TTS models
tts_models = [
    #'tts_models/multilingual/multi-dataset/xtts_v2',
    # Add all other models here...
]
# NOTE(review): the hand-written list above is immediately replaced by the
# live model catalogue from Coqui TTS, so it serves only as documentation.
tts_models = TTS().list_models()
# Combobox callback: remember the chosen TTS model globally.
def update_tts_model(event):
    """Store the dropdown's current value as the active TTS model."""
    global selected_tts_model
    choice = tts_model_combobox.get()
    selected_tts_model = choice
    print(f"Selected TTS model: {choice}")
# Frame for the TTS-model selection dropdown
tts_model_selection_frame = ttk.LabelFrame(root, text="Select TTS Model")
tts_model_selection_frame.pack(fill="x", expand="yes", padx=10, pady=10)

# Create a read-only dropdown for TTS model selection
tts_model_var = tk.StringVar()
tts_model_combobox = ttk.Combobox(tts_model_selection_frame, textvariable=tts_model_var, state="readonly")

# Offer only multilingual "multi-dataset" models in the dropdown.
multilingual_tts_models = [model for model in tts_models if "multi-dataset" in model]
# Models to be removed because they were found to be multi-speaker, not single-speaker
models_to_remove = [multi_voice_model1, multi_voice_model2, multi_voice_model3]
# List comprehension to remove the unwanted models
multilingual_tts_models = [model for model in multilingual_tts_models if model not in models_to_remove]
tts_model_combobox['values'] = multilingual_tts_models
tts_model_combobox.set(selected_tts_model)  # Set default value
tts_model_combobox.bind("<<ComboboxSelected>>", update_tts_model)
tts_model_combobox.pack(side="top", fill="x", expand="yes")

# Declare the button as global to access it in other functions
# NOTE(review): `global` at module level is a no-op; kept as documentation of intent.
global select_voices_button
def update_voice_comboboxes():
    """Rebuild every speaker combobox's list of selectable voices.

    With the "include fast voice models" checkbox ticked, the choices are
    the folder voice actors + single-speaker TTS models + the individual
    speakers of three multi-speaker models (loaded lazily on first use).
    Unticked, only the folder voice actors are offered.  The fast
    "Select Random Voices" button is shown or hidden to match.
    """
    global multi_voice_model_voice_list1
    global multi_voice_model_voice_list2
    global multi_voice_model_voice_list3
    global voice_actors
    global female_voice_actors
    global male_voice_actors
    # Refresh the available voice actors from disk on every call, so newly
    # added sample folders appear without restarting.
    voice_actors = [va for va in os.listdir(voice_actors_folder) if va != "cond_latent_example"]
    male_voice_actors = [va for va in voice_actors if va.endswith(".M")]
    female_voice_actors = [va for va in voice_actors if va.endswith(".F")]
    if include_single_models_var.get():  # Checkbox is checked
        # Include single-voice models (everything not tagged "multi-dataset").
        filtered_tts_models = [model for model in tts_models if "multi-dataset" not in model]
        # Lazily load the speaker lists of the three multi-speaker models --
        # instantiating a TTS model is expensive, so do it at most once.
        if not multi_voice_model_voice_list1:  # This is True if the list is empty
            print(f"{multi_voice_model_voice_list1} is empty populating it...")
            multi_voice_model_voice_list1 = TTS(multi_voice_model1).speakers
        if not multi_voice_model_voice_list2:  # This is True if the list is empty
            print(f"{multi_voice_model_voice_list2} is empty populating it...")
            multi_voice_model_voice_list2 = TTS(multi_voice_model2).speakers
        if not multi_voice_model_voice_list3:  # This is True if the list is empty
            print(f"{multi_voice_model_voice_list3} is empty populating it...")
            multi_voice_model_voice_list3 = TTS(multi_voice_model3).speakers
        combined_values = voice_actors + filtered_tts_models
        combined_values += multi_voice_model_voice_list1 + multi_voice_model_voice_list2 + multi_voice_model_voice_list3
        # These three entries are multi-speaker models; their individual
        # speakers are already in the list, so drop the model names themselves.
        combined_values.remove(multi_voice_model1)
        combined_values.remove(multi_voice_model2)
        combined_values.remove(multi_voice_model3)
    else:  # Checkbox is not checked
        # Just use the default voice actors without single voice models
        combined_values = voice_actors
    # Now update each combobox with the new combined_values
    for speaker, combobox in voice_comboboxes.items():
        combobox['values'] = combined_values
        combobox.set(speaker_voice_map[speaker])  # Reset to the currently selected voice actor
        # Widen the combobox to fit its longest entry.
        longest_string_length = max((len(str(value)) for value in combobox['values']), default=0)
        combobox.config(width=longest_string_length)
    # Check the state of the checkbox and manage the visibility of the button
    if include_single_models_var.get():  # Checkbox is checked
        # Create the button if it doesn't exist
        if 'select_voices_button' not in globals():
            global select_voices_button
            select_voices_button = ttk.Button(buttons_frame, text="Select Random Voices", command=select_voices_fast)
            select_voices_button.pack(side=tk.LEFT, padx=5)
        else:
            select_voices_button.pack(side=tk.LEFT, padx=5)
    else:
        # Hide the button if the checkbox is unchecked
        if 'select_voices_button' in globals():
            select_voices_button.pack_forget()
# Tracks whether the fast (lower audio quality) single-voice TTS models
# should be offered alongside the cloned voice actors; starts unchecked.
include_single_models_var = tk.BooleanVar(value=False)
include_single_models_checkbox = ttk.Checkbutton(
    root, # or another frame where you want the checkbox to appear
    text="Include fast Voice Models:(fast generate at cost of audio quality)",
    variable=include_single_models_var,
    onvalue=True,
    offvalue=False,
    command=update_voice_comboboxes # refreshes the voice comboboxes on every toggle
)
include_single_models_checkbox.pack() # Adjust layout options as needed
# Run once at startup so the comboboxes are populated from the start.
update_voice_comboboxes()
def clone_voice():
    """Interactively register a new voice actor from a user-supplied sample.

    Walks the user through three prompts — actor name, gender (M/F/?), and
    an audio sample (mp3/wav/mp4) — then copies the sample into a fresh
    ``tortoise/voices/<name>.<gender>`` folder and refreshes the voice
    comboboxes.  Shows an error dialog for any invalid or missing input.
    """
    # Ask for the new actor's name first; bail out if nothing was entered.
    voice_actor_name = simpledialog.askstring("Input", "Enter the name of the voice actor:", parent=root)
    if not voice_actor_name:
        messagebox.showerror("Error", "No name entered for the voice actor.")
        return
    # Small popup window holding the gender dropdown.
    gender_window = tk.Toplevel(root)
    gender_window.title("Select Gender")
    gender_var = tk.StringVar(gender_window)
    gender_dropdown = ttk.Combobox(gender_window, textvariable=gender_var, values=['M', 'F', '?'], state='readonly')
    gender_dropdown.pack()

    def on_gender_select():
        # Invoked by the OK button: record the gender, close the popup,
        # then walk the user through picking the sample file.
        voice_actor_gender = gender_var.get()
        gender_window.destroy()
        file_path = filedialog.askopenfilename(
            title="Select Voice Sample File",
            filetypes=[("Audio Files", "*.mp3 *.wav *.mp4")],
            parent=root
        )
        # Reject a cancelled dialog or an unsupported file extension.
        if not (file_path and file_path.lower().endswith(('.mp3', '.wav', '.mp4'))):
            messagebox.showerror("Error", "No file selected or selected file is not an MP3, WAV, or MP4.")
            return
        new_voice_path = f"tortoise/voices/{voice_actor_name}.{voice_actor_gender}"
        if os.path.exists(new_voice_path):
            messagebox.showerror("Error", "Voice actor folder already exists.")
            return
        # Fresh actor: create the folder, drop the sample in, and refresh the GUI.
        os.makedirs(new_voice_path)
        shutil.copy(file_path, new_voice_path)
        messagebox.showinfo("Success", f"New voice actor folder created with sample file: {new_voice_path}")
        update_voice_comboboxes()

    ok_button = ttk.Button(gender_window, text="OK", command=on_gender_select)
    ok_button.pack()
    # Focus the dropdown and block until the popup is dismissed.
    gender_dropdown.focus_set()
    root.wait_window(gender_window)
# Button that launches the interactive voice-cloning flow above.
clone_voice_button = ttk.Button(
    root,
    text="Clone new voice",
    command=clone_voice # The function to execute when the button is clicked
)
# Add the new button to the GUI
clone_voice_button.pack(padx=5)
def create_folder_if_not_exists(folder_path):
    """Ensure *folder_path* exists on disk, creating it (and parents) when absent.

    Prints a one-line status message either way.
    """
    if os.path.exists(folder_path):
        print(f"Folder '{folder_path}' already exists.")
    else:
        os.makedirs(folder_path)
        print(f"Folder '{folder_path}' created successfully.")
# Given a voice actor name, resolve the full directory for that voice actor
# and collect all the reference audio files inside that actor's folder.
def list_reference_files(voice_actor):
global multi_voice_model_voice_list1
global multi_voice_model_voice_list2
global multi_voice_model_voice_list3
if voice_actor in multi_voice_model_voice_list1:
create_folder_if_not_exists(f"tortoise/_model_demo_voices/{multi_voice_model1}/{voice_actor}")
reference_files = [os.path.join(f"tortoise/_model_demo_voices/{multi_voice_model1}/{voice_actor}", file) for file in os.listdir(f"tortoise/_model_demo_voices/{multi_voice_model1}/{voice_actor}") if file.endswith((".wav", ".mp3"))]
if len(reference_files)==0:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
fast_tts = TTS(multi_voice_model1, progress_bar=True).to(device)
fast_tts.tts_to_file(text=demo_text , file_path=f"tortoise/_model_demo_voices/{multi_voice_model1}/{voice_actor}/demo.wav", speaker = voice_actor)
reference_files = [os.path.join(f"tortoise/_model_demo_voices/{multi_voice_model1}/{voice_actor}", file) for file in os.listdir(f"tortoise/_model_demo_voices/{multi_voice_model1}/{voice_actor}") if file.endswith((".wav", ".mp3"))]
return reference_files
else:
return reference_files
elif voice_actor in multi_voice_model_voice_list2: