dataset: opus4m
model: transformer
source language(s): aze_Latn bak chv crh crh_Latn kaz_Cyrl kaz_Latn kir_Cyrl kjh kum mon nog ota_Arab ota_Latn sah tat tat_Arab tat_Latn tuk tuk_Latn tur tyv uig_Arab uig_Cyrl uzb_Cyrl uzb_Latn xal
target language(s): eng
pre-processing: normalization + SentencePiece (spm32k,spm32k)
download: opus4m-2020-08-12.zip
test set translations: opus4m-2020-08-12.test.txt
test set scores: opus4m-2020-08-12.eval.txt
| testset | BLEU | chr-F |
|---------|------|-------|
| newsdev2016-entr-tureng.tur.eng | 5.9 | 0.259 |
| newstest2016-entr-tureng.tur.eng | 4.6 | 0.246 |
| newstest2017-entr-tureng.tur.eng | 4.6 | 0.246 |
| newstest2018-entr-tureng.tur.eng | 4.9 | 0.247 |
| Tatoeba-test.aze-eng.aze.eng | 13.2 | 0.322 |
| Tatoeba-test.bak-eng.bak.eng | 2.9 | 0.190 |
| Tatoeba-test.chv-eng.chv.eng | 0.8 | 0.162 |
| Tatoeba-test.crh-eng.crh.eng | 14.6 | 0.322 |
| Tatoeba-test.kaz-eng.kaz.eng | 11.8 | 0.302 |
| Tatoeba-test.kir-eng.kir.eng | 15.1 | 0.337 |
| Tatoeba-test.kjh-eng.kjh.eng | 1.7 | 0.099 |
| Tatoeba-test.kum-eng.kum.eng | 14.6 | 0.314 |
| Tatoeba-test.mon-eng.mon.eng | 6.4 | 0.251 |
| Tatoeba-test.multi.eng | 10.6 | 0.296 |
| Tatoeba-test.nog-eng.nog.eng | 9.0 | 0.263 |
| Tatoeba-test.ota-eng.ota.eng | 1.4 | 0.150 |
| Tatoeba-test.sah-eng.sah.eng | 0.7 | 0.118 |
| Tatoeba-test.tat-eng.tat.eng | 4.6 | 0.228 |
| Tatoeba-test.tuk-eng.tuk.eng | 5.5 | 0.256 |
| Tatoeba-test.tur-eng.tur.eng | 28.6 | 0.459 |
| Tatoeba-test.tyv-eng.tyv.eng | 5.5 | 0.190 |
| Tatoeba-test.uig-eng.uig.eng | 0.1 | 0.113 |
| Tatoeba-test.uzb-eng.uzb.eng | 5.1 | 0.208 |
| Tatoeba-test.xal-eng.xal.eng | 1.4 | 0.150 |