TfIdfFeatureExtraction.py
import csv
import re

import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder

def encode_label(label):
    """Encode string class labels as integers, e.g. ['happy', 'sad', 'happy'] -> [0, 1, 0]."""
    le = LabelEncoder()
    return le.fit_transform(label)

def readdata(train_set_path):
    """Read the dataset CSV and return the cleaned tweet texts and their labels."""
    x = []
    y = []
    stop_words = set(stopwords.words('english'))
    stemmer = PorterStemmer()
    # Split on punctuation and whitespace, keeping the words themselves (gaps=True).
    toker = RegexpTokenizer(r'((?<=[^\w\s])\w(?=[^\w\s])|(\W))+', gaps=True)
    with open(train_set_path, encoding="utf8") as infile:
        # csv.reader handles commas inside quoted tweet text, which a plain
        # line.split(",") would break on.
        for data in csv.reader(infile):
            # Skip blank or malformed rows.
            if len(data) < 4:
                continue
            # Skip the header row; the second column is used as the label and
            # the fourth column as the tweet text.
            if data[1] != "tweet_id":
                # Remove @mentions and URLs, then lowercase the text.
                content = re.sub(r"(?:@|https?://)\S+", "", data[3].lower())
                word_tokens = toker.tokenize(content)
                # Keep alphabetic, non-stopword tokens and stem them.
                filtered_sentence = [stemmer.stem(w) for w in word_tokens
                                     if w not in stop_words and w.isalpha()]
                x.append(' '.join(filtered_sentence))
                y.append(data[1])
    return np.array(x), np.array(y)

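# Illustrative example (hypothetical tweet, not taken from the dataset): after the
# cleaning in readdata, a text field such as
#   "@user I love this! http://t.co/abc so happy"
# comes out roughly as "love happi" -- the mention, URL, punctuation and stop words
# are dropped and the remaining words are Porter-stemmed.
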
if __name__ == '__main__':
print("Begin Extract Features ....")
dataset_csv = 'D:\\My Source Codes\\Projects-Python' \
'\\TextBaseEmotionDetectionWithEnsembleMethod\\Dataset\\' \
'text_emotion_6class.csv'
feature_csv = 'D:\\My Source Codes\\Projects-Python' \
'\\TextBaseEmotionDetectionWithEnsembleMethod\\Dataset\\' \
'tfidffeature6cl.csv'
x, y = readdata(dataset_csv)
y = encode_label(y)
features_vectors = pd.DataFrame()
vectorizer = TfidfVectorizer()
vectorizer.fit(x)
x_tfidf = vectorizer.transform(x)
features_vectors = pd.DataFrame(x_tfidf.toarray())
features_vectors['label'] = y
features_vectors.to_csv(feature_csv, mode='a', header=False, index=False)
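    # Optional sanity check -- a minimal sketch added here, not part of the original
    # script: read the saved feature CSV back and report its shape. The file was
    # written with header=False, so it is read with header=None; the last column is
    # the encoded label.
    saved = pd.read_csv(feature_csv, header=None)
    x_saved = saved.iloc[:, :-1].values
    y_saved = saved.iloc[:, -1].values
    print("Saved feature matrix:", x_saved.shape, "labels:", y_saved.shape)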