-
Notifications
You must be signed in to change notification settings - Fork 0
/
training_companies.py
103 lines (73 loc) · 2.76 KB
/
training_companies.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
# -*- coding: utf-8 -*-
"""booking_Erwin.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1xkxzi8wZ5rOC731yBeITMF-BXjJGh7ui
"""
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
import tensorflow as tf
import re
import argparse
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelBinarizer
import numpy as np
from sklearn.model_selection import train_test_split
## command-line interface: input CSV path and model save location
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', help='CSV path')
parser.add_argument('-s', '--save', help='Save path')
args = parser.parse_args()
path = args.path
saved_path = args.save
# Original Colab input, e.g.:
# '/content/drive/MyDrive/bookings_Erwin.csv'
df = pd.read_csv(path, low_memory= False)
# Rows without a target account code cannot be used for supervised training.
df.dropna(subset = ["booking_account"], inplace=True)
## cleaning texts
# Normalise each free-text description: lowercase, strip digits, then
# collapse every run of non-word characters (and underscores) to one space.
# NOTE(review): assumes 'description' is never NaN after the dropna above —
# a missing description would raise AttributeError here; confirm vs the data.
df['description'] = df.apply(lambda row : row['description'].lower(), axis = 1)
df['description'] = df.apply(lambda row : re.sub(r'[0-9]', '', row['description']), axis = 1)
df['description'] = df.apply(lambda row : re.sub(r'[\W_]+', ' ', row['description']), axis = 1)
# Build the word -> index vocabulary; lowercasing was already done above.
vectorizer = CountVectorizer(min_df=0, lowercase=False)
vectorizer.fit(df['description'])
corpus = vectorizer.vocabulary_
# Inverse mapping (index -> word); not used below but handy for debugging.
reverse_corpus = dict([(value, key) for (key, value) in corpus.items()])
def text_to_num(s):
    """Encode a cleaned description as a list of vocabulary indices.

    Splits on single spaces (the earlier regex cleanup collapses all
    separator runs to one space) and looks each word up in the fitted
    CountVectorizer vocabulary.

    NOTE(review): out-of-vocabulary words map to 0, but 0 is also a real
    vocabulary index and the pad_sequences padding value — confirm this
    three-way collision is acceptable.
    """
    return [corpus.get(word, 0) for word in s.split(' ')]
## encode features and targets, then split for training
# Integer-encode every description using the vocabulary fitted above.
df['X'] = df['description'].apply(text_to_num)
# Fix each encoded description to a 5-token window; shorter sequences are
# left-padded with 0 (which also doubles as the unknown-word code).
num_words = 5
data = pad_sequences(df['X'], maxlen=num_words)
# One-hot encode the target account codes. `bin` shadows the builtin, but
# the name is referenced later in the script and is therefore kept.
bin = LabelBinarizer()
y = bin.fit_transform(df.booking_account)
# 80/20 hold-out split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=42)
## model: embedding -> stacked LSTMs -> per-class probabilities
model = Sequential()
# Embedding input_dim must cover every vocabulary index plus the 0 used for
# padding/unknown words, so derive it from the fitted vocabulary instead of
# hard-coding 875; input_length matches the pad_sequences window.
model.add(Embedding(len(corpus) + 1, 64, input_length=num_words))
model.add(LSTM(64, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
model.add(LSTM(64))
# One output per LabelBinarizer class. softmax + categorical cross-entropy
# replaces the original relu + MSE pairing, which is not a proper objective
# for one-hot multi-class classification.
# NOTE(review): assumes >2 classes; with exactly 2, LabelBinarizer emits a
# single column and sigmoid + binary_crossentropy would be needed instead.
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
## train, evaluate, spot-check one sample, and persist the model
# Human-readable class labels, index-aligned with the one-hot columns.
labels = bin.classes_.tolist()
# NOTE(review): batch_size=1000 / 30 epochs were presumably tuned for the
# original Colab dataset — confirm they suit the CSV passed via --path.
model.fit(X_train, y_train,
batch_size=1000,
epochs=30,
validation_data=(X_test, y_test))
print('Evaluating >>>>>')
# Held-out loss and accuracy (computed but not printed below).
score, acc = model.evaluate(X_test, y_test,
batch_size=1000)
print(X_test[1].shape)
# Spot-check a single held-out sample (index 100) against its true label.
xx = np.expand_dims(X_test[100], axis = 0)
yy = np.expand_dims(y_test[100], axis = 0)
print('Original result:', bin.inverse_transform(yy))
hist = model.predict(xx)
print(hist)
# argmax over the flattened (1, n_classes) prediction picks the top class.
print('Prediction:', labels[np.argmax(hist)])
# Original Colab save location, e.g.:
# '/content/saved_erwin'
model.save(saved_path)