Commit 2fe136c

Some work in progress

ThorvaldAagaard committed Nov 6, 2023
1 parent 25f93c4 commit 2fe136c
Showing 8 changed files with 5,044 additions and 10 deletions.
21 changes: 14 additions & 7 deletions scripts/training/bidding/bidding_nn_tf2.py
@@ -1,5 +1,6 @@
 import sys
 sys.path.append('../../../src')
+import os
 
 import numpy as np
 import datetime
@@ -16,7 +17,7 @@

 model_path = 'model/bidding.h5'
 
-batch_size = 64
+batch_size = 1
 display_step = 1000
 epochs = 1

@@ -26,10 +27,12 @@
print("Input shape: ", X_train.shape)
print("Output shape: ", y_train.shape)
n_examples = y_train.shape[0]
n_ftrs = X_train.shape[1]
n_bids = y_train.shape[1]
n_sequence_length = X_train.shape[1]
n_ftrs = X_train.shape[2]
n_bids = y_train.shape[2]

print("Size input hand: ", n_ftrs)
print("Size input: ", n_ftrs)
print("Sequence length: ", n_sequence_length)
print("Examples for training: ", n_examples)
print("Batch size: ", batch_size)
n_iterations = round(((n_examples / batch_size) * epochs) / 1000) * 1000
@@ -38,19 +41,23 @@

 lstm_size = 128
 n_layers = 3
+keep_prob = 0.8
 
+input_shape = (n_sequence_length, n_ftrs)
+# Create a Sequential model with stateful LSTM and specify batch_input_shape
 model = Sequential()
+model.add(LSTM(lstm_size, dropout=1 - keep_prob, stateful=True, return_sequences=True, batch_input_shape=(batch_size,) + input_shape))
 
-for _ in range(n_layers):
-    model.add(LSTM(lstm_size, dropout=0.2, stateful=True))
+for _ in range(n_layers - 1):  # Subtract 1 to account for the first LSTM layer
+    model.add(LSTM(lstm_size, dropout=1 - keep_prob, stateful=True, return_sequences=True))
 
 model.add(Dense(n_bids, activation='softmax'))
 
 model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.001))
 
+# Create Batcher instances
 batch = Batcher(n_examples, batch_size)
-cost_batch = Batcher(n_examples, display_step)
+cost_batch = Batcher(n_examples, batch_size)
 
 for i in range(n_iterations):
     x_batch, y_batch = batch.next_batch([X_train, y_train])
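A note on the stateful change above: with stateful=True, Keras carries LSTM hidden state from one train_on_batch call into the next, and batch_input_shape pins the model to exactly batch_size sequences per call. If each training example is an independent deal, any carried-over state between batches is noise. A minimal sketch of the loop with an explicit reset (an assumption about intent; the diff itself does not reset state):

# Sketch only: variant of the training loop with explicit state resets,
# assuming batches hold independent auctions (not stated in the diff).
for i in range(n_iterations):
    x_batch, y_batch = batch.next_batch([X_train, y_train])
    model.train_on_batch(x_batch, y_batch)
    model.reset_states()  # clear LSTM state before the next batch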
76 changes: 76 additions & 0 deletions scripts/training/bidding/bidding_nn_tf2xx.py
@@ -0,0 +1,76 @@
+import sys
+import os
+sys.path.append('../../../src')
+
+import numpy as np
+import datetime
+from keras.models import Sequential
+from keras.layers import LSTM, Dropout, Dense
+from keras.optimizers import Adam
+from batcher import Batcher
+
+if len(sys.argv) < 2:
+    print("Usage: python bidding_nn inputdirectory")
+    sys.exit(1)
+
+bin_dir = sys.argv[1]
+
+model_path = './model/bidding.h5'
+
+# The stateful LSTM below fixes the batch size via batch_input_shape
+batch_size = 32
+display_step = 1000
+epochs = 1
+
+X_train = np.load(os.path.join(bin_dir, 'x.npy'))
+y_train = np.load(os.path.join(bin_dir, 'y.npy'))
+
+print("Input shape: ", X_train.shape)
+print("Output shape: ", y_train.shape)
+n_examples = y_train.shape[0]
+n_sequence_length = X_train.shape[1]
+n_ftrs = X_train.shape[2]
+n_bids = y_train.shape[2]
+
+print("Size input hand: ", n_ftrs)
+print("Sequence length: ", n_sequence_length)
+print("Examples for training: ", n_examples)
+print("Batch size: ", batch_size)
+n_iterations = round(((n_examples / batch_size) * epochs) / 1000) * 1000
+print("Iterations ", n_iterations)
+print("Model path: ", model_path)
+
+lstm_size = 128
+n_layers = 3
+keep_prob = 0.8  # Adjust the dropout rate as needed
+
+input_shape = (n_sequence_length, n_ftrs)
+# Create a Sequential model with stateful LSTM and specify batch_input_shape
+model = Sequential()
+model.add(LSTM(lstm_size, dropout=1 - keep_prob, stateful=True, return_sequences=True, batch_input_shape=(batch_size,) + input_shape))
+
+for _ in range(n_layers - 1):  # Subtract 1 to account for the first LSTM layer
+    model.add(LSTM(lstm_size, dropout=1 - keep_prob, stateful=True, return_sequences=True))
+
+model.add(Dense(n_bids, activation='softmax'))
+model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.001))
+
+model.summary()
+
+# Create Batcher instances
+batch = Batcher(n_examples, batch_size)
+cost_batch = Batcher(n_examples, batch_size)
+
+for i in range(n_iterations):
+    x_batch, y_batch = batch.next_batch([X_train, y_train])
+    if (i != 0) and i % display_step == 0:
+        # Evaluate the model on the test data
+        x_cost, y_cost = cost_batch.next_batch([X_train, y_train])
+        c_train = model.evaluate(x_cost, y_cost, verbose=0)
+        print('{} {}. c_train={}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), i, c_train))
+        sys.stdout.flush()
+        model.save(model_path, overwrite=True)
+
+    model.train_on_batch(x_batch, y_batch)
+
+model.save(model_path, overwrite=True)
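Both scripts import Batcher from src/batcher.py, which is not part of this commit. Inferred from the calls above, a minimal stand-in could look like the sketch below; the interface (next_batch takes a list of arrays and returns aligned slices) is an assumption, not the project's actual implementation:

import numpy as np

# Hypothetical stand-in for src/batcher.Batcher, inferred from usage above.
class Batcher:
    def __init__(self, n_examples, batch_size):
        self.n_examples = n_examples
        self.batch_size = batch_size
        self.offset = 0

    def next_batch(self, arrays):
        # Slice the same index range out of every array, wrapping around.
        if self.offset + self.batch_size > self.n_examples:
            self.offset = 0
        sl = slice(self.offset, self.offset + self.batch_size)
        self.offset += self.batch_size
        return [a[sl] for a in arrays]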
2 changes: 1 addition & 1 deletion scripts/training/bidding/default.conf
@@ -3,7 +3,7 @@ include_system = False
 tf_version = 1
 
 [bidding]
-bidder = models/gib21_model/gib21-1000000
+bidder = scripts/training/bidding/model/bidding-1248000
 info = models/gib21_info_model/gib21_info-500000
 search_threshold = 0.1

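The updated [bidding] entry points the engine at the newly trained model under scripts/training/bidding/model. As a quick illustration (not the project's own loader), such a key can be read with Python's standard configparser:

import configparser

# Illustrative only: read the bidder model path from default.conf.
config = configparser.ConfigParser()
config.read('scripts/training/bidding/default.conf')
print(config['bidding']['bidder'])  # scripts/training/bidding/model/bidding-1248000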