rbmpygivenx.py
import numpy as np
import sys

sys.path.insert(0, './util/')
from softplus import softplus
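
# NOTE: softplus comes from ./util/softplus.py, which is not shown in this
# repository view. A minimal sketch of what it is assumed to compute, a
# numerically stable elementwise log(1 + exp(x)):
#
#     def softplus(x):
#         return np.maximum(x, 0) + np.log1p(np.exp(-np.abs(x)))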

def rbmpygivenx(rbm, x, train_or_test):
    # RBMPYGIVENX calculates the class probabilities p(y|x) of a
    # classification RBM. Internal function.
    #
    # Copyright (c) Søren Sønderby, July 2014
    n_classes = rbm.d.shape[0]
    n_samples = x.shape[0]
    # Hidden pre-activations from the input, shape (n_hidden, n_samples):
    # cwx[j, i] = c_j + W[j, :] . x[i, :]
    cwx = np.matmul(rbm.W, np.transpose(x))
    cwx = cwx + np.reshape(rbm.c, (rbm.c.shape[0], 1))
    # Only apply dropout in training mode (hidden_mask is assumed to be
    # precomputed with a shape broadcastable to cwx).
    if train_or_test == 'train' and rbm.dropout_hidden > 0:
        cwx = cwx * rbm.hidden_mask
    # Per-class hidden inputs, shape (n_hidden, n_samples, n_classes):
    # F[j, i, y] = c_j + W[j, :] . x[i, :] + U[j, y]
    F = rbm.U[:, None, :] + cwx[:, :, None]
    class_log_prob = np.zeros((n_samples, n_classes))
    for y in range(n_classes):
        # Unnormalized log p(y|x): class bias plus the softplus term
        # summed over hidden units.
        class_log_prob[:, y] = np.sum(softplus(F[:, :, y]), axis=0) + rbm.d[y]
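
    # The loop above computes the discriminative-RBM log-posterior up to
    # its normalizer (cf. Larochelle & Bengio, 2008):
    #
    #     log p(y|x) = d_y + sum_j softplus(c_j + U[j, y] + W[j, :] . x)
    #                  - log Z(x)
    #
    # The log-sum-exp normalization below supplies log Z(x) implicitly.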
    # Normalize with the log-sum-exp trick: subtracting the per-row max
    # before exponentiating avoids overflow, and the constant cancels in
    # the division below.
    class_log_prob_amax = np.amax(class_log_prob, axis=1, keepdims=True)
    class_prob = np.exp(class_log_prob - class_log_prob_amax)
    class_prob_sum = np.sum(class_prob, axis=1, keepdims=True)
    class_prob = class_prob / class_prob_sum
    return class_prob, F
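

if __name__ == '__main__':
    # Usage sketch (an illustration, not part of the original toolbox):
    # build a tiny random RBM and check that each output row is a valid
    # probability distribution. The field names W, U, c, d and
    # dropout_hidden mirror how rbmpygivenx accesses its rbm argument;
    # SimpleNamespace is an assumed container for them.
    from types import SimpleNamespace

    rng = np.random.default_rng(0)
    n_hidden, n_visible, n_classes, n_samples = 8, 20, 3, 5
    rbm = SimpleNamespace(
        W=0.1 * rng.standard_normal((n_hidden, n_visible)),  # input weights
        U=0.1 * rng.standard_normal((n_hidden, n_classes)),  # class weights
        c=0.1 * rng.standard_normal(n_hidden),               # hidden biases
        d=0.1 * rng.standard_normal(n_classes),              # class biases
        dropout_hidden=0.0,
    )
    x = rng.integers(0, 2, size=(n_samples, n_visible)).astype(float)

    class_prob, F = rbmpygivenx(rbm, x, 'test')
    print(class_prob.shape)        # (n_samples, n_classes)
    print(class_prob.sum(axis=1))  # each row sums to 1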