Pep8 #64
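
Applies PEP 8 cleanups across the t3f codebase: wrapping lines longer than 79 characters, adding the two blank lines required before top-level definitions, realigning continuation lines, dropping redundant parentheses, tightening inline-comment spacing, renaming a local variable in ops._full_tt for clarity, and forwarding the previously ignored feed_dict argument in TensorTrainBase.eval.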

Open · wants to merge 3 commits into base: develop
Changes from all commits
2 changes: 1 addition & 1 deletion t3f/__init__.py
@@ -8,4 +8,4 @@
 from t3f.regularizers import *
 from t3f.riemannian import *
 from t3f.shapes import *
-from t3f.decompositions import *
+from t3f.decompositions import *
5 changes: 3 additions & 2 deletions t3f/batch_ops_test.py
@@ -13,7 +13,7 @@ def testConcatMatrix(self):
     first = initializers.random_matrix_batch(((2, 3), (3, 3)), batch_size=1)
     second = initializers.random_matrix_batch(((2, 3), (3, 3)), batch_size=4)
     third = initializers.random_matrix_batch(((2, 3), (3, 3)), batch_size=3)
-    first_res = batch_ops.concat_along_batch_dim((first))
+    first_res = batch_ops.concat_along_batch_dim(first)
     first_res = ops.full(first_res)
     first_second_res = batch_ops.concat_along_batch_dim((first, second))
     first_second_res = ops.full(first_second_res)
@@ -40,7 +40,8 @@ def testConcatMatrix(self):
     first_second_third_desired_val = res[5]
     self.assertAllClose(first_res_val, first_desired_val)
     self.assertAllClose(first_second_res_val, first_second_desired_val)
-    self.assertAllClose(first_second_third_res_val, first_second_third_desired_val)
+    self.assertAllClose(first_second_third_res_val,
+                        first_second_third_desired_val)

   def testBatchMultiply(self):
     # Test multiplying batch of TTMatrices by individual numbers.
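
The (first) fix above is worth spelling out: in Python, parentheses alone do not create a tuple, so concat_along_batch_dim((first)) was already receiving the bare object, and the redundant parentheses only suggested otherwise. A standalone illustration (plain Python, not repo code):

    # Parentheses without a trailing comma are just grouping.
    x = 42
    assert (x) == 42             # (x) is simply x
    assert type((x,)) is tuple   # the comma makes the tuple
    assert (x,) != x
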
6 changes: 4 additions & 2 deletions t3f/decompositions.py
@@ -379,7 +379,8 @@ def _round_batch_tt(tt, max_tt_rank, epsilon):
   tt_cores[0] = tf.reshape(tt_cores[0], core_shape)
   if not are_tt_ranks_defined:
     ranks = None
-  return TensorTrainBatch(tt_cores, tt.get_raw_shape(), ranks, batch_size=tt.batch_size)
+  return TensorTrainBatch(tt_cores, tt.get_raw_shape(), ranks,
+                          batch_size=tt.batch_size)


 def orthogonalize_tt_cores(tt, left_to_right=True):
@@ -513,7 +514,8 @@ def _orthogonalize_batch_tt_cores_left_to_right(tt):

tt_cores[core_idx] = tf.reshape(curr_core, new_core_shape)

next_core = tf.reshape(tt_cores[core_idx + 1], (batch_size, triang_shape[2], -1))
next_core_shape = (batch_size, triang_shape[2], -1)
next_core = tf.reshape(tt_cores[core_idx + 1], next_core_shape)
tt_cores[core_idx + 1] = tf.matmul(triang, next_core)

if tt.is_tt_matrix():
Expand Down
6 changes: 4 additions & 2 deletions t3f/kronecker.py
@@ -4,6 +4,7 @@
 from t3f.tensor_train import TensorTrain
 from t3f import ops

+
 def determinant(kron_a):
   """Computes the determinant of a given Kronecker-factorized matrix.

@@ -95,10 +96,10 @@ def slog_determinant(kron_a):
     core_pow = pows / i_shapes[core_idx].value
     logdet += tf.log(core_abs_det) * core_pow
     det_sign *= core_det_sign**(core_pow)
-
-
   return det_sign, logdet

+
 def inv(kron_a):
   """Computes the inverse of a given Kronecker-factorized matrix.

@@ -142,6 +143,7 @@ def inv(kron_a):
   res_shape = kron_a.get_raw_shape()
   return TensorTrain(inv_cores, res_shape, res_ranks)

+
 def cholesky(kron_a):
   """Computes the Cholesky decomposition of a given Kronecker-factorized matrix.
9 changes: 5 additions & 4 deletions t3f/kronecker_test.py
@@ -7,6 +7,7 @@
 from t3f import initializers
 from t3f import variables

+
 class KroneckerTest(tf.test.TestCase):

   def testIsKronNonKron(self):
@@ -38,11 +39,11 @@ def testSlogDet(self):
     # TODO: use kron and -1 * kron matrices, when mul is implemented
     # the current version is platform-dependent

-    tf.set_random_seed(5) # negative derminant
+    tf.set_random_seed(5)  # negative determinant
     initializer = initializers.random_matrix(((2, 3), (2, 3)), tt_rank=1)
     kron_neg = variables.get_variable('kron_neg', initializer=initializer)

-    tf.set_random_seed(1) # positive determinant
+    tf.set_random_seed(1)  # positive determinant
     initializer = initializers.random_matrix(((2, 3), (2, 3)), tt_rank=1)
     kron_pos = variables.get_variable('kron_pos', initializer=initializer)

@@ -83,8 +84,8 @@ def testCholesky(self):
     K_2 = L_2.dot(L_2.T)
     K = np.kron(K_1, K_2)
     initializer = tensor_train.TensorTrain([K_1[None, :, :, None],
-                                            K_2[None, :, :, None]],
-                                            tt_ranks=7*[1])
+                                           K_2[None, :, :, None]],
+                                           tt_ranks=7*[1])
     kron_mat = variables.get_variable('kron_mat', initializer=initializer)
     init_op = tf.global_variables_initializer()
     with self.test_session() as sess:
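
The seed-comment edits above apply PEP 8's inline-comment rule: at least two spaces between the statement and the hash, one space after it. In isolation (plain Python, not repo code):

    x = 1 # one space before the hash: flagged as E261 by pycodestyle
    x = 1  # two spaces before, one after: compliant
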
14 changes: 8 additions & 6 deletions t3f/ops.py
@@ -52,12 +52,12 @@ def _full_tt(tt):
     intermediate_shape.append(raw_shape[0][i])
     intermediate_shape.append(raw_shape[1][i])
     res = tf.reshape(res, intermediate_shape)
-    transpose = []
+    transpose_params = []
     for i in range(0, 2 * num_dims, 2):
-      transpose.append(i)
+      transpose_params.append(i)
     for i in range(1, 2 * num_dims, 2):
-      transpose.append(i)
-    res = tf.transpose(res, transpose)
+      transpose_params.append(i)
+    res = tf.transpose(res, transpose_params)
     return tf.reshape(res, shape)
   else:
     return tf.reshape(res, shape)
@@ -166,7 +166,8 @@ def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
     left_mode = a_shape[0][core_idx]
     right_mode = b_shape[1][core_idx]
     if is_res_batch:
-      core_shape = (batch_size, res_left_rank, left_mode, right_mode, res_right_rank)
+      core_shape = (batch_size, res_left_rank, left_mode, right_mode,
+                    res_right_rank)
     else:
       core_shape = (res_left_rank, left_mode, right_mode,
                     res_right_rank)
@@ -702,7 +703,8 @@ def add(tt_a, tt_b):
     raise ValueError('The batch sizes are different and not 1, broadcasting is '
                      'not available.')

-  is_batch_case = isinstance(tt_a, TensorTrainBatch) or isinstance(tt_b, TensorTrainBatch)
+  is_batch_case = isinstance(tt_a, TensorTrainBatch) or \
+                  isinstance(tt_b, TensorTrainBatch)
   batch_size = None
   if is_batch_case:
     if tt_a.is_tt_matrix():
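
One note on the isinstance wrapping above: PEP 8 prefers implicit continuation inside parentheses over backslash continuation. An equivalent, backslash-free way to wrap the same check (a sketch with a stand-in class, not what this commit does):

    class TensorTrainBatch(object):
      """Stand-in for t3f's TensorTrainBatch, only for this sketch."""

    tt_a, tt_b = TensorTrainBatch(), object()
    is_batch_case = (isinstance(tt_a, TensorTrainBatch) or
                     isinstance(tt_b, TensorTrainBatch))
    assert is_batch_case
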
6 changes: 3 additions & 3 deletions t3f/ops_test.py
@@ -69,7 +69,8 @@ def testFlatInnerTTTensbySparseTens(self):
     for rank in rank_list:
       for num_elements in [1, 10]:
         tt_1 = initializers.random_tensor(shape, tt_rank=rank)
-        sparse_flat_indices = np.random.choice(np.prod(shape), num_elements).astype(int)
+        sparse_flat_indices = np.random.choice(np.prod(shape), num_elements)
+        sparse_flat_indices = sparse_flat_indices.astype(int)
         sparse_indices = np.unravel_index(sparse_flat_indices, shape)
         sparse_indices = np.vstack(sparse_indices).transpose()
         values = np.random.randn(num_elements).astype(np.float32)
@@ -385,7 +386,6 @@ def testUnknownRanksTTMatmul(self):
       res_desired_val = sess.run(res_desired, {K_1: K_1_val, K_2: K_2_val})
       self.assertAllClose(res_desired_val, res_actual_val)

-
   def testHalfKnownRanksTTMatmul(self):
     # Tests tt_tt_matmul for the case when one matrix has known ranks
     # and the other one doesn't
@@ -677,7 +677,7 @@ def _random_sparse(shape, non_zeros):
   sparse_indices = np.vstack(sparse_indices).transpose()
   values = np.random.randn(non_zeros).astype(np.float32)
   sparse = tf.SparseTensor(indices=sparse_indices, values=values,
-                            dense_shape=shape)
+                           dense_shape=shape)
   return sparse

 if __name__ == "__main__":
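
The test helper above samples flat indices into a tensor and converts them to per-axis coordinates. The same pattern in isolation (a standalone numpy sketch, not repo code):

    import numpy as np

    shape = (4, 4, 4)
    num_elements = 5

    # Sample flat positions into the tensor, split into two statements
    # to stay under the 79-character limit, as in the diff above.
    flat = np.random.choice(np.prod(shape), num_elements)
    flat = flat.astype(int)

    multi = np.unravel_index(flat, shape)  # tuple of one index array per axis
    multi = np.vstack(multi).transpose()   # rows are (i, j, k) coordinates
    assert multi.shape == (num_elements, len(shape))
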
10 changes: 4 additions & 6 deletions t3f/regularizers.py
@@ -31,9 +31,8 @@ def l2_regularizer(scale, scope=None):
   def l2(tt):
     """Applies l2 regularization to TensorTrain object."""
     with tf.name_scope(scope, 'l2_regularizer', [tt]) as name:
-      my_scale = tf.convert_to_tensor(scale,
-                                      dtype=tt.dtype.base_dtype,
-                                      name='scale')
+      my_scale = tf.convert_to_tensor(scale, dtype=tt.dtype.base_dtype,
+                                      name='scale')
       return tf.mul(my_scale, ops.frobenius_norm_squared(tt), name=name)

   return l2
@@ -68,9 +67,8 @@ def cores_regularizer(core_regularizer, scale, scope=None):
   def regularizer(tt):
     """Applies the regularization to TensorTrain object."""
     with tf.name_scope(scope, 'l2_regularizer', [tt]) as name:
-      my_scale = tf.convert_to_tensor(scale,
-                                      dtype=tt.dtype.base_dtype,
-                                      name='scale')
+      my_scale = tf.convert_to_tensor(scale, dtype=tt.dtype.base_dtype,
+                                      name='scale')
       penalty = 0.0
       for i in range(tt.ndims()):
         penalty += core_regularizer(tt.tt_cores[i])
12 changes: 7 additions & 5 deletions t3f/riemannian_test.py
@@ -22,18 +22,20 @@ def testProjectOnItself(self):
   def testProject(self):
     # Compare our projection with the results obtained (and precomputed) from
     # tt.riemannian.project which is well tested.
-    tangent_tens_cores = ([[[-0.42095269, 0.02130842],
-                            [-0.4181081 , 0.42945687],
+    tangent_tens_cores = \
+        ([[[-0.42095269, 0.02130842],
+          [-0.4181081, 0.42945687],
           [ 0.45972439, -0.4525616 ],
           [-0.17159869, -0.14505528]]], [[[ 0.23344421],
                                           [ 0.81480049],
                                           [-0.92385135]],

                                          [[-0.19279465],
-                                          [ 0.524976 ],
+                                          [ 0.524976],
                                           [-0.40149197]]])
     tangent_tens = TensorTrain(tangent_tens_cores, (4, 3), (1, 2, 1))
-    tens_cores = ([[[-1.01761142, 0.36075896, -0.2493624 ],
+    tens_cores = \
+        ([[[-1.01761142, 0.36075896, -0.2493624 ],
           [-0.99896565, -1.12685474, 1.02832458],
           [ 1.08739724, -0.6537435 , 1.99975537],
           [ 0.35128005, 0.40395104, -0.16790072]]], [[[ 0.34105142],
@@ -53,7 +55,7 @@ def testProject(self):
        [-0.04847773, -0.72908174, 0.20142675],
        [ 0.34431125, -0.20935516, -1.15864246]]
     proj = riemannian.project_sum(tens, tangent_tens)
-    with self.test_session() as sess:
+    with self.test_session():
       self.assertAllClose(desired_projection, ops.full(proj).eval())

   def testProjectSum(self):
2 changes: 1 addition & 1 deletion t3f/shapes.py
@@ -278,4 +278,4 @@ def expand_batch_dim(tt):
   for core_idx in range(tt.ndims()):
     tt_cores.append(tf.expand_dims(tt.tt_cores[core_idx], 0))
   return TensorTrainBatch(tt_cores, tt.get_raw_shape(), tt.get_tt_ranks(),
-                         batch_size=1)
+                          batch_size=1)
2 changes: 1 addition & 1 deletion t3f/tensor_train.py
@@ -92,7 +92,7 @@ def __str__(self):
                                                              tt_ranks)
     else:
       return "A Tensor Train%s of shape %s, TT-ranks: %s" % (variable_str,
-                                                            shape, tt_ranks)
+                                                             shape, tt_ranks)

   def __getitem__(self, slice_spec):
     """Basic indexing, returns a `TensorTrain` containing the specified region.
3 changes: 1 addition & 2 deletions t3f/tensor_train_base.py
@@ -130,10 +130,9 @@ def eval(self, feed_dict=None, session=None):
       session: (Optional.) The `Session` to be used to evaluate this sparse
         tensor. If none, the default session will be used.
     """
-    # TODO: implement feed_dict
     if session is None:
       session = tf.get_default_session()
-    session.run(self.tt_cores)
+    session.run(self.tt_cores, feed_dict=feed_dict)

   # TODO: do we need this?
   # @staticmethod
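
This is the one behavioral fix in the PR: eval() previously dropped its feed_dict argument, and now forwards it to session.run. A minimal sketch of the session.run/feed_dict mechanics it relies on (TF 1.x API, matching this repo; toy tensors, not repo code):

    import tensorflow as tf

    x = tf.placeholder(tf.float32, shape=())
    y = 2.0 * x

    with tf.Session() as sess:
      # eval() now forwards feed_dict to session.run, as this call does.
      print(sess.run(y, feed_dict={x: 3.0}))  # 6.0
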
5 changes: 2 additions & 3 deletions t3f/variables.py
@@ -124,6 +124,5 @@ def assign(ref, value, validate_shape=None, use_locking=None, name=None):
                            value.get_tt_ranks(), value.batch_size,
                            convert_to_tensors=False)
   else:
-    return TensorTrain(new_cores, value.get_raw_shape(),
-                       value.get_tt_ranks(),
-                       convert_to_tensors=False)
+    return TensorTrain(new_cores, value.get_raw_shape(), value.get_tt_ranks(),
+                       convert_to_tensors=False)
3 changes: 2 additions & 1 deletion t3f/variables_test.py
@@ -5,6 +5,7 @@
 from t3f import ops
 from t3f import initializers

+
 class VariablesTest(tf.test.TestCase):

   def testGetExistingVariable(self):
@@ -74,7 +75,7 @@ def testAssign(self):
     self.assertAllClose(assigner_value, after_value)
     # Assert that the value actually changed:
     abs_diff = np.linalg.norm((init_value - after_value).flatten())
-    rel_diff = abs_diff / np.linalg.norm((init_value).flatten())
+    rel_diff = abs_diff / np.linalg.norm(init_value.flatten())
     self.assertGreater(rel_diff, 0.2)