ADD: allow for choice of nb of parallel iterations when using LBFGS (#167)

* ADD: allow for choice of nb of parallel iterations when using LBFGS

* ADD: full cLogit test

* ADD: report computation in tests
VincentAuriau authored Oct 22, 2024
1 parent c746318 commit 10e6ffd
Showing 2 changed files with 33 additions and 4 deletions.
13 changes: 9 additions & 4 deletions choice_learn/models/base_model.py
@@ -22,7 +22,8 @@ def __init__(
         label_smoothing=0.0,
         add_exit_choice=False,
         optimizer="lbfgs",
-        tolerance=1e-8,
+        lbfgs_tolerance=1e-8,
+        lbfgs_parallel_iterations=4,
         callbacks=None,
         lr=0.001,
         epochs=1000,
@@ -45,8 +46,10 @@ def __init__(
             List of callbacks to add to model.fit, by default None and only add History
         optimizer : str, optional
             Name of the tf.keras.optimizers to be used, by default "lbfgs"
-        tolerance : float, optional
+        lbfgs_tolerance : float, optional
             Tolerance for the L-BFGS optimizer if applied, by default 1e-8
+        lbfgs_parallel_iterations : int, optional
+            Number of parallel iterations for the L-BFGS optimizer, by default 4
         lr: float, optional
             Learning rate for the optimizer if applied, by default 0.001
         epochs: int, optional
@@ -99,7 +102,8 @@ def __init__(
         self.epochs = epochs
         self.batch_size = batch_size
-        self.tolerance = tolerance
+        self.lbfgs_tolerance = lbfgs_tolerance
+        self.lbfgs_parallel_iterations = lbfgs_parallel_iterations

         if regularization is not None:
             if np.sum(regularization_strength) <= 0:
@@ -778,9 +782,10 @@ def _fit_with_lbfgs(self, choice_dataset, sample_weight=None, verbose=0):
             value_and_gradients_function=func,
             initial_position=init_params,
             max_iterations=epochs,
-            tolerance=self.tolerance,
+            tolerance=self.lbfgs_tolerance,
             f_absolute_tolerance=-1,
             f_relative_tolerance=-1,
+            parallel_iterations=self.lbfgs_parallel_iterations,
         )

         # after training, the final optimized parameters are still in results.position
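Taken together, the hunks above thread the new keyword from the model constructor into the parallel_iterations argument of tfp.optimizer.lbfgs_minimize, which bounds how many iterations of the underlying while-loop may run concurrently. A minimal usage sketch follows; it is hypothetical: the import paths and dataset/coefficient setup mirror the test below, and only lbfgs_tolerance and lbfgs_parallel_iterations are actually confirmed by this diff.

    # Hypothetical usage sketch, not part of the commit.
    from choice_learn.datasets import load_modecanada
    from choice_learn.models import ConditionalLogit

    coefficients = {
        "income": "item",
        "cost": "constant",
        "freq": "constant",
        "ovt": "constant",
        "ivt": "item-full",
        "intercept": "item",
    }
    canada_dataset = load_modecanada(as_frame=False, preprocessing="tutorial")

    # The new keyword is forwarded to
    # tfp.optimizer.lbfgs_minimize(parallel_iterations=...).
    model = ConditionalLogit(
        coefficients=coefficients,
        optimizer="lbfgs",
        lbfgs_tolerance=1e-8,
        lbfgs_parallel_iterations=8,
    )
    model.fit(canada_dataset)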
24 changes: 24 additions & 0 deletions tests/integration_tests/models/test_conditional_logit.py
@@ -8,6 +8,7 @@

 def test_mode_canada_gt():
     """Tests specific config of cLogit and .evaluate()."""
+    tf.config.run_functions_eagerly(True)
     # Instantiation with the coefficients dictionary
     coefficients = {
         "income": "item",
@@ -36,3 +37,26 @@ def test_mode_canada_gt():
     total_nll = gt_model.evaluate(canada_dataset) * len(canada_dataset)
     assert total_nll <= 1874.4, f"Got NLL: {total_nll}"
     assert total_nll >= 1874.1, f"Got NLL: {total_nll}"
+
+
+def test_mode_canada_fit():
+    """Tests specific config of cLogit and .fit()."""
+    tf.config.run_functions_eagerly(True)
+    # Instantiation with the coefficients dictionary
+    coefficients = {
+        "income": "item",
+        "cost": "constant",
+        "freq": "constant",
+        "ovt": "constant",
+        "ivt": "item-full",
+        "intercept": "item",
+    }
+
+    canada_dataset = load_modecanada(as_frame=False, preprocessing="tutorial")
+
+    model = ConditionalLogit(coefficients=coefficients)
+    model.fit(canada_dataset, get_report=True)
+
+    total_nll = model.evaluate(canada_dataset) * len(canada_dataset)
+    assert total_nll <= 1874.4, f"Got NLL: {total_nll}"
+    assert total_nll >= 1874.1, f"Got NLL: {total_nll}"
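Both tests enable eager execution before fitting, and the new one additionally exercises report computation via get_report=True. It can be run on its own with the standard pytest node-id syntax: pytest tests/integration_tests/models/test_conditional_logit.py::test_mode_canada_fit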
