ENH: ChoiceModel.trainable_weights as @property (#116)
* FIX: ArrayStorage when directly instantiated from ndarray

* ENH: better handling when features / availabilities are indexed with FeaturesStorage

* ADD: ArrayStorage indexer & catching KeyErrors for custom messaging

* ENH: ArrayStorage follows common structure with Indexers

* ENH: harmonize Error raised with missing key

* ADD: KeyError test for all Storages

* ENH: FeaturesStorage IDs checked automatically

* ENH: Way faster FeaturesStorage instantiation in some edge cases

* ENH: trainable_weights as abstractmethod & property

* FIX: subset ChoiceDataset with availabilities as FbID

* FIX: FeaturesStorage in the middle of 'normal' features

* FIX: FeaturesStorage in the middle of 'normal features'

* ADD: check for ArrayStorage that Features have more dimensions than key
VincentAuriau authored Jul 25, 2024
1 parent eb8a66c commit efff23c
Showing 17 changed files with 106 additions and 55 deletions.
2 changes: 1 addition & 1 deletion .github/actions/publish/action.yaml
@@ -100,7 +100,7 @@ runs:
vname="${{ github.event.release.tag_name }}"
vname=${vname:1}
echo $vname
- sed -i -r 's/__version__ *= *".*"/__version__ = "'"$vname"'" /g' ${{ inputs.PACKAGE_DIRECTORY }}__init__.py
+ sed -i -r 's/__version__ *= *".*"/__version__ = "'"$vname"'"/g' ${{ inputs.PACKAGE_DIRECTORY }}__init__.py
sed -i '0,/version =.*/s//version = "'"$vname"'"/' ./pyproject.toml
fi
shell: bash
2 changes: 1 addition & 1 deletion .github/workflows/release_pypi.yaml
@@ -41,7 +41,7 @@ jobs:
PACKAGE_DIRECTORY: "./choice_learn/"
PYTHON_VERSION: "3.9"
PUBLISH_REGISTRY_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
- PUBLISH_REGISTRY_USER: ${{ secrets.PYPI_USERNAME }}
+ PUBLISH_REGISTRY_USERNAME: ${{ secrets.PYPI_USERNAME }}
UPDATE_CODE_VERSION: false
BRANCH: ${{ steps.install.outputs.BRANCH }}

5 changes: 5 additions & 0 deletions choice_learn/data/storage.py
@@ -260,6 +260,11 @@ def __init__(self, values=None, values_names=None, name=None):
values = np.array(values)
elif not isinstance(values, np.ndarray):
raise ValueError("ArrayStorage Values must be a list or a numpy array")

+ # Checking that FeaturesStorage increases dimensions
+ # key -> features of ndim >= 1
+ if not values.ndim > 1:
+     raise ValueError("ArrayStorage Values must be a list or a numpy array of ndim >= 1")
# self.storage = storage
self.values_names = values_names
self.name = name
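A minimal sketch of what the new check accepts and rejects (assuming ArrayStorage is importable from choice_learn.data.storage, the module changed above): the values array must keep at least one feature dimension beyond the key axis, so ndim must be 2 or more.

    import numpy as np

    from choice_learn.data.storage import ArrayStorage

    # ndim == 2: one row per key, features along the second axis -> accepted
    storage = ArrayStorage(values=np.array([[1.0, 2.0], [3.0, 4.0]]))

    # ndim == 1: a flat vector no longer passes the new check
    try:
        ArrayStorage(values=np.array([1.0, 2.0]))
    except ValueError as error:
        print(error)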
13 changes: 13 additions & 0 deletions choice_learn/models/base_model.py
@@ -125,6 +125,19 @@ def __init__(
self.regularization_strength = 0.0
self.regularization = None

+ @property
+ def trainable_weights(self):
+     """Trainable weights must be specified in child classes.
+
+     Basically it determines which weights need to be optimized during training.
+     MUST be a list.
+     """
+     raise NotImplementedError(
+         """trainable_weights must be specified in child classes
+         when you inherit from ChoiceModel.
+         See the custom models documentation for more details and examples."""
+     )

@abstractmethod
def compute_batch_utility(
self,
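A minimal sketch of the contract this property introduces, with a hypothetical MyModel subclass (compute_batch_utility is stubbed out): keep the variables in a private attribute and expose them through the read-only property.

    import tensorflow as tf

    from choice_learn.models.base_model import ChoiceModel


    class MyModel(ChoiceModel):
        """Hypothetical subclass illustrating the trainable_weights contract."""

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            # Keep the tf.Variables in a private attribute...
            self._trainable_weights = [tf.Variable(tf.random.normal((1, 4)), name="beta")]

        @property
        def trainable_weights(self):
            """...and expose them as a list through the read-only property."""
            return self._trainable_weights

        def compute_batch_utility(self, *args, **kwargs):
            """Stubbed out; a real model derives utilities from the batch features."""
            raise NotImplementedError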
18 changes: 9 additions & 9 deletions choice_learn/models/baseline_models.py
@@ -15,7 +15,7 @@ def __init__(self, **kwargs):

@property
def trainable_weights(self):
"""Return an empty list."""
"""Return an empty list - there is no trainable weight."""
return []

def compute_batch_utility(
@@ -75,13 +75,13 @@ class DistribMimickingModel(ChoiceModel):
def __init__(self, **kwargs):
"""Initialize of the model."""
super().__init__(**kwargs)
- self.weights = []
+ self._trainable_weights = []
self.is_fitted = False

@property
def trainable_weights(self):
"""Return the weights."""
return self.weights
"""Trainable weights of the model."""
return [self._trainable_weights]

def fit(self, choice_dataset, *args, **kwargs):
"""Compute the choice frequency of each product and defines it as choice probabilities.
@@ -95,8 +95,8 @@
_ = args
choices = choice_dataset.choices
for i in range(choice_dataset.get_n_items()):
- self.weights.append(tf.reduce_sum(tf.cast(choices == i, tf.float32)))
- self.weights = tf.stack(self.weights) / len(choices)
+ self._trainable_weights.append(tf.reduce_sum(tf.cast(choices == i, tf.float32)))
+ self._trainable_weights = tf.stack(self._trainable_weights) / len(choices)
self.is_fitted = True

def _fit_with_lbfgs(self, choice_dataset, *args, **kwargs):
@@ -111,8 +111,8 @@
_ = args
choices = choice_dataset.choices
for i in range(choice_dataset.get_n_items()):
- self.weights.append(tf.reduce_sum(tf.cast(choices == i, tf.float32)))
- self.weights = tf.stack(self.weights) / len(choices)
+ self._trainable_weights.append(tf.reduce_sum(tf.cast(choices == i, tf.float32)))
+ self._trainable_weights = tf.stack(self._trainable_weights) / len(choices)
self.is_fitted = True

def compute_batch_utility(
@@ -153,4 +153,4 @@ def compute_batch_utility(
_ = items_features_by_choice, shared_features_by_choice, available_items_by_choice
if not self.is_fitted:
raise ValueError("Model not fitted")
- return np.stack([np.log(self.trainable_weights.numpy())] * len(choices), axis=0)
+ return tf.stack([tf.math.log(self.trainable_weights[0])] * len(choices), axis=0)
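After fit(), the stored frequencies are the model's only "weights", and the batch utilities are their logarithm. A short usage sketch (choice_dataset is an assumed, already-built ChoiceDataset):

    import tensorflow as tf

    from choice_learn.models.baseline_models import DistribMimickingModel

    model = DistribMimickingModel()
    model.fit(choice_dataset)  # choice_dataset: a hypothetical ChoiceDataset

    frequencies = model.trainable_weights[0]  # tf.Tensor of shape (n_items,)
    log_utilities = tf.math.log(frequencies)  # the utility row repeated for every choice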
13 changes: 9 additions & 4 deletions choice_learn/models/conditional_logit.py
@@ -316,7 +316,7 @@ def instantiate(self, choice_dataset):
if not self.instantiated:
if not isinstance(self.coefficients, MNLCoefficients):
self._build_coefficients_from_dict(n_items=choice_dataset.get_n_items())
- self.trainable_weights = self._instantiate_tf_weights()
+ self._trainable_weights = self._instantiate_tf_weights()

# Checking that no weight has been attributed to non existing feature in dataset
dataset_stacked_features_names = []
@@ -360,10 +360,15 @@ def _instantiate_tf_weights(self):
weights.append(weight)
self.coefficients._add_tf_weight(weight_name, weight_nb)

- self.trainable_weights = weights
+ self._trainable_weights = weights

return weights

+ @property
+ def trainable_weights(self):
+     """Trainable weights of the model."""
+     return self._trainable_weights

def _build_coefficients_from_dict(self, n_items):
"""Build coefficients when they are given as a dictionnay.
@@ -699,7 +704,7 @@ def get_weights_std(self, choice_dataset):
for _w in self.trainable_weights:
mw.append(w[:, index : index + _w.shape[1]])
index += _w.shape[1]
- model.trainable_weights = mw
+ model._trainable_weights = mw
batch = next(choice_dataset.iter_batch(batch_size=-1))
utilities = model.compute_batch_utility(*batch)
probabilities = tf.nn.softmax(utilities, axis=-1)
@@ -732,7 +737,7 @@ def clone(self):
if hasattr(self, "report"):
clone.report = self.report
if hasattr(self, "trainable_weights"):
- clone.trainable_weights = self.trainable_weights
+ clone._trainable_weights = self.trainable_weights
if hasattr(self, "lr"):
clone.lr = self.lr
if hasattr(self, "_shared_features_by_choice_names"):
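get_weights_std now writes the re-sliced blocks through model._trainable_weights, since the public attribute became a read-only property. The slicing itself, reproduced as a standalone sketch with toy shapes:

    import tensorflow as tf

    # Toy stand-ins for self.trainable_weights: two coefficient blocks of widths 2 and 3.
    weights = [tf.zeros((1, 2)), tf.zeros((1, 3))]

    # A flat (1, 5) vector, e.g. coming back from an optimizer, is cut into blocks.
    w = tf.constant([[0.1, 0.2, 0.3, 0.4, 0.5]])
    mw, index = [], 0
    for _w in weights:
        mw.append(w[:, index: index + _w.shape[1]])
        index += _w.shape[1]
    # mw[0].shape == (1, 2) and mw[1].shape == (1, 3)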
9 changes: 7 additions & 2 deletions choice_learn/models/nested_logit.py
@@ -251,7 +251,7 @@ def instantiate(self, choice_dataset):
if not self.instantiated:
if not isinstance(self.coefficients, MNLCoefficients):
self._build_coefficients_from_dict(n_items=choice_dataset.get_n_items())
- self.trainable_weights = self._instantiate_tf_weights()
+ self._trainable_weights = self._instantiate_tf_weights()

# Checking that no weight has been attributed to non existing feature in dataset
dataset_stacked_features_names = []
@@ -312,10 +312,15 @@ def _instantiate_tf_weights(self):
)
)

- self.trainable_weights = weights
+ self._trainable_weights = weights

return weights

+ @property
+ def trainable_weights(self):
+     """Trainable weights of the model."""
+     return self._trainable_weights

def _build_coefficients_from_dict(self, n_items):
"""Build coefficients when they are given as a dictionnay.
22 changes: 14 additions & 8 deletions choice_learn/models/rumnet.py
@@ -591,15 +591,18 @@ def instantiate(self):
l2_regularization_coeff=self.l2_regularization_coef,
)

- # Storing weights for back-propagation
- self.trainable_weights = self.x_model.weights + self.z_model.weights + self.u_model.weights
self.loss = tf_ops.CustomCategoricalCrossEntropy(
from_logits=False,
label_smoothing=self.label_smoothing,
epsilon=self.logmin,
)
self.instantiated = True

+ @property
+ def trainable_weights(self):
+     """Trainable weights of the model."""
+     return self.x_model.weights + self.z_model.weights + self.u_model.weights

def compute_batch_utility(
self,
shared_features_by_choice,
@@ -978,18 +981,21 @@ def instantiate(self):
width=self.width_u, depth=self.depth_u, add_last=True
)

- # Storing weights for back-propagation
- self.trainable_weights = (
-     self.x_model.trainable_variables
-     + self.z_model.trainable_variables
-     + self.u_model.trainable_variables
- )
self.loss = tf_ops.CustomCategoricalCrossEntropy(
from_logits=False, label_smoothing=self.label_smoothing
)
self.time_dict = {}
self.instantiated = True

+ @property
+ def trainable_weights(self):
+     """Trainable weights of the model."""
+     return (
+         self.x_model.trainable_variables
+         + self.z_model.trainable_variables
+         + self.u_model.trainable_variables
+     )

def compute_batch_utility(
self,
shared_features_by_choice,
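Recomputing the list on access (instead of storing it at instantiation) keeps the property in sync with the submodels, even if their variables are created later. A sketch of the aggregation pattern with hypothetical toy Keras submodels:

    import tensorflow as tf


    class ToyPanel:
        """Hypothetical holder aggregating submodel variables on the fly."""

        def __init__(self):
            self.x_model = tf.keras.layers.Dense(4)
            self.z_model = tf.keras.layers.Dense(4)
            self.x_model.build((None, 3))
            self.z_model.build((None, 3))

        @property
        def trainable_weights(self):
            # Recomputed at every access: always reflects the current submodels.
            return self.x_model.trainable_variables + self.z_model.trainable_variables


    panel = ToyPanel()
    print(len(panel.trainable_weights))  # 4 variables: two kernels + two biases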
15 changes: 10 additions & 5 deletions choice_learn/models/simple_mnl.py
@@ -99,9 +99,14 @@ def instantiate(self, n_items, n_shared_features, n_items_features):

self.instantiated = True
self.indexes = indexes
- self.trainable_weights = weights
+ self._trainable_weights = weights
return indexes, weights

+ @property
+ def trainable_weights(self):
+     """Trainable weights of the model."""
+     return self._trainable_weights

def compute_batch_utility(
self,
shared_features_by_choice,
@@ -188,7 +193,7 @@ def fit(self, choice_dataset, get_report=False, **kwargs):
"""
if not self.instantiated:
# Lazy Instantiation
- self.indexes, self.trainable_weights = self.instantiate(
+ self.indexes, self._trainable_weights = self.instantiate(
n_items=choice_dataset.get_n_items(),
n_shared_features=choice_dataset.get_n_shared_features(),
n_items_features=choice_dataset.get_n_items_features(),
@@ -220,7 +225,7 @@ def _fit_with_lbfgs(self, choice_dataset, sample_weight=None, get_report=False,
"""
if not self.instantiated:
# Lazy Instantiation
- self.indexes, self.trainable_weights = self.instantiate(
+ self.indexes, self._trainable_weights = self.instantiate(
n_items=choice_dataset.get_n_items(),
n_shared_features=choice_dataset.get_n_shared_features(),
n_items_features=choice_dataset.get_n_items_features(),
@@ -304,7 +309,7 @@ def get_weights_std(self, choice_dataset):
for _w in self.trainable_weights:
mw.append(w[index : index + _w.shape[0]])
index += _w.shape[0]
- model.trainable_weights = mw
+ model._trainable_weights = mw
for batch in choice_dataset.iter_batch(batch_size=-1):
utilities = model.compute_batch_utility(*batch)
probabilities = tf.nn.softmax(utilities, axis=-1)
@@ -336,7 +341,7 @@ def clone(self):
if hasattr(self, "report"):
clone.report = self.report
if hasattr(self, "trainable_weights"):
- clone.trainable_weights = self.trainable_weights
+ clone._trainable_weights = self.trainable_weights
if hasattr(self, "indexes"):
clone.indexes = self.indexes
if hasattr(self, "intercept"):
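As in the clone() change above, copies must now go through the private attribute because the property defines no setter; a minimal sketch (assuming source is an already-instantiated SimpleMNL):

    from choice_learn.models.simple_mnl import SimpleMNL

    source = SimpleMNL()  # assume: instantiated / fitted elsewhere
    clone = SimpleMNL()
    # clone.trainable_weights = ... would raise AttributeError: the property has no setter.
    clone._trainable_weights = source.trainable_weights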
6 changes: 4 additions & 2 deletions docs/index.md
@@ -128,8 +128,10 @@ The use of this software is under the MIT license, with no limitation of usage,
Choice-Learn has been developed through a collaboration between researchers at the Artefact Research Center and the laboratory MICS from CentraleSupélec, Université Paris Saclay.

<figure markdown>
- ![Elements](https://raw.githubusercontent.com/artefactory/choice-learn/main/docs/illustrations/logos/logo_arc.png){: style="height:83px"}
- ![Dandi](https://raw.githubusercontent.com/artefactory/choice-learn/main/docs/illustrations/logos/logo_atf.png){: style="height:83px"}
+
+ ![Elements](https://raw.githubusercontent.com/artefactory/choice-learn/main/docs/illustrations/logos/logo_arc.png){: style="height:60px"}
+
+ ![Dandi](https://raw.githubusercontent.com/artefactory/choice-learn/main/docs/illustrations/logos/logo_atf.png){: style="height:60px"}
</figure>

<p align="center">
4 changes: 2 additions & 2 deletions mkdocs.yaml
@@ -66,7 +66,7 @@ nav:
- Introduction to data handling: notebooks/introduction/2_data_handling.md
- Exhaustive example of ChoiceDataset creation: notebooks/data/dataset_creation.md
- Optimize RAM usage with Features Storage, in-depth examples: notebooks/data/features_byID_examples.md
- - Modelling:
+ - Modeling:
- Introduction to Choice Models - the SimpleMNL: notebooks/models/simple_mnl.md
- Conditional Logit Usage: notebooks/introduction/3_model_clogit.md
- Nested Logit Usage: notebooks/models/nested_logit.md
@@ -96,5 +96,5 @@ nav:
- Latent Class BaseModel: reference/models/references_latent_class_base_model.md
- Latent Class MNL: references/models/references_latent_class_mnl.md
- Toolbox:
- - Assortment Optimizer: references/toolbox/references_assortment_optimizer.md
+ - Assortment Optimizer and Pricing: references/toolbox/references_assortment_optimizer.md
- explanations.md
8 changes: 4 additions & 4 deletions notebooks/introduction/3_model_clogit.ipynb
@@ -1128,7 +1128,7 @@
"gt_model.instantiate(canada_dataset)\n",
"canada_dataset\n",
"# Here we estimate the negative log-likelihood with these coefficients (also, we obtain same value as in those papers):\n",
"gt_model.trainable_weights = gt_weights\n",
"gt_model._trainable_weights = gt_weights\n",
"print(\"'Ground Truth' Negative LogLikelihood:\", gt_model.evaluate(canada_dataset) * len(canada_dataset))"
]
},
@@ -1211,9 +1211,9 @@
"name": "stderr",
"output_type": "stream",
"text": [
"Epoch 1999 Train Loss 0.6801: 100%|██████████| 2000/2000 [00:13<00:00, 148.87it/s]\n",
"Epoch 3999 Train Loss 0.6776: 100%|██████████| 4000/4000 [00:24<00:00, 160.24it/s]\n",
"Epoch 19999 Train Loss 0.6767: 100%|██████████| 20000/20000 [01:59<00:00, 166.89it/s]\n"
"Epoch 1999 Train Loss 0.6801: 100%|██████████| 2000/2000 [00:11<00:00, 176.83it/s]\n",
"Epoch 3999 Train Loss 0.6776: 100%|██████████| 4000/4000 [00:20<00:00, 198.77it/s]\n",
"Epoch 19999 Train Loss 0.6767: 100%|██████████| 20000/20000 [01:38<00:00, 202.55it/s]\n"
]
}
],
37 changes: 24 additions & 13 deletions notebooks/introduction/4_model_customization.ipynb
@@ -220,19 +220,30 @@
" batch_size=batch_size)\n",
"\n",
" # Create model weights. Basically is one weight by feature + one for intercept\n",
" beta_inter = tf.Variable(tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(1, 3)),\n",
" self.beta_inter = tf.Variable(tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(1, 3)),\n",
" name=\"beta_inter\")\n",
" beta_freq_cost_ovt = tf.Variable(\n",
" self.beta_freq_cost_ovt = tf.Variable(\n",
" tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(1, 3)),\n",
" name=\"beta_freq_cost_ovt\"\n",
" )\n",
" beta_income = tf.Variable(tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(1, 3)),\n",
" self.beta_income = tf.Variable(tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(1, 3)),\n",
" name=\"beta_income\")\n",
" beta_ivt = tf.Variable(tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(1, 4)),\n",
" self.beta_ivt = tf.Variable(tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(1, 4)),\n",
" name=\"beta_ivt\")\n",
"\n",
" # Do not forget to add them to the list of trainable_weights, it is mandatory !\n",
" self.trainable_weights = [beta_inter, beta_freq_cost_ovt, beta_income, beta_ivt]\n",
" # Do not forget to add them to the list of trainable_weights, it is mandatory !\n",
" @property\n",
" def trainable_weights(self):\n",
" \"\"\"Do not forget to add the weights to the list of trainable_weights.\n",
" \n",
" It is needed to use the @property definition as here.\n",
"\n",
" Return:\n",
" -------\n",
" list:\n",
" list of tf.Variable to be optimized\n",
" \"\"\"\n",
" return [self.beta_inter, self.beta_freq_cost_ovt, self.beta_income, self.beta_ivt]\n",
"\n",
"\n",
" def compute_batch_utility(self,\n",
@@ -266,15 +277,15 @@
" _ = (available_items_by_choice, choices) # Avoid unused variable warning\n",
"\n",
" # Adding the 0 value intercept of first item to get the right shape\n",
" full_beta_inter = tf.concat([tf.constant([[.0]]), self.trainable_weights[0]], axis=-1)\n",
" full_beta_inter = tf.concat([tf.constant([[.0]]), self.beta_inter], axis=-1)\n",
" # Concatenation to reach right shape for dot product\n",
" full_beta_income = tf.concat([tf.constant([[.0]]), self.trainable_weights[2]], axis=-1) # shape = (1, n_items)\n",
" full_beta_income = tf.concat([tf.constant([[.0]]), self.beta_income], axis=-1) # shape = (1, n_items)\n",
"\n",
" items_ivt_by_choice = items_features_by_choice[:, :, 3] # shape = (n_choices, n_items, )\n",
" items_cost_freq_ovt_by_choice = items_features_by_choice[:, :, :3 ]# shape = (n_choices, n_items, 3)\n",
" u_cost_freq_ovt = tf.squeeze(tf.tensordot(items_cost_freq_ovt_by_choice,\n",
" tf.transpose(self.trainable_weights[1]), axes=1)) # shape = (n_choices, n_items)\n",
" u_ivt = tf.multiply(items_ivt_by_choice, self.trainable_weights[3]) # shape = (n_choices, n_items)\n",
" tf.transpose(self.beta_freq_cost_ovt), axes=1)) # shape = (n_choices, n_items)\n",
" u_ivt = tf.multiply(items_ivt_by_choice, self.beta_ivt) # shape = (n_choices, n_items)\n",
"\n",
" u_income = tf.tensordot(shared_features_by_choice, full_beta_income, axes=1) # shape = (n_choices, n_items)\n",
"\n",
@@ -502,9 +513,9 @@
" list\n",
" list of trainable_weights\n",
" \"\"\"\n",
" return model.dense_items_features.trainable_variables\\\n",
" + model.dense_shared_features.trainable_variables\\\n",
" + model.final_layer.trainable_variables\n",
" return self.dense_items_features.trainable_variables\\\n",
" + self.dense_shared_features.trainable_variables\\\n",
" + self.final_layer.trainable_variables\n",
"\n",
" def compute_batch_utility(self,\n",
" shared_features_by_choice,\n",
2 changes: 1 addition & 1 deletion notebooks/models/custom_modelling.md
@@ -1 +1 @@
- Custom modelling with Choice-Learn is part of the introductory tutorial and is detailed [here](../introduction/3_model_clogit.ipynb).
+ Custom modelling with Choice-Learn is part of the introductory tutorial and is detailed [here](../introduction/4_model_customization.ipynb).

3 comments on commit efff23c

@github-actions (Contributor)

Coverage Report for Python 3.9
File | Stmts | Miss | Cover | Missing
choice_learn
   tf_ops.py | 47 | 17 | 64% | 45, 141–143, 199–203, 220–231
choice_learn/data
   choice_dataset.py | 628 | 179 | 71% | 67, 78, 101–112, 123, 145–153, 159, 168–177, 190–203, 221–288, 306–325, 333–339, 341, 353, 394–401, 408, 420, 450–451, 472, 482–483, 491, 519, 552, 559–568, 576–578, 585, 592, 633–634, 713, 727–733, 756–759, 829, 831, 867, 921, 926, 937–938, 969–970, 974–981, 986–997, 1004, 1008, 1102, 1116, 1135–1137, 1150–1152, 1180, 1185, 1194, 1206–1211, 1252, 1264, 1278, 1293, 1305, 1317, 1332, 1337, 1360, 1362, 1366, 1374–1375, 1379, 1387–1388, 1394–1398, 1403–1407, 1411, 1414–1415, 1462, 1479–1480, 1495, 1510
   indexer.py | 233 | 70 | 70% | 20, 31, 45, 60–67, 99, 103, 105–108, 179, 184–187, 203–205, 220–231, 261, 266, 288, 292, 336, 393, 409, 423, 492–493, 517, 524, 532, 545–567, 570–591, 601, 613
   storage.py | 159 | 23 | 86% | 22, 33, 51, 56, 61, 71, 151, 153, 160, 162, 166, 173, 176, 235–236, 262, 267, 295, 365, 371, 389, 419, 429
   store.py | 72 | 72 | 0% | 2–274
choice_learn/datasets
   base.py | 343 | 202 | 41% | 38–39, 61–68, 90–96, 118, 185, 191–220, 225–294, 309–358, 373–405, 414–481, 566–578, 581–584, 587, 594–605, 609–622, 631–657, 665, 671, 738, 825, 870, 919, 1004
   expedia.py | 102 | 92 | 10% | 26–299
   tafeng.py | 49 | 1 | 98% | 50
choice_learn/models
   __init__.py | 12 | 2 | 83% | 12–13
   base_model.py | 243 | 106 | 56% | 92, 97–98, 105–123, 135, 177, 213–241, 274, 292–333, 340, 369, 383, 388, 396–424, 429–430, 475–508, 518–526, 544–559, 625–626, 628–633, 681–683, 705–727, 782, 784–787
   conditional_logit.py | 235 | 137 | 42% | 45, 48, 50, 81, 84, 87–91, 94–98, 132, 135, 175–178, 297, 334, 391, 438–576, 593–598, 625–634, 650–671, 696–720, 724–747
   latent_class_base_model.py | 186 | 186 | 0% | 3–609
   latent_class_mnl.py | 59 | 59 | 0% | 3–299
   nested_logit.py | 291 | 263 | 10% | 54–82, 124–162, 188, 220, 229–236, 251–274, 284–317, 322, 332–353, 363–364, 398–547, 586–628, 664–707, 724–729, 756–765, 781–802, 827–874, 883–909
   rumnet.py | 245 | 160 | 35% | 171–175, 186–218, 235–243, 268–272, 285–317, 334–341, 364–368, 381–417, 432–442, 532, 534, 561–567, 604, 639–683, 722–766, 807–834, 888–940, 970–988, 993, 1032–1086, 1125–1169, 1207–1234
   simple_mnl.py | 140 | 92 | 34% | 75–81, 92–98, 139–177, 204, 238, 255–276, 301–325, 329–357
   tastenet.py | 94 | 56 | 40% | 15, 64, 90–104, 140–142, 177–207, 223, 238–254, 273–289
choice_learn/toolbox
   assortment_optimizer.py | 27 | 27 | 0% | 4–180
   gurobi_opt.py | 236 | 236 | 0% | 2–674
   or_tools_opt.py | 226 | 226 | 0% | 3–666
TOTAL | 3685 | 2206 | 40% |

Tests: 59 | Skipped: 0 💤 | Failures: 0 ❌ | Errors: 0 🔥 | Time: 49.607s ⏱️

@github-actions (Contributor)

Coverage Report for Python 3.11
File | Stmts | Miss | Cover | Missing
choice_learn
   tf_ops.py | 47 | 17 | 64% | 45, 141–143, 199–203, 220–231
choice_learn/data
   choice_dataset.py | 628 | 179 | 71% | 67, 78, 101–112, 123, 145–153, 159, 168–177, 190–203, 221–288, 306–325, 333–339, 341, 353, 394–401, 408, 420, 450–451, 472, 482–483, 491, 519, 552, 559–568, 576–578, 585, 592, 633–634, 713, 727–733, 756–759, 829, 831, 867, 921, 926, 937–938, 969–970, 974–981, 986–997, 1004, 1008, 1102, 1116, 1135–1137, 1150–1152, 1180, 1185, 1194, 1206–1211, 1252, 1264, 1278, 1293, 1305, 1317, 1332, 1337, 1360, 1362, 1366, 1374–1375, 1379, 1387–1388, 1394–1398, 1403–1407, 1411, 1414–1415, 1462, 1479–1480, 1495, 1510
   indexer.py | 233 | 70 | 70% | 20, 31, 45, 60–67, 99, 103, 105–108, 179, 184–187, 203–205, 220–231, 261, 266, 288, 292, 336, 393, 409, 423, 492–493, 517, 524, 532, 545–567, 570–591, 601, 613
   storage.py | 159 | 23 | 86% | 22, 33, 51, 56, 61, 71, 151, 153, 160, 162, 166, 173, 176, 235–236, 262, 267, 295, 365, 371, 389, 419, 429
   store.py | 72 | 72 | 0% | 2–274
choice_learn/datasets
   base.py | 343 | 201 | 41% | 36, 61–68, 90–96, 118, 185, 191–220, 225–294, 309–358, 373–405, 414–481, 566–578, 581–584, 587, 594–605, 609–622, 631–657, 665, 671, 738, 825, 870, 919, 1004
   expedia.py | 102 | 92 | 10% | 26–299
   tafeng.py | 49 | 1 | 98% | 50
choice_learn/models
   __init__.py | 12 | 2 | 83% | 12–13
   base_model.py | 243 | 106 | 56% | 92, 97–98, 105–123, 135, 177, 213–241, 274, 292–333, 340, 369, 383, 388, 396–424, 429–430, 475–508, 518–526, 544–559, 625–626, 628–633, 681–683, 705–727, 782, 784–787
   conditional_logit.py | 235 | 137 | 42% | 45, 48, 50, 81, 84, 87–91, 94–98, 132, 135, 175–178, 297, 334, 391, 438–576, 593–598, 625–634, 650–671, 696–720, 724–747
   latent_class_base_model.py | 186 | 186 | 0% | 3–609
   latent_class_mnl.py | 59 | 59 | 0% | 3–299
   nested_logit.py | 291 | 263 | 10% | 54–82, 124–162, 188, 220, 229–236, 251–274, 284–317, 322, 332–353, 363–364, 398–547, 586–628, 664–707, 724–729, 756–765, 781–802, 827–874, 883–909
   rumnet.py | 245 | 160 | 35% | 171–175, 186–218, 235–243, 268–272, 285–317, 334–341, 364–368, 381–417, 432–442, 532, 534, 561–567, 604, 639–683, 722–766, 807–834, 888–940, 970–988, 993, 1032–1086, 1125–1169, 1207–1234
   simple_mnl.py | 140 | 92 | 34% | 75–81, 92–98, 139–177, 204, 238, 255–276, 301–325, 329–357
   tastenet.py | 94 | 56 | 40% | 15, 64, 90–104, 140–142, 177–207, 223, 238–254, 273–289
choice_learn/toolbox
   assortment_optimizer.py | 27 | 27 | 0% | 4–180
   gurobi_opt.py | 238 | 238 | 0% | 2–674
   or_tools_opt.py | 226 | 226 | 0% | 3–666
TOTAL | 3687 | 2207 | 40% |

Tests: 59 | Skipped: 0 💤 | Failures: 0 ❌ | Errors: 0 🔥 | Time: 51.923s ⏱️

@github-actions (Contributor)

Coverage Report for Python 3.10
File | Stmts | Miss | Cover | Missing
choice_learn
   tf_ops.py | 47 | 17 | 64% | 45, 141–143, 199–203, 220–231
choice_learn/data
   choice_dataset.py | 628 | 179 | 71% | 67, 78, 101–112, 123, 145–153, 159, 168–177, 190–203, 221–288, 306–325, 333–339, 341, 353, 394–401, 408, 420, 450–451, 472, 482–483, 491, 519, 552, 559–568, 576–578, 585, 592, 633–634, 713, 727–733, 756–759, 829, 831, 867, 921, 926, 937–938, 969–970, 974–981, 986–997, 1004, 1008, 1102, 1116, 1135–1137, 1150–1152, 1180, 1185, 1194, 1206–1211, 1252, 1264, 1278, 1293, 1305, 1317, 1332, 1337, 1360, 1362, 1366, 1374–1375, 1379, 1387–1388, 1394–1398, 1403–1407, 1411, 1414–1415, 1462, 1479–1480, 1495, 1510
   indexer.py | 233 | 70 | 70% | 20, 31, 45, 60–67, 99, 103, 105–108, 179, 184–187, 203–205, 220–231, 261, 266, 288, 292, 336, 393, 409, 423, 492–493, 517, 524, 532, 545–567, 570–591, 601, 613
   storage.py | 159 | 23 | 86% | 22, 33, 51, 56, 61, 71, 151, 153, 160, 162, 166, 173, 176, 235–236, 262, 267, 295, 365, 371, 389, 419, 429
   store.py | 72 | 72 | 0% | 2–274
choice_learn/datasets
   base.py | 343 | 201 | 41% | 36, 61–68, 90–96, 118, 185, 191–220, 225–294, 309–358, 373–405, 414–481, 566–578, 581–584, 587, 594–605, 609–622, 631–657, 665, 671, 738, 825, 870, 919, 1004
   expedia.py | 102 | 92 | 10% | 26–299
   tafeng.py | 49 | 1 | 98% | 50
choice_learn/models
   __init__.py | 12 | 2 | 83% | 12–13
   base_model.py | 243 | 106 | 56% | 92, 97–98, 105–123, 135, 177, 213–241, 274, 292–333, 340, 369, 383, 388, 396–424, 429–430, 475–508, 518–526, 544–559, 625–626, 628–633, 681–683, 705–727, 782, 784–787
   conditional_logit.py | 235 | 137 | 42% | 45, 48, 50, 81, 84, 87–91, 94–98, 132, 135, 175–178, 297, 334, 391, 438–576, 593–598, 625–634, 650–671, 696–720, 724–747
   latent_class_base_model.py | 186 | 186 | 0% | 3–609
   latent_class_mnl.py | 59 | 59 | 0% | 3–299
   nested_logit.py | 291 | 263 | 10% | 54–82, 124–162, 188, 220, 229–236, 251–274, 284–317, 322, 332–353, 363–364, 398–547, 586–628, 664–707, 724–729, 756–765, 781–802, 827–874, 883–909
   rumnet.py | 245 | 160 | 35% | 171–175, 186–218, 235–243, 268–272, 285–317, 334–341, 364–368, 381–417, 432–442, 532, 534, 561–567, 604, 639–683, 722–766, 807–834, 888–940, 970–988, 993, 1032–1086, 1125–1169, 1207–1234
   simple_mnl.py | 140 | 92 | 34% | 75–81, 92–98, 139–177, 204, 238, 255–276, 301–325, 329–357
   tastenet.py | 94 | 56 | 40% | 15, 64, 90–104, 140–142, 177–207, 223, 238–254, 273–289
choice_learn/toolbox
   assortment_optimizer.py | 27 | 27 | 0% | 4–180
   gurobi_opt.py | 238 | 238 | 0% | 2–674
   or_tools_opt.py | 226 | 226 | 0% | 3–666
TOTAL | 3687 | 2207 | 40% |

Tests: 59 | Skipped: 0 💤 | Failures: 0 ❌ | Errors: 0 🔥 | Time: 51.600s ⏱️
