Skip to content

Commit

Permalink
Added help texts and fixed some typos
Browse files Browse the repository at this point in the history
  • Loading branch information
Genarito committed Oct 4, 2024
1 parent 9349e91 commit 5460aae
Show file tree
Hide file tree
Showing 4 changed files with 48 additions and 8 deletions.
1 change: 1 addition & 0 deletions src/biomarkers/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ class Biomarker(models.Model):
cnas: QuerySet['CNAIdentifier']
mirnas: QuerySet['MiRNAIdentifier']
mrnas: QuerySet['MRNAIdentifier']

name: str = models.CharField(max_length=300)
description: Optional[str] = models.TextField(null=True, blank=True)
tag: Optional[Tag] = models.ForeignKey(Tag, on_delete=models.SET_NULL, default=None, blank=True, null=True)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# Generated by Django 4.2.15 on 2024-10-03 21:02

from django.db import migrations, models


class Migration(migrations.Migration):
    """Update help_text metadata on several FSExperiment fields.

    Every operation is an AlterField that only changes help_text, so this
    migration performs no actual database schema change.
    """

    dependencies = [
        ('feature_selection', '0054_alter_trainedmodel_state'),
    ]

    # (field name, replacement field definition) pairs for FSExperiment.
    operations = [
        migrations.AlterField(model_name='fsexperiment', name=updated_name, field=updated_field)
        for updated_name, updated_field in [
            (
                'app_name',
                models.CharField(blank=True, help_text='Spark app name to get the results', max_length=100,
                                 null=True),
            ),
            (
                'attempt',
                models.PositiveSmallIntegerField(
                    default=0, help_text='Number of attempts to prevent a buggy experiment running forever'),
            ),
            (
                'emr_job_id',
                models.CharField(blank=True, help_text='Job ID in the Spark cluster', max_length=100, null=True),
            ),
            (
                'execution_time',
                models.PositiveIntegerField(default=0, help_text='Execution time in seconds'),
            ),
            (
                'task_id',
                models.CharField(blank=True, help_text='Celery Task ID', max_length=100, null=True),
            ),
        ]
    ]
15 changes: 8 additions & 7 deletions src/feature_selection/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ class ClusteringScoringMethod(models.IntegerChoices):


class SVMKernel(models.IntegerChoices):
"""SVM's kernel """
"""SVM kernel """
LINEAR = 1
POLYNOMIAL = 2
RBF = 3
Expand Down Expand Up @@ -102,10 +102,11 @@ class FSExperiment(models.Model):
rf_times_records: QuerySet['RFTimesRecord']
svm_times_records: QuerySet['SVMTimesRecord']
best_model: 'TrainedModel'

origin_biomarker = models.ForeignKey('biomarkers.Biomarker', on_delete=models.CASCADE,
related_name='fs_experiments_as_origin')
algorithm = models.IntegerField(choices=FeatureSelectionAlgorithm.choices)
execution_time = models.PositiveIntegerField(default=0) # Execution time in seconds
execution_time = models.PositiveIntegerField(default=0, help_text='Execution time in seconds')
created_biomarker = models.OneToOneField('biomarkers.Biomarker', on_delete=models.SET_NULL, null=True, blank=True,
related_name='fs_experiment')
user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
Expand All @@ -122,14 +123,14 @@ class FSExperiment(models.Model):
methylation_source = models.ForeignKey('api_service.ExperimentSource', on_delete=models.CASCADE, null=True,
blank=True, related_name='fs_experiments_as_methylation')

task_id = models.CharField(max_length=100, blank=True, null=True) # Celery Task ID
task_id = models.CharField(max_length=100, blank=True, null=True, help_text='Celery Task ID')

# Number of attempts to prevent a buggy experiment running forever
attempt = models.PositiveSmallIntegerField(default=0)
attempt = models.PositiveSmallIntegerField(default=0, help_text='Number of attempts to prevent a buggy experiment '
'running forever')

# AWS-EMR fields
app_name = models.CharField(max_length=100, null=True, blank=True) # Spark app name to get the results
emr_job_id = models.CharField(max_length=100, null=True, blank=True) # Job ID in the Spark cluster
app_name = models.CharField(max_length=100, null=True, blank=True, help_text='Spark app name to get the results')
emr_job_id = models.CharField(max_length=100, null=True, blank=True, help_text='Job ID in the Spark cluster')

def get_all_sources(self):
"""Returns a list with all the sources."""
Expand Down
2 changes: 1 addition & 1 deletion src/statistical_properties/survival_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def generate_survival_groups_by_median_expression(
def compute_c_index_and_log_likelihood(df: pd.DataFrame) -> Tuple[float, float]:
"""
Computes the C-Index and the partial Log-Likelihood from a DataFrame.
@param df: Pandas DataFrame. IMPORTANT: has to have 3 colunms: 'E' (event), 'T' (time), and 'group' (group in which
@param df: Pandas DataFrame. IMPORTANT: has to have 3 columns: 'E' (event), 'T' (time), and 'group' (group in which
the sample is).
@return: A tuple with the C-Index and the partial Log-Likelihood.
"""
Expand Down

0 comments on commit 5460aae

Please sign in to comment.