chore: Convert str.format to f-strings (#138)
sellth authored Feb 5, 2024
1 parent ef8668c commit 4ad091e
Showing 18 changed files with 209 additions and 263 deletions.
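
The rewrite is mostly mechanical, but three recurring cases are worth spelling out before the per-file hunks. A minimal, self-contained sketch of the patterns (the variable names and values are illustrative, not taken from the codebase):

```python
# Sketch of the conversions applied in this commit (illustrative values).
mat_type, mat_name, name, idx = "Sample", "", "S1-sample-A", 3

# 1. Positional str.format -> f-string; conditional expressions move inside
#    the braces, and nested quotes must differ from the outer ones
#    (a restriction lifted only in Python 3.12).
old = "{}:\n{}\n({})".format(mat_type, mat_name if mat_name else "-", name)
new = f"{mat_type}:\n{mat_name if mat_name else '-'}\n({name})"
assert old == new

# 2. Old %-style formatting converts the same way (see models.py below).
assert "Invalid index: %d" % idx == f"Invalid index: {idx}"

# 3. A literal brace in an f-string is written by doubling it.
assert f"(in col {{}})" == "(in col {})"
```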
33 changes: 14 additions & 19 deletions altamisa/apps/isatab2dot.py
@@ -34,18 +34,15 @@ def print_dot(
):
print(indent + "/* materials */", file=outf)
for name, mat in obj.materials.items():
label = json.dumps("{}:\n{}\n({})".format(mat.type, mat.name if mat.name else "-", name))
label = json.dumps(f"{mat.type}:\n{mat.name if mat.name else '-'}\n({name})")
print(
"{}{} [label={},shape={},color={},fontcolor={}]".format(
indent, json.dumps(name), label, mat_shape, mat_color, mat_color
),
f"{indent}{json.dumps(name)} [label={label},shape={mat_shape},color={mat_color},fontcolor={mat_color}]",
file=outf,
)
print(indent + "/* processes */", file=outf)
for name, proc in obj.processes.items():
label = json.dumps(
"{}:\n{}\n{}\n({})".format(
"Process",
"Process:\n{}\n{}\n({})".format(
proc.protocol_ref if proc.protocol_ref else "-",
proc.name if proc.name else "-",
name,
@@ -59,7 +56,7 @@
)
print(indent + "/* arcs */", file=outf)
for arc in obj.arcs:
print("{}{} -> {};".format(indent, json.dumps(arc.tail), json.dumps(arc.head)), file=outf)
print(f"{indent}{json.dumps(arc.tail)} -> {json.dumps(arc.head)};", file=outf)


def run(args: Arguments):
@@ -79,27 +76,25 @@ def run(args: Arguments):

for s, study_info in enumerate(investigation.studies):
if not study_info.info.path:
print(" /* no file for study {} */".format(s + 1), file=output_file)
print(f" /* no file for study {s + 1} */", file=output_file)
continue
with open(os.path.join(path, study_info.info.path), "rt") as inputf:
study = StudyReader.from_stream("S{}".format(s + 1), inputf).read()
print(" /* study {} */".format(study_info.info.path), file=output_file)
print(" subgraph clusterStudy{} {{".format(s), file=output_file)
print(' label = "Study: {}"'.format(study_info.info.path), file=output_file)
study = StudyReader.from_stream(f"S{s + 1}", inputf).read()
print(f" /* study {study_info.info.path} */", file=output_file)
print(f" subgraph clusterStudy{s} {{", file=output_file)
print(f' label = "Study: {study_info.info.path}"', file=output_file)
print_dot(study, output_file)
print(" }", file=output_file)

for a, assay_info in enumerate(study_info.assays):
if not assay_info.path:
print(" /* no file for assay {} */".format(a + 1), file=output_file)
print(f" /* no file for assay {a + 1} */", file=output_file)
continue
with open(os.path.join(path, assay_info.path), "rt") as inputf:
assay = AssayReader.from_stream(
"S{}".format(s + 1), "A{}".format(a + 1), inputf
).read()
print(" /* assay {} */".format(assay_info.path), file=output_file)
print(" subgraph clusterAssayS{}A{} {{".format(s, a), file=output_file)
print(' label = "Assay: {}"'.format(assay_info.path), file=output_file)
assay = AssayReader.from_stream(f"S{s + 1}", f"A{a + 1}", inputf).read()
print(f" /* assay {assay_info.path} */", file=output_file)
print(f" subgraph clusterAssayS{s}A{a} {{", file=output_file)
print(f' label = "Assay: {assay_info.path}"', file=output_file)
print_dot(assay, output_file)
print(" }", file=output_file)

9 changes: 3 additions & 6 deletions altamisa/apps/isatab2isatab.py
@@ -61,8 +61,7 @@ def run_warnings_caught(args: Arguments):
path_in = os.path.realpath(os.path.dirname(args.input_investigation_file))
path_out = os.path.realpath(os.path.dirname(args.output_investigation_file))
if path_in == path_out:
tpl = "Can't output ISA-tab files to same directory as as input: {} == {}"
msg = tpl.format(path_in, path_out)
msg = f"Can't output ISA-tab files to same directory as as input: {path_in} == {path_out}"
raise IsaException(msg)

with ExitStack() as stack:
@@ -98,15 +97,13 @@ def run_reading(
for s, study_info in enumerate(investigation.studies):
if study_info.info.path:
with open(os.path.join(path_in, study_info.info.path), "rt") as inputf:
studies[s] = StudyReader.from_stream("S{}".format(s + 1), inputf).read()
studies[s] = StudyReader.from_stream(f"S{s + 1}", inputf).read()
if study_info.assays:
assays[s] = {}
for a, assay_info in enumerate(study_info.assays):
if assay_info.path:
with open(os.path.join(path_in, assay_info.path), "rt") as inputf:
assays[s][a] = AssayReader.from_stream(
"S{}".format(s + 1), "A{}".format(a + 1), inputf
).read()
assays[s][a] = AssayReader.from_stream(f"S{s + 1}", f"A{a + 1}", inputf).read()

# Validate studies and assays
for s, study_info in enumerate(investigation.studies):
6 changes: 2 additions & 4 deletions altamisa/apps/isatab_validate.py
@@ -82,15 +82,13 @@ def run_warnings_caught(args: Arguments):
for s, study_info in enumerate(investigation.studies):
if study_info.info.path:
with open(os.path.join(path_in, study_info.info.path), "rt") as inputf:
studies[s] = StudyReader.from_stream("S{}".format(s + 1), inputf).read()
studies[s] = StudyReader.from_stream(f"S{s + 1}", inputf).read()
if study_info.assays:
assays[s] = {}
for a, assay_info in enumerate(study_info.assays):
if assay_info.path:
with open(os.path.join(path_in, assay_info.path), "rt") as inputf:
assays[s][a] = AssayReader.from_stream(
"S{}".format(s + 1), "A{}".format(a + 1), inputf
).read()
assays[s][a] = AssayReader.from_stream(f"S{s + 1}", f"A{a + 1}", inputf).read()

# Validate studies and assays
for s, study_info in enumerate(investigation.studies):
6 changes: 2 additions & 4 deletions altamisa/isatab/headers.py
@@ -446,8 +446,7 @@ def _parse_next(self):
raise ParseIsatabException(msg)
return self._parse_labeled_column_header(val, label, type_)
# None of the if-statements above was taken
tpl = 'Header "{}" unknown, processing unclear'
msg = tpl.format(val)
msg = f'Header "{val}" unknown, processing unclear'
raise ParseIsatabException(msg)

def _parse_term_source_ref(self):
@@ -471,8 +470,7 @@ def _parse_simple_column_header(self, type_):
def _parse_labeled_column_header(self, val, key, type_):
tok = val[len(key) :] # strip '^{key}'
if not tok or tok[0] != "[" or tok[-1] != "]":
tpl = "Problem parsing labeled header {}"
msg = tpl.format(val)
msg = f"Problem parsing labeled header {val}"
raise ParseIsatabException(msg)
self.col_no += 1
return type_(self.col_no - 1, tok[1:-1])
3 changes: 1 addition & 2 deletions altamisa/isatab/helpers.py
@@ -21,7 +21,6 @@ def list_strip(line: List[str]) -> List[str]:
"""Remove trailing space from strings in a list (e.g. a csv line)"""
new_line = [field.strip() for field in line]
if new_line != line:
tpl = "Removed trailing whitespaces in fields of line: {}"
msg = tpl.format(line)
msg = f"Removed trailing whitespaces in fields of line: {line}"
warnings.warn(msg, ParseIsatabWarning)
return new_line
2 changes: 1 addition & 1 deletion altamisa/isatab/models.py
@@ -509,7 +509,7 @@ def __getitem__(self, idx):
elif idx == 1:
return self.head
else:
raise IndexError("Invalid index: %d" % idx) # pragma: no cover
raise IndexError(f"Invalid index: {idx}") # pragma: no cover


@attr.s(auto_attribs=True, frozen=True)
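
For context on the hunk above: raising `IndexError` from `__getitem__` is what lets an arc participate in tuple-style unpacking, because Python's fallback iteration protocol probes successive indices until that exception is raised. A simplified stand-in (not the actual attrs-based `models.Arc` definition):

```python
class Arc:
    """Simplified stand-in: a directed edge with tuple-like element access."""

    def __init__(self, tail: str, head: str):
        self.tail = tail
        self.head = head

    def __getitem__(self, idx: int) -> str:
        if idx == 0:
            return self.tail
        elif idx == 1:
            return self.head
        else:
            # Ends the fallback iteration protocol, so unpacking stops here.
            raise IndexError(f"Invalid index: {idx}")


tail, head = Arc("sample-1", "process-1")  # unpacking calls __getitem__(0), (1), (2)
assert (tail, head) == ("sample-1", "process-1")
```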
76 changes: 33 additions & 43 deletions altamisa/isatab/parse_assay_study.py
@@ -130,8 +130,10 @@ def _assign_column_headers(self): # noqa: C901
header.column_type not in self.name_headers
and header.column_type not in self.allowed_column_types
):
tpl = 'Invalid column type occurred "{}" not in {}'
msg = tpl.format(header.column_type, self.allowed_column_types)
msg = (
"Invalid column type occurred "
f'"{header.column_type}" not in {self.allowed_column_types}'
)
raise ParseIsatabException(msg)
# Most headers are not secondary, so make this the default state.
is_secondary = False
@@ -206,7 +208,7 @@ def _assign_column_headers(self): # noqa: C901
): # pragma: no cover
tpl = (
"Ontologies not supported for primary annotation "
"'{}' (in col {}).".format(prev.column_type, "{}")
f"'{prev.column_type}' (in col {{}})."
)
elif prev.term_source_ref_header: # pragma: no cover
tpl = 'Seen "Term Source REF" header for same entity ' "in col {}"
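
This hunk is the one non-obvious conversion in the file: `tpl` must keep a literal `{}` placeholder for a later `tpl.format(...)` call, so inside the f-string the braces are doubled. A small sketch of the two-stage formatting, with an illustrative column type:

```python
column_type = "Characteristics"

# Doubled braces yield literal braces, so the later .format() call still
# has a placeholder to fill in.
tpl = (
    "Ontologies not supported for primary annotation "
    f"'{column_type}' (in col {{}})."
)
assert tpl.format(7).endswith("'Characteristics' (in col 7).")
```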
@@ -232,8 +234,7 @@ def _assign_column_headers(self): # noqa: C901

@staticmethod
def _raise_seen_before(name, col_no): # pragma: no cover
tpl = 'Seen "{}" header for same entity in col {}'
msg = tpl.format(name, col_no)
msg = f'Seen "{name}" header for same entity in col {col_no}'
raise ParseIsatabException(msg)

def _build_complex(
@@ -256,7 +257,7 @@ def _build_complex(
unit = self._build_freetext_or_term_ref(header.unit_header, line)
if unit is not None and not isinstance(unit, (str, models.OntologyTermRef)):
raise ParseIsatabException(
"Unit must be a string or an OntologyTermRef, not {}".format(type(unit))
f"Unit must be a string or an OntologyTermRef, not {type(unit)}"
)
# Then, constructing ``klass`` is easy
return klass(header.label, value, unit)
@@ -284,11 +285,10 @@ def _build_freetext_or_term_ref(
]
return term_refs
else: # pragma: no cover
tpl = (
msg = (
"Irregular numbers of fields in ontology term columns"
"(i.e. ';'-separated fields): {}"
f"(i.e. ';'-separated fields): {line[header.col_no : header2.col_no + 2]}"
)
msg = tpl.format(line[header.col_no : header2.col_no + 2])
raise ParseIsatabException(msg)

# Else, just create single ontology term references
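
Several messages in this file are built from adjacent string literals spread over multiple source lines. Only the pieces that interpolate something need the `f` prefix, which is why plain and f-string fragments sit side by side after the conversion. A short sketch with made-up values:

```python
fields = ["OBI", "0000070", "extra"]

# Adjacent string literals are concatenated at compile time; plain and
# f-string pieces mix freely within the same parenthesized expression.
msg = (
    "Irregular numbers of fields in ontology term columns "
    f"(i.e. ';'-separated fields): {fields}"
)
assert msg == (
    "Irregular numbers of fields in ontology term columns "
    "(i.e. ';'-separated fields): ['OBI', '0000070', 'extra']"
)
```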
@@ -348,24 +348,22 @@ def build(self, line: List[str]) -> models.Material:
# First, build the individual components
if not self.name_header:
raise ParseIsatabException(
"No name header found for material found for file {}".format(self.filename)
f"No name header found for material found for file {self.filename}"
)
type_ = self.name_header.column_type
assay_id = "-{}".format(self.assay_id) if self.assay_id else ""
assay_id = f"-{self.assay_id}" if self.assay_id else ""
name = line[self.name_header.col_no]
if name:
# make material/data names unique by column
if self.name_header.column_type == table_headers.SOURCE_NAME:
unique_name = "{}-{}-{}".format(self.study_id, "source", name)
unique_name = f"{self.study_id}-source-{name}"
elif self.name_header.column_type == table_headers.SAMPLE_NAME:
# use static column identifier "sample-", since the same
# samples occur in different columns in study and assay
unique_name = "{}-{}-{}".format(self.study_id, "sample", name)
unique_name = f"{self.study_id}-sample-{name}"
else:
# anything else gets the column id
unique_name = "{}{}-{}-COL{}".format(
self.study_id, assay_id, name, self.name_header.col_no + 1
)
unique_name = f"{self.study_id}{assay_id}-{name}-COL{self.name_header.col_no + 1}"
else:
name_val = "{}{}-{} {}-{}-{}".format(
self.study_id,
@@ -434,8 +432,7 @@ def build(self, line: List[str]) -> models.Process:
try:
date = datetime.strptime(line[self.date_header.col_no], "%Y-%m-%d").date()
except ValueError as e: # pragma: no cover
tpl = 'Invalid ISO8601 date "{}"' # pragma: no cover
msg = tpl.format(line[self.date_header.col_no])
msg = f'Invalid ISO8601 date "{line[self.date_header.col_no]}"' # pragma: no cover
raise ParseIsatabException(msg) from e
else:
date = ""
@@ -479,14 +476,11 @@ def _build_protocol_ref_and_name(
) -> Tuple[str, Union[models.AnnotatedStr, str], Optional[str], Optional[str]]:
# At least one of these headers has to be specified
if not self.name_header and not self.protocol_ref_header: # pragma: no cover
raise ParseIsatabException(
"No protocol reference header found for process in file {}".format(
self.filename
)
)
msg = f"No protocol reference header found for process in file {self.filename}"
raise ParseIsatabException(msg)
# Perform case distinction on which case is actually true
counter_value = self._next_counter()
assay_id = "-{}".format(self.assay_id) if self.assay_id else ""
assay_id = f"-{self.assay_id}" if self.assay_id else ""
name = None
name_type = None
if not self.name_header: # and self.protocol_ref_header:
@@ -509,9 +503,7 @@ def _build_protocol_ref_and_name(
name = line[self.name_header.col_no]
name_type = self.name_header.column_type
if name: # Use name if available
unique_name = "{}{}-{}-{}".format(
self.study_id, assay_id, name, self.name_header.col_no + 1
)
unique_name = f"{self.study_id}{assay_id}-{name}-{self.name_header.col_no + 1}"
else: # Empty! # pragma: no cover
name_val = "{}{}-{} {}-{}-{}".format(
self.study_id,
@@ -527,9 +519,7 @@ def _build_protocol_ref_and_name(
name = line[self.name_header.col_no]
name_type = self.name_header.column_type
if name:
unique_name = "{}{}-{}-{}".format(
self.study_id, assay_id, name, self.name_header.col_no + 1
)
unique_name = f"{self.study_id}{assay_id}-{name}-{self.name_header.col_no + 1}"
else:
name_val = "{}{}-{}-{}-{}".format(
self.study_id,
Expand All @@ -544,7 +534,7 @@ def _build_protocol_ref_and_name(
tpl = "Missing protocol reference in column {} of file {} "
msg = tpl.format(self.protocol_ref_header.col_no + 1, self.filename)
else:
msg = "Missing protocol reference in file {}".format(self.filename)
msg = f"Missing protocol reference in file {self.filename}"
raise ParseIsatabException(msg)
return protocol_ref, unique_name, name, name_type

@@ -596,16 +586,16 @@ def _intercept_duplicates(self, start, end):
]
header = [h for h in self.header[start:end] if h.column_type in column_types_to_check]
names = [
"{}[{}]".format(h.column_type, h.label)
if isinstance(h, LabeledColumnHeader)
else h.column_type
f"{h.column_type}[{h.label}]" if isinstance(h, LabeledColumnHeader) else h.column_type
for h in header
]
duplicates = set([c for c in names if names.count(c) > 1])
if duplicates:
assay = " assay {}".format(self.assay_id) if self.assay_id else ""
tpl = "Found duplicated column types in header of study {}{}: {}"
msg = tpl.format(self.study_id, assay, ", ".join(duplicates))
assay = f" assay {self.assay_id}" if self.assay_id else ""
msg = (
"Found duplicated column types in header of study "
f"{self.study_id}{assay}: {', '.join(duplicates)}"
)
raise ParseIsatabException(msg)

def _make_breaks(self):
@@ -768,23 +758,23 @@ def _construct(self, rows):
if (
entry.unique_name in processes and entry != processes[entry.unique_name]
): # pragma: no cover
tpl = (
msg = (
"Found processes with same name but different "
"annotation:\nprocess 1: {}\nprocess 2: {}"
f"annotation:\nprocess 1: {entry}\n"
f"process 2: {processes[entry.unique_name]}"
)
msg = tpl.format(entry, processes[entry.unique_name])
raise ParseIsatabException(msg)
processes[entry.unique_name] = entry
else:
assert isinstance(entry, models.Material)
if (
entry.unique_name in materials and entry != materials[entry.unique_name]
): # pragma: no cover
tpl = (
msg = (
"Found materials with same name but different "
"annotation:\nmaterial 1: {}\nmaterial 2: {}"
f"annotation:\nmaterial 1: {entry}\n"
f"material 2: {materials[entry.unique_name]}"
)
msg = tpl.format(entry, materials[entry.unique_name])
raise ParseIsatabException(msg)
materials[entry.unique_name] = entry
# Collect arc
(Diff truncated; the remaining 11 of the 18 changed files are not shown.)
