Skip to content

Commit

Permalink
lint/format, update pyright action and add failing test
Browse files Browse the repository at this point in the history
  • Loading branch information
xinaesthete committed Apr 15, 2024
1 parent 9cbb171 commit 0dece0a
Show file tree
Hide file tree
Showing 5 changed files with 20 additions and 14 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ jobs:
- run: poetry install --with dev
- run: echo "$(poetry env info --path)/bin" >> $GITHUB_PATH

- uses: jakebailey/pyright-action@v1
- uses: jakebailey/pyright-action@v2
with:
version: ${{ matrix.pyright-version }}

Expand Down
6 changes: 4 additions & 2 deletions python/mdvtools/conversions.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,8 +193,10 @@ def create_regulamentary_project(
# trying to avoid unbound variables - but not convinced this is correct
# what is supposed to happen if bigwig_folder is not a URL?
# >> need tests for this function... <<
beds = bigwigs = ['' for _ in all_names]
print("bigwig_folder is not a URL - using empty URLs for tracks, may well be wrong.")
beds = bigwigs = ["" for _ in all_names]
print(
"bigwig_folder is not a URL - using empty URLs for tracks, may well be wrong."
)
pass
# get the reference

Expand Down
11 changes: 8 additions & 3 deletions python/mdvtools/mdvproject.py
Original file line number Diff line number Diff line change
Expand Up @@ -413,7 +413,7 @@ def add_annotations(
f"index column {index_col} not found in {datasource} datasource"
)
# py-right: dictionary key must be hashable
newdf = pandas.DataFrame({index_col: self.get_column(datasource, index_col)}) # type: ignore
newdf = pandas.DataFrame({index_col: self.get_column(datasource, index_col)}) # type: ignore
h5 = self._get_h5_handle()
gr = h5[datasource]
assert isinstance(gr, h5py.Group)
Expand Down Expand Up @@ -1202,7 +1202,9 @@ def get_subgroup_bytes(grp, index, sparse=False):
return numpy.array(grp["x"][offset : offset + _len], numpy.float32).tobytes()


def add_column_to_group(col: dict, data: pandas.Series | pandas.DataFrame, group: h5py.Group, length: int):
def add_column_to_group(
col: dict, data: pandas.Series | pandas.DataFrame, group: h5py.Group, length: int
):
"""
col (dict): The column metadata (may be modified e.g. to add values)
data (pandas.Series): The data to add
Expand Down Expand Up @@ -1230,7 +1232,10 @@ def add_column_to_group(col: dict, data: pandas.Series | pandas.DataFrame, group
col["values"] = [x for x in values.index if values[x] != 0]
vdict = {k: v for v, k in enumerate(col["values"])}
group.create_dataset(
col["field"], length, dtype=dtype, data=data.map(vdict) # type: ignore
col["field"],
length,
dtype=dtype,
data=data.map(vdict), # type: ignore
)
# convert to string
col["values"] = [str(x) for x in col["values"]]
Expand Down
4 changes: 3 additions & 1 deletion python/mdvtools/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,9 @@ def get_binary_data():
return "Request must contain JSON with 'datasource' and 'name'", 400
if project.dir is None or not os.path.exists(project.dir):
return "Project directory not found", 404
path = safe_join(project.dir, "binarydata", req["datasource"], f"{req['name']}.gz")
path = safe_join(
project.dir, "binarydata", req["datasource"], f"{req['name']}.gz"
)
if path is None or not os.path.exists(path):
return "Binary data not found", 404
with open(path, "rb") as f:
Expand Down
11 changes: 4 additions & 7 deletions python/mdvtools/tests/test_column_groups.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,12 @@
import pandas as pd

# make a project in local temp directory, which is ignored by git
path = os.path.join(os.path.dirname(__file__), 'temp', 'test_column_groups')
path = os.path.join(os.path.dirname(__file__), "temp", "test_column_groups")
if not os.path.exists(path):
os.makedirs(path)
p = MDVProject(path, delete_existing=True)

df = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})

p.add_datasource('test', df)
p.add_datasource("test", df)
assert False ## making a test that fails, to see if it runs properly etc.

0 comments on commit 0dece0a

Please sign in to comment.