Upgrade to Python 3.10 (#184)
Upgrades emote from Python 3.9 to 3.10. The main changes are:

- You can now combine `with` statements into one parenthesized group, so instead of

```
with open('file1.txt') as file1:
    with open('file2.txt') as file2:
        bla
```

You can use 

```
with (
    open('file1.txt') as file1,
    open('file2.txt') as file2
):
    bla
```

- Instead of using `Union`, you can now use `|`. So `name: Union[str, bytes]` becomes `name: str | bytes` (sketched below).
- `match` statements have been added, similar to Rust's `match`, for structural pattern matching over data structures (also sketched below).
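
As an illustration of the second point (a hypothetical function, not code from this repo):

```python
from typing import Union


# Python 3.9 style: Union has to be imported from typing.
def read_header(name: Union[str, bytes]) -> Union[int, None]:
    ...


# Python 3.10 style: the | operator works directly on types, no import needed.
def read_header(name: str | bytes) -> int | None:
    ...
```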
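And a minimal sketch of the third point (again a made-up example):

```python
def describe(point):
    # match checks the subject against each case pattern in order and
    # binds names like x and y on the first structural match.
    match point:
        case (0, 0):
            return "origin"
        case (x, 0):
            return f"on the x-axis at {x}"
        case (x, y):
            return f"at ({x}, {y})"
        case _:
            return "not a 2-tuple"


print(describe((3, 0)))  # -> "on the x-axis at 3"
```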

The first two points have been applied across this repo, grouping `with` statements together where possible and using `|` instead of `Union`.

PDM had to be at version 2.5.6 to update the lock files properly; otherwise it wouldn't work (see the sketch below).
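
A minimal sketch of pinning that version locally (assuming `pipx`; any installer that pins `pdm==2.5.6` works):

```bash
# Install the exact PDM version CI uses, then refresh the lock files.
pipx install pdm==2.5.6
pdm lock
```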
jaxs-ribs authored Nov 17, 2023
1 parent 043d650 · commit 354c7be
Showing 18 changed files with 137 additions and 158 deletions.
2 changes: 1 addition & 1 deletion .buildkite/install-repo.sh
@@ -12,5 +12,5 @@ eval "$(pyenv init -)"
 
 ${PDM_COMMAND:1:-1} use ${PYTHON_VERSION:1:-1}
 ${PDM_COMMAND:1:-1} install -d -G ci -k post_install
-${PDM_COMMAND:1:-1} plugin add pdm-plugin-torch>=23.1.1
+${PDM_COMMAND:1:-1} plugin add pdm-plugin-torch==23.3.0
 ${PDM_COMMAND:1:-1} torch install cpu
2 changes: 1 addition & 1 deletion .buildkite/pipeline.yml
@@ -39,7 +39,7 @@ large: &large
 
 env:
   PDM_COMMAND: pdm25
-  PYTHON_VERSION: '3.9'
+  PYTHON_VERSION: '3.10'
 
 steps:
   - group: ":passport_control: Validating PR"
2 changes: 1 addition & 1 deletion .github/workflows/docs.yml
@@ -31,7 +31,7 @@ jobs:
       - uses: pdm-project/setup-pdm@v3
         name: Setup PDM
         with:
-          python-version: 3.9
+          python-version: 3.10
           architecture: x64
           version: 2.5.6
           prerelease: false
2 changes: 1 addition & 1 deletion .github/workflows/pre-release.yaml
@@ -47,7 +47,7 @@ jobs:
      - uses: pdm-project/setup-pdm@v3
        name: Setup PDM
        with:
-          python-version: 3.9
+          python-version: 3.10
          architecture: x64
          version: 2.5.6
          prerelease: true
2 changes: 1 addition & 1 deletion .github/workflows/release.yml
@@ -18,7 +18,7 @@ jobs:
      - uses: pdm-project/setup-pdm@v3
        name: Setup PDM
        with:
-          python-version: 3.9
+          python-version: 3.10
          architecture: x64
          version: 2.5.6
          prerelease: true
3 changes: 3 additions & 0 deletions .gitignore
@@ -22,3 +22,6 @@ runs/**
 wandb/**
 
 __pypackages__/
+
+.pdm-python
+.DS_STORE
6 changes: 3 additions & 3 deletions README.md
@@ -127,18 +127,18 @@ pdm install -d -G :all
 **Torch won't install:** Check that your python version is correct. Try deleting your `.venv` and recreating it with
 
 ```bash
-pdm venv create 3.9
+pdm venv create 3.10
 pdm install -G :all
 ```
 
 **Box2d complains:** Box2d needs swig and python bindings. On apt-based systems try
 
 ```bash
 sudo apt install swig
-sudo apt install python3.9-dev
+sudo apt install python3.10-dev
 ```
 
-**Python 3.9 is tricky to install:** For Ubuntu based distros try adding the deadsnakes PPA.
+**Python 3.10 is tricky to install:** For Ubuntu based distros try adding the deadsnakes PPA.
 
 ## Contribution
 
2 changes: 1 addition & 1 deletion emote/memory/core_types.py
@@ -6,7 +6,7 @@
 
 
 # Number is *either* an int or a float, but *not* covariant.
-# For example: Sequence[Union[int, float]] accepts [int, float]
+# For example: Sequence[int | float] accepts [int, float]
 # Sequence[Number] only accept [int, int, ...] or
 # [float, float, ...]
 
6 changes: 3 additions & 3 deletions emote/memory/memory.py
@@ -15,7 +15,7 @@
 
 from collections import defaultdict, deque
 from dataclasses import dataclass, field
-from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
 
 import numpy as np
 import torch
@@ -298,7 +298,7 @@ def report(
 
     def get_report(
         self, keys: List[str]
-    ) -> Tuple[dict[str, Union[int, float, list[float]]], dict[str, list[float]]]:
+    ) -> Tuple[dict[str, int | float | list[float]], dict[str, list[float]]]:
         keys = set(keys)
         out = {}
         out_lists = {}
@@ -386,7 +386,7 @@ class MemoryExporterProxyWrapper(TableMemoryProxyWrapper, LoggingMixin):
 
     def __init__(
         self,
-        memory: Union[TableMemoryProxy, TableMemoryProxyWrapper],
+        memory: TableMemoryProxy | TableMemoryProxyWrapper,
        target_memory_name,
        inf_steps_per_memory_export,
        experiment_root_path: str,
14 changes: 7 additions & 7 deletions emote/memory/storage.py
@@ -2,7 +2,7 @@
 """
 
-from typing import Sequence, Tuple, Union
+from typing import Sequence, Tuple
 
 import numpy as np
 
@@ -93,7 +93,7 @@ def post_import(self):
         self[-abs(valid_id) - 1] = self[valid_id]
         del self[valid_id]
 
-    def __getitem__(self, key: Union[int, Tuple[int, ...], slice]):
+    def __getitem__(self, key: int | Tuple[int, ...] | slice):
         episode = super().__getitem__(key)
         r = TagStorage.TagProxy()
         r.value = episode
@@ -117,13 +117,13 @@ def __init__(self, storage, shape, dtype):
     def shape(self):
         return self._storage.shape
 
-    def __getitem__(self, key: Union[int, Tuple[int, ...], slice]):
+    def __getitem__(self, key: int | Tuple[int, ...] | slice):
         pass
 
-    def __setitem__(self, key: Union[int, Tuple[int, ...], slice], value: Sequence[Number]):
+    def __setitem__(self, key: int | Tuple[int, ...] | slice, value: Sequence[Number]):
         pass
 
-    def __delitem__(self, key: Union[int, Tuple[int, ...], slice]):
+    def __delitem__(self, key: int | Tuple[int, ...] | slice):
         pass
 
     def sequence_length_transform(self, length):
@@ -191,7 +191,7 @@ def __init__(self, storage, shape, dtype, only_last: bool = False):
         self._only_last = only_last
         self._wrapper = NextElementMapper.LastWrapper if only_last else NextElementMapper.Wrapper
 
-    def __getitem__(self, key: Union[int, Tuple[int, ...], slice]):
+    def __getitem__(self, key: int | Tuple[int, ...] | slice):
         return self._wrapper(self._storage[key])
 
     def sequence_length_transform(self, length):
@@ -245,7 +245,7 @@ def __init__(self, storage, shape, dtype, mask: bool = False):
         super().__init__(storage, shape, dtype)
         self._mask = mask
 
-    def __getitem__(self, key: Union[int, Tuple[int, ...], slice]):
+    def __getitem__(self, key: int | Tuple[int, ...] | slice):
         if self._mask:
             return SyntheticDones.MaskWrapper(len(self._storage[key]), self._shape, self._dtype)
 
166 changes: 86 additions & 80 deletions emote/memory/table.py
@@ -219,9 +219,11 @@ def full(self) -> bool:
         return self._filled
 
     def add_sequence(self, identity: int, sequence: dict):
-        with self._timers.scope("add_sequence"):
-            with self._lock:
-                self._add_sequence_internal(identity, sequence)
+        with (
+            self._timers.scope("add_sequence"),
+            self._lock,
+        ):
+            self._add_sequence_internal(identity, sequence)
 
     def _add_sequence_internal(self, identity: int, sequence: dict):
         """add a fully terminated sequence to the memory"""
@@ -267,68 +269,70 @@ def _serialize(self, path: str) -> bool:
         from atomicwrites import atomic_write
 
         with self._lock:
-            with atomic_write(f"{path}.zip", overwrite=True, mode="wb") as tmp:
-                with zipfile.ZipFile(tmp, "a") as zip_:
-                    with zip_.open("version", "w") as version:
-                        version_int = TableSerializationVersion.V1.value
-                        version.write(str(version_int).encode("utf-8"))
-
-                    parts = {
-                        "ejector_type": self._ejector.__class__.__name__,
-                        "sampler_type": self._sampler.__class__.__name__,
-                        "length_key": self._length_key,
-                        "maxlen": self._maxlen,
-                        "ids": list(self._lengths.keys()),
-                    }
-
-                    ejector_state = self._ejector.state()
-                    if ejector_state is not None:
-                        parts["ejector_state"] = ejector_state
-
-                    sampler_state = self._sampler.state()
-                    if sampler_state is not None:
-                        parts["sampler_state"] = sampler_state
-
-                    parts["columns"] = [
-                        (name, column.__class__.__name__, column.state())
-                        for name, column in self._columns.items()
-                    ]
-
-                    output_ranges = {}
-                    output_data = {}
-
-                    for key, store in self._data.items():
-                        ranges = []
-                        merged_data = []
-
-                        if isinstance(store, VirtualStorage):
-                            continue
-
-                        for identity, data in store.items():
-                            ranges.append(
-                                (
-                                    identity,
-                                    len(merged_data),
-                                    len(data),
-                                )
-                            )
-                            merged_data.extend(data)
-
-                        output_data[key] = np.stack(merged_data)
-                        output_ranges[key] = ranges
-
-                    parts["part_keys"] = list(output_data.keys())
-
-                    with zip_.open("configuration.json", "w", force_zip64=True) as f:
-                        json_data = json.dumps(parts)
-                        f.write(json_data.encode("utf-8"))
-
-                    for key, data in output_data.items():
-                        with zip_.open(f"{key}.ranges.npy", "w", force_zip64=True) as f:
-                            np.save(f, output_ranges[key], allow_pickle=False)
-
-                        with zip_.open(f"{key}.npy", "w", force_zip64=True) as npz:
-                            np.save(npz, data, allow_pickle=False)
+            with (
+                atomic_write(f"{path}.zip", overwrite=True, mode="wb") as tmp,
+                zipfile.ZipFile(tmp, "a") as zip_,
+            ):
+                with zip_.open("version", "w") as version:
+                    version_int = TableSerializationVersion.V1.value
+                    version.write(str(version_int).encode("utf-8"))
+
+                parts = {
+                    "ejector_type": self._ejector.__class__.__name__,
+                    "sampler_type": self._sampler.__class__.__name__,
+                    "length_key": self._length_key,
+                    "maxlen": self._maxlen,
+                    "ids": list(self._lengths.keys()),
+                }
+
+                ejector_state = self._ejector.state()
+                if ejector_state is not None:
+                    parts["ejector_state"] = ejector_state
+
+                sampler_state = self._sampler.state()
+                if sampler_state is not None:
+                    parts["sampler_state"] = sampler_state
+
+                parts["columns"] = [
+                    (name, column.__class__.__name__, column.state())
+                    for name, column in self._columns.items()
+                ]
+
+                output_ranges = {}
+                output_data = {}
+
+                for key, store in self._data.items():
+                    ranges = []
+                    merged_data = []
+
+                    if isinstance(store, VirtualStorage):
+                        continue
+
+                    for identity, data in store.items():
+                        ranges.append(
+                            (
+                                identity,
+                                len(merged_data),
+                                len(data),
+                            )
+                        )
+                        merged_data.extend(data)
+
+                    output_data[key] = np.stack(merged_data)
+                    output_ranges[key] = ranges
+
+                parts["part_keys"] = list(output_data.keys())
+
+                with zip_.open("configuration.json", "w", force_zip64=True) as f:
+                    json_data = json.dumps(parts)
+                    f.write(json_data.encode("utf-8"))
+
+                for key, data in output_data.items():
+                    with zip_.open(f"{key}.ranges.npy", "w", force_zip64=True) as f:
+                        np.save(f, output_ranges[key], allow_pickle=False)
+
+                    with zip_.open(f"{key}.npy", "w", force_zip64=True) as npz:
+                        np.save(npz, data, allow_pickle=False)
 
             os.chmod(f"{path}.zip", stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
 
@@ -441,27 +445,29 @@ def _store_legacy(self, path: str) -> bool:
         from atomicwrites import atomic_write
 
         with self._lock:
-            with atomic_write(f"{path}.zip", overwrite=True, mode="wb") as tmp:
-                with zipfile.ZipFile(tmp, "a") as zip_:
-                    with zip_.open("data.pickle", "w", force_zip64=True) as data_file:
-                        parts = {
-                            "ejector": self._ejector,
-                            "sampler": self._sampler,
-                            "length_key": self._length_key,
-                            "maxlen": self._maxlen,
-                            "columns": self._columns,
-                            "lengths": self._lengths,
-                            "filled": self._filled,
-                        }
-
-                        cloudpickle.dump(parts, data_file, protocol=4)
-
-                    for key, data in self._data.items():
-                        if isinstance(data, VirtualStorage):
-                            continue
-
-                        with zip_.open(f"{key}.npy", "w", force_zip64=True) as npz:
-                            np.save(npz, data)
+            with (
+                atomic_write(f"{path}.zip", overwrite=True, mode="wb") as tmp,
+                zipfile.ZipFile(tmp, "a") as zip_,
+            ):
+                with zip_.open("data.pickle", "w", force_zip64=True) as data_file:
+                    parts = {
+                        "ejector": self._ejector,
+                        "sampler": self._sampler,
+                        "length_key": self._length_key,
+                        "maxlen": self._maxlen,
+                        "columns": self._columns,
+                        "lengths": self._lengths,
+                        "filled": self._filled,
+                    }
+
+                    cloudpickle.dump(parts, data_file, protocol=4)
+
+                for key, data in self._data.items():
+                    if isinstance(data, VirtualStorage):
+                        continue
+
+                    with zip_.open(f"{key}.npy", "w", force_zip64=True) as npz:
+                        np.save(npz, data)
 
             os.chmod(f"{path}.zip", stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
(Remaining changed files not shown.)
