Enable ruff's flake8-commas rule #3044

Merged · 4 commits · Aug 30, 2024
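
Context for the diffs below: ruff's flake8-commas rules (the "COM" family enabled in pyproject.toml later in this diff) require a trailing comma after the last element of any call, definition, or literal that spans multiple lines. Once that comma is present, Black-compatible formatters treat it as a "magic trailing comma" and keep one argument per line, which is why most hunks here turn a single crowded line into one argument per line. A minimal sketch with made-up names, not code from this PR:

    # Before: multi-line call without a trailing comma -- flagged as COM812
    result = combine(
        first_value, second_value
    )

    # After "ruff check --fix" plus reformatting: trailing comma, one argument per line
    result = combine(
        first_value,
        second_value,
    )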
2 changes: 2 additions & 0 deletions .git-blame-ignore-revs
@@ -1,2 +1,4 @@
# sorting all imports with isort
933f77b96f0092e1baab4474a9208fc2e379aa32
# enabling ruff's flake8-commas rule
b25c02a94e2defcb0fad32976b02218be1133bdf
3 changes: 2 additions & 1 deletion docs/source/conf.py
@@ -132,7 +132,8 @@ def autodoc_process_signature(
# and insert the fully-qualified name.
signature = signature.replace("+E", "~trio.testing._raises_group.E")
signature = signature.replace(
"+MatchE", "~trio.testing._raises_group.MatchE"
"+MatchE",
"~trio.testing._raises_group.MatchE",
)
if "DTLS" in name:
signature = signature.replace("SSL.Context", "OpenSSL.SSL.Context")
2 changes: 1 addition & 1 deletion notes-to-self/afd-lab.py
@@ -125,7 +125,7 @@ async def afd_poll(self, sock, flags, *, exclusive=0):
ffi.sizeof("AFD_POLL_INFO"),
ffi.NULL,
lpOverlapped,
)
),
)
except OSError as exc:
if exc.winerror != ErrorCodes.ERROR_IO_PENDING: # pragma: no cover
4 changes: 3 additions & 1 deletion notes-to-self/blocking-read-hack.py
@@ -12,7 +12,9 @@ class BlockingReadTimeoutError(Exception):


async def blocking_read_with_timeout(
fd, count, timeout # noqa: ASYNC109 # manual timeout
fd,
count,
timeout, # noqa: ASYNC109 # manual timeout
):
print("reading from fd", fd)
cancel_requested = False
2 changes: 1 addition & 1 deletion notes-to-self/file-read-latency.py
@@ -23,5 +23,5 @@
seek = (end - between) / COUNT * 1e9
read = both - seek
print(
f"{both:.2f} ns/(seek+read), {seek:.2f} ns/seek, estimate ~{read:.2f} ns/read"
f"{both:.2f} ns/(seek+read), {seek:.2f} ns/seek, estimate ~{read:.2f} ns/read",
)
6 changes: 3 additions & 3 deletions notes-to-self/how-does-windows-so-reuseaddr-work.py
@@ -49,7 +49,7 @@ def table_entry(mode1, bind_type1, mode2, bind_type2):
"""
second bind
| """
+ " | ".join(["%-19s" % mode for mode in modes])
+ " | ".join(["%-19s" % mode for mode in modes]),
)

print(""" """, end="")
@@ -58,7 +58,7 @@ def table_entry(mode1, bind_type1, mode2, bind_type2):

print(
"""
first bind -----------------------------------------------------------------"""
first bind -----------------------------------------------------------------""",
# default | wildcard | INUSE | Success | ACCESS | Success | INUSE | Success
)

@@ -72,5 +72,5 @@ def table_entry(mode1, bind_type1, mode2, bind_type2):
# print(mode1, bind_type1, mode2, bind_type2, entry)
print(
f"{mode1:>19} | {bind_type1:>8} | "
+ " | ".join(["%8s" % entry for entry in row])
+ " | ".join(["%8s" % entry for entry in row]),
)
5 changes: 3 additions & 2 deletions notes-to-self/manual-signal-handler.py
@@ -11,11 +11,12 @@
"""
void* WINAPI GetProcAddress(void* hModule, char* lpProcName);
typedef void (*PyOS_sighandler_t)(int);
"""
""",
)
kernel32 = ffi.dlopen("kernel32.dll")
PyOS_getsig_ptr = kernel32.GetProcAddress(
ffi.cast("void*", sys.dllhandle), b"PyOS_getsig"
ffi.cast("void*", sys.dllhandle),
b"PyOS_getsig",
)
PyOS_getsig = ffi.cast("PyOS_sighandler_t (*)(int)", PyOS_getsig_ptr)

4 changes: 2 additions & 2 deletions notes-to-self/proxy-benchmarks.py
@@ -54,7 +54,7 @@ def add_wrapper(cls, method):
f"""
def wrapper(self, *args, **kwargs):
return self._wrapped.{method}(*args, **kwargs)
"""
""",
)
ns = {}
exec(code, ns)
@@ -113,7 +113,7 @@ def setter(self, newval):

def deleter(self):
del self._wrapped.{attr}
"""
""",
)
ns = {}
exec(code, ns)
16 changes: 12 additions & 4 deletions notes-to-self/ssl-handshake/ssl-handshake.py
@@ -27,7 +27,9 @@ def echo_server_connection():
client_sock, server_sock = socket.socketpair()
with client_sock, server_sock:
t = threading.Thread(
target=_ssl_echo_serve_sync, args=(server_sock,), daemon=True
target=_ssl_echo_serve_sync,
args=(server_sock,),
daemon=True,
)
t.start()

@@ -101,7 +103,9 @@ def wrap_socket_via_wrap_bio(ctx, sock, **kwargs):
with echo_server_connection() as client_sock:
client_ctx = ssl.create_default_context(cafile="trio-test-CA.pem")
wrapped = wrap_socket(
client_ctx, client_sock, server_hostname="trio-test-1.example.org"
client_ctx,
client_sock,
server_hostname="trio-test-1.example.org",
)
wrapped.do_handshake()
wrapped.sendall(b"x")
@@ -113,7 +117,9 @@ def wrap_socket_via_wrap_bio(ctx, sock, **kwargs):
with echo_server_connection() as client_sock:
client_ctx = ssl.create_default_context(cafile="trio-test-CA.pem")
wrapped = wrap_socket(
client_ctx, client_sock, server_hostname="trio-test-2.example.org"
client_ctx,
client_sock,
server_hostname="trio-test-2.example.org",
)
try:
wrapped.do_handshake()
@@ -126,7 +132,9 @@ def wrap_socket_via_wrap_bio(ctx, sock, **kwargs):
with echo_server_connection() as client_sock:
client_ctx = ssl.create_default_context(cafile="trio-test-CA.pem")
wrapped = wrap_socket(
client_ctx, client_sock, server_hostname="trio-test-2.example.org"
client_ctx,
client_sock,
server_hostname="trio-test-2.example.org",
)
# We forgot to call do_handshake
# But the hostname is wrong so something had better error out...
2 changes: 1 addition & 1 deletion notes-to-self/wakeup-fd-racer.py
@@ -91,7 +91,7 @@ def main():
if duration < 2:
print(
f"Attempt {attempt}: OK, trying again "
f"(select_calls = {select_calls}, drained = {drained})"
f"(select_calls = {select_calls}, drained = {drained})",
)
else:
print(f"Attempt {attempt}: FAILED, took {duration} seconds")
2 changes: 1 addition & 1 deletion notes-to-self/win-waitable-timer.py
@@ -49,7 +49,7 @@
WORD wSecond;
WORD wMilliseconds;
} SYSTEMTIME, *PSYSTEMTIME, *LPSYSTEMTIME;
"""
""",
)

ffi.cdef(
1 change: 1 addition & 0 deletions pyproject.toml
@@ -109,6 +109,7 @@ select = [
"ASYNC", # flake8-async
"B", # flake8-bugbear
"C4", # flake8-comprehensions
"COM", # flake8-commas
"E", # Error
"EXE", # flake8-executable
"F", # pyflakes
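
Selecting the whole "COM" prefix turns on every flake8-commas check that ruff implements, not just the missing-trailing-comma rule responsible for the hunks above. As best I recall the family also includes rules that forbid commas where they are misleading or redundant; the sketch below uses illustrative values, and the rule numbers should be double-checked against ruff's documentation:

    # COM812 -- missing trailing comma in a construct that spans several lines
    names = [
        "alice",
        "bob"      # flagged: needs a trailing comma
    ]

    # COM818 -- trailing comma after a bare value silently creates a tuple
    port = 8080,   # flagged: this is the tuple (8080,), probably not what was meant

    # COM819 -- trailing comma inside a single-line construct is redundant
    pair = ("x", "y",)   # flagged: the construct fits on one line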
4 changes: 3 additions & 1 deletion src/trio/_abc.py
@@ -198,7 +198,9 @@ async def getaddrinfo(

@abstractmethod
async def getnameinfo(
self, sockaddr: tuple[str, int] | tuple[str, int, int, int], flags: int
self,
sockaddr: tuple[str, int] | tuple[str, int, int, int],
flags: int,
) -> tuple[str, str]:
"""A custom implementation of :func:`~trio.socket.getnameinfo`.

3 changes: 2 additions & 1 deletion src/trio/_channel.py
@@ -96,7 +96,8 @@ def _open_memory_channel(
# Need to use Tuple instead of tuple due to CI check running on 3.8
class open_memory_channel(Tuple["MemorySendChannel[T]", "MemoryReceiveChannel[T]"]):
def __new__( # type: ignore[misc] # "must return a subtype"
cls, max_buffer_size: int | float # noqa: PYI041
cls,
max_buffer_size: int | float, # noqa: PYI041
) -> tuple[MemorySendChannel[T], MemoryReceiveChannel[T]]:
return _open_memory_channel(max_buffer_size)

15 changes: 10 additions & 5 deletions src/trio/_core/_asyncgens.py
@@ -60,7 +60,8 @@ def firstiter(agen: AsyncGeneratorType[object, NoReturn]) -> None:
self.prev_hooks.firstiter(agen)

def finalize_in_trio_context(
agen: AsyncGeneratorType[object, NoReturn], agen_name: str
agen: AsyncGeneratorType[object, NoReturn],
agen_name: str,
) -> None:
try:
runner.spawn_system_task(
@@ -85,7 +86,9 @@ def finalizer(agen: AsyncGeneratorType[object, NoReturn]) -> None:

if is_ours:
runner.entry_queue.run_sync_soon(
finalize_in_trio_context, agen, agen_name
finalize_in_trio_context,
agen,
agen_name,
)

# Do this last, because it might raise an exception
@@ -123,7 +126,7 @@ def finalizer(agen: AsyncGeneratorType[object, NoReturn]) -> None:
raise RuntimeError(
f"Non-Trio async generator {agen_name!r} awaited something "
"during finalization; install a finalization hook to "
"support this, or wrap it in 'async with aclosing(...):'"
"support this, or wrap it in 'async with aclosing(...):'",
)

self.prev_hooks = sys.get_asyncgen_hooks()
@@ -146,7 +149,7 @@ async def finalize_remaining(self, runner: _run.Runner) -> None:
# them was an asyncgen finalizer that snuck in under the wire.
runner.entry_queue.run_sync_soon(runner.reschedule, runner.init_task)
await _core.wait_task_rescheduled(
lambda _: _core.Abort.FAILED # pragma: no cover
lambda _: _core.Abort.FAILED, # pragma: no cover
)
self.alive.update(self.trailing_needs_finalize)
self.trailing_needs_finalize.clear()
@@ -193,7 +196,9 @@ def close(self) -> None:
sys.set_asyncgen_hooks(*self.prev_hooks)

async def _finalize_one(
self, agen: AsyncGeneratorType[object, NoReturn], name: object
self,
agen: AsyncGeneratorType[object, NoReturn],
name: object,
) -> None:
try:
# This shield ensures that finalize_asyncgen never exits
6 changes: 4 additions & 2 deletions src/trio/_core/_concat_tb.py
@@ -104,15 +104,17 @@ def controller(operation: tputil.ProxyOperation) -> Any | None: # type: ignore[
return operation.delegate() # Delegate is reverting to original behaviour

return cast(
TracebackType, tputil.make_proxy(controller, type(base_tb), base_tb)
TracebackType,
tputil.make_proxy(controller, type(base_tb), base_tb),
) # Returns proxy to traceback


# this is used for collapsing single-exception ExceptionGroups when using
# `strict_exception_groups=False`. Once that is retired this function and its helper can
# be removed as well.
def concat_tb(
head: TracebackType | None, tail: TracebackType | None
head: TracebackType | None,
tail: TracebackType | None,
) -> TracebackType | None:
# We have to use an iterative algorithm here, because in the worst case
# this might be a RecursionError stack that is by definition too deep to
2 changes: 1 addition & 1 deletion src/trio/_core/_entry_queue.py
@@ -77,7 +77,7 @@ async def kill_everything(exc: BaseException) -> NoReturn:
parent_nursery = _core.current_task().parent_nursery
if parent_nursery is None:
raise AssertionError(
"Internal error: `parent_nursery` should never be `None`"
"Internal error: `parent_nursery` should never be `None`",
) from exc # pragma: no cover
parent_nursery.start_soon(kill_everything, exc)

11 changes: 8 additions & 3 deletions src/trio/_core/_generated_io_kqueue.py
@@ -42,7 +42,8 @@ def current_kqueue() -> select.kqueue:


def monitor_kevent(
ident: int, filter: int
ident: int,
filter: int,
) -> ContextManager[_core.UnboundedQueue[select.kevent]]:
"""TODO: these are implemented, but are currently more of a sketch than
anything real. See `#26
@@ -56,7 +57,9 @@ def monitor_kevent(


async def wait_kevent(
ident: int, filter: int, abort_func: Callable[[RaiseCancelT], Abort]
ident: int,
filter: int,
abort_func: Callable[[RaiseCancelT], Abort],
) -> Abort:
"""TODO: these are implemented, but are currently more of a sketch than
anything real. See `#26
@@ -65,7 +68,9 @@ async def wait_kevent(
sys._getframe().f_locals[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
try:
return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_kevent(
ident, filter, abort_func
ident,
filter,
abort_func,
)
except AttributeError:
raise RuntimeError("must be called from async context") from None
19 changes: 14 additions & 5 deletions src/trio/_core/_generated_io_windows.py
@@ -134,14 +134,17 @@ async def wait_overlapped(handle_: int | CData, lpOverlapped: CData | int) -> ob
sys._getframe().f_locals[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
try:
return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_overlapped(
handle_, lpOverlapped
handle_,
lpOverlapped,
)
except AttributeError:
raise RuntimeError("must be called from async context") from None


async def write_overlapped(
handle: int | CData, data: Buffer, file_offset: int = 0
handle: int | CData,
data: Buffer,
file_offset: int = 0,
) -> int:
"""TODO: these are implemented, but are currently more of a sketch than
anything real. See `#26
@@ -151,14 +154,18 @@
sys._getframe().f_locals[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
try:
return await GLOBAL_RUN_CONTEXT.runner.io_manager.write_overlapped(
handle, data, file_offset
handle,
data,
file_offset,
)
except AttributeError:
raise RuntimeError("must be called from async context") from None


async def readinto_overlapped(
handle: int | CData, buffer: Buffer, file_offset: int = 0
handle: int | CData,
buffer: Buffer,
file_offset: int = 0,
) -> int:
"""TODO: these are implemented, but are currently more of a sketch than
anything real. See `#26
@@ -168,7 +175,9 @@
sys._getframe().f_locals[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
try:
return await GLOBAL_RUN_CONTEXT.runner.io_manager.readinto_overlapped(
handle, buffer, file_offset
handle,
buffer,
file_offset,
)
except AttributeError:
raise RuntimeError("must be called from async context") from None
5 changes: 4 additions & 1 deletion src/trio/_core/_generated_run.py
@@ -187,7 +187,10 @@ def spawn_system_task(
sys._getframe().f_locals[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
try:
return GLOBAL_RUN_CONTEXT.runner.spawn_system_task(
async_fn, *args, name=name, context=context
async_fn,
*args,
name=name,
context=context,
)
except AttributeError:
raise RuntimeError("must be called from async context") from None
4 changes: 2 additions & 2 deletions src/trio/_core/_io_epoll.py
@@ -205,7 +205,7 @@ class EpollIOManager:
_epoll: select.epoll = attrs.Factory(lambda: select.epoll())
# {fd: EpollWaiters}
_registered: defaultdict[int, EpollWaiters] = attrs.Factory(
lambda: defaultdict(EpollWaiters)
lambda: defaultdict(EpollWaiters),
)
_force_wakeup: WakeupSocketpair = attrs.Factory(WakeupSocketpair)
_force_wakeup_fd: int | None = None
@@ -298,7 +298,7 @@ async def _epoll_wait(self, fd: int | _HasFileNo, attr_name: str) -> None:
waiters = self._registered[fd]
if getattr(waiters, attr_name) is not None:
raise _core.BusyResourceError(
"another task is already reading / writing this fd"
"another task is already reading / writing this fd",
)
setattr(waiters, attr_name, _core.current_task())
self._update_registrations(fd)