Format using Ruff instead of Black. #2988

Closed · wants to merge 7 commits
Changes from 1 commit
6 changes: 3 additions & 3 deletions src/trio/_core/_generated_io_epoll.py
@@ -18,7 +18,7 @@
__all__ = ["notify_closing", "wait_readable", "wait_writable"]


-async def wait_readable(fd: int | _HasFileNo) -> None:
+async def wait_readable(fd: (int | _HasFileNo)) -> None:
"""Block until the kernel reports that the given object is readable.

On Unix systems, ``fd`` must either be an integer file descriptor,
@@ -47,7 +47,7 @@ async def wait_readable(fd: int | _HasFileNo) -> None:
raise RuntimeError("must be called from async context") from None


-async def wait_writable(fd: int | _HasFileNo) -> None:
+async def wait_writable(fd: (int | _HasFileNo)) -> None:
"""Block until the kernel reports that the given object is writable.

See `wait_readable` for the definition of ``fd``.
@@ -66,7 +66,7 @@ async def wait_writable(fd: int | _HasFileNo) -> None:
raise RuntimeError("must be called from async context") from None


-def notify_closing(fd: int | _HasFileNo) -> None:
+def notify_closing(fd: (int | _HasFileNo)) -> None:
"""Notify waiters of the given object that it will be closed.

Call this before closing a file descriptor (on Unix) or socket (on
6 changes: 3 additions & 3 deletions src/trio/_core/_generated_io_kqueue.py
@@ -71,7 +71,7 @@ async def wait_kevent(
raise RuntimeError("must be called from async context") from None


-async def wait_readable(fd: int | _HasFileNo) -> None:
+async def wait_readable(fd: (int | _HasFileNo)) -> None:
"""Block until the kernel reports that the given object is readable.

On Unix systems, ``fd`` must either be an integer file descriptor,
@@ -100,7 +100,7 @@ async def wait_readable(fd: int | _HasFileNo) -> None:
raise RuntimeError("must be called from async context") from None


-async def wait_writable(fd: int | _HasFileNo) -> None:
+async def wait_writable(fd: (int | _HasFileNo)) -> None:
"""Block until the kernel reports that the given object is writable.

See `wait_readable` for the definition of ``fd``.
@@ -119,7 +119,7 @@ async def wait_writable(fd: int | _HasFileNo) -> None:
raise RuntimeError("must be called from async context") from None


-def notify_closing(fd: int | _HasFileNo) -> None:
+def notify_closing(fd: (int | _HasFileNo)) -> None:
"""Notify waiters of the given object that it will be closed.

Call this before closing a file descriptor (on Unix) or socket (on
16 changes: 9 additions & 7 deletions src/trio/_core/_generated_io_windows.py
@@ -32,7 +32,7 @@
]


-async def wait_readable(sock: _HasFileNo | int) -> None:
+async def wait_readable(sock: (_HasFileNo | int)) -> None:
"""Block until the kernel reports that the given object is readable.
On Unix systems, ``sock`` must either be an integer file descriptor,
@@ -61,7 +61,7 @@ async def wait_readable(sock: _HasFileNo | int) -> None:
raise RuntimeError("must be called from async context") from None


-async def wait_writable(sock: _HasFileNo | int) -> None:
+async def wait_writable(sock: (_HasFileNo | int)) -> None:
"""Block until the kernel reports that the given object is writable.
See `wait_readable` for the definition of ``sock``.
@@ -80,7 +80,7 @@ async def wait_writable(sock: _HasFileNo | int) -> None:
raise RuntimeError("must be called from async context") from None


-def notify_closing(handle: Handle | int | _HasFileNo) -> None:
+def notify_closing(handle: (Handle | int | _HasFileNo)) -> None:
"""Notify waiters of the given object that it will be closed.
Call this before closing a file descriptor (on Unix) or socket (on
@@ -112,7 +112,7 @@ def notify_closing(handle: Handle | int | _HasFileNo) -> None:
raise RuntimeError("must be called from async context") from None


-def register_with_iocp(handle: int | CData) -> None:
+def register_with_iocp(handle: (int | CData)) -> None:
"""TODO: these are implemented, but are currently more of a sketch than
anything real. See `#26
<https://github.com/python-trio/trio/issues/26>`__ and `#52
@@ -125,7 +125,7 @@ def register_with_iocp(handle: int | CData) -> None:
raise RuntimeError("must be called from async context") from None


-async def wait_overlapped(handle_: int | CData, lpOverlapped: CData | int) -> object:
+async def wait_overlapped(
+    handle_: (int | CData), lpOverlapped: (CData | int)
+) -> object:
"""TODO: these are implemented, but are currently more of a sketch than
anything real. See `#26
<https://github.com/python-trio/trio/issues/26>`__ and `#52
@@ -141,7 +143,7 @@ async def wait_overlapped(handle_: int | CData, lpOverlapped: CData | int) -> ob


async def write_overlapped(
-    handle: int | CData, data: Buffer, file_offset: int = 0
+    handle: (int | CData), data: Buffer, file_offset: int = 0
) -> int:
"""TODO: these are implemented, but are currently more of a sketch than
anything real. See `#26
@@ -158,7 +160,7 @@ async def write_overlapped(


async def readinto_overlapped(
-    handle: int | CData, buffer: Buffer, file_offset: int = 0
+    handle: (int | CData), buffer: Buffer, file_offset: int = 0
) -> int:
"""TODO: these are implemented, but are currently more of a sketch than
anything real. See `#26
2 changes: 1 addition & 1 deletion src/trio/_core/_generated_run.py
@@ -130,7 +130,7 @@ def spawn_system_task(
async_fn: Callable[[Unpack[PosArgT]], Awaitable[object]],
*args: Unpack[PosArgT],
name: object = None,
-    context: contextvars.Context | None = None,
+    context: (contextvars.Context | None) = None,
) -> Task:
"""Spawn a "system" task.
48 changes: 40 additions & 8 deletions src/trio/_tools/gen_exports.py
@@ -92,18 +92,43 @@
Example input: ast.parse("def f(a, *, b): ...")
Example output: "(a, b=b)"
"""
-    call_args = []
+    call_args: list[str] = []
    for arg in funcdef.args.args:
        call_args.append(arg.arg)

    if funcdef.args.vararg:
        call_args.append("*" + funcdef.args.vararg.arg)

    for arg in funcdef.args.kwonlyargs:
        call_args.append(arg.arg + "=" + arg.arg)

    if funcdef.args.kwarg:
        call_args.append("**" + funcdef.args.kwarg.arg)

    return "({})".format(", ".join(call_args))


def run_ruff_format(file: File, source: str) -> tuple[bool, str]:
    result = subprocess.run(
        args=[
            sys.executable,
            "-m",
            "ruff",
            "format",
            "--stdin-filename",
            str(file.path),
            "-",
        ],
        input=source,
        capture_output=True,
        encoding="utf8",
    )

    if result.returncode != 0:
        return False, f"Failed to run ruff!\n{result.stderr}"
    return True, result.stdout


def run_ruff(file: File, source: str) -> tuple[bool, str]:
"""Run ruff on the specified file.

@@ -116,8 +141,6 @@
Raises:
ImportError: If ruff is not installed.
"""
# imported to check that `subprocess` calls will succeed
import ruff # noqa: F401

result = subprocess.run(
# "-" as a filename = use stdin, return on stdout.
@@ -154,7 +177,13 @@
SystemExit: If either failed.
"""

-    success, response = run_ruff(file, source)
+    success, response = run_ruff_format(file, source)

    if not success: # pragma: no cover
        print(response)
        sys.exit(1)

    success, response = run_ruff(file, response)
    if not success: # pragma: no cover # Test for run_ruff should catch
        print(response)
        sys.exit(1)
@@ -184,8 +213,8 @@

generated = ["".join(header)]

-    source = astor.code_to_ast.parse_file(file.path)
-    method_names = []
+    source = ast.parse(file.path.read_text())
A reviewer (Contributor) commented:

Is there a reason why we're not using astor anymore?

If it's because we're running this through an autoformatter which should do AST -> code, then maybe we could use ast.unparse instead of astor.to_source and drop astor entirely. Though I see that ast.unparse is 3.9+ (cause type hints...) so I guess maybe that's just a # TODO: ... comment for when we drop 3.8.

The PR author (Member) replied:

> Is there a reason why we're not using astor anymore?

astor is untyped. This function is literally just a wrapper around ast.parse that opens the file. Errors in my editor annoy me.
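For reference, a minimal sketch of the ast.unparse route the reviewer suggests (assuming Python 3.9+, so not usable while 3.8 is supported; file and get_public_methods are the names already used in this module):

    import ast

    # Parse the file with the stdlib instead of astor.code_to_ast.parse_file()
    # (this part is what the PR already does).
    source = ast.parse(file.path.read_text())

    # On 3.9+, ast.unparse() could regenerate the wrapper source without astor;
    # "ruff format" in run_linters() would then normalize the output.
    for method in get_public_methods(source):
        func = ast.unparse(method)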

+    method_names: list[str] = []
for method in get_public_methods(source):
# Remove self from arguments
assert method.args.args[0].arg == "self"
@@ -213,7 +242,7 @@
del method.body[1:]

# Create the function definition including the body
-        func = astor.to_source(method, indent_with=" " * 4)
+        func: str = astor.to_source(method, indent_with=" " * 4)  # type: ignore

Check failure on line 245 in src/trio/_tools/gen_exports.py (GitHub Actions / Ubuntu (3.8, check formatting), Mypy-Linux+Mac+Windows):
src/trio/_tools/gen_exports.py:245: Unused "type: ignore" comment [unused-ignore]
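A note on that failure: mypy flags the ignore as unused, presumably because astor is untyped (as the author notes above), so the call already comes back as Any and the annotated assignment type-checks on its own. The straightforward resolution, not part of this commit, would be to drop the comment and keep the annotation:

        # The str annotation alone is enough; nothing on this line needs suppressing.
        func: str = astor.to_source(method, indent_with=" " * 4)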

if is_cm: # pragma: no cover
func = func.replace("->Iterator", "->ContextManager")
@@ -249,20 +278,23 @@


def process(files: Iterable[File], *, do_test: bool) -> None:
-    new_files = {}
+    new_files: dict[str, str] = {}

    for file in files:
        print("Scanning:", file.path)
        new_source = gen_public_wrappers_source(file)
        new_source = run_linters(file, new_source)
        dirname, basename = os.path.split(file.path)
        new_path = os.path.join(dirname, PREFIX + basename)
        new_files[new_path] = new_source

    if do_test:
        if not matches_disk_files(new_files):
            print("Generated sources are outdated. Please regenerate.")
            sys.exit(1)
        else:
            print("Generated sources are up to date.")

    else:
        for new_path, new_source in new_files.items():
            with open(new_path, "w", encoding="utf-8") as f:
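process() defers to matches_disk_files, which this diff does not touch. As a rough sketch of what that check amounts to (the actual helper may differ in detail), it compares each generated source against what is currently on disk:

    import os


    def matches_disk_files(new_files: dict[str, str]) -> bool:
        # True only if every generated file already exists with exactly
        # the expected contents.
        for new_path, new_source in new_files.items():
            if not os.path.exists(new_path):
                return False
            with open(new_path, encoding="utf-8") as f:
                if f.read() != new_source:
                    return False
        return True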