chore: Update Python development tools (#1771)
achimnol committed Dec 8, 2023
1 parent 2be9476 commit f95020d
Showing 304 changed files with 6,290 additions and 6,784 deletions.
1 change: 1 addition & 0 deletions changes/1771.misc.md
@@ -0,0 +1 @@
+Update the Python development tool versions and restyle the codebase with updated Ruff (0.1.7), replacing Black with Ruff
4 changes: 2 additions & 2 deletions pants.toml
@@ -3,13 +3,13 @@ pants_version = "2.17.0"
 pythonpath = ["%(buildroot)s/tools/pants-plugins"]
 backend_packages = [
   "pants.backend.python",
-  "pants.backend.python.lint.black",
   "pants.backend.python.typecheck.mypy",
   "pants.backend.shell",
   "pants.backend.experimental.python",
-  "pants.backend.experimental.python.lint.ruff",
+  # "pants.backend.experimental.python.lint.ruff",
   "pants.backend.experimental.visibility",
   "pants.backend.plugin_development",
+  "ruff_preview", # a vendored backport of the pants 2.20's lint plugin
   "setupgen",
   "platform_resources",
   "scie",
12 changes: 8 additions & 4 deletions pyproject.toml
@@ -88,6 +88,14 @@ split-on-trailing-comma = true
"src/ai/backend/manager/config.py" = ["E402"]
"src/ai/backend/manager/models/alembic/env.py" = ["E402"]

[tool.ruff.format]
preview = true # enable the black's preview style

[tool.black]
# unused for our codebase but preserved for `pants tailor`
line-length = 100
preview = true

[tool.mypy]
ignore_missing_imports = true
implicit_optional = true # FIXME: remove after adding https://github.com/hauntsaninja/no_implicit_optional to fmt
@@ -96,7 +104,3 @@ namespace_packages = true
 explicit_package_bases = true
 python_executable = "dist/export/python/virtualenvs/python-default/3.11.6/bin/python"
 disable_error_code = ["typeddict-unknown-key"]
-
-[tool.black]
-line-length = 100
-preview = true
78 changes: 35 additions & 43 deletions src/ai/backend/accelerator/cuda_open/plugin.py
@@ -330,12 +330,10 @@ async def generate_docker_args(
                 if vol_param.startswith(vol_name + ":"):
                     _, _, permission = vol_param.split(":")
                     driver = nvidia_params["VolumeDriver"]
-                    await docker.volumes.create(
-                        {
-                            "Name": vol_name,
-                            "Driver": driver,
-                        }
-                    )
+                    await docker.volumes.create({
+                        "Name": vol_name,
+                        "Driver": driver,
+                    })
         for vol_name in required_volumes:
             for vol_param in nvidia_params["Volumes"]:
                 if vol_param.startswith(vol_name + ":"):
@@ -372,22 +370,20 @@ async def generate_docker_args(
         if self.docker_version >= (19, 3, 0):
             docker_config: Dict[str, Any] = {}
             if assigned_device_ids:
-                docker_config.update(
-                    {
-                        "HostConfig": {
-                            "DeviceRequests": [
-                                {
-                                    "Driver": "nvidia",
-                                    "DeviceIDs": assigned_device_ids,
-                                    # "all" does not work here
-                                    "Capabilities": [
-                                        ["utility", "compute", "video", "graphics", "display"],
-                                    ],
-                                },
-                            ],
-                        },
-                    }
-                )
+                docker_config.update({
+                    "HostConfig": {
+                        "DeviceRequests": [
+                            {
+                                "Driver": "nvidia",
+                                "DeviceIDs": assigned_device_ids,
+                                # "all" does not work here
+                                "Capabilities": [
+                                    ["utility", "compute", "video", "graphics", "display"],
+                                ],
+                            },
+                        ],
+                    },
+                })
             return docker_config
         else:
             return {
@@ -414,16 +410,14 @@ async def get_attached_devices(
             if device.device_id in device_ids:
                 proc = device.processing_units
                 mem = BinarySize(device.memory_size)
-                attached_devices.append(
-                    {  # TODO: update common.types.DeviceModelInfo
-                        "device_id": device.device_id,
-                        "model_name": device.model_name,
-                        "data": {
-                            "smp": proc,
-                            "mem": mem,
-                        },
-                    }
-                )
+                attached_devices.append({  # TODO: update common.types.DeviceModelInfo
+                    "device_id": device.device_id,
+                    "model_name": device.model_name,
+                    "data": {
+                        "smp": proc,
+                        "mem": mem,
+                    },
+                })
         return attached_devices
 
     async def restore_from_container(
@@ -437,17 +431,15 @@ async def restore_from_container(
         if resource_spec is None:
             return
         if hasattr(alloc_map, "apply_allocation"):
-            alloc_map.apply_allocation(
-                {
-                    SlotName("cuda.device"): resource_spec.allocations.get(
-                        DeviceName("cuda"),
-                        {},
-                    ).get(
-                        SlotName("cuda.device"),
-                        {},
-                    ),
-                }
-            )
+            alloc_map.apply_allocation({
+                SlotName("cuda.device"): resource_spec.allocations.get(
+                    DeviceName("cuda"),
+                    {},
+                ).get(
+                    SlotName("cuda.device"),
+                    {},
+                ),
+            })
         else:
             alloc_map.allocations[SlotName("cuda.device")].update(
                 resource_spec.allocations.get(
126 changes: 55 additions & 71 deletions src/ai/backend/accelerator/mock/plugin.py
@@ -89,56 +89,44 @@

PREFIX = "mock"

_format_config_iv = t.Dict(
{
t.Key("human_readable_name"): t.String,
t.Key("description"): t.String,
t.Key("display_unit"): t.String,
t.Key("number_format"): t.Dict(
{
t.Key("binary"): t.Bool,
t.Key("round_length"): t.Int[0:],
}
),
t.Key("display_icon"): t.String,
}
).allow_extra("*")


_mock_config_iv = t.Dict(
{
t.Key("slot_name"): t.String,
t.Key("device_plugin_name"): t.String,
t.Key("devices"): t.List(
t.Dict(
{
t.Key("mother_uuid"): tx.UUID,
t.Key("model_name"): t.String,
t.Key("numa_node"): t.Int[0:],
t.Key("subproc_count"): t.Int[1:],
t.Key("memory_size"): tx.BinarySize,
}
).allow_extra("*")
),
t.Key("attributes"): t.Dict({}).allow_extra("*"),
t.Key("formats"): t.Dict({}).allow_extra("*"),
}
).allow_extra("*")
_format_config_iv = t.Dict({
t.Key("human_readable_name"): t.String,
t.Key("description"): t.String,
t.Key("display_unit"): t.String,
t.Key("number_format"): t.Dict({
t.Key("binary"): t.Bool,
t.Key("round_length"): t.Int[0:],
}),
t.Key("display_icon"): t.String,
}).allow_extra("*")


_mock_config_iv = t.Dict({
t.Key("slot_name"): t.String,
t.Key("device_plugin_name"): t.String,
t.Key("devices"): t.List(
t.Dict({
t.Key("mother_uuid"): tx.UUID,
t.Key("model_name"): t.String,
t.Key("numa_node"): t.Int[0:],
t.Key("subproc_count"): t.Int[1:],
t.Key("memory_size"): tx.BinarySize,
}).allow_extra("*")
),
t.Key("attributes"): t.Dict({}).allow_extra("*"),
t.Key("formats"): t.Dict({}).allow_extra("*"),
}).allow_extra("*")

_cuda_devices_config_iv = t.List(
t.Dict(
{
t.Key("is_mig_device"): t.ToBool,
}
).allow_extra("*")
t.Dict({
t.Key("is_mig_device"): t.ToBool,
}).allow_extra("*")
)

_cuda_attributes_iv = t.Dict(
{
t.Key("nvidia_driver", default="450.00.00"): t.String,
t.Key("cuda_runtime", default="11.0"): t.String,
}
).allow_extra("*")
_cuda_attributes_iv = t.Dict({
t.Key("nvidia_driver", default="450.00.00"): t.String,
t.Key("cuda_runtime", default="11.0"): t.String,
}).allow_extra("*")


class MockPlugin(AbstractComputePlugin):
@@ -713,21 +701,19 @@ async def restore_from_container(
             return
         if hasattr(alloc_map, "apply_allocation"):
             for slot_name, _ in self.slot_types:
-                alloc_map.apply_allocation(
-                    {
-                        slot_name: resource_spec.allocations.get(
-                            self.key,
-                            {},
-                        ).get(
-                            slot_name,
-                            {
-                                dev_id: Decimal(0)
-                                for dev_id, dev_slot_info in alloc_map.device_slots.items()
-                                if dev_slot_info.slot_name == slot_name
-                            },
-                        ),
-                    }
-                )
+                alloc_map.apply_allocation({
+                    slot_name: resource_spec.allocations.get(
+                        self.key,
+                        {},
+                    ).get(
+                        slot_name,
+                        {
+                            dev_id: Decimal(0)
+                            for dev_id, dev_slot_info in alloc_map.device_slots.items()
+                            if dev_slot_info.slot_name == slot_name
+                        },
+                    ),
+                })
         else:  # older agents without lablup/backend.ai-agent#180
             if self._mode == AllocationModes.DISCRETE:
                 alloc_map.allocations[SlotName(f"{self.key}.device")].update(
@@ -775,16 +761,14 @@ async def get_attached_devices(
             else:
                 proc = device.processing_units
                 mem = BinarySize(device.memory_size)
-                attached_devices.append(
-                    {
-                        "device_id": device.device_id,
-                        "model_name": device.model_name,
-                        "data": {
-                            "smp": proc,
-                            "mem": mem,
-                        },
-                    }
-                )
+                attached_devices.append({
+                    "device_id": device.device_id,
+                    "model_name": device.model_name,
+                    "data": {
+                        "smp": proc,
+                        "mem": mem,
+                    },
+                })
         return attached_devices
 
     async def get_node_hwinfo(self) -> HardwareMetadata:
