Update black version to 24.3.0 (#3193)
* Update black version to 24.3.0

* fix black changes
dvora-h committed Mar 27, 2024
1 parent 037d108 commit 07fc339
Showing 13 changed files with 55 additions and 56 deletions.
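All of the hunks below are mechanical output from re-running the new black release: stub bodies consisting only of ... are pulled up onto the def/class line, multi-line conditional expressions gain their own parentheses, an over-long subscript assignment is split on its right-hand side, and a blank line is enforced after a module docstring. None of the hunks appear to change behaviour; they only re-wrap existing code. As a quick orientation, a hypothetical before/after (not taken from redis-py) of the most visible rule:

from typing import Protocol


class OldStyleCallback(Protocol):
    # as black 22.3.0 formatted a stub body
    def __call__(self, response: bytes):
        ...


class NewStyleCallback(Protocol):
    # as black 24.3.0 formats the same stub: "..." is hugged onto the def line
    def __call__(self, response: bytes): ...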
2 changes: 2 additions & 0 deletions .flake8
@@ -16,6 +16,8 @@ exclude =
 ignore =
     E126
     E203
+    E701
+    E704
     F405
     N801
     N802
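E701 (multiple statements on one line: colon) and E704 (statement on same line as def) are the pycodestyle checks that black 24's one-line stubs would otherwise trip, which is why they join flake8's ignore list here. A hypothetical snippet (names invented for illustration) that black 24.3.0 emits but flake8 5.0.4 would reject without the two new ignores:

from typing import Protocol


class RetryableError(Exception): ...  # would be reported as E701


class ConnectHook(Protocol):
    def __call__(self, connection) -> None: ...  # would be reported as E704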
2 changes: 1 addition & 1 deletion dev_requirements.txt
@@ -1,5 +1,5 @@
 click==8.0.4
-black==22.3.0
+black==24.3.0
 flake8==5.0.4
 flake8-isort==6.0.0
 flynt~=0.69.0
31 changes: 16 additions & 15 deletions redis/_parsers/helpers.py
@@ -819,30 +819,31 @@ def string_keys_to_dict(key_string, callback):
         lambda r, **kwargs: r,
     ),
     **string_keys_to_dict("XREAD XREADGROUP", parse_xread_resp3),
-    "ACL LOG": lambda r: [
-        {str_if_bytes(key): str_if_bytes(value) for key, value in x.items()} for x in r
-    ]
-    if isinstance(r, list)
-    else bool_ok(r),
+    "ACL LOG": lambda r: (
+        [
+            {str_if_bytes(key): str_if_bytes(value) for key, value in x.items()}
+            for x in r
+        ]
+        if isinstance(r, list)
+        else bool_ok(r)
+    ),
     "COMMAND": parse_command_resp3,
     "CONFIG GET": lambda r: {
-        str_if_bytes(key)
-        if key is not None
-        else None: str_if_bytes(value)
-        if value is not None
-        else None
+        str_if_bytes(key) if key is not None else None: (
+            str_if_bytes(value) if value is not None else None
+        )
         for key, value in r.items()
     },
     "MEMORY STATS": lambda r: {str_if_bytes(key): value for key, value in r.items()},
     "SENTINEL MASTER": parse_sentinel_state_resp3,
     "SENTINEL MASTERS": parse_sentinel_masters_resp3,
     "SENTINEL SENTINELS": parse_sentinel_slaves_and_sentinels_resp3,
     "SENTINEL SLAVES": parse_sentinel_slaves_and_sentinels_resp3,
-    "STRALGO": lambda r, **options: {
-        str_if_bytes(key): str_if_bytes(value) for key, value in r.items()
-    }
-    if isinstance(r, dict)
-    else str_if_bytes(r),
+    "STRALGO": lambda r, **options: (
+        {str_if_bytes(key): str_if_bytes(value) for key, value in r.items()}
+        if isinstance(r, dict)
+        else str_if_bytes(r)
+    ),
     "XINFO CONSUMERS": lambda r: [
         {str_if_bytes(key): value for key, value in x.items()} for x in r
     ],
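Only the wrapping changes here: black 24 parenthesizes the conditional expressions instead of letting the if/else dangle past the closing bracket, and the ACL LOG comprehension moves one indent level deeper, which pushes its for clause onto its own line. A small self-contained sketch, with str_if_bytes re-implemented locally as a stand-in for the redis-py helper, showing that the reformatted CONFIG GET callback still produces the same mapping:

def str_if_bytes(value):
    # Local stand-in for the helper used by redis/_parsers/helpers.py.
    if isinstance(value, bytes):
        return value.decode("utf-8", errors="replace")
    return value


RESP3_CALLBACKS = {
    # Same shape as the reformatted entry above.
    "CONFIG GET": lambda r: {
        str_if_bytes(key) if key is not None else None: (
            str_if_bytes(value) if value is not None else None
        )
        for key, value in r.items()
    },
}

print(RESP3_CALLBACKS["CONFIG GET"]({b"maxmemory": b"0", b"save": None}))
# {'maxmemory': '0', 'save': None}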
12 changes: 4 additions & 8 deletions redis/asyncio/client.py
@@ -88,13 +88,11 @@


 class ResponseCallbackProtocol(Protocol):
-    def __call__(self, response: Any, **kwargs):
-        ...
+    def __call__(self, response: Any, **kwargs): ...


 class AsyncResponseCallbackProtocol(Protocol):
-    async def __call__(self, response: Any, **kwargs):
-        ...
+    async def __call__(self, response: Any, **kwargs): ...


 ResponseCallbackT = Union[ResponseCallbackProtocol, AsyncResponseCallbackProtocol]
@@ -1220,13 +1218,11 @@ async def run(


 class PubsubWorkerExceptionHandler(Protocol):
-    def __call__(self, e: BaseException, pubsub: PubSub):
-        ...
+    def __call__(self, e: BaseException, pubsub: PubSub): ...


 class AsyncPubsubWorkerExceptionHandler(Protocol):
-    async def __call__(self, e: BaseException, pubsub: PubSub):
-        ...
+    async def __call__(self, e: BaseException, pubsub: PubSub): ...


 PSWorkerThreadExcHandlerT = Union[
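These Protocol classes are structural types, so the collapsed ... bodies carry no behaviour; any callable with a matching signature still satisfies ResponseCallbackT or PSWorkerThreadExcHandlerT. A hedged sketch of both callback flavours (hypothetical functions, not part of redis-py):

from typing import Any


def parse_ok(response: Any, **kwargs) -> bool:
    # Plain function: structurally satisfies ResponseCallbackProtocol.
    return response in (b"OK", "OK")


async def parse_ok_async(response: Any, **kwargs) -> bool:
    # Coroutine function: structurally satisfies AsyncResponseCallbackProtocol.
    return parse_ok(response, **kwargs)

Either flavour can then be handed to the client (for example via set_response_callback), which is why ResponseCallbackT is a Union of the two protocols.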
14 changes: 7 additions & 7 deletions redis/asyncio/cluster.py
@@ -402,10 +402,10 @@ def __init__(
         self.command_flags = self.__class__.COMMAND_FLAGS.copy()
         self.response_callbacks = kwargs["response_callbacks"]
         self.result_callbacks = self.__class__.RESULT_CALLBACKS.copy()
-        self.result_callbacks[
-            "CLUSTER SLOTS"
-        ] = lambda cmd, res, **kwargs: parse_cluster_slots(
-            list(res.values())[0], **kwargs
+        self.result_callbacks["CLUSTER SLOTS"] = (
+            lambda cmd, res, **kwargs: parse_cluster_slots(
+                list(res.values())[0], **kwargs
+            )
         )

         self._initialize = True
@@ -1318,9 +1318,9 @@ async def initialize(self) -> None:
                             )
                             tmp_slots[i].append(target_replica_node)
                             # add this node to the nodes cache
-                            tmp_nodes_cache[
-                                target_replica_node.name
-                            ] = target_replica_node
+                            tmp_nodes_cache[target_replica_node.name] = (
+                                target_replica_node
+                            )
                     else:
                         # Validate that 2 nodes want to use the same slot cache
                         # setup
14 changes: 7 additions & 7 deletions redis/asyncio/connection.py
@@ -87,13 +87,11 @@ class _Sentinel(enum.Enum):


 class ConnectCallbackProtocol(Protocol):
-    def __call__(self, connection: "AbstractConnection"):
-        ...
+    def __call__(self, connection: "AbstractConnection"): ...


 class AsyncConnectCallbackProtocol(Protocol):
-    async def __call__(self, connection: "AbstractConnection"):
-        ...
+    async def __call__(self, connection: "AbstractConnection"): ...


 ConnectCallbackT = Union[ConnectCallbackProtocol, AsyncConnectCallbackProtocol]
@@ -319,9 +317,11 @@ async def connect(self):
                 await self.on_connect()
             else:
                 # Use the passed function redis_connect_func
-                await self.redis_connect_func(self) if asyncio.iscoroutinefunction(
-                    self.redis_connect_func
-                ) else self.redis_connect_func(self)
+                (
+                    await self.redis_connect_func(self)
+                    if asyncio.iscoroutinefunction(self.redis_connect_func)
+                    else self.redis_connect_func(self)
+                )
         except RedisError:
             # clean up after any error in on_connect
             await self.disconnect()
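The new parentheses in connect() wrap an existing expression statement: the callback is awaited when it is a coroutine function and called directly otherwise. A minimal stand-alone sketch of that dispatch pattern (toy names, not the redis-py implementation):

import asyncio


async def dispatch_connect(connection, connect_func):
    # Await coroutine callbacks, call plain ones directly: the same
    # conditional-await expression that black 24 now wraps in parentheses.
    (
        await connect_func(connection)
        if asyncio.iscoroutinefunction(connect_func)
        else connect_func(connection)
    )


def on_connect_sync(conn):
    print("sync connect callback for", conn)


async def on_connect_async(conn):
    print("async connect callback for", conn)


async def main():
    await dispatch_connect("conn-1", on_connect_sync)
    await dispatch_connect("conn-2", on_connect_async)


asyncio.run(main())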
8 changes: 5 additions & 3 deletions redis/asyncio/sentinel.py
@@ -108,9 +108,11 @@ class SentinelConnectionPool(ConnectionPool):
     def __init__(self, service_name, sentinel_manager, **kwargs):
         kwargs["connection_class"] = kwargs.get(
             "connection_class",
-            SentinelManagedSSLConnection
-            if kwargs.pop("ssl", False)
-            else SentinelManagedConnection,
+            (
+                SentinelManagedSSLConnection
+                if kwargs.pop("ssl", False)
+                else SentinelManagedConnection
+            ),
         )
         self.is_master = kwargs.pop("is_master", True)
         self.check_connection = kwargs.pop("check_connection", False)
6 changes: 3 additions & 3 deletions redis/cluster.py
@@ -1582,9 +1582,9 @@ def initialize(self):
                             )
                             tmp_slots[i].append(target_replica_node)
                             # add this node to the nodes cache
-                            tmp_nodes_cache[
-                                target_replica_node.name
-                            ] = target_replica_node
+                            tmp_nodes_cache[target_replica_node.name] = (
+                                target_replica_node
+                            )
                     else:
                         # Validate that 2 nodes want to use the same slot cache
                         # setup
4 changes: 1 addition & 3 deletions redis/commands/core.py
@@ -3399,9 +3399,7 @@ def smembers(self, name: str) -> Union[Awaitable[Set], Set]:
"""
return self.execute_command("SMEMBERS", name, keys=[name])

def smismember(
self, name: str, values: List, *args: List
) -> Union[
def smismember(self, name: str, values: List, *args: List) -> Union[
Awaitable[List[Union[Literal[0], Literal[1]]]],
List[Union[Literal[0], Literal[1]]],
]:
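Only the signature of smismember is re-wrapped; the command still returns one 0/1 flag per requested value, as the Literal return type spells out. A usage sketch, assuming a Redis server is reachable on localhost:6379:

import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=True)
r.sadd("langs", "python", "go")

# One flag per value, in request order: 1 if it is a set member, else 0.
print(r.smismember("langs", ["python", "rust"]))  # expected: [1, 0]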
3 changes: 1 addition & 2 deletions redis/exceptions.py
@@ -217,5 +217,4 @@ class SlotNotCoveredError(RedisClusterException):
     pass


-class MaxConnectionsError(ConnectionError):
-    ...
+class MaxConnectionsError(ConnectionError): ...
8 changes: 5 additions & 3 deletions redis/sentinel.py
@@ -145,9 +145,11 @@ class SentinelConnectionPool(ConnectionPool):
     def __init__(self, service_name, sentinel_manager, **kwargs):
         kwargs["connection_class"] = kwargs.get(
             "connection_class",
-            SentinelManagedSSLConnection
-            if kwargs.pop("ssl", False)
-            else SentinelManagedConnection,
+            (
+                SentinelManagedSSLConnection
+                if kwargs.pop("ssl", False)
+                else SentinelManagedConnection
+            ),
         )
         self.is_master = kwargs.pop("is_master", True)
         self.check_connection = kwargs.pop("check_connection", False)
6 changes: 2 additions & 4 deletions redis/typing.py
@@ -54,12 +54,10 @@
 class CommandsProtocol(Protocol):
     connection_pool: Union["AsyncConnectionPool", "ConnectionPool"]

-    def execute_command(self, *args, **options):
-        ...
+    def execute_command(self, *args, **options): ...


 class ClusterCommandsProtocol(CommandsProtocol, Protocol):
     encoder: "Encoder"

-    def execute_command(self, *args, **options) -> Union[Any, Awaitable]:
-        ...
+    def execute_command(self, *args, **options) -> Union[Any, Awaitable]: ...
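As with the other protocols in this commit, collapsing the stub bodies leaves the structural contract intact: anything that exposes a connection_pool attribute and an execute_command method still type-checks as CommandsProtocol. A minimal sketch (FakeClient is invented here for illustration):

from typing import Any


class FakeClient:
    # Structurally satisfies CommandsProtocol: it has connection_pool and
    # execute_command, without inheriting anything from redis.typing.
    connection_pool: Any = None

    def execute_command(self, *args: Any, **options: Any) -> Any:
        return args


print(FakeClient().execute_command("PING"))  # ('PING',)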
1 change: 1 addition & 0 deletions tests/test_asyncio/test_commands.py
@@ -1,6 +1,7 @@
"""
Tests async overrides of commands from their mixins
"""

import asyncio
import binascii
import datetime
