diff --git a/userbenchmark/dynamo/dynamobench/_dynamo/testing.py b/userbenchmark/dynamo/dynamobench/_dynamo/testing.py
index 3f5dd0255b..04aa008446 100644
--- a/userbenchmark/dynamo/dynamobench/_dynamo/testing.py
+++ b/userbenchmark/dynamo/dynamobench/_dynamo/testing.py
@@ -16,7 +16,6 @@
     Optional,
     overload,
     Sequence,
-    Tuple,
     TypeVar,
     Union,
 )
@@ -141,7 +140,7 @@ def reduce_to_scalar_loss(out: torch.Tensor) -> torch.Tensor:
 
 @overload
 def reduce_to_scalar_loss(
-    out: Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]
+    out: Union[List[Any], tuple[Any, ...], Dict[Any, Any]]
 ) -> float:
     ...
 
diff --git a/userbenchmark/dynamo/dynamobench/_dynamo/utils.py b/userbenchmark/dynamo/dynamobench/_dynamo/utils.py
index d5c2206392..bac180e137 100644
--- a/userbenchmark/dynamo/dynamobench/_dynamo/utils.py
+++ b/userbenchmark/dynamo/dynamobench/_dynamo/utils.py
@@ -54,7 +54,6 @@
     Optional,
     overload,
     Set,
-    Tuple,
     Type,
     TypeVar,
     Union,
@@ -106,7 +105,7 @@
 
 # NOTE: Make sure `NP_SUPPORTED_MODULES` and `NP_TO_TNP_MODULE` are in sync.
 if np:
-    NP_SUPPORTED_MODULES: Tuple[types.ModuleType, ...] = (
+    NP_SUPPORTED_MODULES: tuple[types.ModuleType, ...] = (
         np,
         np.fft,
         np.linalg,
@@ -202,8 +201,8 @@ def log(cls):
 
 
 def tabulate(
-    rows: Union[List[Tuple[str, object]], List[List[object]]],
-    headers: Union[Tuple[str, ...], List[str]],
+    rows: Union[List[tuple[str, object]], List[List[object]]],
+    headers: Union[tuple[str, ...], List[str]],
 ) -> str:
     try:
         import tabulate
@@ -590,7 +589,7 @@ def compile_times(repr: Literal["str"], aggregate: bool = False) -> str:
 
 @overload
 def compile_times(
     repr: Literal["csv"], aggregate: bool = False
-) -> Tuple[List[str], List[object]]:
+) -> tuple[List[str], List[object]]:
     ...
@@ -658,7 +657,7 @@ def __init__(self, maxsize: int = 4096) -> None:
     def reset(self):
         self.set = OrderedDict()
 
-    def add(self, key: Union[str, Tuple[object, object]]) -> bool:
+    def add(self, key: Union[str, tuple[object, object]]) -> bool:
         if key in self.set:
             self.set.move_to_end(key, last=True)
             if not config.verbose:
@@ -797,7 +796,7 @@ def istype(obj: object, allowed_types: Type[T]) -> TypeIs[T]:
 
 @overload
 def istype(
-    obj: object, allowed_types: Tuple[Type[List[T]], Type[Tuple[T, ...]]]
+    obj: object, allowed_types: tuple[Type[List[T]], Type[tuple[T, ...]]]
 ) -> TypeIs[T]:
     ...
 
@@ -940,7 +939,7 @@ def is_numpy_ndarray(value):
 
 def istensor(obj):
     """Check of obj is a tensor"""
-    tensor_list: Tuple[type, ...] = (
+    tensor_list: tuple[type, ...] = (
         torch.Tensor,
         torch.nn.Parameter,
         *config.traceable_tensor_subclasses,
@@ -1900,7 +1899,7 @@ def is_namedtuple_cls(cls):
 
 
 @functools.lru_cache(1)
-def namedtuple_fields(cls) -> Tuple[str, ...]:
+def namedtuple_fields(cls) -> tuple[str, ...]:
     """Get the fields of a namedtuple or a torch.return_types.* quasi-namedtuple"""
     if cls is slice:
         return ("start", "stop", "step")
@@ -2188,7 +2187,7 @@ def tuple_iterator_getitem(it, index):
 iter_next = next
 
 
-def normalize_range_iter(range_iter) -> Tuple[int, int, int]:
+def normalize_range_iter(range_iter) -> tuple[int, int, int]:
     _, (range_obj,), maybe_idx = range_iter.__reduce__()
     # In 3.12+, `maybe_idx` could be None, and `range_obj.start` would've been
     # already incremented by the current index.
@@ -3070,7 +3069,7 @@ def tensor_always_has_static_shape(
     tensor: Union[torch.Tensor, Any],
     is_tensor: bool,
     tensor_source: Source,
-) -> Tuple[bool, Optional[TensorStaticReason]]:
+) -> tuple[bool, Optional[TensorStaticReason]]:
     """
     Given a tensor, source, and is_tensor flag, determine if a shape
     should be static.