Convert directory fbcode/deeplearning to use the Ruff Formatter
Summary:
X-link: flashlight/wav2letter#1024

X-link: flashlight/text#88

X-link: flashlight/flashlight#1176

X-link: pytorch/FBGEMM#3242

Converts the specified directory to use the Ruff formatter in pyfmt

ruff_dog

If this diff causes merge conflicts when rebasing, please run
`hg status -n -0 --change . -I '**/*.{py,pyi}' | xargs -0 arc pyfmt`
on your diff, and amend any changes before rebasing onto latest.
That should help reduce or eliminate any merge conflicts.
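
For an open-source checkout without Meta's pyfmt/arc tooling, a rough equivalent (assuming Ruff is installed) is to run `ruff format --diff <path>` to preview what the formatter would change and `ruff format <path>` to apply it, where `<path>` is a placeholder for the directory being converted.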

allow-large-files
bypass-github-export-checks

Differential Revision: D63766623
Thomas Polasek authored and facebook-github-bot committed Oct 11, 2024
1 parent 0bb9b13 commit 4e9eddb
Showing 31 changed files with 31 additions and 103 deletions.
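
The changes themselves are purely mechanical, and two patterns account for most of the diff: a stray blank line immediately after a block header (`def`, `class`, `if`/`else`, `with`) is removed, and long assert messages that were previously wrapped across the `isinstance(...)` call are instead wrapped in their own parentheses. The snippet below is a toy before/after sketch of both patterns; it is illustrative only and not taken from the CrypTen sources.

```python
# Hypothetical example of the formatting change; not CrypTen code.

# Before (old pyfmt style):
#
#     def forward(ctx, *args, **kwargs):
#
#         # preprocess inputs:
#         assert isinstance(
#             args[0], str
#         ), "Unsupported input type %s" % type(args[0])


# After (Ruff formatter style): the blank line after the signature is gone and
# the assert message is wrapped in its own parentheses.
def forward(ctx, *args, **kwargs):
    # preprocess inputs:
    assert isinstance(args[0], str), (
        "Unsupported input type %s" % type(args[0])
    )
    return args
```
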
1 change: 0 additions & 1 deletion benchmarks/models.py
@@ -8,7 +8,6 @@
Contains models used for benchmarking
"""


from dataclasses import dataclass
from typing import Any

1 change: 0 additions & 1 deletion benchmarks/profiler.py
@@ -10,7 +10,6 @@
Run via Jupyter
"""


from benchmark import ModelBenchmarks


1 change: 0 additions & 1 deletion crypten/communicator/communicator.py
@@ -162,7 +162,6 @@ def _logging(func):

@wraps(func)
def logging_wrapper(self, *args, **kwargs):

# TODO: Replace this
# - hacks the inputs into some of the functions for world_size 1:
world_size = self.get_world_size()
1 change: 0 additions & 1 deletion crypten/communicator/in_process_communicator.py
@@ -17,7 +17,6 @@


class InProcessCommunicator(Communicator):

BYTES_PER_ELEMENT = 8
tls = threading.local()
mailbox = None
11 changes: 2 additions & 9 deletions crypten/cryptensor.py
@@ -78,16 +78,12 @@ def register_cryptensor_cls(cls):
if name in CrypTensor.__CRYPTENSOR_TYPES__:
raise ValueError(
"Cannot register duplicate CrypTensor type: \
tensor type {} already exists.".format(
name
)
tensor type {} already exists.".format(name)
)
if not issubclass(cls, CrypTensor):
raise ValueError(
"Registered tensor ({}: {}) must extend \
CrypTensor".format(
name, cls.__name__
)
CrypTensor".format(name, cls.__name__)
)
CrypTensor.__CRYPTENSOR_TYPES__[name] = cls
return cls
@@ -222,7 +218,6 @@ def backward(self, grad_input=None, top_node=True):
"""
if self.requires_grad:
with CrypTensor.no_grad(): # disable autograd for backward pass

# in initial backward call, identify all required nodes:
if top_node:
self._identify_required_grads()
@@ -302,7 +297,6 @@ def __torch_function__(self, func, types, args=(), kwargs=None):
)

def _get_forward_function_no_ctx(self, grad_fn):

# determine if self is a dummy object (the case for staticmethods):
is_dummy = getattr(self, "__IS_DUMMY__", False)

@@ -320,7 +314,6 @@ def autograd_forward_no_ctx(*args, **kwargs):
return autograd_forward_no_ctx

def _get_autograd_forward_function(self, name, grad_fn, in_place):

# determine if self is a dummy object (the case for staticmethods):
is_dummy = getattr(self, "__IS_DUMMY__", False)

11 changes: 0 additions & 11 deletions crypten/gradients.py
@@ -408,7 +408,6 @@ def backward(ctx, grad_output):
class AutogradSqueeze(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):

# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
@@ -497,7 +496,6 @@ def backward(ctx, grad_output):
class AutogradDropout(AutogradFunction):
@staticmethod
def forward(ctx, input, p=0.5, training=True, inplace=False):

if training and inplace:
logging.warning(
"CrypTen dropout does not support inplace computation during training."
@@ -534,7 +532,6 @@ def backward(ctx, grad_output):
class AutogradFeatureDropout(AutogradFunction):
@staticmethod
def forward(ctx, input, p=0.5, training=True, inplace=False):

if training and inplace:
logging.warning(
"CrypTen _feature_dropout does not support inplace computation during training."
@@ -1133,7 +1130,6 @@ def backward(ctx, grad_output):
class AutogradSum(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):

# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
@@ -1192,7 +1188,6 @@ def backward(ctx, grad_output):
class AutogradMean(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):

# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
@@ -1230,7 +1225,6 @@ def backward(ctx, grad_output):
class AutogradVariance(AutogradFunction):
@staticmethod
def forward(ctx, self, *args, **kwargs):

# preprocess inputs:
if len(args) == 0:
dim = None
@@ -1287,7 +1281,6 @@ def backward(ctx, grad_output):
class AutogradMin(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):

# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
@@ -1335,7 +1328,6 @@ def backward(ctx, grad_output):
class AutogradMax(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):

# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
@@ -1453,7 +1445,6 @@ def backward(ctx, grad_output):
class AutogradAvgPool2D(AutogradFunction):
@staticmethod
def forward(ctx, input, kernel_size, stride=None, padding=0, ceil_mode=False):

# preprocess inputs:
if stride is None:
stride = kernel_size
@@ -1528,7 +1519,6 @@ def forward(
ceil_mode=False,
return_indices=False,
):

# preprocess inputs:
if stride is None:
stride = kernel_size
@@ -1887,7 +1877,6 @@ def backward(ctx, grad_output):
grad_output = grad_output.mul(weight)
grad_input = grad_output.mul(inv_var)
if training:

# compute gradient term that is due to the mean:
num_element = reduce(
lambda x, y: x * y, [grad_output.size(d) for d in stats_dimensions]
18 changes: 9 additions & 9 deletions crypten/mpc/primitives/arithmetic.py
@@ -223,9 +223,9 @@ def __setitem__(self, index, value):
"""Set tensor values by index"""
if isinstance(value, (int, float)) or is_tensor(value):
value = ArithmeticSharedTensor(value)
assert isinstance(
value, ArithmeticSharedTensor
), "Unsupported input type %s for __setitem__" % type(value)
assert isinstance(value, ArithmeticSharedTensor), (
"Unsupported input type %s for __setitem__" % type(value)
)
self.share.__setitem__(index, value.share)

def pad(self, pad, mode="constant", value=0):
@@ -268,9 +268,9 @@ def stack(tensors, *args, **kwargs):
for i, tensor in enumerate(tensors):
if is_tensor(tensor):
tensors[i] = ArithmeticSharedTensor(tensor)
assert isinstance(
tensors[i], ArithmeticSharedTensor
), "Can't stack %s with ArithmeticSharedTensor" % type(tensor)
assert isinstance(tensors[i], ArithmeticSharedTensor), (
"Can't stack %s with ArithmeticSharedTensor" % type(tensor)
)

result = tensors[0].shallow_copy()
result.share = torch_stack(
@@ -630,9 +630,9 @@ def scatter_(self, dim, index, src):
"""
if is_tensor(src):
src = ArithmeticSharedTensor(src)
assert isinstance(
src, ArithmeticSharedTensor
), "Unrecognized scatter src type: %s" % type(src)
assert isinstance(src, ArithmeticSharedTensor), (
"Unrecognized scatter src type: %s" % type(src)
)
self.share.scatter_(dim, index, src.share)
return self

12 changes: 6 additions & 6 deletions crypten/mpc/primitives/binary.py
@@ -318,9 +318,9 @@ def __setitem__(self, index, value):
"""Set tensor values by index"""
if is_tensor(value) or isinstance(value, list):
value = BinarySharedTensor(value)
assert isinstance(
value, BinarySharedTensor
), "Unsupported input type %s for __setitem__" % type(value)
assert isinstance(value, BinarySharedTensor), (
"Unsupported input type %s for __setitem__" % type(value)
)
self.share.__setitem__(index, value.share)

@staticmethod
@@ -436,9 +436,9 @@ def scatter_(self, dim, index, src):
"""
if is_tensor(src):
src = BinarySharedTensor(src)
assert isinstance(
src, BinarySharedTensor
), "Unrecognized scatter src type: %s" % type(src)
assert isinstance(src, BinarySharedTensor), (
"Unrecognized scatter src type: %s" % type(src)
)
self.share.scatter_(dim, index, src.share)
return self

1 change: 0 additions & 1 deletion crypten/mpc/primitives/converters.py
@@ -16,7 +16,6 @@


def _A2B(arithmetic_tensor):

# first try memory-inefficient implementation that takes O(log P) rounds:
try:
binary_tensor = BinarySharedTensor.stack(
1 change: 0 additions & 1 deletion crypten/mpc/provider/ttp_provider.py
@@ -309,7 +309,6 @@ def _get_binary_PRSS(self, size, bitlength=None, remove_rank=None):
return result

def additive(self, size0, size1, op, *args, **kwargs):

# Add all shares of `a` and `b` to get plaintext `a` and `b`
a = self._get_additive_PRSS(size0)
b = self._get_additive_PRSS(size1)
12 changes: 2 additions & 10 deletions crypten/nn/module.py
@@ -470,7 +470,6 @@ def _apply(self, fn):
def encrypt(self, mode=True, src=0):
"""Encrypts the model."""
if mode != self.encrypted:

# encrypt / decrypt parameters:
self.encrypted = mode
for name, param in self.named_parameters(recurse=False):
@@ -705,7 +704,6 @@ def _clear_unused_values():
_mark_as_computed(input_name)
node_to_compute = _find_computable_node()
while node_to_compute is not None:

# compute output of module:
input = [values[name] for name in self._graph[node_to_compute]]
if len(input) == 1:
@@ -726,8 +724,8 @@ def _clear_unused_values():
assert isinstance(
output, tuple
), f"expected outputs {output_names} of {module} to be tuple, not {type(output)}"
assert len(output_names) == len(
output
assert (
len(output_names) == len(output)
), f"expected {len(output_names)} outputs from {module}, received {len(output)}"
for node, value in zip(output_names, output):
values[node] = value
@@ -1381,7 +1379,6 @@ class Expand(Module):
"""

def forward(self, x):

# unpack inputs:
input, shape = tuple(x)
if torch.is_tensor(shape):
@@ -1966,7 +1963,6 @@ def __init__(self, stride, padding, dilation, groups=1):
self.groups = groups

def forward(self, x):

# unpack inputs:
if len(x) == 2:
x, weight = x
@@ -2010,7 +2006,6 @@ def forward(self, x):

@staticmethod
def from_onnx(attributes=None):

# check attribute inputs:
if attributes is None:
attributes = {}
@@ -2124,7 +2119,6 @@ def __init__(
groups=1,
bias=True,
):

# check inputs:
super().__init__()
assert isinstance(stride, int), "stride must be an integer"
@@ -2558,7 +2552,6 @@ def forward(self, x):

@staticmethod
def from_onnx(pool_type, attributes=None):

# check attributes:
if attributes is None:
attributes = {}
@@ -2887,7 +2880,6 @@ def forward(self, input):

@staticmethod
def from_onnx(parameters=None, attributes=None):

# preprocess all attributes:
if parameters is None:
parameters = {}
25 changes: 12 additions & 13 deletions crypten/nn/onnx_converter.py
@@ -178,7 +178,6 @@ def _to_crypten(onnx_model):

# loop over all nodes:
for node in onnx_model.graph.node:

# get attributes and node type:
attributes = {attr.name: _get_attribute_value(attr) for attr in node.attribute}
crypten_class = _get_operator_class(node.op_type, attributes)
@@ -271,21 +270,21 @@ def _update_onnx_symbolic_registry():
for version_key, version_val in sym_registry._registry.items():
for function_key in version_val.keys():
if function_key == "softmax":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_softmax
sym_registry._registry[version_key][function_key] = (
_onnx_crypten_softmax
)
if function_key == "log_softmax":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_logsoftmax
sym_registry._registry[version_key][function_key] = (
_onnx_crypten_logsoftmax
)
if function_key == "dropout":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_dropout
sym_registry._registry[version_key][function_key] = (
_onnx_crypten_dropout
)
if function_key == "feature_dropout":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_feature_dropout
sym_registry._registry[version_key][function_key] = (
_onnx_crypten_feature_dropout
)
else:
# Update ONNX symbolic registry using torch.onnx.register_custom_op_symbolic
torch.onnx.register_custom_op_symbolic(
1 change: 0 additions & 1 deletion crypten/nn/tensorboard.py
@@ -50,7 +50,6 @@ def graph(model):

# loop all graph connections:
for output_name, input_names in model._graph.items():

# get parameters and type of module:
module = modules[output_name]
op = str(type(module))
1 change: 0 additions & 1 deletion examples/bandits/launcher.py
@@ -380,7 +380,6 @@ def build_learner(args, bandits, download_mnist):
logging.info("Loading clusters from file...")
clusters = torch.load(clusters_file)
else:

# load data and allocate clusters:
context, _ = load_data(
split=args.mnist_split, pca=pca, download_mnist_func=download_mnist
1 change: 0 additions & 1 deletion examples/bandits/plain_contextual_bandits.py
@@ -50,7 +50,6 @@ def online_learner(

# initialization of model parameters:
if idx == 0:

# initialize accumulators for linear least squares:
A_inv = torch.stack(
[
1 change: 0 additions & 1 deletion examples/bandits/private_contextual_bandits.py
@@ -62,7 +62,6 @@ def online_learner(

# initialization of model parameters:
if idx == 0:

# initialize accumulators for linear least squares:
A_inv = [torch.eye(num_features).unsqueeze(0) for _ in range(num_arms)]
A_inv = crypten.cat([crypten.cryptensor(A) for A in A_inv])
(The remaining changed files are not shown here.)
