
Commit 4e9eddb

Thomas Polasek authored and facebook-github-bot committed
Convert directory fbcode/deeplearning to use the Ruff Formatter
Summary:

X-link: flashlight/wav2letter#1024
X-link: flashlight/text#88
X-link: flashlight/flashlight#1176
X-link: pytorch/FBGEMM#3242

Converts the specified directory to use the Ruff formatter in pyfmt (ruff_dog).

If this diff causes merge conflicts when rebasing, please run
`hg status -n -0 --change . -I '**/*.{py,pyi}' | xargs -0 arc pyfmt`
on your diff, and amend any changes before rebasing onto latest. That should help reduce or eliminate any merge conflicts.

allow-large-files
bypass-github-export-checks

Differential Revision: D63766623
1 parent 0bb9b13 commit 4e9eddb

31 files changed: +31 additions, −103 deletions
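Most of the hunks below apply one of two mechanical rewrites: Ruff drops a blank line left at the top of a function body, and it collapses or rewraps calls that fit within the line-length limit. A minimal before/after sketch of the blank-line rule (illustrative only; `items` and the function names are hypothetical, not from this commit):

    def normalize_before(items):

        # Before: a stray blank line sits directly under the `def`.
        return [x / sum(items) for x in items]


    def normalize_after(items):
        # After: Ruff removes the leading blank line inside the body.
        return [x / sum(items) for x in items]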

benchmarks/models.py

Lines changed: 0 additions & 1 deletion
@@ -8,7 +8,6 @@
 Contains models used for benchmarking
 """

-
 from dataclasses import dataclass
 from typing import Any
 

benchmarks/profiler.py

Lines changed: 0 additions & 1 deletion
@@ -10,7 +10,6 @@
 Run via Jupyter
 """

-
 from benchmark import ModelBenchmarks
 
 

crypten/communicator/communicator.py

Lines changed: 0 additions & 1 deletion
@@ -162,7 +162,6 @@ def _logging(func):

     @wraps(func)
     def logging_wrapper(self, *args, **kwargs):
-
         # TODO: Replace this
         # - hacks the inputs into some of the functions for world_size 1:
         world_size = self.get_world_size()

crypten/communicator/in_process_communicator.py

Lines changed: 0 additions & 1 deletion
@@ -17,7 +17,6 @@


 class InProcessCommunicator(Communicator):
-
     BYTES_PER_ELEMENT = 8
     tls = threading.local()
     mailbox = None

crypten/cryptensor.py

Lines changed: 2 additions & 9 deletions
@@ -78,16 +78,12 @@ def register_cryptensor_cls(cls):
         if name in CrypTensor.__CRYPTENSOR_TYPES__:
             raise ValueError(
                 "Cannot register duplicate CrypTensor type: \
-                tensor type {} already exists.".format(
-                    name
-                )
+                tensor type {} already exists.".format(name)
             )
         if not issubclass(cls, CrypTensor):
             raise ValueError(
                 "Registered tensor ({}: {}) must extend \
-                CrypTensor".format(
-                    name, cls.__name__
-                )
+                CrypTensor".format(name, cls.__name__)
             )
         CrypTensor.__CRYPTENSOR_TYPES__[name] = cls
         return cls
@@ -222,7 +218,6 @@ def backward(self, grad_input=None, top_node=True):
         """
         if self.requires_grad:
             with CrypTensor.no_grad():  # disable autograd for backward pass
-
                 # in initial backward call, identify all required nodes:
                 if top_node:
                     self._identify_required_grads()
@@ -302,7 +297,6 @@ def __torch_function__(self, func, types, args=(), kwargs=None):
         )

     def _get_forward_function_no_ctx(self, grad_fn):
-
         # determine if self is a dummy object (the case for staticmethods):
         is_dummy = getattr(self, "__IS_DUMMY__", False)

@@ -320,7 +314,6 @@ def autograd_forward_no_ctx(*args, **kwargs):
         return autograd_forward_no_ctx

     def _get_autograd_forward_function(self, name, grad_fn, in_place):
-
         # determine if self is a dummy object (the case for staticmethods):
         is_dummy = getattr(self, "__IS_DUMMY__", False)

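The two hunks at the top of this file show the second Ruff rule in action: a `.format(...)` call whose arguments fit on one line is joined onto that line. A hedged sketch of the same rewrite in isolation (`name` and `message` are stand-ins, not from the diff):

    name = "mpc"

    # Spread over three lines, as the old formatter left it:
    message = "tensor type {} already exists.".format(
        name
    )

    # Joined by Ruff, since the call fits within the line-length limit:
    message = "tensor type {} already exists.".format(name)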

crypten/gradients.py

Lines changed: 0 additions & 11 deletions
@@ -408,7 +408,6 @@ def backward(ctx, grad_output):
 class AutogradSqueeze(AutogradFunction):
     @staticmethod
     def forward(ctx, *args, **kwargs):
-
         # preprocess inputs:
         assert len(args) >= 1
         if len(args) == 1:
@@ -497,7 +496,6 @@ def backward(ctx, grad_output):
 class AutogradDropout(AutogradFunction):
     @staticmethod
     def forward(ctx, input, p=0.5, training=True, inplace=False):
-
         if training and inplace:
             logging.warning(
                 "CrypTen dropout does not support inplace computation during training."
@@ -534,7 +532,6 @@ def backward(ctx, grad_output):
 class AutogradFeatureDropout(AutogradFunction):
     @staticmethod
     def forward(ctx, input, p=0.5, training=True, inplace=False):
-
         if training and inplace:
             logging.warning(
                 "CrypTen _feature_dropout does not support inplace computation during training."
@@ -1133,7 +1130,6 @@ def backward(ctx, grad_output):
 class AutogradSum(AutogradFunction):
     @staticmethod
     def forward(ctx, *args, **kwargs):
-
         # preprocess inputs:
         assert len(args) >= 1
         if len(args) == 1:
@@ -1192,7 +1188,6 @@ def backward(ctx, grad_output):
 class AutogradMean(AutogradFunction):
     @staticmethod
     def forward(ctx, *args, **kwargs):
-
         # preprocess inputs:
         assert len(args) >= 1
         if len(args) == 1:
@@ -1230,7 +1225,6 @@ def backward(ctx, grad_output):
 class AutogradVariance(AutogradFunction):
     @staticmethod
     def forward(ctx, self, *args, **kwargs):
-
         # preprocess inputs:
         if len(args) == 0:
             dim = None
@@ -1287,7 +1281,6 @@ def backward(ctx, grad_output):
 class AutogradMin(AutogradFunction):
     @staticmethod
     def forward(ctx, *args, **kwargs):
-
         # preprocess inputs:
         assert len(args) >= 1
         if len(args) == 1:
@@ -1335,7 +1328,6 @@ def backward(ctx, grad_output):
 class AutogradMax(AutogradFunction):
     @staticmethod
     def forward(ctx, *args, **kwargs):
-
         # preprocess inputs:
         assert len(args) >= 1
         if len(args) == 1:
@@ -1453,7 +1445,6 @@ def backward(ctx, grad_output):
 class AutogradAvgPool2D(AutogradFunction):
     @staticmethod
     def forward(ctx, input, kernel_size, stride=None, padding=0, ceil_mode=False):
-
         # preprocess inputs:
         if stride is None:
             stride = kernel_size
@@ -1528,7 +1519,6 @@ def forward(
         ceil_mode=False,
         return_indices=False,
     ):
-
         # preprocess inputs:
         if stride is None:
             stride = kernel_size
@@ -1887,7 +1877,6 @@ def backward(ctx, grad_output):
         grad_output = grad_output.mul(weight)
         grad_input = grad_output.mul(inv_var)
         if training:
-
             # compute gradient term that is due to the mean:
             num_element = reduce(
                 lambda x, y: x * y, [grad_output.size(d) for d in stats_dimensions]

crypten/mpc/primitives/arithmetic.py

Lines changed: 9 additions & 9 deletions
@@ -223,9 +223,9 @@ def __setitem__(self, index, value):
         """Set tensor values by index"""
         if isinstance(value, (int, float)) or is_tensor(value):
             value = ArithmeticSharedTensor(value)
-        assert isinstance(
-            value, ArithmeticSharedTensor
-        ), "Unsupported input type %s for __setitem__" % type(value)
+        assert isinstance(value, ArithmeticSharedTensor), (
+            "Unsupported input type %s for __setitem__" % type(value)
+        )
         self.share.__setitem__(index, value.share)

     def pad(self, pad, mode="constant", value=0):
@@ -268,9 +268,9 @@ def stack(tensors, *args, **kwargs):
         for i, tensor in enumerate(tensors):
             if is_tensor(tensor):
                 tensors[i] = ArithmeticSharedTensor(tensor)
-            assert isinstance(
-                tensors[i], ArithmeticSharedTensor
-            ), "Can't stack %s with ArithmeticSharedTensor" % type(tensor)
+            assert isinstance(tensors[i], ArithmeticSharedTensor), (
+                "Can't stack %s with ArithmeticSharedTensor" % type(tensor)
+            )

         result = tensors[0].shallow_copy()
         result.share = torch_stack(
@@ -630,9 +630,9 @@ def scatter_(self, dim, index, src):
         """
         if is_tensor(src):
             src = ArithmeticSharedTensor(src)
-        assert isinstance(
-            src, ArithmeticSharedTensor
-        ), "Unrecognized scatter src type: %s" % type(src)
+        assert isinstance(src, ArithmeticSharedTensor), (
+            "Unrecognized scatter src type: %s" % type(src)
+        )
         self.share.scatter_(dim, index, src.share)
         return self

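The assert rewrites in this file (and in binary.py below) move the line break from the `isinstance(...)` call to the message: the condition stays on the `assert` line and the long message is wrapped in parentheses. Note that only the message expression is parenthesized; writing `assert (cond, msg)` instead would build a two-element tuple, which is always truthy and would silence the assertion. A minimal sketch of the formatted idiom, with a hypothetical `value`:

    value = 3.14  # stand-in for the shared-tensor argument in the real code

    # The parentheses group only the message, so the condition is still checked:
    assert isinstance(value, float), (
        "Unsupported input type %s for __setitem__" % type(value)
    )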

crypten/mpc/primitives/binary.py

Lines changed: 6 additions & 6 deletions
@@ -318,9 +318,9 @@ def __setitem__(self, index, value):
         """Set tensor values by index"""
         if is_tensor(value) or isinstance(value, list):
             value = BinarySharedTensor(value)
-        assert isinstance(
-            value, BinarySharedTensor
-        ), "Unsupported input type %s for __setitem__" % type(value)
+        assert isinstance(value, BinarySharedTensor), (
+            "Unsupported input type %s for __setitem__" % type(value)
+        )
         self.share.__setitem__(index, value.share)

     @staticmethod
@@ -436,9 +436,9 @@ def scatter_(self, dim, index, src):
         """
         if is_tensor(src):
             src = BinarySharedTensor(src)
-        assert isinstance(
-            src, BinarySharedTensor
-        ), "Unrecognized scatter src type: %s" % type(src)
+        assert isinstance(src, BinarySharedTensor), (
+            "Unrecognized scatter src type: %s" % type(src)
+        )
         self.share.scatter_(dim, index, src.share)
         return self


crypten/mpc/primitives/converters.py

Lines changed: 0 additions & 1 deletion
@@ -16,7 +16,6 @@


 def _A2B(arithmetic_tensor):
-
     # first try memory-inefficient implementation that takes O(log P) rounds:
     try:
         binary_tensor = BinarySharedTensor.stack(

crypten/mpc/provider/ttp_provider.py

Lines changed: 0 additions & 1 deletion
@@ -309,7 +309,6 @@ def _get_binary_PRSS(self, size, bitlength=None, remove_rank=None):
         return result

     def additive(self, size0, size1, op, *args, **kwargs):
-
         # Add all shares of `a` and `b` to get plaintext `a` and `b`
         a = self._get_additive_PRSS(size0)
         b = self._get_additive_PRSS(size1)
