Skip to content

Commit 881381c

Browse files
Myle Ott authored and facebook-github-bot committed
v0.7.1: fix PyPI setup and tests
Summary: Pull Request resolved: #818
Differential Revision: D15916265
Pulled By: myleott
fbshipit-source-id: c66c0bd988d3472c4150226952f34ee8d4c3db86
1 parent 9462a81 commit 881381c

File tree

6 files changed

+34
-24
lines changed

6 files changed

+34
-24
lines changed

docs/conf.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -60,9 +60,9 @@
6060
# built documents.
6161
#
6262
# The short X.Y version.
63-
version = '0.7.0'
63+
version = '0.7.1'
6464
# The full version, including alpha/beta/rc tags.
65-
release = '0.7.0'
65+
release = '0.7.1'
6666

6767
# The language for content autogenerated by Sphinx. Refer to documentation
6868
# for a list of supported languages.

fairseq/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
# can be found in the PATENTS file in the same directory.
77

88
__all__ = ['pdb']
9-
__version__ = '0.7.0'
9+
__version__ = '0.7.1'
1010

1111
import fairseq.criterions
1212
import fairseq.models

fairseq/data/audio/__init__.py

Whitespace-only changes.

fairseq/trainer.py

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -233,11 +233,11 @@ def train_step(self, samples, dummy_batch=False, raise_oom=False):
233233
# forward and backward pass
234234
logging_outputs, sample_sizes, ooms = [], [], 0
235235
for i, sample in enumerate(samples):
236-
sample = self._prepare_sample(sample, self.args.fp16)
236+
sample = self._prepare_sample(sample)
237237
if sample is None:
238238
# when sample is None, run forward/backward on a dummy batch
239239
# and ignore the resulting gradients
240-
sample = self._prepare_sample(self._dummy_batch, self.args.fp16)
240+
sample = self._prepare_sample(self._dummy_batch)
241241
ignore_grad = True
242242
else:
243243
ignore_grad = False
@@ -381,9 +381,9 @@ def valid_step(self, sample, raise_oom=False):
381381
self.model.eval()
382382
self.criterion.eval()
383383

384-
sample = self._prepare_sample(sample, self.args.fp16)
384+
sample = self._prepare_sample(sample)
385385
if sample is None:
386-
sample = self._prepare_sample(self._dummy_batch, self.args.fp16)
386+
sample = self._prepare_sample(self._dummy_batch)
387387
ignore_results = True
388388
else:
389389
ignore_results = False
@@ -488,7 +488,7 @@ def set_num_updates(self, num_updates):
488488
self._num_updates = num_updates
489489
self.lr_step_update()
490490

491-
def _prepare_sample(self, sample, fp16):
491+
def _prepare_sample(self, sample):
492492
if sample is None or len(sample) == 0:
493493
return None
494494

@@ -500,7 +500,10 @@ def apply_half(t):
500500
return t.half()
501501
return t
502502

503-
return utils.apply(apply_half, sample) if fp16 else sample
503+
if self.args.fp16:
504+
sample = utils.apply_to_sample(apply_half, sample)
505+
506+
return sample
504507

505508
def _set_seed(self):
506509
# Set seed based on args.seed and the update number so that we get

fairseq/utils.py

Lines changed: 20 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -31,26 +31,32 @@ def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
3131
)
3232

3333

34-
def apply(f, sample):
34+
def apply_to_sample(f, sample):
3535
if len(sample) == 0:
3636
return {}
37-
if torch.is_tensor(sample):
38-
return f(sample)
39-
elif isinstance(sample, dict):
40-
return {
41-
key: apply(f, value)
42-
for key, value in sample.items()
43-
}
44-
elif isinstance(sample, list):
45-
return [apply(f, x) for x in sample]
46-
else:
47-
return sample
37+
38+
def _apply(x):
39+
if torch.is_tensor(x):
40+
return f(x)
41+
elif isinstance(x, dict):
42+
return {
43+
key: _apply(value)
44+
for key, value in x.items()
45+
}
46+
elif isinstance(x, list):
47+
return [_apply(x) for x in x]
48+
else:
49+
return x
50+
51+
return _apply(sample)
4852

4953

5054
def move_to_cuda(sample):
55+
5156
def _move_to_cuda(tensor):
52-
return tensor.cuda()
53-
return apply(_move_to_cuda, sample)
57+
return tensor.cuda()
58+
59+
return apply_to_sample(_move_to_cuda, sample)
5460

5561

5662
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)

setup.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929

3030
setup(
3131
name='fairseq',
32-
version='0.7.0',
32+
version='0.7.1',
3333
description='Facebook AI Research Sequence-to-Sequence Toolkit',
3434
url='https://github.com/pytorch/fairseq',
3535
classifiers=[
@@ -40,6 +40,7 @@
4040
'Topic :: Scientific/Engineering :: Artificial Intelligence',
4141
],
4242
long_description=readme,
43+
long_description_content_type='text/markdown',
4344
install_requires=[
4445
'cffi',
4546
'numpy',

0 commit comments

Comments (0)