From b10fbc1edcda9f695532b7644fc6baff6f79a37b Mon Sep 17 00:00:00 2001
From: Jonathan Daniel
Date: Wed, 27 May 2020 21:27:38 +0300
Subject: [PATCH 01/13] STY: Old formatting to f-strings (PEP-498)

%-interpolation and '{}'.format() -> f'{}'

F-strings are supported from Python 3.6, which is nibabel's current
minimum Python version.

Converted with https://github.com/ikamensh/flynt, excluding
versioneer.py. The changes in this first commit were reviewed manually.

---
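Reviewer note (placed after the "---" separator, so it is not part of the
commit message or the diff to apply): a minimal sketch of the rewrite
patterns this patch performs. The variables below are invented for
illustration and do not appear in the nibabel sources.

    name, version = 'nibabel', '3.1'

    # old %-interpolation
    msg = 'package %s, version %s' % (name, version)
    # old str.format()
    msg = 'package {0}, version {1}'.format(name, version)
    # f-string equivalent (PEP 498, Python >= 3.6), as emitted by flynt
    msg = f'package {name}, version {version}'

All three assignments produce the same string; only the last form is what
this patch leaves behind.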
 bin/nib-dicomfs                              |  2 +-
 doc/source/conf.py                           |  2 +-
 doc/source/devel/register_me.py              |  2 +-
 doc/tools/apigen.py                          |  7 ++--
 doc/tools/build_modref_templates.py          |  2 +-
 nibabel/_version.py                          | 27 ++++++------
 nibabel/analyze.py                           |  9 ++--
 nibabel/batteryrunners.py                    |  3 +-
 .../benchmarks/bench_arrayproxy_slicing.py   |  5 +--
 nibabel/benchmarks/bench_fileslice.py        |  4 +-
 nibabel/benchmarks/bench_streamlines.py      | 10 ++---
 nibabel/benchmarks/butils.py                 |  4 +-
 nibabel/brikhead.py                          |  6 +--
 nibabel/casting.py                           |  4 +-
 nibabel/cifti2/cifti2.py                     |  2 +-
 nibabel/cifti2/cifti2_axes.py                | 10 ++---
 nibabel/cifti2/tests/test_cifti2io_header.py |  2 +-
 nibabel/cifti2/tests/test_new_cifti2.py      |  4 +-
 nibabel/cmdline/conform.py                   |  4 +-
 nibabel/cmdline/dicomfs.py                   | 42 +++++++++----------
 nibabel/cmdline/diff.py                      |  2 +-
 nibabel/cmdline/ls.py                        | 18 ++++----
 nibabel/cmdline/nifti_dx.py                  |  6 +--
 nibabel/cmdline/parrec2nii.py                | 25 ++++++-----
 nibabel/cmdline/tck2trk.py                   |  2 +-
 nibabel/cmdline/trk2tck.py                   |  2 +-
 nibabel/cmdline/utils.py                     |  9 ++--
 nibabel/data.py                              | 11 +++--
 nibabel/dataobj_images.py                    |  2 +-
 nibabel/deprecated.py                        |  2 +-
 nibabel/dft.py                               | 22 +++++-----
 nibabel/externals/netcdf.py                  | 13 +++---
 nibabel/externals/oset.py                    |  4 +-
 nibabel/filebasedimages.py                   |  3 +-
 nibabel/filename_parser.py                   |  3 +-
 nibabel/fileslice.py                         |  2 +-
 nibabel/fileutils.py                         |  3 +-
 nibabel/freesurfer/io.py                     | 18 ++++----
 nibabel/freesurfer/mghformat.py              |  5 +--
 nibabel/freesurfer/tests/test_io.py          | 25 ++++++-----
 nibabel/funcs.py                             |  3 +-
 nibabel/gifti/gifti.py                       |  4 +-
 nibabel/gifti/parse_gifti_fast.py            |  2 +-
 nibabel/gifti/tests/test_parse_gifti_fast.py |  3 +-
 nibabel/info.py                              |  5 +--
 nibabel/loadsave.py                          | 15 +++----
 nibabel/nicom/csareader.py                   |  5 +--
 nibabel/nicom/dicomreaders.py                |  2 +-
 nibabel/nicom/dicomwrappers.py               |  2 +-
 nibabel/nifti1.py                            | 22 ++++------
 nibabel/optpkg.py                            |  7 ++--
 nibabel/orientations.py                      |  7 ++--
 nibabel/parrec.py                            | 11 +++--
 nibabel/processing.py                        |  4 +-
 nibabel/quaternions.py                       |  2 +-
 nibabel/rstutils.py                          |  7 ++--
 nibabel/streamlines/__init__.py              |  4 +-
 nibabel/streamlines/array_sequence.py        |  5 +--
 nibabel/streamlines/tck.py                   | 12 +++---
 .../streamlines/tests/test_array_sequence.py |  2 +-
 nibabel/streamlines/tests/test_trk.py        |  8 ++--
 nibabel/testing/__init__.py                  |  6 +--
 nibabel/tests/data/check_parrec_reslice.py   |  5 +--
 nibabel/tests/nibabel_data.py                |  2 +-
 nibabel/tests/scriptrunner.py                | 14 +++---
 nibabel/tests/test_api_validators.py         |  2 +-
 nibabel/tests/test_arrayproxy.py             |  2 +-
 nibabel/tests/test_arraywriters.py           |  2 +-
 nibabel/tests/test_data.py                   |  4 +-
 nibabel/tests/test_deprecator.py             |  3 +-
 nibabel/tests/test_floating.py               |  2 +-
 nibabel/tests/test_image_types.py            |  3 +-
 nibabel/tests/test_loadsave.py               |  2 +-
 nibabel/tests/test_removalschedule.py        |  6 +--
 nibabel/tests/test_scaling.py                |  2 +-
 nibabel/tests/test_scripts.py                |  8 ++--
 nibabel/tests/test_spatialimages.py          |  4 +-
 nibabel/tests/test_wrapstruct.py             |  2 +-
 nibabel/trackvis.py                          | 13 +++---
 nibabel/viewers.py                           |  4 +-
 nibabel/volumeutils.py                       |  3 +-
 nibabel/wrapstruct.py                        |  9 ++--
 nisext/sexts.py                              |  4 +-
 nisext/testers.py                            | 18 ++++----
 nisext/tests/test_testers.py                 |  2 +-
 tools/gitwash_dumper.py                      | 20 ++-----
 tools/make_tarball.py                        |  6 +--
 87 files changed, 277 insertions(+), 331 deletions(-)

diff --git a/bin/nib-dicomfs b/bin/nib-dicomfs
index 05b6a50afc..51c7414752 100755
--- a/bin/nib-dicomfs
+++ b/bin/nib-dicomfs
@@ -12,4 +12,4 @@ from nibabel.cmdline.dicomfs import main
 
 
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
diff --git a/doc/source/conf.py b/doc/source/conf.py
index d3e75237ab..9ea5495ef1 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -92,7 +92,7 @@
 
 # General information about the project.
 project = u'NiBabel'
-copyright = u'2006-2020, %(maintainer)s <%(author_email)s>' % metadata
+copyright = f"2006-2020, {metadata['maintainer']} <{metadata['author_email']}>"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
diff --git a/doc/source/devel/register_me.py b/doc/source/devel/register_me.py
index d2f571f04f..76e0dbc641 100644
--- a/doc/source/devel/register_me.py
+++ b/doc/source/devel/register_me.py
@@ -40,7 +40,7 @@ def main():
         dsource.set(name, version, OUR_PATH)
     dsource.write(file(ini_fname, 'wt'))
-    print('Registered package %s, %s to %s' % (name, version, ini_fname))
+    print(f'Registered package {name}, {version} to {ini_fname}')
 
 
 if __name__ == '__main__':
diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py
index 05498c69a9..52966300e2 100644
--- a/doc/tools/apigen.py
+++ b/doc/tools/apigen.py
@@ -342,8 +342,7 @@ def _survives_exclude(self, matchstr, match_type):
         elif match_type == 'package':
             patterns = self.package_skip_patterns
         else:
-            raise ValueError('Cannot interpret match type "%s"'
-                             % match_type)
+            raise ValueError(f'Cannot interpret match type "{match_type}"')
         # Match to URI without package name
         L = len(self.package_name)
         if matchstr[:L] == self.package_name:
@@ -424,7 +423,7 @@ def write_modules_api(self, modules, outdir):
 
         written_modules = []
 
         for ulm, mods in module_by_ulm.items():
-            print("Generating docs for %s:" % ulm)
+            print(f"Generating docs for {ulm}:")
 
             document_head = []
             document_body = []
@@ -505,5 +504,5 @@ def write_index(self, outdir, froot='gen', relative_to=None):
         w("=" * len(title) + "\n\n")
         w('.. toctree::\n\n')
         for f in self.written_modules:
-            w(' %s\n' % os.path.join(relpath, f))
+            w(f' {os.path.join(relpath, f)}\n')
         idx.close()
diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py
index da752b6c42..6ec6848579 100755
--- a/doc/tools/build_modref_templates.py
+++ b/doc/tools/build_modref_templates.py
@@ -18,7 +18,7 @@
 
 
 def abort(error):
-    print('*WARNING* API documentation not generated: %s' % error)
+    print(f'*WARNING* API documentation not generated: {error}')
     exit(1)
 
 
diff --git a/nibabel/_version.py b/nibabel/_version.py
index 60031b4d17..bfb8d6e9f9 100644
--- a/nibabel/_version.py
+++ b/nibabel/_version.py
@@ -87,20 +87,20 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
             if e.errno == errno.ENOENT:
                 continue
             if verbose:
-                print("unable to run %s" % dispcmd)
+                print(f"unable to run {dispcmd}")
                 print(e)
             return None, None
     else:
         if verbose:
-            print("unable to find command, tried %s" % (commands,))
+            print(f"unable to find command, tried {commands}")
         return None, None
     stdout = p.communicate()[0].strip()
     if sys.version_info[0] >= 3:
         stdout = stdout.decode()
     if p.returncode != 0:
         if verbose:
-            print("unable to run %s (error)" % dispcmd)
-            print("stdout was %s" % stdout)
+            print(f"unable to run {dispcmd} (error)")
+            print(f"stdout was {stdout}")
         return None, p.returncode
     return stdout, p.returncode
@@ -201,9 +201,9 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
     # "stabilization", as well as "HEAD" and "master".
     tags = set([r for r in refs if re.search(r'\d', r)])
     if verbose:
-        print("discarding '%s', no digits" % ",".join(refs - tags))
+        print(f"discarding '{','.join(refs - tags)}', no digits")
     if verbose:
-        print("likely tags: %s" % ",".join(sorted(tags)))
+        print(f"likely tags: {','.join(sorted(tags))}")
     for ref in sorted(tags):
         # sorting will prefer e.g. "2.0" over "2.0rc1"
         if ref.startswith(tag_prefix):
@@ -214,7 +214,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
             if not re.match(r'\d', r):
                 continue
             if verbose:
-                print("picking %s" % r)
+                print(f"picking {r}")
             return {"version": r,
                     "full-revisionid": keywords["full"].strip(),
                     "dirty": False, "error": None,
@@ -243,14 +243,14 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
                           hide_stderr=True)
     if rc != 0:
         if verbose:
-            print("Directory %s not under git control" % root)
+            print(f"Directory {root} not under git control")
         raise NotThisMethod("'git rev-parse --git-dir' returned error")
 
     # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
     # if there isn't one, this yields HEX[-dirty] (no NUM)
     describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                           "--always", "--long",
-                                          "--match", "%s*" % tag_prefix],
+                                          "--match", f"{tag_prefix}*"],
                                    cwd=root)
     # --long was added in git-1.5.5
     if describe_out is None:
@@ -283,8 +283,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
         mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
         if not mo:
             # unparseable. Maybe git-describe is misbehaving?
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = (f"unable to parse git-describe output: '{describe_out}'") return pieces # tag @@ -384,13 +383,13 @@ def render_pep440_post(pieces): if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] + rendered += f"g{pieces['short']}" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" - rendered += "+g%s" % pieces["short"] + rendered += f"+g{pieces['short']}" return rendered @@ -481,7 +480,7 @@ def render(pieces, style): elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: - raise ValueError("unknown style '%s'" % style) + raise ValueError(f"unknown style '{style}'") return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 53e01db64c..6aa1418f72 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -571,16 +571,16 @@ def set_data_dtype(self, datatype): dt = np.dtype(dt) except TypeError: raise HeaderDataError( - 'data dtype "{0}" not recognized'.format(datatype)) + f'data dtype "{datatype}" not recognized') if dt not in self._data_type_codes: raise HeaderDataError( - 'data dtype "{0}" not supported'.format(datatype)) + f'data dtype "{datatype}" not supported') code = self._data_type_codes[dt] dtype = self._data_type_codes.dtype[code] # test for void, being careful of user-defined types if dtype.type is np.void and not dtype.fields: raise HeaderDataError( - 'data dtype "{0}" known but not supported'.format(datatype)) + f'data dtype "{datatype}" known but not supported') self._structarr['datatype'] = code self._structarr['bitpix'] = dtype.itemsize * 8 @@ -632,8 +632,7 @@ def set_data_shape(self, shape): values_fit = np.all(dims[1:ndims + 1] == shape) # Error if we did not succeed setting dimensions if not values_fit: - raise HeaderDataError('shape %s does not fit in dim datatype' % - (shape,)) + raise HeaderDataError(f'shape {shape} does not fit in dim datatype') self._structarr['pixdim'][ndims + 1:] = 1.0 def get_base_affine(self): diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index 78a887fb56..67af4f4a8b 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -291,8 +291,7 @@ def write_raise(self, stream, error_level=40, log_level=30): write the report to `stream`, otherwise we write nothing. 
""" if self.problem_level >= log_level: - stream.write('Level %s: %s\n' % - (self.problem_level, self.message)) + stream.write(f'Level {self.problem_level}: {self.message}\n') if self.problem_level and self.problem_level >= error_level: if self.error: raise self.error(self.problem_msg) diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 2ed9ec9ccd..8afebb546a 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -96,7 +96,7 @@ def fmt_sliceobj(sliceobj): slcstr.append(s) else: slcstr.append(str(int(s * SHAPE[i]))) - return '[{}]'.format(', '.join(slcstr)) + return f"[{', '.join(slcstr)}]" with InTemporaryDirectory(): @@ -133,8 +133,7 @@ def fmt_sliceobj(sliceobj): have_igzip, keep_open, sliceobj = test seed = seeds[SLICEOBJS.index(sliceobj)] - print('Running test {} of {} ({})...'.format( - ti + 1, len(tests), label)) + print(f'Running test {ti + 1} of {len(tests)} ({label})...') # load uncompressed and compressed versions of the image img = nib.load(testfile, keep_file_open=keep_open) diff --git a/nibabel/benchmarks/bench_fileslice.py b/nibabel/benchmarks/bench_fileslice.py index c19d3b89f0..5d99b939d7 100644 --- a/nibabel/benchmarks/bench_fileslice.py +++ b/nibabel/benchmarks/bench_fileslice.py @@ -20,7 +20,7 @@ from ..tmpdirs import InTemporaryDirectory SHAPE = (64, 64, 32, 100) -ROW_NAMES = ['axis {0}, len {1}'.format(i, SHAPE[i]) +ROW_NAMES = [f'axis {i}, len {SHAPE[i]}' for i in range(len(SHAPE))] COL_NAMES = ['mid int', 'step 1', @@ -79,7 +79,7 @@ def my_table(title, times, base): print() print(rst_table(times, ROW_NAMES, COL_NAMES, title, val_fmt='{0[0]:3.2f} ({0[1]:3.2f})')) - print('Base time: {0:3.2f}'.format(base)) + print(f'Base time: {base:3.2f}') if bytes: fobj = BytesIO() times, base = run_slices(fobj, repeat) diff --git a/nibabel/benchmarks/bench_streamlines.py b/nibabel/benchmarks/bench_streamlines.py index 5c49c9e177..54ed8e3e4d 100644 --- a/nibabel/benchmarks/bench_streamlines.py +++ b/nibabel/benchmarks/bench_streamlines.py @@ -44,16 +44,14 @@ def bench_load_trk(): streamlines_old = [d[0] - 0.5 for d in tv.read(trk_file, points_space="rasmm")[0]] mtime_old = measure('tv.read(trk_file, points_space="rasmm")', repeat) - print("Old: Loaded {:,} streamlines in {:6.2f}".format(NB_STREAMLINES, - mtime_old)) + print(f"Old: Loaded {NB_STREAMLINES:,} streamlines in {mtime_old:6.2f}") trk = nib.streamlines.load(trk_file, lazy_load=False) streamlines_new = trk.streamlines mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)', repeat) - print("\nNew: Loaded {:,} streamlines in {:6.2}".format(NB_STREAMLINES, - mtime_new)) - print("Speedup of {:.2f}".format(mtime_old / mtime_new)) + print(f"\nNew: Loaded {NB_STREAMLINES:,} streamlines in {mtime_new:6.2}") + print(f"Speedup of {mtime_old / mtime_new:.2f}") for s1, s2 in zip(streamlines_new, streamlines_old): assert_array_equal(s1, s2) @@ -81,7 +79,7 @@ def bench_load_trk(): repeat) msg = "New: Loaded {:,} streamlines with scalars in {:6.2f}" print(msg.format(NB_STREAMLINES, mtime_new)) - print("Speedup of {:2f}".format(mtime_old / mtime_new)) + print(f"Speedup of {mtime_old / mtime_new:2f}") for s1, s2 in zip(scalars_new, scalars_old): assert_array_equal(s1, s2) diff --git a/nibabel/benchmarks/butils.py b/nibabel/benchmarks/butils.py index bea5872272..4cc521ab66 100644 --- a/nibabel/benchmarks/butils.py +++ b/nibabel/benchmarks/butils.py @@ -7,8 +7,6 @@ def print_git_title(title): """ Prints 
title string with git hash if possible, and underline """ - title = '{0} for git revision {1}'.format( - title, - get_info()['commit_hash']) + title = f"{title} for git revision {get_info()['commit_hash']}" print(title) print('-' * len(title)) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 2afd5b2c89..7693818e7b 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -118,11 +118,9 @@ def _unpack_var(var): 'Offending attribute:\n%s' % var) atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) if len(atype) != 1: - raise AFNIHeaderError('Invalid attribute type entry in HEAD file. ' - '%s' % err_msg) + raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}') if len(aname) != 1: - raise AFNIHeaderError('Invalid attribute name entry in HEAD file. ' - '%s' % err_msg) + raise AFNIHeaderError(f'Invalid attribute name entry in HEAD file. {err_msg}') atype = _attr_dic.get(atype[0], str) attr = ' '.join(var.strip().splitlines()[3:]) if atype is not str: diff --git a/nibabel/casting.py b/nibabel/casting.py index 8406824dbe..7f2e8d2d5f 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -268,7 +268,7 @@ def type_info(np_type): # and then give up. At this stage we're expecting exotic longdouble or # their complex equivalent. if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32): - raise FloatingError('We had not expected type %s' % np_type) + raise FloatingError(f'We had not expected type {np_type}') if (vals == (1, 1, 16) and on_powerpc() and _check_maxexp(np.longdouble, 1024)): # double pair on PPC. The _check_nmant routine does not work for this @@ -402,7 +402,7 @@ def as_int(x, check=True): return ix fx = np.floor(x) if check and fx != x: - raise FloatingError('Not an integer: %s' % x) + raise FloatingError(f'Not an integer: {x}') if not fx.dtype.type == np.longdouble: return int(x) # Subtract float64 chunks until we have all of the number. If the int is diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index bd86ebfaa7..d011d6e8e3 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -92,7 +92,7 @@ class Cifti2HeaderError(Exception): def _value_if_klass(val, klass): if val is None or isinstance(val, klass): return val - raise ValueError('Not a valid %s instance.' 
% klass.__name__) + raise ValueError(f'Not a valid {klass.__name__} instance.') def _underscore(string): diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index c4c47007db..f1495552bd 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -348,7 +348,7 @@ def from_mask(cls, mask, name='other', affine=None): else: affine = np.asanyarray(affine) if affine.shape != (4, 4): - raise ValueError("Affine transformation should be a 4x4 array or None, not %r" % affine) + raise ValueError(f"Affine transformation should be a 4x4 array or None, not {affine!r}") mask = np.asanyarray(mask) if mask.ndim == 1: @@ -533,9 +533,9 @@ def to_cifti_brain_structure_name(name): orientation = 'both' structure = name if orientation.lower() == 'both': - proposed_name = 'CIFTI_STRUCTURE_%s' % structure.upper() + proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}' else: - proposed_name = 'CIFTI_STRUCTURE_%s_%s' % (structure.upper(), orientation.upper()) + proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}_{orientation.upper()}' if proposed_name not in cifti2.CIFTI_BRAIN_STRUCTURES: raise ValueError('%s was interpreted as %s, which is not a valid CIFTI brain structure' % (name, proposed_name)) @@ -990,9 +990,9 @@ def __getitem__(self, item): if isinstance(item, str): idx = np.where(self.name == item)[0] if len(idx) == 0: - raise IndexError("Parcel %s not found" % item) + raise IndexError(f"Parcel {item} not found") if len(idx) > 1: - raise IndexError("Multiple parcels with name %s found" % item) + raise IndexError(f"Multiple parcels with name {item} found") return self.voxels[idx[0]], self.vertices[idx[0]] if isinstance(item, int): return self.get_element(item) diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 0fef5ccd78..df4fe10fcd 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -330,7 +330,7 @@ def test_read_parcels(): assert len(vertices) == length assert vertices[0] == first_element assert vertices[-1] == last_element - assert vertices.brain_structure == 'CIFTI_STRUCTURE_CORTEX_%s' % orientation + assert vertices.brain_structure == f'CIFTI_STRUCTURE_CORTEX_{orientation}' @needs_nibabel_data('nitest-cifti2') diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index 944a1c1576..65ef95c316 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py +++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -125,7 +125,7 @@ def create_parcel_map(applies_to_matrix_dimension): volume = ci.Cifti2VoxelIndicesIJK(element) mapping.append(ci.Cifti2Parcel(name, volume, surfaces)) - mapping.extend([ci.Cifti2Surface('CIFTI_STRUCTURE_CORTEX_%s' % orientation, + mapping.extend([ci.Cifti2Surface(f'CIFTI_STRUCTURE_CORTEX_{orientation}', number_of_vertices) for orientation in ['LEFT', 'RIGHT']]) mapping.volume = ci.Cifti2Volume(dimensions, ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, affine)) @@ -148,7 +148,7 @@ def check_parcel_map(mapping): assert parcel.voxel_indices_ijk._indices == element for surface, orientation in zip(mapping.surfaces, ('LEFT', 'RIGHT')): - assert surface.brain_structure == 'CIFTI_STRUCTURE_CORTEX_%s' % orientation + assert surface.brain_structure == f'CIFTI_STRUCTURE_CORTEX_{orientation}' assert surface.surface_number_of_vertices == number_of_vertices assert mapping.volume.volume_dimensions == dimensions diff --git a/nibabel/cmdline/conform.py b/nibabel/cmdline/conform.py index 
65b4ccc388..cfa86b6951 100644 --- a/nibabel/cmdline/conform.py +++ b/nibabel/cmdline/conform.py @@ -34,7 +34,7 @@ def _get_parser(): help="Orientation of the conformed output.") p.add_argument("-f", "--force", action="store_true", help="Overwrite existing output files.") - p.add_argument("-V", "--version", action="version", version="{} {}".format(p.prog, __version__)) + p.add_argument("-V", "--version", action="version", version=f"{p.prog} {__version__}") return p @@ -46,7 +46,7 @@ def main(args=None): from_img = load(opts.infile) if not opts.force and Path(opts.outfile).exists(): - raise FileExistsError("Output file exists: {}".format(opts.outfile)) + raise FileExistsError(f"Output file exists: {opts.outfile}") out_img = conform( from_img=from_img, diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index c54c07f966..6663da9299 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -71,31 +71,31 @@ def get_paths(self): for study in dft.get_studies(self.dicom_path, self.followlinks): pd = paths.setdefault(study.patient_name_or_uid(), {}) patient_info = 'patient information\n' - patient_info = 'name: %s\n' % study.patient_name - patient_info += 'ID: %s\n' % study.patient_id - patient_info += 'birth date: %s\n' % study.patient_birth_date - patient_info += 'sex: %s\n' % study.patient_sex + patient_info = f'name: {study.patient_name}\n' + patient_info += f'ID: {study.patient_id}\n' + patient_info += f'birth date: {study.patient_birth_date}\n' + patient_info += f'sex: {study.patient_sex}\n' pd['INFO'] = patient_info.encode('ascii', 'replace') - study_datetime = '%s_%s' % (study.date, study.time) + study_datetime = f'{study.date}_{study.time}' study_info = 'study info\n' - study_info += 'UID: %s\n' % study.uid - study_info += 'date: %s\n' % study.date - study_info += 'time: %s\n' % study.time - study_info += 'comments: %s\n' % study.comments + study_info += f'UID: {study.uid}\n' + study_info += f'date: {study.date}\n' + study_info += f'time: {study.time}\n' + study_info += f'comments: {study.comments}\n' d = {'INFO': study_info.encode('ascii', 'replace')} for series in study.series: series_info = 'series info\n' - series_info += 'UID: %s\n' % series.uid - series_info += 'number: %s\n' % series.number - series_info += 'description: %s\n' % series.description + series_info += f'UID: {series.uid}\n' + series_info += f'number: {series.number}\n' + series_info += f'description: {series.description}\n' series_info += 'rows: %d\n' % series.rows series_info += 'columns: %d\n' % series.columns series_info += 'bits allocated: %d\n' % series.bits_allocated series_info += 'bits stored: %d\n' % series.bits_stored series_info += 'storage instances: %d\n' % len(series.storage_instances) d[series.number] = {'INFO': series_info.encode('ascii', 'replace'), - '%s.nii' % series.number: (series.nifti_size, series.as_nifti), - '%s.png' % series.number: (series.png_size, series.as_png)} + f'{series.number}.nii': (series.nifti_size, series.as_nifti), + f'{series.number}.png': (series.png_size, series.as_png)} pd[study_datetime] = d return paths @@ -105,7 +105,7 @@ def match_path(self, path): logger.debug('return root') return wd for part in path.lstrip('/').split('/'): - logger.debug("path:%s part:%s" % (path, part)) + logger.debug(f"path:{path} part:{part}") if part not in wd: return None wd = wd[part] @@ -113,20 +113,20 @@ def match_path(self, path): return wd def readdir(self, path, fh): - logger.info('readdir %s' % (path,)) + logger.info(f'readdir {path}') matched_path = 
self.match_path(path) if matched_path is None: return -errno.ENOENT - logger.debug('matched %s' % (matched_path,)) + logger.debug(f'matched {matched_path}') fnames = [k.encode('ascii', 'replace') for k in matched_path.keys()] fnames.append('.') fnames.append('..') return [fuse.Direntry(f) for f in fnames] def getattr(self, path): - logger.debug('getattr %s' % path) + logger.debug(f'getattr {path}') matched_path = self.match_path(path) - logger.debug('matched: %s' % (matched_path,)) + logger.debug(f'matched: {matched_path}') now = time.time() st = fuse.Stat() if isinstance(matched_path, dict): @@ -161,7 +161,7 @@ def getattr(self, path): return -errno.ENOENT def open(self, path, flags): - logger.debug('open %s' % (path,)) + logger.debug(f'open {path}') matched_path = self.match_path(path) if matched_path is None: return -errno.ENOENT @@ -223,7 +223,7 @@ def main(args=None): logger.setLevel(opts.verbose > 1 and logging.DEBUG or logging.INFO) if len(files) != 2: - sys.stderr.write("Please provide two arguments:\n%s\n" % parser.usage) + sys.stderr.write(f"Please provide two arguments:\n{parser.usage}\n") sys.exit(1) fs = DICOMFS( diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index c745ff7abc..f1e4958e8f 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -32,7 +32,7 @@ def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage="%s [OPTIONS] [FILE ...]\n\n" % sys.argv[0] + __doc__, + usage=f"{sys.argv[0]} [OPTIONS] [FILE ...]\n\n" + __doc__, version="%prog " + nib.__version__) p.add_options([ diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index ea2e4032ae..08f975bc3d 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -31,7 +31,7 @@ def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage="%s [OPTIONS] [FILE ...]\n\n" % sys.argv[0] + __doc__, + usage=f"{sys.argv[0]} [OPTIONS] [FILE ...]\n\n" + __doc__, version="%prog " + nib.__version__) p.add_options([ @@ -65,20 +65,20 @@ def get_opt_parser(): def proc_file(f, opts): - verbose(1, "Loading %s" % f) + verbose(1, f"Loading {f}") - row = ["@l%s" % f] + row = [f"@l{f}"] try: vol = nib.load(f) h = vol.header except Exception as e: row += ['failed'] - verbose(2, "Failed to gather information -- %s" % str(e)) + verbose(2, f"Failed to gather information -- {str(e)}") return row row += [str(safe_get(h, 'data_dtype')), - '@l[%s]' % ap(safe_get(h, 'data_shape'), '%3g'), - '@l%s' % ap(safe_get(h, 'zooms'), '%.2f', 'x')] + f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]", + f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"] # Slope if hasattr(h, 'has_data_slope') and \ (h.has_data_slope or h.has_data_intercept) and \ @@ -116,7 +116,7 @@ def proc_file(f, opts): else: row += [''] except Exception as e: - verbose(2, "Failed to obtain qform or sform -- %s" % str(e)) + verbose(2, f"Failed to obtain qform or sform -- {str(e)}") if isinstance(h, nib.AnalyzeHeader): row += [''] else: @@ -136,7 +136,7 @@ def proc_file(f, opts): # just # of elements row += ["@l[%d]" % np.prod(d.shape)] # stats - row += [len(d) and '@l[%.2g, %.2g]' % (np.min(d), np.max(d)) or '-'] + row += [len(d) and f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' or '-'] if opts.counts: items, inv = np.unique(d, return_inverse=True) if len(items) > 1000 and not opts.all_counts: @@ -146,7 +146,7 @@ def proc_file(f, opts): counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq)) row += ["@l" + counts] except IOError as e: - verbose(2, "Failed to obtain stats/counts -- %s" % str(e)) + 
verbose(2, f"Failed to obtain stats/counts -- {str(e)}") row += [_err()] return row diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py index 259c24d97d..51867da065 100644 --- a/nibabel/cmdline/nifti_dx.py +++ b/nibabel/cmdline/nifti_dx.py @@ -23,7 +23,7 @@ def main(args=None): """ Go go team """ parser = OptionParser( - usage="%s [FILE ...]\n\n" % sys.argv[0] + __doc__, + usage=f"{sys.argv[0]} [FILE ...]\n\n" + __doc__, version="%prog " + nib.__version__) (opts, files) = parser.parse_args(args=args) @@ -32,7 +32,7 @@ def main(args=None): hdr = fobj.read(nib.nifti1.header_dtype.itemsize) result = nib.Nifti1Header.diagnose_binaryblock(hdr) if len(result): - print('Picky header check output for "%s"\n' % fname) + print(f'Picky header check output for "{fname}\"\n') print(result + '\n') else: - print('Header for "%s" is clean' % fname) + print(f'Header for "{fname}" is clean') diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index 0dfa03cac9..917615e620 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -22,7 +22,7 @@ def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage="%s [OPTIONS] \n\n" % sys.argv[0] + __doc__, + usage=f"{sys.argv[0]} [OPTIONS] \n\n" + __doc__, version="%prog " + nibabel.__version__) p.add_option( Option("-v", "--verbose", action="store_true", dest="verbose", @@ -136,7 +136,7 @@ def get_opt_parser(): def verbose(msg, indent=0): if verbose.switch: - print("%s%s" % (' ' * indent, msg)) + print(f"{' ' * indent}{msg}") def error(msg, exit_code): @@ -172,7 +172,7 @@ def proc_file(infile, opts): affine = pr_hdr.get_affine(origin=opts.origin) slope, intercept = pr_hdr.get_data_scaling(scaling) if opts.scaling != 'off': - verbose('Using data scaling "%s"' % opts.scaling) + verbose(f'Using data scaling "{opts.scaling}"') # get original scaling, and decide if we scale in-place or not if opts.scaling == 'off': slope = np.array([1.]) @@ -208,8 +208,7 @@ def proc_file(infile, opts): bad_mask = np.logical_and(bvals != 0, (bvecs == 0).all(axis=1)) if bad_mask.sum() > 0: pl = 's' if bad_mask.sum() != 1 else '' - verbose('Removing %s DTI trace volume%s' - % (bad_mask.sum(), pl)) + verbose(f'Removing {bad_mask.sum()} DTI trace volume{pl}') good_mask = ~bad_mask in_data = in_data[..., good_mask] bvals = bvals[good_mask] @@ -243,7 +242,7 @@ def proc_file(infile, opts): dump_ext = nifti1.Nifti1Extension('comment', hdr_dump) nhdr.extensions.append(dump_ext) - verbose('Writing %s' % outfilename) + verbose(f'Writing {outfilename}') nibabel.save(nimg, outfilename) # write out bvals/bvecs if requested @@ -256,7 +255,7 @@ def proc_file(infile, opts): with open(basefilename + '.bvals', 'w') as fid: # np.savetxt could do this, but it's just a loop anyway for val in bvals: - fid.write('%s ' % val) + fid.write(f'{val} ') fid.write('\n') else: verbose('Writing .bvals and .bvecs files') @@ -267,12 +266,12 @@ def proc_file(infile, opts): with open(basefilename + '.bvals', 'w') as fid: # np.savetxt could do this, but it's just a loop anyway for val in bvals: - fid.write('%s ' % val) + fid.write(f'{val} ') fid.write('\n') with open(basefilename + '.bvecs', 'w') as fid: for row in bvecs.T: for val in row: - fid.write('%s ' % val) + fid.write(f'{val} ') fid.write('\n') # export data labels varying along the 4th dimensions if requested @@ -299,7 +298,7 @@ def proc_file(infile, opts): verbose('Writing dwell time (%r sec) calculated assuming %sT ' 'magnet' % (dwell_time, opts.field_strength)) with 
open(basefilename + '.dwell_time', 'w') as fid: - fid.write('%r\n' % dwell_time) + fid.write(f'{dwell_time!r}\n') # done @@ -310,18 +309,18 @@ def main(): verbose.switch = opts.verbose if opts.origin not in ['scanner', 'fov']: - error("Unrecognized value for --origin: '%s'." % opts.origin, 1) + error(f"Unrecognized value for --origin: '{opts.origin}'.", 1) if opts.dwell_time and opts.field_strength is None: error('Need --field-strength for dwell time calculation', 1) # store any exceptions errs = [] for infile in infiles: - verbose('Processing %s' % infile) + verbose(f'Processing {infile}') try: proc_file(infile, opts) except Exception as e: - errs.append('%s: %s' % (infile, e)) + errs.append(f'{infile}: {e}') if len(errs): error('Caught %i exceptions. Dump follows:\n\n %s' diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py index deb3adcd5f..9b359babaf 100644 --- a/nibabel/cmdline/tck2trk.py +++ b/nibabel/cmdline/tck2trk.py @@ -35,7 +35,7 @@ def main(): for tractogram in args.tractograms: tractogram_format = nib.streamlines.detect_format(tractogram) if tractogram_format is not nib.streamlines.TckFile: - print("Skipping non TCK file: '{}'".format(tractogram)) + print(f"Skipping non TCK file: '{tractogram}'") continue filename, _ = os.path.splitext(tractogram) diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py index a55f7e95af..efdcf1fd02 100644 --- a/nibabel/cmdline/trk2tck.py +++ b/nibabel/cmdline/trk2tck.py @@ -25,7 +25,7 @@ def main(): for tractogram in args.tractograms: tractogram_format = nib.streamlines.detect_format(tractogram) if tractogram_format is not nib.streamlines.TrkFile: - print("Skipping non TRK file: '{}'".format(tractogram)) + print(f"Skipping non TRK file: '{tractogram}'") continue filename, _ = os.path.splitext(tractogram) diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py index 57c0ccc286..8931beb617 100644 --- a/nibabel/cmdline/utils.py +++ b/nibabel/cmdline/utils.py @@ -33,7 +33,7 @@ def verbose(thing, msg): """ # TODO: consider using nibabel's logger if thing <= int(verbose_level): - print("%s%s" % (' ' * thing, msg)) + print(f"{' ' * thing}{msg}") def table2string(table, out=None): @@ -78,8 +78,7 @@ def table2string(table, out=None): align = item[1] item = item[2:] if align not in ['l', 'r', 'c', 'w']: - raise ValueError('Unknown alignment %s. Known are l,r,c' % - align) + raise ValueError(f'Unknown alignment {align}. 
Known are l,r,c') else: align = 'c' @@ -93,7 +92,7 @@ def table2string(table, out=None): elif align == 'r': nspacesl, nspacesr = nspacesl + nspacesr, 0 else: - raise RuntimeError('Should not get here with align=%s' % align) + raise RuntimeError(f'Should not get here with align={align}') string_ += "%%%ds%%s%%%ds " \ % (nspacesl, nspacesr) % ('', item, '') @@ -121,5 +120,5 @@ def safe_get(obj, name): f = getattr(obj, 'get_' + name) return f() except Exception as e: - verbose(2, "get_%s() failed -- %s" % (name, e)) + verbose(2, f"get_{name}() failed -- {e}") return '-' diff --git a/nibabel/data.py b/nibabel/data.py index 793b6b310c..adce51b92c 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -128,11 +128,11 @@ def __init__(self, base_path, config_filename=None): cfg_file = self.get_filename(config_filename) readfiles = self.config.read(cfg_file) if not readfiles: - raise DataError('Could not read config file %s' % cfg_file) + raise DataError(f'Could not read config file {cfg_file}') try: self.version = self.config.get('DEFAULT', 'version') except configparser.Error: - raise DataError('Could not get version from %s' % cfg_file) + raise DataError(f'Could not get version from {cfg_file}') version_parts = self.version.split('.') self.major_version = int(version_parts[0]) self.minor_version = int(version_parts[1]) @@ -294,12 +294,11 @@ def make_datasource(pkg_def, **kwargs): pth = [pjoin(this_data_path, *names) for this_data_path in data_path] pkg_hint = pkg_def.get('install hint', DEFAULT_INSTALL_HINT) - msg = ('%s; Is it possible you have not installed a data package?' % - e) + msg = (f'{e}; Is it possible you have not installed a data package?') if 'name' in pkg_def: - msg += '\n\nYou may need the package "%s"' % pkg_def['name'] + msg += f"\n\nYou may need the package \"{pkg_def['name']}\"" if pkg_hint is not None: - msg += '\n\n%s' % pkg_hint + msg += f'\n\n{pkg_hint}' raise DataError(msg) return VersionedDatasource(pth) diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index e0e3d52849..68972a8cb1 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -344,7 +344,7 @@ def get_fdata(self, caching='fill', dtype=np.float64): raise ValueError('caching value should be "fill" or "unchanged"') dtype = np.dtype(dtype) if not issubclass(dtype.type, np.inexact): - raise ValueError('{} should be floating point type'.format(dtype)) + raise ValueError(f'{dtype} should be floating point type') # Return cache if cache present and of correct dtype. 
         if self._fdata_cache is not None:
             if self._fdata_cache.dtype.type == dtype.type:
diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py
index 1a0f85330d..2dd1f11db3 100644
--- a/nibabel/deprecated.py
+++ b/nibabel/deprecated.py
@@ -36,7 +36,7 @@ def __getattr__(self, key):
         return getattr(mod, key)
 
     def __repr__(self):
-        return "<module proxy for {0}>".format(self._module_name)
+        return f"<module proxy for {self._module_name}>"
 
 
 class FutureWarningMixin(object):
diff --git a/nibabel/dft.py b/nibabel/dft.py
index f46259b232..2768f1ec59 100644
--- a/nibabel/dft.py
+++ b/nibabel/dft.py
@@ -279,7 +279,7 @@ def _get_subdirs(base_dir, files_dict=None, followlinks=False):
     for (dirpath, dirnames, filenames) in os.walk(base_dir, followlinks=followlinks):
         abs_dir = os.path.realpath(dirpath)
         if abs_dir in dirs:
-            raise CachingError('link cycle detected under %s' % base_dir)
+            raise CachingError(f'link cycle detected under {base_dir}')
         dirs.append(abs_dir)
         if files_dict is not None:
             files_dict[abs_dir] = filenames
@@ -306,7 +306,7 @@ def update_cache(base_dir, followlinks=False):
     for dir in sorted(mtimes.keys()):
         if dir in db_mtimes and mtimes[dir] <= db_mtimes[dir]:
             continue
-        logger.debug('updating %s' % dir)
+        logger.debug(f'updating {dir}')
         _update_dir(c, dir, files_by_dir[dir], studies, series,
                     storage_instances)
         if dir in db_mtimes:
@@ -353,20 +353,20 @@ def get_studies(base_dir=None, followlinks=False):
 
 
 def _update_dir(c, dir, files, studies, series, storage_instances):
-    logger.debug('Updating directory %s' % dir)
+    logger.debug(f'Updating directory {dir}')
     c.execute("SELECT name, mtime FROM file WHERE directory = ?", (dir, ))
     db_mtimes = dict(c)
     for fname in db_mtimes:
         if fname not in files:
-            logger.debug(' remove %s' % fname)
+            logger.debug(f' remove {fname}')
             c.execute("DELETE FROM file WHERE directory = ? AND name = ?",
                       (dir, fname))
     for fname in files:
-        mtime = os.lstat('%s/%s' % (dir, fname)).st_mtime
+        mtime = os.lstat(f'{dir}/{fname}').st_mtime
         if fname in db_mtimes and mtime <= db_mtimes[fname]:
-            logger.debug(' okay %s' % fname)
+            logger.debug(f' okay {fname}')
         else:
-            logger.debug(' update %s' % fname)
+            logger.debug(f' update {fname}')
             si_uid = _update_file(c, dir, fname, studies, series,
                                   storage_instances)
             if fname not in db_mtimes:
@@ -386,7 +386,7 @@ def _update_dir(c, dir, files, studies, series, storage_instances):
 
 def _update_file(c, path, fname, studies, series, storage_instances):
     try:
-        do = read_file('%s/%s' % (path, fname))
+        do = read_file(f'{path}/{fname}')
     except pydicom.filereader.InvalidDicomError:
         logger.debug(' not a DICOM file')
         return None
@@ -395,7 +395,7 @@ def _update_file(c, path, fname, studies, series, storage_instances):
     except AttributeError:
         study_comments = ''
     try:
-        logger.debug(' storage instance %s' % str(do.SOPInstanceUID))
+        logger.debug(f' storage instance {str(do.SOPInstanceUID)}')
         if str(do.StudyInstanceUID) not in studies:
             query = """INSERT INTO study (uid,
                                           date,
@@ -444,7 +444,7 @@ def _update_file(c, path, fname, studies, series, storage_instances):
         c.execute(query, params)
         storage_instances.append(str(do.SOPInstanceUID))
     except AttributeError as data:
-        logger.debug(' %s' % str(data))
+        logger.debug(f' {str(data)}')
         return None
     return str(do.SOPInstanceUID)
@@ -486,7 +486,7 @@ def clear_cache():
                 mtime INTEGER NOT NULL,
                 storage_instance TEXT DEFAULT NULL REFERENCES storage_instance,
                 PRIMARY KEY (directory, name))""")
-DB_FNAME = pjoin(tempfile.gettempdir(), 'dft.%s.sqlite' % getpass.getuser())
+DB_FNAME = pjoin(tempfile.gettempdir(), f'dft.{getpass.getuser()}.sqlite')
 DB = None
diff --git a/nibabel/externals/netcdf.py b/nibabel/externals/netcdf.py
index a0099ec6b4..b2e2c9a868 100644
--- a/nibabel/externals/netcdf.py
+++ b/nibabel/externals/netcdf.py
@@ -258,7 +258,7 @@ def __init__(self, filename, mode='r', mmap=None, version=1,
         else:  # maybe it's a string
             self.filename = filename
             omode = 'r+' if mode == 'a' else mode
-            self.fp = open(self.filename, '%sb' % omode)
+            self.fp = open(self.filename, f'{omode}b')
             if mmap is None:
                 # Mmapped files on PyPy cannot be usually closed
                 # before the GC runs, so it's better to use mmap=False
@@ -397,7 +397,7 @@ def createVariable(self, name, type, dimensions):
         type = dtype(type)
         typecode, size = type.char, type.itemsize
         if (typecode, size) not in REVERSE:
-            raise ValueError("NetCDF 3 does not support type %s" % type)
+            raise ValueError(f"NetCDF 3 does not support type {type}")
 
         data = empty(shape_, dtype=type.newbyteorder("B"))  # convert to big endian always for NetCDF 3
         self.variables[name] = netcdf_variable(
@@ -589,7 +589,7 @@ def _write_att_values(self, values):
                     break
 
         typecode, size = TYPEMAP[nc_type]
-        dtype_ = '>%s' % typecode
+        dtype_ = f'>{typecode}'
         # asarray() dies with bytes and '>c' in py3k. Change to 'S'
         dtype_ = 'S' if dtype_ == '>c' else dtype_
 
@@ -614,8 +614,7 @@ def _read(self):
         # Check magic bytes and version
         magic = self.fp.read(3)
         if not magic == b'CDF':
-            raise TypeError("Error: %s is not a valid NetCDF 3 file" %
-                            self.filename)
+            raise TypeError(f"Error: {self.filename} is not a valid NetCDF 3 file")
         self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]
 
         # Read file headers and set data.
@@ -762,7 +761,7 @@ def _read_var(self):
         begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
 
         typecode, size = TYPEMAP[nc_type]
-        dtype_ = '>%s' % typecode
+        dtype_ = f'>{typecode}'
 
         return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
@@ -777,7 +776,7 @@ def _read_att_values(self):
         self.fp.read(-count % 4)  # read padding
 
         if typecode != 'c':
-            values = frombuffer(values, dtype='>%s' % typecode).copy()
+            values = frombuffer(values, dtype=f'>{typecode}').copy()
             if values.shape == (1,):
                 values = values[0]
         else:
diff --git a/nibabel/externals/oset.py b/nibabel/externals/oset.py
index 0a29c661c5..3c49f8f856 100644
--- a/nibabel/externals/oset.py
+++ b/nibabel/externals/oset.py
@@ -72,8 +72,8 @@ def pop(self, last=True):
 
     def __repr__(self):
         if not self:
-            return '%s()' % (self.__class__.__name__,)
-        return '%s(%r)' % (self.__class__.__name__, list(self))
+            return f'{self.__class__.__name__}()'
+        return f'{self.__class__.__name__}({list(self)!r})'
 
     def __eq__(self, other):
         if isinstance(other, OrderedSet):
diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py
index fdc8a00e7f..436c2cd676 100644
--- a/nibabel/filebasedimages.py
+++ b/nibabel/filebasedimages.py
@@ -301,8 +301,7 @@ def filespec_to_file_map(klass, filespec):
                 trailing_suffixes=klass._compressed_suffixes)
         except TypesFilenamesError:
             raise ImageFileError(
-                'Filespec "{0}" does not look right for class {1}'.format(
-                    filespec, klass))
+                f'Filespec "{filespec}" does not look right for class {klass}')
         file_map = {}
         for key, fname in filenames.items():
             file_map[key] = FileHolder(filename=fname)
diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py
index 5d84a9d6dc..af0ff74541 100644
--- a/nibabel/filename_parser.py
+++ b/nibabel/filename_parser.py
@@ -134,8 +134,7 @@ def types_filenames(template_fname, types_exts,
             # is ignored).  It's confusing to change
            # this to test.img.gz, or test.gz.img, so error
             raise TypesFilenamesError(
-                'Confusing ignored suffix %s without extension'
-                % ignored)
+                f'Confusing ignored suffix {ignored} without extension')
     # if we've got to here, we have a guessed name and a found
     # extension.
     else:  # not enforcing extensions. If there's an extension, we set the
diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py
index 6c4616196f..0bb987c8be 100644
--- a/nibabel/fileslice.py
+++ b/nibabel/fileslice.py
@@ -426,7 +426,7 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride,
         action = heuristic(slicer, dim_len, stride)
         # Check return values (we may be using a custom function)
         if action not in ('full', 'contiguous', None):
-            raise ValueError('Unexpected return %s from heuristic' % action)
+            raise ValueError(f'Unexpected return {action} from heuristic')
         if is_int and action == 'contiguous':
             raise ValueError("int index cannot be contiguous")
         # If this is the slowest changing dimension, never upgrade None or
diff --git a/nibabel/fileutils.py b/nibabel/fileutils.py
index b88e2f7128..c518cdd921 100644
--- a/nibabel/fileutils.py
+++ b/nibabel/fileutils.py
@@ -52,8 +52,7 @@ def read_zt_byte_strings(fobj, n_strings=1, bufsize=1024):
         if eof or n_found >= n_strings:
             break
     if n_found < n_strings:
-        raise ValueError('Expected {0} strings, found {1}'.format(
-            n_strings, n_found))
+        raise ValueError(f'Expected {n_strings} strings, found {n_found}')
     n_extra = n_found - n_strings
     leftover_strings = byte_strings[n_strings:] + [trailing]
     # Add number of extra strings to account for lost terminal 0s
diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py
index 2bcbbffb1d..467797ab51 100644
--- a/nibabel/freesurfer/io.py
+++ b/nibabel/freesurfer/io.py
@@ -228,12 +228,11 @@ def write_geometry(filepath, coords, faces, create_stamp=None,
     magic_bytes = np.array([255, 255, 254], dtype=np.uint8)
 
     if create_stamp is None:
-        create_stamp = "created by %s on %s" % (getpass.getuser(),
-                                                time.ctime())
+        create_stamp = f"created by {getpass.getuser()} on {time.ctime()}"
 
     with open(filepath, 'wb') as fobj:
         magic_bytes.tofile(fobj)
-        fobj.write(("%s\n\n" % create_stamp).encode('utf-8'))
+        fobj.write((f"{create_stamp}\n\n").encode('utf-8'))
 
         np.array([coords.shape[0], faces.shape[0]],
                  dtype='>i4').tofile(fobj)
@@ -309,8 +308,7 @@ def write_morph_data(file_like, values, fnum=0):
     if vnum > i4info.max:
         raise ValueError("Too many values for morphometry file")
     if not i4info.min <= fnum <= i4info.max:
-        raise ValueError("Argument fnum must be between {0} and {1}".format(
-            i4info.min, i4info.max))
+        raise ValueError(f"Argument fnum must be between {i4info.min} and {i4info.max}")
 
     with Opener(file_like, 'wb') as fobj:
         fobj.write(magic_bytes)
@@ -537,8 +535,7 @@ def write_string(s):
     if fill_ctab:
         ctab = np.hstack((ctab[:, :4], _pack_rgb(ctab[:, :3])))
     elif not np.array_equal(ctab[:, [4]], _pack_rgb(ctab[:, :3])):
-        warnings.warn('Annotation values in {} will be incorrect'.format(
-            filepath))
+        warnings.warn(f'Annotation values in {filepath} will be incorrect')
 
     # vtxct
     write(vnum)
@@ -605,7 +602,7 @@ def _serialize_volume_info(volume_info):
             'zras', 'cras']
     diff = set(volume_info.keys()).difference(keys)
     if len(diff) > 0:
-        raise ValueError('Invalid volume info: %s.' % diff.pop())
+        raise ValueError(f'Invalid volume info: {diff.pop()}.')
 
     strings = list()
     for key in keys:
@@ -616,11 +613,10 @@ def _serialize_volume_info(volume_info):
             strings.append(np.array(volume_info[key], dtype='>i4').tobytes())
         elif key in ('valid', 'filename'):
             val = volume_info[key]
-            strings.append('{0} = {1}\n'.format(key, val).encode('utf-8'))
+            strings.append(f'{key} = {val}\n'.encode('utf-8'))
         elif key == 'volume':
             val = volume_info[key]
-            strings.append('{0} = {1} {2} {3}\n'.format(
-                key, val[0], val[1], val[2]).encode('utf-8'))
+            strings.append(f'{key} = {val[0]} {val[1]} {val[2]}\n'.encode('utf-8'))
         else:
             val = volume_info[key]
             strings.append('{0} = {1:0.10g} {2:0.10g} {3:0.10g}\n'.format(
diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py
index 72a754efe8..0a9c4fab17 100644
--- a/nibabel/freesurfer/mghformat.py
+++ b/nibabel/freesurfer/mghformat.py
@@ -223,7 +223,7 @@ def set_data_dtype(self, datatype):
         try:
             code = self._data_type_codes[datatype]
         except KeyError:
-            raise MGHError('datatype dtype "%s" not recognized' % datatype)
+            raise MGHError(f'datatype dtype "{datatype}" not recognized')
         self._structarr['type'] = code
 
     def _ndims(self):
@@ -284,8 +284,7 @@ def set_zooms(self, zooms):
         hdr['delta'] = zooms[:3]
         if len(zooms) == 4:
             if zooms[3] < 0:
-                raise HeaderDataError('TR must be non-negative; got {!r}'
-                                      ''.format(zooms[3]))
+                raise HeaderDataError(f'TR must be non-negative; got {zooms[3]!r}')
             hdr['tr'] = zooms[3]
 
     def get_data_shape(self):
diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py
index 382dfbb186..ba44c14f13 100644
--- a/nibabel/freesurfer/tests/test_io.py
+++ b/nibabel/freesurfer/tests/test_io.py
@@ -36,7 +36,7 @@
 have_freesurfer = isdir(data_path)
 
 freesurfer_test = unittest.skipUnless(have_freesurfer,
-                                      'cannot find freesurfer {0} directory'.format(DATA_SDIR))
+                                      f'cannot find freesurfer {DATA_SDIR} directory')
 
 
 def _hash_file_content(fname):
     hasher = hashlib.md5()
@@ -49,12 +49,12 @@ def _hash_file_content(fname):
 @freesurfer_test
 def test_geometry():
     """Test IO of .surf"""
-    surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "inflated"))
+    surf_path = pjoin(data_path, "surf", f"{'lh'}.{'inflated'}")
     coords, faces = read_geometry(surf_path)
     assert 0 == faces.min()
     assert coords.shape[0] == faces.max() + 1
 
-    surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "sphere"))
+    surf_path = pjoin(data_path, "surf", f"{'lh'}.{'sphere'}")
     coords, faces, volume_info, create_stamp = read_geometry(
         surf_path, read_metadata=True, read_stamp=True)
 
@@ -68,8 +68,7 @@ def test_geometry():
     # with respect to read_geometry()
     with InTemporaryDirectory():
         surf_path = 'test'
-        create_stamp = "created by %s on %s" % (getpass.getuser(),
-                                                time.ctime())
+        create_stamp = f"created by {getpass.getuser()} on {time.ctime()}"
         volume_info['cras'] = [1., 2., 3.]
         write_geometry(surf_path, coords, faces, create_stamp, volume_info)
 
@@ -133,7 +132,7 @@ def test_quad_geometry():
 @freesurfer_test
 def test_morph_data():
     """Test IO of morphometry data file (eg. curvature)."""
-    curv_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "curv"))
+    curv_path = pjoin(data_path, "surf", f"{'lh'}.{'curv'}")
     curv = read_morph_data(curv_path)
     assert -1.0 < curv.min() < 0
     assert 0 < curv.max() < 1.0
@@ -171,7 +170,7 @@ def test_annot():
     """Test IO of .annot against freesurfer example data."""
     annots = ['aparc', 'aparc.a2005s']
     for a in annots:
-        annot_path = pjoin(data_path, "label", "%s.%s.annot" % ("lh", a))
+        annot_path = pjoin(data_path, "label", f"{'lh'}.{a}.annot")
         hash_ = _hash_file_content(annot_path)
 
         labels, ctab, names = read_annot(annot_path)
@@ -214,7 +213,7 @@ def test_read_write_annot():
     # 3 colours in the LUT.
     nvertices = 10
     nlabels = 3
-    names = ['label {}'.format(l) for l in range(1, nlabels + 1)]
+    names = [f'label {l}' for l in range(1, nlabels + 1)]
     # randomly generate a label for each vertex, making sure
     # that at least one of each label value is present. Label
     # values are in the range (0, nlabels-1) - they are used
@@ -248,7 +247,7 @@ def test_write_annot_fill_ctab():
     """Test the `fill_ctab` parameter to :func:`.write_annot`. """
     nvertices = 10
     nlabels = 3
-    names = ['label {}'.format(l) for l in range(1, nlabels + 1)]
+    names = [f'label {l}' for l in range(1, nlabels + 1)]
     labels = list(range(nlabels)) + \
         list(np.random.randint(0, nlabels, nvertices - nlabels))
     labels = np.array(labels, dtype=np.int32)
@@ -269,7 +268,7 @@ def test_write_annot_fill_ctab():
     rgbal = np.hstack((rgba, badannot))
     with clear_and_catch_warnings() as w:
         write_annot(annot_path, labels, rgbal, names, fill_ctab=False)
-    assert any('Annotation values in {} will be incorrect'.format(annot_path) == str(ww.message)
+    assert any(f'Annotation values in {annot_path} will be incorrect' == str(ww.message)
               for ww in w)
     labels2, rgbal2, names2 = read_annot(annot_path, orig_ids=True)
     names2 = [n.decode('ascii') for n in names2]
@@ -284,7 +283,7 @@ def test_write_annot_fill_ctab():
                 rgbal[:, 2] * (2 ** 16))
     with clear_and_catch_warnings() as w:
         write_annot(annot_path, labels, rgbal, names, fill_ctab=False)
-    assert all('Annotation values in {} will be incorrect'.format(annot_path) != str(ww.message)
+    assert all(f'Annotation values in {annot_path} will be incorrect' != str(ww.message)
                for ww in w)
     labels2, rgbal2, names2 = read_annot(annot_path)
     names2 = [n.decode('ascii') for n in names2]
@@ -322,7 +321,7 @@ def gen_old_annot_file(fpath, nverts, labels, rgba, names):
     with InTemporaryDirectory():
         nverts = 10
         nlabels = 3
-        names = ['Label {}'.format(l) for l in range(nlabels)]
+        names = [f'Label {l}' for l in range(nlabels)]
         labels = np.concatenate((
             np.arange(nlabels), np.random.randint(0, nlabels, nverts - nlabels)))
         np.random.shuffle(labels)
@@ -356,7 +355,7 @@ def test_write_annot_maxstruct():
     """Test writing ANNOT files with repeated labels"""
     with InTemporaryDirectory():
         nlabels = 3
-        names = ['label {}'.format(l) for l in range(1, nlabels + 1)]
+        names = [f'label {l}' for l in range(1, nlabels + 1)]
         # max label < n_labels
         labels = np.array([1, 1, 1], dtype=np.int32)
         rgba = np.array(np.random.randint(0, 255, (nlabels, 4)), dtype=np.int32)
diff --git a/nibabel/funcs.py b/nibabel/funcs.py
index 21246d8ec6..1271b3e926 100644
--- a/nibabel/funcs.py
+++ b/nibabel/funcs.py
@@ -134,8 +134,7 @@ def concat_images(images, check_affines=True, axis=None):
     for i, img in enumerate(images):
         if len(img.shape) != n_dim:
             raise ValueError(
-                'Image {0} has {1} dimensions, image 0 has {2}'.format(
-                    i, len(img.shape), n_dim))
+                f'Image {i} has {len(img.shape)} dimensions, image 0 has {n_dim}')
         if not np.all(np.array(img.shape)[idx_mask] == masked_shape):
             raise ValueError('shape {0} for image {1} not compatible with '
                              'first image shape {2} with axis == {3}'.format(
diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py
index 8fec096259..2bc10906d4 100644
--- a/nibabel/gifti/gifti.py
+++ b/nibabel/gifti/gifti.py
@@ -487,7 +487,7 @@ def to_xml_open(self):
 \tExternalFileOffset="%d">\n"""
         di = ""
         for i, n in enumerate(self.dims):
-            di = di + '\tDim%s=\"%s\"\n' % (str(i), str(n))
+            di = di + f'\tDim{str(i)}="{str(n)}\"\n'
         return out % (intent_codes.niistring[self.intent],
                       data_type_codes.niistring[self.datatype],
                       array_index_order_codes.label[self.ind_ord],
@@ -838,7 +838,7 @@ def print_summary(self):
         print(self.labeltable.print_summary())
         for i, da in enumerate(self.darrays):
             print('----')
-            print('DataArray %s:' % i)
+            print(f'DataArray {i}:')
             print(da.print_summary())
         print('----end----')
diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py
index 044a70fede..3b60693478 100644
--- a/nibabel/gifti/parse_gifti_fast.py
+++ b/nibabel/gifti/parse_gifti_fast.py
@@ -177,7 +177,7 @@ def StartElementHandler(self, name, attrs):
                 attrs["ArrayIndexingOrder"]]
             num_dim = int(attrs.get("Dimensionality", 0))
             for i in range(num_dim):
-                di = "Dim%s" % str(i)
+                di = f"Dim{str(i)}"
                 if di in attrs:
                     self.da.dims.append(int(attrs[di]))
             # dimensionality has to correspond to the number of DimX given
diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py
index 54d8e78621..c323ae51df 100644
--- a/nibabel/gifti/tests/test_parse_gifti_fast.py
+++ b/nibabel/gifti/tests/test_parse_gifti_fast.py
@@ -106,8 +106,7 @@ def assert_default_types(loaded):
             continue
         loadedtype = type(getattr(loaded, attr))
         assert loadedtype == defaulttype, (
-            "Type mismatch for attribute: {} ({!s} != {!s})".format(
-                attr, loadedtype, defaulttype))
+            f"Type mismatch for attribute: {attr} ({loadedtype!s} != {defaulttype!s})")
 
 
 def test_default_types():
diff --git a/nibabel/info.py b/nibabel/info.py
index e9a48e42d1..5a344c6f8e 100644
--- a/nibabel/info.py
+++ b/nibabel/info.py
@@ -16,10 +16,7 @@
 _version_extra = 'dev'
 
 # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
-VERSION = "%s.%s.%s%s" % (_version_major,
-                          _version_minor,
-                          _version_micro,
-                          _version_extra)
+VERSION = f"{_version_major}.{_version_minor}.{_version_micro}{_version_extra}"
 
 
 # Note: this long_description is the canonical place to edit this text.
diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py
index 85713aa24b..cedbe8e8e9 100644
--- a/nibabel/loadsave.py
+++ b/nibabel/loadsave.py
@@ -41,9 +41,9 @@ def load(filename, **kwargs):
     try:
         stat_result = os.stat(filename)
     except OSError:
-        raise FileNotFoundError("No such file or no access: '%s'" % filename)
+        raise FileNotFoundError(f"No such file or no access: '{filename}'")
     if stat_result.st_size <= 0:
-        raise ImageFileError("Empty file: '%s'" % filename)
+        raise ImageFileError(f"Empty file: '{filename}'")
 
     sniff = None
     for image_klass in all_image_classes:
@@ -52,8 +52,7 @@ def load(filename, **kwargs):
             img = image_klass.from_filename(filename, **kwargs)
             return img
 
-    raise ImageFileError('Cannot work out file type of "%s"' %
-                         filename)
+    raise ImageFileError(f'Cannot work out file type of "{filename}"')
 
 
 @deprecate_with_version('guessed_image_type deprecated.'
@@ -78,8 +77,7 @@ def guessed_image_type(filename):
             if is_valid:
                 return image_klass
 
-    raise ImageFileError('Cannot work out file type of "%s"' %
-                         filename)
+    raise ImageFileError(f'Cannot work out file type of "{filename}"')
 
 
 def save(img, filename):
@@ -130,8 +128,7 @@ def save(img, filename):
     valid_klasses = [klass for klass in all_image_classes
                      if ext in klass.valid_exts]
     if not valid_klasses:  # if list is empty
-        raise ImageFileError('Cannot work out file type of "%s"' %
-                             filename)
+        raise ImageFileError(f'Cannot work out file type of "{filename}"')
 
     # Got a list of valid extensions, but that's no guarantee
     # the file conversion will work. So, try each image
@@ -207,7 +204,7 @@ def read_img_data(img, prefer='scaled'):
     other formats with more complicated scaling - such as MINC.
     """
     if prefer not in ('scaled', 'unscaled'):
-        raise ValueError('Invalid string "%s" for "prefer"' % prefer)
+        raise ValueError(f'Invalid string "{prefer}" for "prefer"')
     hdr = img.header
     if not hasattr(hdr, 'raw_data_from_fileobj'):
         # We can only do scaled
diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py
index 6ef089e301..8082608b73 100644
--- a/nibabel/nicom/csareader.py
+++ b/nibabel/nicom/csareader.py
@@ -56,7 +56,7 @@ def get_csa_header(dcm_data, csa_type='image'):
     elif csa_type == 'series':
         element_offset = 0x20
     else:
-        raise ValueError('Invalid CSA header type "%s"' % csa_type)
+        raise ValueError(f'Invalid CSA header type "{csa_type}"')
     if not (0x29, 0x10) in dcm_data:  # Cannot be Siemens CSA
         return None
     section_start = find_private_section(dcm_data, 0x29, 'SIEMENS CSA HEADER')
@@ -123,8 +123,7 @@ def read(csa_str):
         if tag_no == 1:
             tag0_n_items = n_items
         if n_items > MAX_CSA_ITEMS:
-            raise CSAReadError('Expected <= {0} tags, got {1}'.format(
-                MAX_CSA_ITEMS, n_items))
+            raise CSAReadError(f'Expected <= {MAX_CSA_ITEMS} tags, got {n_items}')
         items = []
         for item_no in range(n_items):
             x0, x1, x2, x3 = up_str.unpack('4i')
diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py
index dee8b507d5..5d5ea11799 100644
--- a/nibabel/nicom/dicomreaders.py
+++ b/nibabel/nicom/dicomreaders.py
@@ -83,7 +83,7 @@ def read_mosaic_dir(dicom_path,
     gradients = []
     arrays = []
     if len(filenames) == 0:
-        raise IOError('Found no files with "%s"' % full_globber)
+        raise IOError(f'Found no files with "{full_globber}"')
     for fname in filenames:
         dcm_w = wrapper_from_file(fname, **dicom_kwargs)
         # Because the routine sorts by filename, it only makes sense to use
diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py
index b718b980aa..00e964928f 100755
--- a/nibabel/nicom/dicomwrappers.py
+++ b/nibabel/nicom/dicomwrappers.py
@@ -286,7 +286,7 @@ def series_signature(self):
     def __getitem__(self, key):
         """ Return values from DICOM object"""
         if key not in self.dcm_data:
-            raise KeyError('"%s" not in self.dcm_data' % key)
+            raise KeyError(f'"{key}" not in self.dcm_data')
         return self.dcm_data.get(key)
 
     def get(self, key, default=None):
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 202decd8e0..c39831b70d 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -345,7 +345,7 @@ def __repr__(self):
         # deal with unknown codes
         code = self._code
 
-        s = "Nifti1Extension('%s', '%s')" % (code, self._content)
+        s = f"Nifti1Extension('{code}', '{self._content}')"
         return s
 
     def __eq__(self, other):
@@ -519,7 +519,7 @@ def get_sizeondisk(self):
         return np.sum([e.get_sizeondisk() for e in self])
 
     def __repr__(self):
-        s = "Nifti1Extensions(%s)" % ', '.join(str(e) for e in self)
+        s = f"Nifti1Extensions({', '.join(str(e) for e in self)})"
         return s
 
     def __cmp__(self, other):
@@ -1169,7 +1169,7 @@ def get_slope_inter(self):
             return None, None
         if not np.isfinite(inter):
             raise HeaderDataError(
-                'Valid slope but invalid intercept {0}'.format(inter))
+                f'Valid slope but invalid intercept {inter}')
         return slope, inter
 
     def set_slope_inter(self, slope, inter=None):
@@ -1397,8 +1397,7 @@ def set_intent(self, code, params=(), name='', allow_unknown=False):
             icode = code
             p_descr = ('p1', 'p2', 'p3')
         if len(params) and len(params) != len(p_descr):
-            raise HeaderDataError('Need params of form %s, or empty'
-                                  % (p_descr,))
+            raise HeaderDataError(f'Need params of form {p_descr}, or empty')
         hdr['intent_code'] = icode
         hdr['intent_name'] = name
         all_params = [0] * 3
@@ -1615,8 +1614,7 @@ def _slice_time_order(self, slabel, n_slices):
             sp_ind_time_order = (list(range(n_slices - 2, -1, -2)) +
                                  list(range(n_slices - 1, -1, -2)))
         else:
-            raise HeaderDataError('We do not handle slice ordering "%s"'
-                                  % slabel)
+            raise HeaderDataError(f'We do not handle slice ordering "{slabel}"')
         return np.argsort(sp_ind_time_order)
 
     def get_xyzt_units(self):
@@ -1682,8 +1680,7 @@ def _chk_magic(hdr, fix=False):
         magic = hdr['magic'].item()
         if magic in (hdr.pair_magic, hdr.single_magic):
             return hdr, rep
-        rep.problem_msg = ('magic string "%s" is not valid' %
-                           asstr(magic))
+        rep.problem_msg = (f'magic string "{asstr(magic)}" is not valid')
         rep.problem_level = 45
         if fix:
             rep.fix_msg = 'leaving as is, but future errors are likely'
@@ -1703,8 +1700,7 @@ def _chk_offset(hdr, fix=False):
                               'single file nifti1' % offset)
             if fix:
                 hdr['vox_offset'] = hdr.single_vox_offset
-                rep.fix_msg = 'setting to minimum value of {0}'.format(
-                    hdr.single_vox_offset)
+                rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}'
             return hdr, rep
         if not offset % 16:
             return hdr, rep
@@ -1895,7 +1891,7 @@ def set_qform(self, affine, code=None, strip_shears=True, **kwargs):
         """
         update_affine = kwargs.pop('update_affine', True)
         if kwargs:
-            raise TypeError('Unexpected keyword argument(s) %s' % kwargs)
+            raise TypeError(f'Unexpected keyword argument(s) {kwargs}')
         self._header.set_qform(affine, code, strip_shears)
         if update_affine:
             if self._affine is None:
@@ -1984,7 +1980,7 @@ def set_sform(self, affine, code=None, **kwargs):
         """
         update_affine = kwargs.pop('update_affine', True)
         if kwargs:
-            raise TypeError('Unexpected keyword argument(s) %s' % kwargs)
+            raise TypeError(f'Unexpected keyword argument(s) {kwargs}')
         self._header.set_sform(affine, code)
         if update_affine:
             if self._affine is None:
diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py
index 81dae3010c..69a08af8db 100644
--- a/nibabel/optpkg.py
+++ b/nibabel/optpkg.py
@@ -99,10 +99,9 @@ def optional_package(name, trip_msg=None, min_version=None):
     # Failed version check
     if trip_msg is None:
         if callable(min_version):
-            trip_msg = 'Package %s fails version check' % min_version
+            trip_msg = f'Package {min_version} fails version check'
         else:
-            trip_msg = ('These functions need %s version >= %s' %
-                        (name, min_version))
+            trip_msg = (f'These functions need {name} version >= {min_version}')
     if trip_msg is None:
         trip_msg = ('We need package %s for these functions, but '
                     '``import %s`` raised %s'
@@ -111,6 +110,6 @@ def optional_package(name, trip_msg=None, min_version=None):
 
     def setup_module():
         import unittest
-        raise unittest.SkipTest('No %s for these tests' % name)
+        raise unittest.SkipTest(f'No {name} for these tests')
 
     return pkg, False, setup_module
diff --git a/nibabel/orientations.py
b/nibabel/orientations.py index baf08c549e..81ee461726 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -113,8 +113,7 @@ def ornt_transform(start_ornt, end_ornt): if start_ornt.shape != end_ornt.shape: raise ValueError("The orientations must have the same shape") if start_ornt.shape[1] != 2: - raise ValueError("Invalid shape for an orientation: %s" % - (start_ornt.shape,)) + raise ValueError(f"Invalid shape for an orientation: {start_ornt.shape}") result = np.empty_like(start_ornt) for end_in_idx, (end_out_idx, end_flip) in enumerate(end_ornt): for start_in_idx, (start_out_idx, start_flip) in enumerate(start_ornt): @@ -296,7 +295,7 @@ def ornt2axcodes(ornt, labels=None): continue axint = int(np.round(axno)) if axint != axno: - raise ValueError('Non integer axis number %f' % axno) + raise ValueError(f'Non integer axis number {axno:f}') elif direction == 1: axcode = labels[axint][1] elif direction == -1: @@ -336,7 +335,7 @@ def axcodes2ornt(axcodes, labels=None): labels = list(zip('LPI', 'RAS')) if labels is None else labels allowed_labels = sum([list(L) for L in labels], []) + [None] if len(allowed_labels) != len(set(allowed_labels)): - raise ValueError('Duplicate labels in {}'.format(allowed_labels)) + raise ValueError(f'Duplicate labels in {allowed_labels}') if not set(axcodes).issubset(allowed_labels): raise ValueError('Not all axis codes {} in label set {}' .format(list(axcodes), allowed_labels)) diff --git a/nibabel/parrec.py b/nibabel/parrec.py index e9ecc91cc4..bb63d28f80 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -430,8 +430,7 @@ def vol_is_full(slice_nos, slice_max, slice_min=1): slice_set = set(range(slice_min, slice_max + 1)) if not slice_set.issuperset(slice_nos): raise ValueError( - 'Slice numbers outside inclusive range {0} to {1}'.format( - slice_min, slice_max)) + f'Slice numbers outside inclusive range {slice_min} to {slice_max}') vol_nos = np.array(vol_numbers(slice_nos)) slice_nos = np.asarray(slice_nos) is_full = np.ones(slice_nos.shape, dtype=bool) @@ -500,10 +499,10 @@ def parse_PAR_header(fobj): version, gen_dict, image_lines = _split_header(fobj) if version not in supported_versions: warnings.warn(one_line( - """ PAR/REC version '{0}' is currently not supported -- making an + f""" PAR/REC version '{version}' is currently not supported -- making an attempt to read nevertheless. Please email the NiBabel mailing list, if you are interested in adding support for this version. - """.format(version))) + """)) general_info = _process_gen_dict(gen_dict) image_defs = _process_image_lines(image_lines, version) return general_info, image_defs @@ -980,7 +979,7 @@ def get_affine(self, origin='scanner'): permute_to_psl = ACQ_TO_PSL.get(slice_orientation) if permute_to_psl is None: raise PARRECError( - "Unknown slice orientation ({0}).".format(slice_orientation)) + f"Unknown slice orientation ({slice_orientation}).") # hdr has deg, we need radians # Order is [ap, fh, rl] ap_rot, fh_rot, rl_rot = self.general_info['angulation'] * DEG2RAD @@ -1076,7 +1075,7 @@ def get_data_scaling(self, method="dv"): slope = 1.0 / scale_slope intercept = rescale_intercept / (rescale_slope * scale_slope) else: - raise ValueError("Unknown scaling method '%s'." 
% method) + raise ValueError(f"Unknown scaling method '{method}'.") reorder = self.get_sorted_slice_indices() slope = slope[reorder] intercept = intercept[reorder] diff --git a/nibabel/processing.py b/nibabel/processing.py index b3bd83d706..5be5333a5d 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -369,9 +369,9 @@ def conform(from_img, if from_img.ndim != required_ndim: raise ValueError("Only 3D images are supported.") elif len(out_shape) != required_ndim: - raise ValueError("`out_shape` must have {} values".format(required_ndim)) + raise ValueError(f"`out_shape` must have {required_ndim} values") elif len(voxel_size) != required_ndim: - raise ValueError("`voxel_size` must have {} values".format(required_ndim)) + raise ValueError(f"`voxel_size` must have {required_ndim} values") start_ornt = io_orientation(from_img.affine) end_ornt = axcodes2ornt(orientation) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index cd3646853d..8947d513fa 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -96,7 +96,7 @@ def fillpositive(xyz, w2_thresh=None): w2 = 1.0 - np.dot(xyz, xyz) if w2 < 0: if w2 < w2_thresh: - raise ValueError('w2 should be positive, but is %e' % w2) + raise ValueError(f'w2 should be positive, but is {w2:e}') w = 0 else: w = np.sqrt(w2) diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py index d0bdb655b0..1e4033b676 100644 --- a/nibabel/rstutils.py +++ b/nibabel/rstutils.py @@ -51,8 +51,7 @@ def rst_table(cell_values, cross = format_chars.pop('cross', '+') title_heading = format_chars.pop('title_heading', '*') if len(format_chars) != 0: - raise ValueError('Unexpected ``format_char`` keys {0}'.format( - ', '.join(format_chars))) + raise ValueError(f"Unexpected ``format_char`` keys {', '.join(format_chars)}") down_joiner = ' ' + down + ' ' down_starter = down + ' ' down_ender = ' ' + down @@ -66,11 +65,11 @@ def rst_table(cell_values, cell_values = np.asarray(cell_values) R, C = cell_values.shape[:2] if row_names is None: - row_names = ['row[{0}]'.format(r) for r in range(R)] + row_names = [f'row[{r}]' for r in range(R)] elif len(row_names) != R: raise ValueError('len(row_names) != number of rows') if col_names is None: - col_names = ['col[{0}]'.format(c) for c in range(C)] + col_names = [f'col[{c}]' for c in range(C)] elif len(col_names) != C: raise ValueError('len(col_names) != number of columns') row_len = max(len(name) for name in row_names) diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 102ad8fd01..8dfe96f927 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -90,7 +90,7 @@ def load(fileobj, lazy_load=False): tractogram_file = detect_format(fileobj) if tractogram_file is None: - raise ValueError("Unknown format for 'fileobj': {}".format(fileobj)) + raise ValueError(f"Unknown format for 'fileobj': {fileobj}") return tractogram_file.load(fileobj, lazy_load=lazy_load) @@ -116,7 +116,7 @@ def save(tractogram, filename, **kwargs): tractogram_file_class = detect_format(filename) if isinstance(tractogram, Tractogram): if tractogram_file_class is None: - msg = "Unknown tractogram file format: '{}'".format(filename) + msg = f"Unknown tractogram file format: '{filename}'" raise ValueError(msg) tractogram_file = tractogram_file_class(tractogram, **kwargs) diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index 71b4bcb3be..51e7c4d7fa 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ 
-74,7 +74,7 @@ def fn_binary_op(self, value): "__floordiv__", "__truediv__", "__lshift__", "__rshift__", "__or__", "__and__", "__xor__"]: _wrap(cls, op=op, inplace=False) - _wrap(cls, op="__i{}__".format(op.strip("_")), inplace=True) + _wrap(cls, op=f"__i{op.strip('_')}__", inplace=True) for op in ["__eq__", "__ne__", "__lt__", "__le__", "__gt__", "__ge__"]: _wrap(cls, op) @@ -526,8 +526,7 @@ def __repr__(self): else: data = str(list(self)) - return "{name}({data})".format(name=self.__class__.__name__, - data=data) + return f"{self.__class__.__name__}({data})" def save(self, filename): """ Saves this :class:`ArraySequence` object to a .npz file. """ diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 627dcbb569..bad1c51a8b 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -252,9 +252,9 @@ def _write_header(fileobj, header): lines = [] lines.append(asstr(header[Field.MAGIC_NUMBER])) - lines.append("count: {0:010}".format(header[Field.NB_STREAMLINES])) + lines.append(f"count: {header[Field.NB_STREAMLINES]:010}") lines.append("datatype: Float32LE") # Always Float32LE. - lines.extend(["{0}: {1}".format(k, v) + lines.extend([f"{k}: {v}" for k, v in header.items() if k not in exclude and not k.startswith("_")]) lines.append("file: . ") # Manually add this last field. @@ -262,12 +262,12 @@ def _write_header(fileobj, header): # Check the header is well formatted. if out.count("\n") > len(lines) - 1: # \n only allowed between lines. - msg = "Key-value pairs cannot contain '\\n':\n{}".format(out) + msg = f"Key-value pairs cannot contain '\\n':\n{out}" raise HeaderError(msg) if out.count(":") > len(lines) - 1: # : only one per line (except the last one which contains END). - msg = "Key-value pairs cannot contain ':':\n{}".format(out) + msg = f"Key-value pairs cannot contain ':':\n{out}" raise HeaderError(msg) # Write header to file. @@ -338,7 +338,7 @@ def _read_header(fileobj): msg = ("Missing 'file' attribute in TCK header." " Will try to guess it.") warnings.warn(msg, HeaderWarning) - hdr['file'] = '. {}'.format(offset_data) + hdr['file'] = f'. {offset_data}' if hdr['file'].split()[0] != '.': msg = ("TCK only supports single-file - in other words the" @@ -452,7 +452,7 @@ def __str__(self): hdr = self.header info = "" - info += "\nMAGIC NUMBER: {0}".format(hdr[Field.MAGIC_NUMBER]) + info += f"\nMAGIC NUMBER: {hdr[Field.MAGIC_NUMBER]}" info += "\n" info += "\n".join(["{}: {}".format(k, v) for k, v in hdr.items() if not k.startswith('_')]) diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index 06e19248f4..aa61e89e3e 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -402,7 +402,7 @@ def _test_binary(op, arrseq, scalars, seqs, inplace=False): if op in CMP_OPS: continue - op = "__i{}__".format(op.strip("_")) + op = f"__i{op.strip('_')}__" _test_binary(op, seq, SCALARS, ARRSEQS, inplace=True) if op == "__itruediv__": diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py index 8fb35fc368..968cd41500 100644 --- a/nibabel/streamlines/tests/test_trk.py +++ b/nibabel/streamlines/tests/test_trk.py @@ -373,7 +373,7 @@ def test_write_too_many_scalars_and_properties(self): # TRK supports up to 10 data_per_point. 
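[Editor's note — illustrative aside, not part of the patch.] The tck.py hunk above keeps the `:010` zero-padding spec when `"count: {0:010}".format(...)` becomes an f-string; the mini-language after the colon is identical for str.format() and f-strings, so specs like `:010` or `:<52` carry over unchanged. A small sketch with invented values:

# Sketch: format specs are shared between str.format() and f-strings (illustration only).
n_streamlines = 42
old = "count: {0:010}".format(n_streamlines)
new = f"count: {n_streamlines:010}"
assert old == new == "count: 0000000042"   # zero-padded to width 10
assert f"{'x.PAR':<8}|" == "x.PAR   |"     # left-align padding, as in {parfile:<52}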
data_per_point = {} for i in range(10): - data_per_point['#{0}'.format(i)] = DATA['fa'] + data_per_point[f'#{i}'] = DATA['fa'] tractogram = Tractogram(DATA['streamlines'], data_per_point=data_per_point, @@ -388,7 +388,7 @@ def test_write_too_many_scalars_and_properties(self): assert_tractogram_equal(new_trk.tractogram, tractogram) # More than 10 data_per_point should raise an error. - data_per_point['#{0}'.format(i+1)] = DATA['fa'] + data_per_point[f'#{i + 1}'] = DATA['fa'] tractogram = Tractogram(DATA['streamlines'], data_per_point=data_per_point, @@ -401,7 +401,7 @@ def test_write_too_many_scalars_and_properties(self): # TRK supports up to 10 data_per_streamline. data_per_streamline = {} for i in range(10): - data_per_streamline['#{0}'.format(i)] = DATA['mean_torsion'] + data_per_streamline[f'#{i}'] = DATA['mean_torsion'] tractogram = Tractogram(DATA['streamlines'], data_per_streamline=data_per_streamline, @@ -416,7 +416,7 @@ def test_write_too_many_scalars_and_properties(self): assert_tractogram_equal(new_trk.tractogram, tractogram) # More than 10 data_per_streamline should raise an error. - data_per_streamline['#{0}'.format(i+1)] = DATA['mean_torsion'] + data_per_streamline[f'#{i + 1}'] = DATA['mean_torsion'] tractogram = Tractogram(DATA['streamlines'], data_per_streamline=data_per_streamline) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 71f9e84db2..b52be2a5c6 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -31,7 +31,7 @@ def test_data(subdir=None, fname=None): elif subdir in ('gifti', 'nicom', 'externals'): resource = os.path.join(subdir, 'tests', 'data') else: - raise ValueError("Unknown test data directory: %s" % subdir) + raise ValueError(f"Unknown test data directory: {subdir}") if fname is not None: resource = os.path.join(resource, fname) @@ -89,7 +89,7 @@ def assert_re_in(regex, c, flags=0): for e in c: if re.match(regex, e, flags=flags): return - raise AssertionError("Not a single entry matched %r in %r" % (regex, c)) + raise AssertionError(f"Not a single entry matched {regex!r} in {c!r}") def get_fresh_mod(mod_name=__name__): @@ -199,7 +199,7 @@ class suppress_warnings(error_warnings): def runif_extra_has(test_str): """Decorator checks to see if NIPY_EXTRA_TESTS env var contains test_str""" - return unittest.skipUnless(test_str in EXTRA_SET, "Skip {0} tests.".format(test_str)) + return unittest.skipUnless(test_str in EXTRA_SET, f"Skip {test_str} tests.") def assert_arr_dict_equal(dict1, dict2): diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index c7352c3f89..1f10a02aa1 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -29,6 +29,7 @@ from nibabel import parrec from nibabel.affines import to_matvec from nibabel.optpkg import optional_package + _, have_scipy, _ = optional_package('scipy') @@ -71,6 +72,4 @@ def gmean_norm(data): fixed_img = resample_img2img(normal_img, funny_img) fixed_data = fixed_img.get_fdata() difference_data = normal_normed - gmean_norm(fixed_data) - print('RMS resliced {:<52} : {}'.format( - parfile, - np.sqrt(np.sum(difference_data ** 2)))) + print(f'RMS resliced {parfile:<52} : {np.sqrt(np.sum(difference_data ** 2))}') diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 3c1b58502d..663d7845a8 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -46,4 +46,4 @@ def needs_nibabel_data(subdir=None): # Path should not be empty (as 
is the case for not-updated submodules) have_files = exists(required_path) and len(listdir(required_path)) > 0 return unittest.skipUnless(have_files, - "Need files in {0} for these tests".format(required_path)) + f"Need files in {required_path} for these tests") diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index 0027cc36b2..bc7e9977f0 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -80,7 +80,7 @@ def __init__(self, self.local_script_dir = local_script_dir(script_sdir) self.local_module_dir = local_module_dir(module_sdir) if debug_print_var is None: - debug_print_var = '{0}_DEBUG_PRINT'.format(module_sdir.upper()) + debug_print_var = f'{module_sdir.upper()}_DEBUG_PRINT' self.debug_print = os.environ.get(debug_print_var, False) self.output_processor = output_processor @@ -119,9 +119,9 @@ def run_command(self, cmd, check_code=True): # Quote any arguments with spaces. The quotes delimit the arguments # on Windows, and the arguments might be file paths with spaces. # On Unix the list elements are each separate arguments. - cmd = ['"{0}"'.format(c) if ' ' in c else c for c in cmd] + cmd = [f'"{c}"' if ' ' in c else c for c in cmd] if self.debug_print: - print("Running command '%s'" % cmd) + print(f"Running command '{cmd}'") env = os.environ if not self.local_module_dir is None: # module likely comes from the current working directory. We might need @@ -139,13 +139,13 @@ def run_command(self, cmd, check_code=True): proc.terminate() if check_code and proc.returncode != 0: raise RuntimeError( - """Command "{0}" failed with + f"""Command "{cmd}" failed with stdout ------ - {1} + {stdout} stderr ------ - {2} - """.format(cmd, stdout, stderr)) + {stderr} + """) opp = self.output_processor return proc.returncode, opp(stdout), opp(stderr) diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index a4d23aaefd..76043348c9 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -18,7 +18,7 @@ def meth(self): for imaker, params in self.obj_params(): validator(self, imaker, params) meth.__name__ = 'test_' + name[len('validate_'):] - meth.__doc__ = 'autogenerated test from {}.{}'.format(klass.__name__, name) + meth.__doc__ = f'autogenerated test from {klass.__name__}.{name}' return meth for name in dir(klass): if not name.startswith('validate_'): diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 4b065b312f..887b231464 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -409,7 +409,7 @@ def test_keep_file_open_true_false_invalid(): with InTemporaryDirectory(), \ mock.patch('nibabel.openers.ImageOpener', CountingImageOpener), \ patch_indexed_gzip(have_igzip): - fname = 'testdata.{}'.format(filetype) + fname = f'testdata.{filetype}' # create the test data file if filetype == 'gz': with gzip.open(fname, 'wb') as fobj: diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 9268c3fe36..1e93440269 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -655,7 +655,7 @@ def test_float_int_min_max(): arr = np.array([finf['min'], finf['max']], dtype=in_dt) # Bug in numpy 1.6.2 on PPC leading to infs - abort if not np.all(np.isfinite(arr)): - print('Hit PPC max -> inf bug; skip in_type %s' % in_dt) + print(f'Hit PPC max -> inf bug; skip in_type {in_dt}') continue for out_dt in IUINT_TYPES: try: diff --git a/nibabel/tests/test_data.py 
b/nibabel/tests/test_data.py index 57d5b36f38..56671cdf7d 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -141,7 +141,7 @@ def test_data_path(with_nimd_env): tmpfile = pjoin(tmpdir, 'config.ini') with open(tmpfile, 'wt') as fobj: fobj.write('[DATA]\n') - fobj.write('path = %s' % tst_pth) + fobj.write(f'path = {tst_pth}') nibd.get_nipy_user_dir = lambda: tmpdir assert get_data_path() == tst_list + def_dirs + [tmpdir] nibd.get_nipy_user_dir = lambda: fake_user_dir @@ -152,7 +152,7 @@ def test_data_path(with_nimd_env): tmpfile = pjoin(tmpdir, 'an_example.ini') with open(tmpfile, 'wt') as fobj: fobj.write('[DATA]\n') - fobj.write('path = %s\n' % tst_pth) + fobj.write(f'path = {tst_pth}\n') tmpfile = pjoin(tmpdir, 'another_example.ini') with open(tmpfile, 'wt') as fobj: fobj.write('[DATA]\n') diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index cf56dd598d..a22e633d5a 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -100,8 +100,7 @@ def test_dep_func(self): with pytest.raises(ExpiredDeprecationError): func() assert (func.__doc__ == - 'foo\n\n* Raises {} as of version: 1.8\n' - .format(ExpiredDeprecationError)) + f'foo\n\n* Raises {ExpiredDeprecationError} as of version: 1.8\n') func = dec('foo', '1.2', '1.8')(func_no_doc) with pytest.raises(ExpiredDeprecationError): func() diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index e419eb8868..a08a24d102 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -67,7 +67,7 @@ def test_type_info(): ld_dict = dbl_dict.copy() ld_dict['width'] = width else: - raise ValueError("Unexpected float type {} to test".format(np.longdouble)) + raise ValueError(f"Unexpected float type {np.longdouble} to test") assert ld_dict == infod diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index a19289037f..cd1ea18709 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -57,8 +57,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, if expect_success: # Check that the sniff returned is appropriate. 
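[Editor's note — illustrative aside, not part of the patch.] The test_deprecator.py hunk above interpolates a class object, `{ExpiredDeprecationError}`; an f-string renders such an object via str(), which for a class is the `<class '...'>` form, and that is what the expected docstring text matches. A sketch with a stand-in class (not nibabel's real one):

# Sketch: interpolating a class renders str(cls) (illustration only; stand-in class).
class ExpiredDeprecationError(Exception):
    pass

msg = f"* Raises {ExpiredDeprecationError} as of version: 1.8"
assert "<class '" in msg and "ExpiredDeprecationError" in msg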
- new_msg = '%s returned sniff==None (%s)' % (img_klass.__name__, - msg) + new_msg = f'{img_klass.__name__} returned sniff==None ({msg})' expected_sizeof_hdr = getattr(img_klass.header_class, 'sizeof_hdr', 0) current_sizeof_hdr = 0 if new_sniff is None else \ diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 71f0435f1a..fdf2d93dde 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -89,7 +89,7 @@ def test_read_img_data_nifti(): with pytest.raises(ImageFileError): read_img_data(img) # Make a filemap - froot = 'an_image_{0}'.format(i) + froot = f'an_image_{i}' img.file_map = img.filespec_to_file_map(froot) # Trying to read from this filemap will generate an error because # we are going to read from files that do not exist diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 28144f3af4..386dca2cd9 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -35,7 +35,7 @@ def test_module_removal(): for module in _filter(MODULE_SCHEDULE): with pytest.raises(ImportError): __import__(module) - assert False, "Time to remove %s" % module + assert False, f"Time to remove {module}" def test_object_removal(): @@ -44,7 +44,7 @@ def test_object_removal(): module = __import__(module_name) except ImportError: continue - assert not hasattr(module, obj), "Time to remove %s.%s" % (module_name, obj,) + assert not hasattr(module, obj), f"Time to remove {module_name}.{obj}" def test_attribute_removal(): @@ -57,7 +57,7 @@ def test_attribute_removal(): klass = getattr(module, cls) except AttributeError: continue - assert not hasattr(klass, attr), "Time to remove %s.%s.%s" % (module_name, cls, attr,) + assert not hasattr(klass, attr), f"Time to remove {module_name}.{cls}.{attr}" # diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index 9ef8dd3bad..3c33eb5740 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -188,7 +188,7 @@ def check_int_a2f(in_type, out_type): # Bug in numpy 1.6.2 on PPC leading to infs - abort if not np.all(np.isfinite(data)): if DEBUG: - print('Hit PPC max -> inf bug; skip in_type %s' % in_type) + print(f'Hit PPC max -> inf bug; skip in_type {in_type}') return else: # Funny behavior with complex256 data = np.zeros((2,), in_type) diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 87d28d8245..591f85343a 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -175,7 +175,7 @@ def test_help(): continue # do not test this one code, stdout, stderr = run_command([cmd, '--help']) assert code == 0 - assert_re_in(".*%s" % cmd, stdout) + assert_re_in(f".*{cmd}", stdout) assert_re_in(".*Usage", stdout) # Some third party modules might like to announce some Deprecation # etc warnings, see e.g. 
https://travis-ci.org/nipy/nibabel/jobs/370353602 @@ -194,15 +194,15 @@ def test_nib_nifti_dx(): clean_hdr = pjoin(DATA_PATH, 'nifti1.hdr') cmd = ['nib-nifti-dx', clean_hdr] code, stdout, stderr = run_command(cmd) - assert stdout.strip() == 'Header for "%s" is clean' % clean_hdr + assert stdout.strip() == f'Header for "{clean_hdr}" is clean' dirty_hdr = pjoin(DATA_PATH, 'analyze.hdr') cmd = ['nib-nifti-dx', dirty_hdr] code, stdout, stderr = run_command(cmd) - expected = """Picky header check output for "%s" + expected = f"""Picky header check output for "{dirty_hdr}" pixdim[0] (qfac) should be 1 (default) or -1 magic string "" is not valid -sform_code 11776 not valid""" % (dirty_hdr,) +sform_code 11776 not valid""" # Split strings to remove line endings assert stdout == expected diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 65c0759bc6..58f05180fa 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -630,9 +630,9 @@ def test_load_mmap(self): back_img = func(param1, **kwargs) back_data = np.asanyarray(back_img.dataobj) if expected_mode is None: - assert not isinstance(back_data, np.memmap), 'Should not be a %s' % img_klass.__name__ + assert not isinstance(back_data, np.memmap), f'Should not be a {img_klass.__name__}' else: - assert isinstance(back_data, np.memmap), 'Not a %s' % img_klass.__name__ + assert isinstance(back_data, np.memmap), f'Not a {img_klass.__name__}' if self.check_mmap_mode: assert back_data.mode == expected_mode del back_img, back_data diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index fc63556edc..011e16d47d 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -347,7 +347,7 @@ class MyHdr(self.header_class): # Speculating that we can set code value 0 or 1 new_code = 1 if code == 0 else 0 hdr[key] = new_code - assert hdr.get_value_label(key) == ''.format(new_code) + assert hdr.get_value_label(key) == f'' class MyWrapStruct(WrapStruct): diff --git a/nibabel/trackvis.py b/nibabel/trackvis.py index 3b46336bd8..f18405b0d0 100644 --- a/nibabel/trackvis.py +++ b/nibabel/trackvis.py @@ -167,8 +167,7 @@ def read(fileobj, as_generator=False, points_space=None, strict=True): else: hdr = hdr.newbyteorder() if hdr['hdr_size'] != 1000: - raise HeaderError('Invalid hdr_size of %s' - % hdr['hdr_size']) + raise HeaderError(f"Invalid hdr_size of {hdr['hdr_size']}") endianness = swapped_code # Check version and adapt structure accordingly version = hdr['version'] @@ -248,8 +247,7 @@ def track_gen(): # Raise error if we didn't get as many streams as claimed if n_streams_required != np.inf and n_streams < n_streams_required: raise DataError( - 'Expecting {0} streamlines, found only {1}'.format( - stream_count, n_streams)) + f'Expecting {stream_count} streamlines, found only {n_streams}') streamlines = track_gen() if not as_generator: @@ -428,8 +426,7 @@ def write(fileobj, streamlines, hdr_mapping=None, endianness=None, raise DataError('Expecting 0 scalars per point') else: if scalars.shape != (n_pts, n_s): - raise DataError('Scalars should be shape (%s, %s)' % - (n_pts, n_s)) + raise DataError(f'Scalars should be shape ({n_pts}, {n_s})') if scalars.dtype != f4dt: scalars = scalars.astype(f4dt) pts = np.c_[pts, scalars] @@ -439,7 +436,7 @@ def write(fileobj, streamlines, hdr_mapping=None, endianness=None, raise DataError('Expecting 0 properties per point') else: if props.size != n_p: - raise DataError('Properties should be size %s' % n_p) 
+ raise DataError(f'Properties should be size {n_p}') if props.dtype != f4dt: props = props.astype(f4dt) fileobj.write(props.tobytes()) @@ -480,7 +477,7 @@ def _check_hdr_points_space(hdr, points_space): raise HeaderError('Cannot convert between voxels and voxmm when ' '"voxel_sizes" all 0') if np.any(voxel_size == 0): - warnings.warn('zero values in "voxel_size" - %s' % voxel_size) + warnings.warn(f'zero values in "voxel_size" - {voxel_size}') return elif points_space == 'rasmm': try: diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 27a750bc0f..509fa99ef2 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -213,8 +213,8 @@ def __init__(self, data, affine=None, axes=None, title=None): self._draw() def __repr__(self): - title = '' if self._title is None else ('%s ' % self._title) - vol = '' if self.n_volumes <= 1 else (', %s' % self.n_volumes) + title = '' if self._title is None else (f'{self._title} ') + vol = '' if self.n_volumes <= 1 else (f', {self.n_volumes}') r = ('<%s: %s(%s, %s, %s%s)>' % (self.__class__.__name__, title, self._sizes[0], self._sizes[1], self._sizes[2], vol)) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 606e06f52f..962233c395 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -108,8 +108,7 @@ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): self.field1 = {} # a placeholder for the check below for name in fields: if name in self.__dict__: - raise KeyError('Input name %s already in object dict' - % name) + raise KeyError(f'Input name {name} already in object dict') self.__dict__[name] = map_maker() self.field1 = self.__dict__[fields[0]] self.add_codes(codes) diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index 4eabe2504a..50d447a3fd 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -417,8 +417,7 @@ def structarr(self): def __str__(self): """ Return string representation for printing """ - summary = "%s object, endian='%s'" % (self.__class__, - self.endianness) + summary = f"{self.__class__} object, endian='{self.endianness}'" return '\n'.join([summary, pretty_mapping(self)]) def as_byteswapped(self, endianness=None): @@ -529,16 +528,16 @@ def get_value_label(self, fieldname): 'two' """ if fieldname not in self._field_recoders: - raise ValueError('%s not a coded field' % fieldname) + raise ValueError(f'{fieldname} not a coded field') code = int(self._structarr[fieldname]) try: return self._field_recoders[fieldname].label[code] except KeyError: - return ''.format(code) + return f'' def __str__(self): """ Return string representation for printing """ - summary = "%s object, endian='%s'" % (self.__class__, self.endianness) + summary = f"{self.__class__} object, endian='{self.endianness}'" def _getter(obj, key): try: diff --git a/nisext/sexts.py b/nisext/sexts.py index 9ca3519f45..a2b1a10af7 100644 --- a/nisext/sexts.py +++ b/nisext/sexts.py @@ -160,7 +160,7 @@ def version_getter(pkg_name): msgs['opt suffix']) return elif status == 'no-version': - raise RuntimeError('Cannot find version for %s' % pkg_name) + raise RuntimeError(f'Cannot find version for {pkg_name}') assert status == 'low-version' if not optional_tf: raise RuntimeError(msgs['version too old'] % (have_version, @@ -253,7 +253,7 @@ def run(self): froot, ext = splitext(fname) bat_file = pjoin(pth, froot + '.bat') bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname) - log.info("Making %s wrapper for %s" % (bat_file, filepath)) + log.info(f"Making {bat_file} wrapper for {filepath}") if self.dry_run: continue with 
open(bat_file, 'wt') as fobj: diff --git a/nisext/testers.py b/nisext/testers.py index f324d272b4..e661de72a2 100644 --- a/nisext/testers.py +++ b/nisext/testers.py @@ -137,7 +137,7 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): os.environ['PYTHONPATH'] = r'"{pkg_path}"' + os.path.pathsep + PYTHONPATH """.format(**locals()) if print_location: - p_loc = 'print(%s.__file__);' % mod_name + p_loc = f'print({mod_name}.__file__);' else: p_loc = '' cwd = os.getcwd() @@ -154,7 +154,7 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): import {mod_name} {p_loc} {cmd}""".format(**locals())) - res = back_tick('{0} script.py'.format(PYTHON), ret_err=True) + res = back_tick(f'{PYTHON} script.py', ret_err=True) finally: os.chdir(cwd) shutil.rmtree(tmpdir) @@ -194,8 +194,7 @@ def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'): py_lib_locs = ' --install-purelib=%s --install-platlib=%s' % ( site_pkgs_path, site_pkgs_path) pwd = os.path.abspath(os.getcwd()) - cmd = ('%s setup.py --quiet install --prefix=%s %s' % - (PYTHON, to_dir, py_lib_locs)) + cmd = (f'{PYTHON} setup.py --quiet install --prefix={to_dir} {py_lib_locs}') try: os.chdir(from_dir) back_tick(cmd) @@ -267,11 +266,11 @@ def contexts_print_info(mod_name, repo_path, install_path): out_fname = pjoin(install_path, 'test.zip') try: os.chdir(repo_path) - back_tick('git archive --format zip -o %s HEAD' % out_fname) + back_tick(f'git archive --format zip -o {out_fname} HEAD') finally: os.chdir(pwd) install_from_zip(out_fname, install_path, None) - cmd_str = 'print(%s.get_info())' % mod_name + cmd_str = f'print({mod_name}.get_info())' print(run_mod_cmd(mod_name, site_pkgs_path, cmd_str)[0]) # now test install into a directory from the repository install_from_to(repo_path, install_path, PY_LIB_SDIR) @@ -444,7 +443,7 @@ def sdist_tests(mod_name, repo_path=None, label='fast', doctests=True): install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, 'bin') site_pkgs_path = pjoin(install_path, PY_LIB_SDIR) script_path = pjoin(install_path, 'bin') - cmd = "%s.test(label='%s', doctests=%s)" % (mod_name, label, doctests) + cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, cmd, @@ -479,7 +478,7 @@ def bdist_egg_tests(mod_name, repo_path=None, label='fast', doctests=True): 'bdist_egg', '*.egg') zip_extract_all(zip_fname, install_path) - cmd = "%s.test(label='%s', doctests=%s)" % (mod_name, label, doctests) + cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" stdout, stderr = run_mod_cmd(mod_name, install_path, cmd, @@ -524,8 +523,7 @@ def make_dist(repo_path, out_dir, setup_params, zipglob): pwd = os.path.abspath(os.getcwd()) try: os.chdir(repo_path) - back_tick('%s setup.py %s --dist-dir=%s' - % (PYTHON, setup_params, out_dir)) + back_tick(f'{PYTHON} setup.py {setup_params} --dist-dir={out_dir}') zips = glob(pjoin(out_dir, zipglob)) if len(zips) != 1: raise OSError('There must be one and only one %s file, ' diff --git a/nisext/tests/test_testers.py b/nisext/tests/test_testers.py index 08fa70cd1a..8c8a09633f 100644 --- a/nisext/tests/test_testers.py +++ b/nisext/tests/test_testers.py @@ -14,7 +14,7 @@ def test_back_tick(): assert_equal(back_tick(cmd), "Hello") assert_equal(back_tick(cmd, ret_err=True), ("Hello", "")) assert_equal(back_tick(cmd, True, False), (b"Hello", b"")) - cmd = '{0} -c "raise ValueError()"'.format(PYTHON) + cmd = f'{PYTHON} -c "raise ValueError()"' 
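[Editor's note — illustrative aside, not part of the patch.] The nisext/testers.py and test_testers.py hunks above embed double quotes inside single-quoted f-strings (`f'{PYTHON} -c "..."'`); until Python 3.12 the expression part of an f-string may not reuse the f-string's own quote character, so alternating quote styles (and escaping in the literal parts) is the standard workaround. A runnable sketch, using sys.executable to stand in for the patch's PYTHON constant:

# Sketch: quote nesting in f-strings when building shell commands (illustration only).
import sys

PYTHON = sys.executable                      # stand-in for the PYTHON name used above
cmd = f'{PYTHON} -c "print(\'Hello\')"'      # outer ', inner ", \' escaped in the literal part
assert cmd.endswith('-c "print(\'Hello\')"')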
assert_raises(RuntimeError, back_tick, cmd) diff --git a/tools/gitwash_dumper.py b/tools/gitwash_dumper.py index 86b0a74221..156976daf5 100755 --- a/tools/gitwash_dumper.py +++ b/tools/gitwash_dumper.py @@ -19,10 +19,10 @@ def clone_repo(url, branch): cwd = os.getcwd() tmpdir = tempfile.mkdtemp() try: - cmd = 'git clone %s %s' % (url, tmpdir) + cmd = f'git clone {url} {tmpdir}' call(cmd, shell=True) os.chdir(tmpdir) - cmd = 'git checkout %s' % branch + cmd = f'git checkout {branch}' call(cmd, shell=True) except: shutil.rmtree(tmpdir) @@ -136,17 +136,17 @@ def make_link_targets(proj_name, 'and / or mailing list URLs') lines = [] if not url is None: - lines.append('.. _%s: %s\n' % (proj_name, url)) + lines.append(f'.. _{proj_name}: {url}\n') if not have_gh_url: - gh_url = 'https://github.com/%s/%s\n' % (user_name, repo_name) - lines.append('.. _`%s github`: %s\n' % (proj_name, gh_url)) + gh_url = f'https://github.com/{user_name}/{repo_name}\n' + lines.append(f'.. _`{proj_name} github`: {gh_url}\n') if not ml_url is None: - lines.append('.. _`%s mailing list`: %s\n' % (proj_name, ml_url)) + lines.append(f'.. _`{proj_name} mailing list`: {ml_url}\n') if len(lines) == 0: # Nothing to do return # A neat little header line - lines = ['.. %s\n' % proj_name] + lines + lines = [f'.. {proj_name}\n'] + lines out_links = open(out_link_fname, 'wt') out_links.writelines(lines) out_links.close() @@ -175,13 +175,11 @@ def main(): help="github username for main repo - e.g fperez", metavar="MAIN_GH_USER") parser.add_option("--gitwash-url", dest="gitwash_url", - help="URL to gitwash repository - default %s" - % GITWASH_CENTRAL, + help=f"URL to gitwash repository - default {GITWASH_CENTRAL}", default=GITWASH_CENTRAL, metavar="GITWASH_URL") parser.add_option("--gitwash-branch", dest="gitwash_branch", - help="branch in gitwash repository - default %s" - % GITWASH_BRANCH, + help=f"branch in gitwash repository - default {GITWASH_BRANCH}", default=GITWASH_BRANCH, metavar="GITWASH_BRANCH") parser.add_option("--source-suffix", dest="source_suffix", diff --git a/tools/make_tarball.py b/tools/make_tarball.py index 7a9cba1269..69c901d67d 100755 --- a/tools/make_tarball.py +++ b/tools/make_tarball.py @@ -10,8 +10,8 @@ from toollib import * tag = commands.getoutput('git describe') -base_name = 'nibabel-%s' % tag -tar_name = '%s.tgz' % base_name +base_name = f'nibabel-{tag}' +tar_name = f'{base_name}.tgz' # git archive is weird: Even if I give it a specific path, it still won't # archive the whole tree. It seems the only way to get the whole tree is to cd @@ -22,4 +22,4 @@ cd('..') git_tpl = 'git archive --format=tar --prefix={0}/ HEAD | gzip > {1}' c(git_tpl.format(base_name, tar_name)) -c('mv {0} tools/'.format(tar_name)) +c(f'mv {tar_name} tools/') From b183b737ba302ba2e6106271c21117e0d554c48f Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Thu, 4 Jun 2020 23:57:30 +0300 Subject: [PATCH 02/13] More f-strings flynt -ll 999 . 
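[Editor's note — illustrative aside, not part of the patch.] Assuming `-ll` here is flynt's line-length option, `-ll 999` effectively lifts the default (roughly 88-column) cap, so conversions that would produce long lines are no longer skipped in this pass. The shape of the rewrite it performs, sketched with invented values:

# Sketch of the % -> f-string rewrite this pass performs (illustration only).
k, v = "datatype", "Float32LE"
assert "%s: %s" % (k, v) == f"{k}: {v}" == "datatype: Float32LE"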
All changes were reviewed manually --- nibabel/_version.py | 14 +++++++------- nibabel/analyze.py | 8 +++----- nibabel/arrayproxy.py | 8 ++++---- nibabel/benchmarks/bench_arrayproxy_slicing.py | 8 ++++---- nibabel/brikhead.py | 8 ++++---- nibabel/casting.py | 4 ++-- nibabel/cifti2/cifti2.py | 14 ++++++-------- nibabel/spatialimages.py | 6 +++--- nibabel/tests/test_volumeutils.py | 5 ++--- nibabel/volumeutils.py | 13 ++++--------- nisext/sexts.py | 2 +- nisext/tests/test_testers.py | 2 +- 12 files changed, 41 insertions(+), 51 deletions(-) diff --git a/nibabel/_version.py b/nibabel/_version.py index bfb8d6e9f9..7af21161d8 100644 --- a/nibabel/_version.py +++ b/nibabel/_version.py @@ -125,8 +125,8 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + print(f"Tried directories {str(rootdirs)} but " + f"none started with prefix {parentdir_prefix}") raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -283,17 +283,17 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = (f"unable to parse git-describe output: '{describe_out}'") + pieces["error"] = f"unable to parse git-describe output: '{describe_out}'" return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + txt = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" + print(txt) + pieces["error"] = (f"tag '{full_tag}' doesn't start with prefix " + f"'{tag_prefix}'") return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 6aa1418f72..a6a5df7614 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -394,11 +394,9 @@ def from_header(klass, header=None, check=True): try: obj.set_data_dtype(orig_code) except HeaderDataError: - raise HeaderDataError('Input header %s has datatype %s but ' - 'output header %s does not support it' - % (header.__class__, - header.get_value_label('datatype'), - klass)) + raise HeaderDataError(f"Input header {header.__class__} has " + f"datatype {header.get_value_label('datatype')} " + f"but output header {klass} does not support it") obj.set_data_dtype(header.get_data_dtype()) obj.set_data_shape(header.get_data_shape()) obj.set_zooms(header.get_zooms()) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 62de283071..0f169c297e 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -255,8 +255,8 @@ def _should_keep_file_open(self, file_like, keep_file_open): if keep_file_open is None: keep_file_open = KEEP_FILE_OPEN_DEFAULT if keep_file_open not in (True, False): - raise ValueError("nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT must be boolean. " - "Found: {}".format(keep_file_open)) + raise ValueError(f"nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT " + f"must be boolean. 
Found: {keep_file_open}") elif keep_file_open not in (True, False): raise ValueError('keep_file_open must be one of {None, True, False}') @@ -412,8 +412,8 @@ def reshape(self, shape): shape = tuple(unknown_size if e == -1 else e for e in shape) if np.prod(shape) != size: - raise ValueError("cannot reshape array of size {:d} into shape " - "{!s}".format(size, shape)) + raise ValueError(f"cannot reshape array of size {size:d} " + f"into shape {shape!s}") return self.__class__(file_like=self.file_like, spec=(shape, self._dtype, self._offset, self._slope, self._inter), diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 8afebb546a..4059eb9ab2 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -100,8 +100,8 @@ def fmt_sliceobj(sliceobj): with InTemporaryDirectory(): - print('Generating test data... ({} MB)'.format( - int(round(np.prod(SHAPE) * 4 / 1048576.)))) + print(f'Generating test data... ' + f'({int(round(np.prod(SHAPE) * 4 / 1048576.0))} MB)') data = np.array(np.random.random(SHAPE), dtype=np.float32) @@ -180,8 +180,8 @@ def testfunc(): data[:, 2] = np.nan data[:, 3] = [r[5] - r[6] for r in results] - rowlbls = ['Type {}, keep_open {}, slice {}'.format( - r[0], r[1], fmt_sliceobj(r[2])) for r in results] + rowlbls = [(f'Type {r[0]}, keep_open {r[1]}, ' + f'slice {fmt_sliceobj(r[2])}') for r in results] collbls = ['Time', 'Baseline time', 'Time ratio', 'Memory deviation'] print(rst_table(data, rowlbls, collbls)) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 7693818e7b..3c43b1e0ab 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -114,8 +114,8 @@ def _unpack_var(var): TEMPLATE_SPACE ORIG """ - err_msg = ('Please check HEAD file to ensure it is AFNI compliant. ' - 'Offending attribute:\n%s' % var) + err_msg = (f'Please check HEAD file to ensure it is AFNI compliant. ' + f'Offending attribute:\n{var}') atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) if len(atype) != 1: raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}') @@ -127,8 +127,8 @@ def _unpack_var(var): try: attr = [atype(f) for f in attr.split()] except ValueError: - raise AFNIHeaderError('Failed to read variable from HEAD file due ' - 'to improper type casting. %s' % err_msg) + raise AFNIHeaderError(f'Failed to read variable from HEAD file ' + f'due to improper type casting. {err_msg}') else: # AFNI string attributes will always start with open single quote and # end with a tilde (NUL). These attributes CANNOT contain tildes (so diff --git a/nibabel/casting.py b/nibabel/casting.py index 7f2e8d2d5f..163b876c02 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -296,8 +296,8 @@ def type_info(np_type): maxexp=16384, width=width) else: # don't recognize the type - raise FloatingError('We had not expected long double type %s ' - 'with info %s' % (np_type, info)) + raise FloatingError(f'We had not expected long double ' + f'type {np_type} with info {info}') return ret diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index d011d6e8e3..95b39d8560 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -291,8 +291,8 @@ def _to_xml_element(self): v = _float_01(getattr(self, c_)) except ValueError: raise Cifti2HeaderError( - 'Label invalid %s needs to be a float between 0 and 1. ' - 'and it is %s' % (c_, v) + f'Label invalid {c_} needs to be a ' + f'float between 0 and 1. 
and it is {v}' ) lab = xml.Element('Label') @@ -1379,9 +1379,8 @@ def __init__(self, self.update_headers() if self._dataobj.shape != self.header.matrix.get_data_shape(): - warn("Dataobj shape {} does not match shape expected from CIFTI-2 header {}".format( - self._dataobj.shape, self.header.matrix.get_data_shape() - )) + warn(f"Dataobj shape {self._dataobj.shape} does not match shape " + f"expected from CIFTI-2 header {self.header.matrix.get_data_shape()}") @property def nifti_header(self): @@ -1459,9 +1458,8 @@ def to_file_map(self, file_map=None): header.extensions.append(extension) if self._dataobj.shape != self.header.matrix.get_data_shape(): raise ValueError( - "Dataobj shape {} does not match shape expected from CIFTI-2 header {}".format( - self._dataobj.shape, self.header.matrix.get_data_shape() - )) + f"Dataobj shape {self._dataobj.shape} does not match shape " + f"expected from CIFTI-2 header {self.header.matrix.get_data_shape()}") # if intent code is not set, default to unknown CIFTI if header.get_intent()[0] == 'none': header.set_intent('NIFTI_INTENT_CONNECTIVITY_UNKNOWN') diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 19dd2a0247..fa1e91940e 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -516,11 +516,11 @@ def __str__(self): shape = self.shape affine = self.affine return '\n'.join((str(self.__class__), - 'data shape %s' % (shape,), + f'data shape {shape}', 'affine: ', - '%s' % affine, + f'{affine}', 'metadata:', - '%s' % self._header)) + f'{self._header}')) def get_data_dtype(self): return self._header.get_data_dtype() diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index f84878f55c..b9ef7c5bd2 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -1224,9 +1224,8 @@ def read(self, n_bytes): array_from_file(shape, np.int8, NoStringIO()) except IOError as err: message = str(err) - assert message == 'Expected {0} bytes, got {1} bytes from {2}\n' \ - ' - could the file be damaged?'.format( - 11390625000000000000, 0, 'object') + assert message == (f"Expected {11390625000000000000} bytes, got {0} " + f"bytes from {'object'}\n - could the file be damaged?") def test__ftype4scaled_finite_warningfilters(): diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 962233c395..cca3ac7c0b 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -526,11 +526,8 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): n_read = len(data_bytes) needs_copy = True if n_bytes != n_read: - raise IOError('Expected {0} bytes, got {1} bytes from {2}\n' - ' - could the file be damaged?'.format( - n_bytes, - n_read, - getattr(infile, 'name', 'object'))) + raise IOError(f"Expected {n_bytes} bytes, got {n_read} bytes from " + f"{getattr(infile, 'name', 'object')}\n - could the file be damaged?") arr = np.ndarray(shape, in_dtype, buffer=data_bytes, order=order) if needs_copy: return arr.copy() @@ -747,10 +744,8 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, # nan_fill can be (just) outside clip range nan_fill = np.clip(nan_fill, both_mn, both_mx) else: - raise ValueError("nan_fill == {0}, outside safe int range " - "({1}-{2}); change scaling or " - "set nan2zero=False?".format( - nan_fill, int(both_mn), int(both_mx))) + raise ValueError(f"nan_fill == {nan_fill}, outside safe int range " + f"({int(both_mn)}-{int(both_mx)}); change scaling or set nan2zero=False?") # Make sure non-nan output clipped to shared range post_mn = 
np.max([post_mn, both_mn]) post_mx = np.min([post_mx, both_mx]) diff --git a/nisext/sexts.py b/nisext/sexts.py index a2b1a10af7..37a8adcc7c 100644 --- a/nisext/sexts.py +++ b/nisext/sexts.py @@ -142,7 +142,7 @@ def version_getter(pkg_name): msgs = { 'missing': 'Cannot import package "%s" - is it installed?', 'missing opt': 'Missing optional package "%s"', - 'opt suffix' : '; you may get run-time errors', + 'opt suffix': '; you may get run-time errors', 'version too old': 'You have version %s of package "%s"' ' but we need version >= %s', } msgs.update(messages) diff --git a/nisext/tests/test_testers.py b/nisext/tests/test_testers.py index 8c8a09633f..48053461a4 100644 --- a/nisext/tests/test_testers.py +++ b/nisext/tests/test_testers.py @@ -10,7 +10,7 @@ def test_back_tick(): - cmd = '{0} -c "print(\'Hello\')"'.format(PYTHON) + cmd = f'{PYTHON} -c "print(\'Hello\')"' assert_equal(back_tick(cmd), "Hello") assert_equal(back_tick(cmd, ret_err=True), ("Hello", "")) assert_equal(back_tick(cmd, True, False), (b"Hello", b"")) From 2271c97eca45815f7c812f89ad8bdec38b1bbe15 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Thu, 18 Jun 2020 20:49:08 +0300 Subject: [PATCH 03/13] More f-strings flynt -ll 999 . All changes were reviewed manually (Excluding versioneer) --- nibabel/cifti2/cifti2_axes.py | 36 ++++++++++------------ nibabel/cmdline/dicomfs.py | 4 +-- nibabel/cmdline/parrec2nii.py | 8 ++--- nibabel/cmdline/tck2trk.py | 5 +-- nibabel/cmdline/tests/test_conform.py | 4 +-- nibabel/cmdline/trk2tck.py | 5 +-- nibabel/data.py | 18 ++++------- nibabel/deprecator.py | 6 ++-- nibabel/ecat.py | 6 ++-- nibabel/filebasedimages.py | 4 +-- nibabel/filename_parser.py | 4 +-- nibabel/freesurfer/io.py | 4 +-- nibabel/freesurfer/mghformat.py | 5 ++- nibabel/funcs.py | 9 +++--- nibabel/gifti/gifti.py | 5 ++- nibabel/keywordonly.py | 4 +-- nibabel/nicom/dicomreaders.py | 10 +++--- nibabel/nifti1.py | 30 +++++++++--------- nibabel/openers.py | 4 +-- nibabel/optpkg.py | 8 ++--- nibabel/orientations.py | 4 +-- nibabel/parrec.py | 23 +++++++------- nibabel/streamlines/array_sequence.py | 22 +++++++------ nibabel/streamlines/tck.py | 8 ++--- nibabel/streamlines/tractogram.py | 6 ++-- nibabel/tests/data/check_parrec_reslice.py | 8 ++--- nibabel/tests/test_deprecator.py | 17 +++++----- nibabel/tests/test_image_types.py | 12 +++----- nibabel/tests/test_nifti1.py | 5 ++- nibabel/tests/test_scripts.py | 9 +++--- nibabel/trackvis.py | 26 ++++++++-------- nibabel/viewers.py | 13 ++++---- nisext/testers.py | 11 +++---- 33 files changed, 158 insertions(+), 185 deletions(-) diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index f1495552bd..2d473e15b3 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -320,8 +320,8 @@ def __init__(self, name, voxel=None, vertex=None, affine=None, for check_name in ('name', 'voxel', 'vertex'): shape = (self.size, 3) if check_name == 'voxel' else (self.size, ) if getattr(self, check_name).shape != shape: - raise ValueError("Input {} has incorrect shape ({}) for BrainModelAxis axis".format( - check_name, getattr(self, check_name).shape)) + raise ValueError(f"Input {check_name} has incorrect shape " + f"({getattr(self, check_name).shape}) for BrainModelAxis axis") @classmethod def from_mask(cls, mask, name='other', affine=None): @@ -537,8 +537,8 @@ def to_cifti_brain_structure_name(name): else: proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}_{orientation.upper()}' if proposed_name not in cifti2.CIFTI_BRAIN_STRUCTURES: - raise 
ValueError('%s was interpreted as %s, which is not a valid CIFTI brain structure' - % (name, proposed_name)) + raise ValueError(f'{name} was interpreted as {proposed_name}, which is not ' + f'a valid CIFTI brain structure') return proposed_name @property @@ -650,8 +650,8 @@ def __add__(self, other): nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): if name in nvertices.keys() and nvertices[name] != value: - raise ValueError("Trying to concatenate two BrainModels with inconsistent " - "number of vertices for %s" % name) + raise ValueError(f"Trying to concatenate two BrainModels with " + f"inconsistent number of vertices for {name}") nvertices[name] = value return self.__class__( np.append(self.name, other.name), @@ -763,8 +763,8 @@ def __init__(self, name, voxels, vertices, affine=None, volume_shape=None, nvert for check_name in ('name', 'voxels', 'vertices'): if getattr(self, check_name).shape != (self.size, ): - raise ValueError("Input {} has incorrect shape ({}) for Parcel axis".format( - check_name, getattr(self, check_name).shape)) + raise ValueError(f"Input {check_name} has incorrect shape " + f"({getattr(self, check_name).shape}) for Parcel axis") @classmethod def from_brain_models(cls, named_brain_models): @@ -804,8 +804,8 @@ def from_brain_models(cls, named_brain_models): for name, _, bm_part in bm.iter_structures(): if name in bm.nvertices.keys(): if name in nvertices.keys() and nvertices[name] != bm.nvertices[name]: - raise ValueError("Got multiple conflicting number of " - "vertices for surface structure %s" % name) + raise ValueError(f"Got multiple conflicting number of " + f"vertices for surface structure {name}") nvertices[name] = bm.nvertices[name] vertices[name] = bm_part.vertex all_vertices[idx_parcel] = vertices @@ -846,8 +846,7 @@ def from_index_mapping(cls, mim): name = vertex.brain_structure vertices[vertex.brain_structure] = np.array(vertex) if name not in nvertices.keys(): - raise ValueError("Number of vertices for surface structure %s not defined" % - name) + raise ValueError(f"Number of vertices for surface structure {name} not defined") all_voxels[idx_parcel] = voxels all_vertices[idx_parcel] = vertices all_names.append(parcel.name) @@ -968,9 +967,8 @@ def __add__(self, other): nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): if name in nvertices.keys() and nvertices[name] != value: - raise ValueError("Trying to concatenate two ParcelsAxis with inconsistent " - "number of vertices for %s" - % name) + raise ValueError(f"Trying to concatenate two ParcelsAxis with " + f"inconsistent number of vertices for {name}") nvertices[name] = value return self.__class__( np.append(self.name, other.name), @@ -1042,8 +1040,8 @@ def __init__(self, name, meta=None): for check_name in ('name', 'meta'): if getattr(self, check_name).shape != (self.size, ): - raise ValueError("Input {} has incorrect shape ({}) for ScalarAxis axis".format( - check_name, getattr(self, check_name).shape)) + raise ValueError(f"Input {check_name} has incorrect shape " + f"({getattr(self, check_name).shape}) for ScalarAxis axis") @classmethod def from_index_mapping(cls, mim): @@ -1176,8 +1174,8 @@ def __init__(self, name, label, meta=None): for check_name in ('name', 'meta', 'label'): if getattr(self, check_name).shape != (self.size, ): - raise ValueError("Input {} has incorrect shape ({}) for LabelAxis axis".format( - check_name, getattr(self, check_name).shape)) + raise ValueError(f"Input {check_name} has incorrect shape " + f"({getattr(self, 
check_name).shape}) for LabelAxis axis") @classmethod def from_index_mapping(cls, mim): diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index 6663da9299..41a5d98338 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -196,8 +196,8 @@ def release(self, path, flags, fh): def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage="%s [OPTIONS] " - % os.path.basename(sys.argv[0]), + usage=f"{os.path.basename(sys.argv[0])} [OPTIONS] " + f" ", version="%prog " + nib.__version__) p.add_options([ diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index 917615e620..5b235bf81b 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -158,8 +158,8 @@ def proc_file(infile, opts): else: outfilename = basefilename + '.nii' if os.path.isfile(outfilename) and not opts.overwrite: - raise IOError('Output file "%s" exists, use --overwrite to ' - 'overwrite it' % outfilename) + raise IOError(f'Output file "{outfilename}" exists, ' + f'use --overwrite to overwrite it') # load the PAR header and data scaling = 'dv' if opts.scaling == 'off' else opts.scaling @@ -295,8 +295,8 @@ def proc_file(infile, opts): except MRIError: verbose('No EPI factors, dwell time not written') else: - verbose('Writing dwell time (%r sec) calculated assuming %sT ' - 'magnet' % (dwell_time, opts.field_strength)) + verbose(f'Writing dwell time ({dwell_time!r} sec) ' + f'calculated assuming {opts.field_strength}T magnet') with open(basefilename + '.dwell_time', 'w') as fid: fid.write(f'{dwell_time!r}\n') # done diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py index 9b359babaf..2b9d2321ac 100644 --- a/nibabel/cmdline/tck2trk.py +++ b/nibabel/cmdline/tck2trk.py @@ -41,8 +41,9 @@ def main(): filename, _ = os.path.splitext(tractogram) output_filename = filename + '.trk' if os.path.isfile(output_filename) and not args.force: - msg = "Skipping existing file: '{}'. Use -f to overwrite." - print(msg.format(output_filename)) + msg = (f"Skipping existing file: '{output_filename}'. " + f"Use -f to overwrite.") + print(msg) continue # Build header using infos from the anatomical image. diff --git a/nibabel/cmdline/tests/test_conform.py b/nibabel/cmdline/tests/test_conform.py index fd29cbf5a2..0f64f5953b 100644 --- a/nibabel/cmdline/tests/test_conform.py +++ b/nibabel/cmdline/tests/test_conform.py @@ -46,8 +46,8 @@ def test_nondefault(tmpdir): out_shape = (100, 100, 150) voxel_size = (1, 2, 4) orientation = "LAS" - args = "{} {} --out-shape {} --voxel-size {} --orientation {}".format( - infile, outfile, " ".join(map(str, out_shape)), " ".join(map(str, voxel_size)), orientation) + args = (f"{infile} {outfile} --out-shape {' '.join(map(str, out_shape))} " + f"--voxel-size {' '.join(map(str, voxel_size))} --orientation {orientation}") main(args.split()) assert outfile.isfile() c = nib.load(outfile) diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py index efdcf1fd02..68d0738d73 100644 --- a/nibabel/cmdline/trk2tck.py +++ b/nibabel/cmdline/trk2tck.py @@ -31,8 +31,9 @@ def main(): filename, _ = os.path.splitext(tractogram) output_filename = filename + '.tck' if os.path.isfile(output_filename) and not args.force: - msg = "Skipping existing file: '{}'. Use -f to overwrite." - print(msg.format(output_filename)) + msg = (f"Skipping existing file: '{output_filename}'. 
" + f"Use -f to overwrite.") + print(msg) continue trk = nib.streamlines.load(tractogram) diff --git a/nibabel/data.py b/nibabel/data.py index adce51b92c..36eaa9b776 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -240,9 +240,8 @@ def find_data_dir(root_dirs, *names): pth = pjoin(path, ds_relative) if os.path.isdir(pth): return pth - raise DataError('Could not find datasource "%s" in data path "%s"' % - (ds_relative, - os.path.pathsep.join(root_dirs))) + raise DataError(f'Could not find datasource "{ds_relative}" in ' + f'data path "{os.path.pathsep.join(root_dirs)}"') def make_datasource(pkg_def, **kwargs): @@ -313,9 +312,8 @@ def __init__(self, name, msg): def __getattr__(self, attr_name): """ Raise informative error accessing not-found attributes """ raise BomberError( - 'Trying to access attribute "%s" ' - 'of non-existent data "%s"\n\n%s\n' % - (attr_name, self.name, self.msg)) + f'Trying to access attribute "{attr_name}" of ' + f'non-existent data "{self.name}"\n\n{self.msg}\n') def datasource_or_bomber(pkg_def, **options): @@ -358,10 +356,6 @@ def datasource_or_bomber(pkg_def, **options): pkg_name = pkg_def['name'] else: pkg_name = 'data at ' + unix_relpath - msg = ('%(name)s is version %(pkg_version)s but we need ' - 'version >= %(req_version)s\n\n%(pkg_hint)s' % - dict(name=pkg_name, - pkg_version=ds.version, - req_version=version, - pkg_hint=pkg_hint)) + msg = (f"{pkg_name} is version {ds.version} but we need " + f"version >= {version}\n\n{pkg_hint}") return Bomber(sys_relpath, DataError(msg)) diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index a0b7b8535a..6b9f4e0532 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -146,10 +146,8 @@ def __call__(self, message, since='', until='', if since: messages.append('* deprecated from version: ' + since) if until: - messages.append('* {0} {1} as of version: {2}'.format( - "Raises" if self.is_bad_version(until) else "Will raise", - error_class, - until)) + messages.append(f"* {'Raises' if self.is_bad_version(until) else 'Will raise'} " + f"{error_class} as of version: {until}") message = '\n'.join(messages) def deprecator(func): diff --git a/nibabel/ecat.py b/nibabel/ecat.py index a917a25f79..ef3d19656b 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -425,10 +425,8 @@ def get_frame_order(mlist): valid_order = np.argsort(ids) if not all(valid_order == sorted(valid_order)): # raise UserWarning if Frames stored out of order - warnings.warn_explicit('Frames stored out of order;' - 'true order = %s\n' - 'frames will be accessed in order ' - 'STORED, NOT true order' % valid_order, + warnings.warn_explicit(f'Frames stored out of order;true order = {valid_order}\n' + f'frames will be accessed in order STORED, NOT true order', UserWarning, 'ecat', 0) id_dict = {} for i in range(n_valid): diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 436c2cd676..248b754af0 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -34,8 +34,8 @@ def from_header(klass, header=None): # different field names if type(header) == klass: return header.copy() - raise NotImplementedError("Header class requires a conversion" - " from %s to %s" % (klass, type(header))) + raise NotImplementedError(f"Header class requires a conversion " + f"from {klass} to {type(header)}") @classmethod def from_fileobj(klass, fileobj): diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index af0ff74541..149cef06a9 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ 
-127,8 +127,8 @@ def types_filenames(template_fname, types_exts, if found_ext: # an extension, but the wrong one raise TypesFilenamesError( - 'File extension "%s" was not in expected list: %s' - % (found_ext, [e for t, e in types_exts])) + f'File extension "{found_ext}" was not in ' + f'expected list: {[e for t, e in types_exts]}') elif ignored: # there was no extension, but an ignored suffix # This is a special case like 'test.gz' (where .gz # is ignored). It's confusing to change diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 467797ab51..1ac35d81c8 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -619,6 +619,6 @@ def _serialize_volume_info(volume_info): strings.append(f'{key} = {val[0]} {val[1]} {val[2]}\n'.encode('utf-8')) else: val = volume_info[key] - strings.append('{0} = {1:0.10g} {2:0.10g} {3:0.10g}\n'.format( - key.ljust(6), val[0], val[1], val[2]).encode('utf-8')) + strings.append(f'{key.ljust(6)} = {val[0]:0.10g} {val[1]:0.10g} ' + f'{val[2]:0.10g}\n'.encode('utf-8')) return b''.join(strings) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 0a9c4fab17..082524eeb5 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -278,9 +278,8 @@ def set_zooms(self, zooms): if len(zooms) > ndims: raise HeaderDataError('Expecting %d zoom values' % ndims) if np.any(zooms[:3] <= 0): - raise HeaderDataError('Spatial (first three) zooms must be ' - 'positive; got {!r}' - ''.format(tuple(zooms[:3]))) + raise HeaderDataError(f'Spatial (first three) zooms must be ' + f'positive; got {tuple(zooms[:3])!r}') hdr['delta'] = zooms[:3] if len(zooms) == 4: if zooms[3] < 0: diff --git a/nibabel/funcs.py b/nibabel/funcs.py index 1271b3e926..ee8ad0a482 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -136,12 +136,11 @@ def concat_images(images, check_affines=True, axis=None): raise ValueError( f'Image {i} has {len(img.shape)} dimensions, image 0 has {n_dim}') if not np.all(np.array(img.shape)[idx_mask] == masked_shape): - raise ValueError('shape {0} for image {1} not compatible with ' - 'first image shape {2} with axis == {3}'.format( - img.shape, i, shape0, axis)) + raise ValueError(f'shape {img.shape} for image {i} not compatible with ' + f'first image shape {shape0} with axis == {axis}') if check_affines and not np.all(img.affine == affine): - raise ValueError('Affine for image {0} does not match affine ' - 'for first image'.format(i)) + raise ValueError(f'Affine for image {i} does not match affine for ' + f'first image') # Do not fill cache in image if it is empty out_data[i] = np.asanyarray(img.dataobj) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 2bc10906d4..cc7b4b8603 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -386,9 +386,8 @@ def num_dim(self): '2.1', '4.0') def num_dim(self, value): if value != len(self.dims): - raise ValueError('num_dim value {0} != number of dimensions ' - 'len(self.dims) {1}' - .format(value, len(self.dims))) + raise ValueError(f'num_dim value {value} != number of ' + f'dimensions len(self.dims) {len(self.dims)}') @classmethod @deprecate_with_version( diff --git a/nibabel/keywordonly.py b/nibabel/keywordonly.py index 198e70f2c9..2e19444174 100644 --- a/nibabel/keywordonly.py +++ b/nibabel/keywordonly.py @@ -19,8 +19,8 @@ def decorator(func): def wrapper(*args, **kwargs): if len(args) > n: raise TypeError( - '{0} takes at most {1} positional argument{2}'.format( - func.__name__, n, 's' if n > 1 else '')) + 
f"{func.__name__} takes at most {n} positional " + f"argument{'s' if n > 1 else ''}") return func(*args, **kwargs) return wrapper return decorator diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 5d5ea11799..2d017b581e 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -95,12 +95,10 @@ def read_mosaic_dir(dicom_path, q = dcm_w.q_vector if q is None: # probably not diffusion if check_is_dwi: - raise DicomReadError('Could not find diffusion ' - 'information reading file "%s"; ' - ' is it possible this is not ' - 'a _raw_ diffusion directory? ' - 'Could it be a processed dataset ' - 'like ADC etc?' % fname) + raise DicomReadError( + f'Could not find diffusion information reading file "{fname}"; ' + f'is it possible this is not a _raw_ diffusion directory? ' + f'Could it be a processed dataset like ADC etc?') b = np.nan g = np.ones((3,)) + np.nan else: diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index c39831b70d..afae656899 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -438,8 +438,8 @@ def __init__(self, code, content, parent_hdr=None): self._is_implicit_VR = False self._content = pdcm.dataset.Dataset() else: - raise TypeError("content must be either a bytestring or a pydicom " - "Dataset. Got %s" % content.__class__) + raise TypeError(f"content must be either a bytestring or a " + f"pydicom Dataset. Got {content.__class__}") def _guess_implicit_VR(self): """Try to guess DICOM syntax by checking for valid VRs. @@ -711,8 +711,8 @@ def write_to(self, fileobj): self._structarr['vox_offset'] = min_vox_offset elif vox_offset < min_vox_offset: raise HeaderDataError( - 'vox offset set to {0}, but need at least {1}'.format( - vox_offset, min_vox_offset)) + f'vox offset set to {vox_offset}, but need at least ' + f'{min_vox_offset}') super(Nifti1Header, self).write_to(fileobj) # Write extensions if len(self.extensions) == 0: @@ -875,8 +875,8 @@ def set_data_shape(self, shape): else: overflow = hdr['glmin'] != shape[0] if overflow: - raise HeaderDataError('shape[0] %s does not fit in glmax ' - 'datatype' % shape[0]) + raise HeaderDataError(f'shape[0] {shape[0]} does ' + f'not fit in glmax datatype') warnings.warn('Using large vector Freesurfer hack; header will ' 'not be compatible with SPM or FSL', stacklevel=2) shape = (-1, 1, 1) + shape[3:] @@ -1461,9 +1461,8 @@ def get_n_slices(self): try: slice_len = shape[slice_dim] except IndexError: - raise HeaderDataError('Slice dimension index (%s) outside ' - 'shape tuple (%s)' - % (slice_dim, shape)) + raise HeaderDataError(f'Slice dimension index ({slice_dim}) ' + f'outside shape tuple ({shape})') return slice_len def get_slice_times(self): @@ -1581,13 +1580,12 @@ def set_slice_times(self, slice_times): matching_labels.append(label) if not matching_labels: - raise HeaderDataError('slice ordering of %s fits ' - 'with no known scheme' % st_order) + raise HeaderDataError(f'slice ordering of {st_order} ' + f'fits with no known scheme') if len(matching_labels) > 1: warnings.warn( - 'Multiple slice orders satisfy: %s. Choosing the first one' - % ', '.join(matching_labels) - ) + f"Multiple slice orders satisfy: {', '.join(matching_labels)}. 
" + f"Choosing the first one") label = matching_labels[0] # Set values into header hdr['slice_start'] = slice_start @@ -1706,8 +1704,8 @@ def _chk_offset(hdr, fix=False): return hdr, rep # SPM uses memory mapping to read the data, and # apparently this has to start on 16 byte boundaries - rep.problem_msg = ('vox offset (={0:g}) not divisible ' - 'by 16, not SPM compatible'.format(offset)) + rep.problem_msg = (f'vox offset (={offset:g}) not divisible ' + f'by 16, not SPM compatible') rep.problem_level = 30 if fix: rep.fix_msg = 'leaving at current value' diff --git a/nibabel/openers.py b/nibabel/openers.py index a658c65c0a..0220db71c1 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -24,8 +24,8 @@ # < 0.7 - no good if StrictVersion(version) < StrictVersion('0.7.0'): - warnings.warn('indexed_gzip is present, but too old ' - '(>= 0.7.0 required): {})'.format(version)) + warnings.warn(f'indexed_gzip is present, but too old ' + f'(>= 0.7.0 required): {version})') HAVE_INDEXED_GZIP = False # >= 0.8 SafeIndexedGzipFile renamed to IndexedGzipFile elif StrictVersion(version) < StrictVersion('0.8.0'): diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index 69a08af8db..5911454b08 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -101,11 +101,11 @@ def optional_package(name, trip_msg=None, min_version=None): if callable(min_version): trip_msg = f'Package {min_version} fails version check' else: - trip_msg = (f'These functions need {name} version >= {min_version}') + trip_msg = (f'These functions need {name} ' + f'version >= {min_version}') if trip_msg is None: - trip_msg = ('We need package %s for these functions, but ' - '``import %s`` raised %s' - % (name, name, exc)) + trip_msg = (f'We need package {name} for these functions, ' + f'but ``import {name}`` raised {exc}') pkg = TripWire(trip_msg) def setup_module(): diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 81ee461726..9432cb9e64 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -337,8 +337,8 @@ def axcodes2ornt(axcodes, labels=None): if len(allowed_labels) != len(set(allowed_labels)): raise ValueError(f'Duplicate labels in {allowed_labels}') if not set(axcodes).issubset(allowed_labels): - raise ValueError('Not all axis codes {} in label set {}' - .format(list(axcodes), allowed_labels)) + raise ValueError(f'Not all axis codes {list(axcodes)} ' + f'in label set {allowed_labels}') n_axes = len(axcodes) ornt = np.ones((n_axes, 2), dtype=np.int8) * np.nan for code_idx, code in enumerate(axcodes): diff --git a/nibabel/parrec.py b/nibabel/parrec.py index bb63d28f80..11c4570c8b 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -459,8 +459,8 @@ def _chk_trunc(idef_name, gdef_max_name): n_expected = general_info[gdef_max_name] if n_have != n_expected: _err_or_warn( - "Header inconsistency: Found {0} {1} values, " - "but expected {2}".format(n_have, idef_name, n_expected)) + f"Header inconsistency: Found {n_have} {idef_name} " + f"values, but expected {n_expected}") _chk_trunc('slice', 'max_slices') _chk_trunc('echo', 'max_echoes') @@ -732,9 +732,8 @@ def __init__(self, info, image_defs, permit_truncated=False, # dtype bitpix = self._get_unique_image_prop('image pixel size') if bitpix not in (8, 16): - raise PARRECError('Only 8- and 16-bit data supported (not %s)' - 'please report this to the nibabel developers' - % bitpix) + raise PARRECError(f'Only 8- and 16-bit data supported (not {bitpix}) ' + f'please report this to the nibabel developers') # REC data always little endian dt = 
np.dtype('uint' + str(bitpix)).newbyteorder('<') super(PARRECHeader, self).__init__(data_dtype=dt, @@ -768,11 +767,11 @@ def as_analyze_map(self): # the NIfTI1 header, specifically in nifti1.py `header_dtd` defs. # Here we set the parameters we can to simplify PAR/REC # to NIfTI conversion. - descr = ("%s;%s;%s;%s" - % (self.general_info['exam_name'], - self.general_info['patient_name'], - self.general_info['exam_date'].replace(' ', ''), - self.general_info['protocol_name']))[:80] # max len + descr = (f"{self.general_info['exam_name']};" + f"{self.general_info['patient_name']};" + f"{self.general_info['exam_date'].replace(' ', '')};" + f"{self.general_info['protocol_name']}" + )[:80] # max len is_fmri = (self.general_info['max_dynamics'] > 1) t = 'msec' if is_fmri else 'unknown' xyzt_units = unit_codes['mm'] + unit_codes[t] @@ -868,8 +867,8 @@ def _get_unique_image_prop(self, name): """ props = self.image_defs[name] if np.any(np.diff(props, axis=0)): - raise PARRECError('Varying {0} in image sequence ({1}). This is ' - 'not suppported.'.format(name, props)) + raise PARRECError(f'Varying {name} in image sequence ' + f'({props}). This is not suppported.') return props[0] @deprecate_with_version('get_voxel_size deprecated. ' diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index 51e7c4d7fa..2b2d27c323 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -173,16 +173,16 @@ def _check_shape(self, arrseq): """ Check whether this array sequence is compatible with another. """ msg = "cannot perform operation - array sequences have different" if len(self._lengths) != len(arrseq._lengths): - msg += " lengths: {} vs. {}." - raise ValueError(msg.format(len(self._lengths), len(arrseq._lengths))) + msg += f" lengths: {len(self._lengths)} vs. {len(arrseq._lengths)}." + raise ValueError(msg) if self.total_nb_rows != arrseq.total_nb_rows: - msg += " amount of data: {} vs. {}." - raise ValueError(msg.format(self.total_nb_rows, arrseq.total_nb_rows)) + msg += f" amount of data: {self.total_nb_rows} vs. {arrseq.total_nb_rows}." + raise ValueError(msg) if self.common_shape != arrseq.common_shape: - msg += " common shape: {} vs. {}." - raise ValueError(msg.format(self.common_shape, arrseq.common_shape)) + msg += f" common shape: {self.common_shape} vs. {arrseq.common_shape}." + raise ValueError(msg) return True @@ -438,12 +438,14 @@ def __setitem__(self, idx, elements): if is_array_sequence(elements): if len(lengths) != len(elements): - msg = "Trying to set {} sequences with {} sequences." - raise ValueError(msg.format(len(lengths), len(elements))) + msg = (f"Trying to set {len(lengths)} sequences with " + f"{len(elements)} sequences.") + raise ValueError(msg) if sum(lengths) != elements.total_nb_rows: - msg = "Trying to set {} points with {} points." 
- raise ValueError(msg.format(sum(lengths), elements.total_nb_rows)) + msg = (f"Trying to set {sum(lengths)} points with " + f"{elements.total_nb_rows} points.") + raise ValueError(msg) for o1, l1, o2, l2 in zip(offsets, lengths, elements._offsets, elements._lengths): data[o1:o1 + l1] = elements._data[o2:o2 + l2] diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index bad1c51a8b..8e1fc69881 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -211,15 +211,15 @@ def save(self, fileobj): data_for_streamline = first_item.data_for_streamline if len(data_for_streamline) > 0: keys = ", ".join(data_for_streamline.keys()) - msg = ("TCK format does not support saving additional data" - " alongside streamlines. Dropping: {}".format(keys)) + msg = (f"TCK format does not support saving additional " + f"data alongside streamlines. Dropping: {keys}") warnings.warn(msg, DataWarning) data_for_points = first_item.data_for_points if len(data_for_points) > 0: keys = ", ".join(data_for_points.keys()) - msg = ("TCK format does not support saving additional data" - " alongside points. Dropping: {}".format(keys)) + msg = (f"TCK format does not support saving additional " + f"data alongside points. Dropping: {keys}") warnings.warn(msg, DataWarning) for t in tractogram: diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index e8ecbac4ff..2ad748ac16 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -169,9 +169,9 @@ def __setitem__(self, key, value): value = ArraySequence(value) # We make sure there is the right amount of data. - if self.n_rows > 0 and value.total_nb_rows != self.n_rows: - msg = ("The number of values ({0}) should match " - "({1}).").format(value.total_nb_rows, self.n_rows) + if 0 < self.n_rows != value.total_nb_rows: + msg = (f"The number of values ({value.total_nb_rows}) " + f"should match ({self.n_rows}).") raise ValueError(msg) self.store[key] = value diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index 1f10a02aa1..4296e5bedd 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -61,9 +61,8 @@ def gmean_norm(data): normal_data = normal_img.get_fdata() normal_normed = gmean_norm(normal_data) - print("RMS of standard image {:<44}: {}".format( - normal_fname, - np.sqrt(np.sum(normal_normed ** 2)))) + print(f"RMS of standard image {normal_fname:<44}: " + f"{np.sqrt(np.sum(normal_normed ** 2))}") for parfile in glob.glob("*.PAR"): if parfile == normal_fname: @@ -72,4 +71,5 @@ def gmean_norm(data): fixed_img = resample_img2img(normal_img, funny_img) fixed_data = fixed_img.get_fdata() difference_data = normal_normed - gmean_norm(fixed_data) - print(f'RMS resliced {parfile:<52} : {np.sqrt(np.sum(difference_data ** 2))}') + print(f'RMS resliced {parfile:<52} : ' + f'{np.sqrt(np.sum(difference_data ** 2))}') diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index a22e633d5a..249c1ac502 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -94,8 +94,8 @@ def test_dep_func(self): assert func() is None assert len(w) == 1 assert (func.__doc__ == - 'foo\n\n* Will raise {} as of version: 99.4\n' - .format(ExpiredDeprecationError)) + f'foo\n\n* Will raise {ExpiredDeprecationError} ' + f'as of version: 99.4\n') func = dec('foo', until='1.8')(func_no_doc) with pytest.raises(ExpiredDeprecationError): func() @@ -105,16 +105,13 @@ def 
test_dep_func(self): with pytest.raises(ExpiredDeprecationError): func() assert (func.__doc__ == - 'foo\n\n* deprecated from version: 1.2\n' - '* Raises {} as of version: 1.8\n' - .format(ExpiredDeprecationError)) + f'foo\n\n* deprecated from version: 1.2\n* Raises ' + f'{ExpiredDeprecationError} as of version: 1.8\n') func = dec('foo', '1.2', '1.8')(func_doc_long) assert (func.__doc__ == - 'A docstring\n \n foo\n \n' - ' * deprecated from version: 1.2\n' - ' * Raises {} as of version: 1.8\n \n' - ' Some text\n' - .format(ExpiredDeprecationError)) + f'A docstring\n \n foo\n \n * deprecated from version: 1.2\n ' + f'* Raises {ExpiredDeprecationError} as of version: 1.8\n \n' + f' Some text\n') with pytest.raises(ExpiredDeprecationError): func() diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index cd1ea18709..5b09fd1d62 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -65,11 +65,9 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, assert current_sizeof_hdr >= expected_sizeof_hdr, new_msg # Check that the image type was recognized. - new_msg = '%s (%s) image is%s a %s image.' % ( - basename(img_path), - msg, - '' if is_img else ' not', - img_klass.__name__) + new_msg = (f"{basename(img_path)} ({msg}) image " + f"is{'' if is_img else ' not'} " + f"a {img_klass.__name__} image.") assert is_img, new_msg if sniff_mode == 'vanilla': @@ -99,8 +97,8 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, # Reuse the sniff... but it will only change for some # sniff_mode values. - msg = '%s/ %s/ %s' % (expected_img_klass.__name__, sniff_mode, - str(expect_success)) + msg = (f'{expected_img_klass.__name__}/ {sniff_mode}/ ' + f'{str(expect_success)}') sniff = check_img(img_path, klass, sniff_mode=sniff_mode, sniff=sniff, expect_success=expect_success, msg=msg) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 901c94ccf5..0e3b87774d 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -265,9 +265,8 @@ def test_magic_offset_checks(self): fhdr, message, raiser = self.log_chk(hdr, 30) assert fhdr['vox_offset'] == bad_spm assert (message == - 'vox offset (={0:g}) not divisible by 16, ' - 'not SPM compatible; leaving at current ' - 'value'.format(bad_spm)) + f'vox offset (={bad_spm:g}) not divisible by 16, ' + f'not SPM compatible; leaving at current value') # Check minimum offset (if offset set) hdr['magic'] = hdr.single_magic hdr['vox_offset'] = 10 diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 591f85343a..47dfe987c2 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -65,9 +65,8 @@ def load_small_file(): def check_nib_ls_example4d(opts=[], hdrs_str="", other_str=""): # test nib-ls script fname = pjoin(DATA_PATH, 'example4d.nii.gz') - expected_re = (" (int16|[<>]i2) \[128, 96, 24, 2\] " - "2.00x2.00x2.20x2000.00 #exts: 2%s sform%s$" - % (hdrs_str, other_str)) + expected_re = (f" (int16|[<>]i2) \\[128, 96, 24, 2\\] 2.00x2.00x2.20x2000.00 " + f"#exts: 2{hdrs_str} sform{other_str}$") cmd = ['nib-ls'] + opts + [fname] code, stdout, stderr = run_command(cmd) assert fname == stdout[:len(fname)] @@ -137,8 +136,8 @@ def test_nib_ls_multiple(): # they should be indented correctly. 
Since all files are int type - ln = max(len(f) for f in fnames) i_str = ' i' if sys.byteorder == 'little' else ' ' - % (self.__class__.__name__, title, self._sizes[0], self._sizes[1], - self._sizes[2], vol)) + title = '' if self._title is None else f'{self._title} ' + vol = '' if self.n_volumes <= 1 else f', {self.n_volumes}' + r = (f'<{self.__class__.__name__}: {title}({self._sizes[0]}, ' + f'{self._sizes[1]}, {self._sizes[2]}{vol})>') return r # User-level functions ################################################### @@ -295,8 +294,8 @@ def link_to(self, other): Other viewer to use to link movements. """ if not isinstance(other, self.__class__): - raise TypeError('other must be an instance of %s, not %s' - % (self.__class__.__name__, type(other))) + raise TypeError(f'other must be an instance of ' + f'{self.__class__.__name__}, not {type(other)}') self._link(other, is_primary=True) def _link(self, other, is_primary): diff --git a/nisext/testers.py b/nisext/testers.py index e661de72a2..df26c7af39 100644 --- a/nisext/testers.py +++ b/nisext/testers.py @@ -191,10 +191,10 @@ def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'): subdirectory within `to_dir` to which scripts will be installed """ site_pkgs_path = os.path.join(to_dir, py_lib_sdir) - py_lib_locs = ' --install-purelib=%s --install-platlib=%s' % ( - site_pkgs_path, site_pkgs_path) + py_lib_locs = (f' --install-purelib={site_pkgs_path} ' + f'--install-platlib={site_pkgs_path}') pwd = os.path.abspath(os.getcwd()) - cmd = (f'{PYTHON} setup.py --quiet install --prefix={to_dir} {py_lib_locs}') + cmd = f'{PYTHON} setup.py --quiet install --prefix={to_dir} {py_lib_locs}' try: os.chdir(from_dir) back_tick(cmd) @@ -526,9 +526,8 @@ def make_dist(repo_path, out_dir, setup_params, zipglob): back_tick(f'{PYTHON} setup.py {setup_params} --dist-dir={out_dir}') zips = glob(pjoin(out_dir, zipglob)) if len(zips) != 1: - raise OSError('There must be one and only one %s file, ' - 'but I found "%s"' % - (zipglob, ': '.join(zips))) + raise OSError(f"There must be one and only one {zipglob} " + f"file, but I found \"{': '.join(zips)}\"") finally: os.chdir(pwd) return zips[0] From 6b15a972eb77bfae3c5610b308c47d4b1aba02b4 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Thu, 18 Jun 2020 21:02:48 +0300 Subject: [PATCH 04/13] A bit more f-strings, but that's it --- nibabel/streamlines/tck.py | 12 +++++----- nibabel/streamlines/tractogram.py | 15 ++++++------ nibabel/streamlines/trk.py | 39 +++++++++++++++---------------- nibabel/tests/test_analyze.py | 3 ++- 4 files changed, 34 insertions(+), 35 deletions(-) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 8e1fc69881..6e59d06a8b 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -330,8 +330,8 @@ def _read_header(fileobj): hdr['datatype'] = "Float32LE" if not hdr['datatype'].startswith('Float32'): - msg = ("TCK only supports float32 dtype but 'datatype: {}' was" - " specified in the header.").format(hdr['datatype']) + msg = (f"TCK only supports float32 dtype but 'datatype: " + f"{hdr['datatype']}' was specified in the header.") raise HeaderError(msg) if 'file' not in hdr: @@ -341,9 +341,9 @@ def _read_header(fileobj): hdr['file'] = f'. {offset_data}' if hdr['file'].split()[0] != '.': - msg = ("TCK only supports single-file - in other words the" - " filename part must be specified as '.' 
but '{}' was" - " specified.").format(hdr['file'].split()[0]) + msg = (f"TCK only supports single-file - in other words the" + f" filename part must be specified as '.' but " + f"'{hdr['file'].split()[0]}' was specified.") raise HeaderError("Missing 'file' attribute in TCK header.") # Set endianness and _dtype attributes in the header. @@ -454,6 +454,6 @@ def __str__(self): info = "" info += f"\nMAGIC NUMBER: {hdr[Field.MAGIC_NUMBER]}" info += "\n" - info += "\n".join(["{}: {}".format(k, v) + info += "\n".join([f"{k}: {v}" for k, v in hdr.items() if not k.startswith('_')]) return info diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index 2ad748ac16..e4e99473dc 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -109,8 +109,8 @@ def __setitem__(self, key, value): # We make sure there is the right amount of values if self.n_rows > 0 and len(value) != self.n_rows: - msg = ("The number of values ({0}) should match n_elements " - "({1}).").format(len(value), self.n_rows) + msg = (f"The number of values ({len(value)}) should match " + f"n_elements ({self.n_rows}).") raise ValueError(msg) self.store[key] = value @@ -140,10 +140,9 @@ def extend(self, other): """ if (len(self) > 0 and len(other) > 0 and sorted(self.keys()) != sorted(other.keys())): - msg = ("Entry mismatched between the two PerArrayDict objects." - " This PerArrayDict contains '{0}' whereas the other " - " contains '{1}'.").format(sorted(self.keys()), - sorted(other.keys())) + msg = (f"Entry mismatched between the two PerArrayDict objects. " + f"This PerArrayDict contains '{sorted(self.keys())}' " + f"whereas the other contains '{sorted(other.keys())}'.") raise ValueError(msg) self.n_rows += other.n_rows @@ -365,8 +364,8 @@ def affine_to_rasmm(self, value): if value is not None: value = np.array(value) if value.shape != (4, 4): - msg = ("Affine matrix has a shape of (4, 4) but a ndarray with" - "shape {} was provided instead.").format(value.shape) + msg = (f"Affine matrix has a shape of (4, 4) but a ndarray with" + f"shape {value.shape} was provided instead.") raise ValueError(msg) self._affine_to_rasmm = value diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 3dd12e306c..ba7b356886 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -153,15 +153,14 @@ def encode_value_in_name(value, name, max_name_len=20): `value`, padded with ``\x00`` bytes. """ if len(name) > max_name_len: - msg = ("Data information named '{0}' is too long" - " (max {1} characters.)").format(name, max_name_len) + msg = (f"Data information named '{name}' is too long" + f" (max {max_name_len} characters.)") raise ValueError(msg) encoded_name = name if value <= 1 else name + '\x00' + str(value) if len(encoded_name) > max_name_len: - msg = ("Data information named '{0}' is too long (need to be less" - " than {1} characters when storing more than one value" - " for a given data information." - ).format(name, max_name_len - (len(str(value)) + 1)) + msg = (f"Data information named '{name}' is too long (need to be less" + f" than {max_name_len - (len(str(value)) + 1)} characters " + f"when storing more than one value for a given data information.") raise ValueError(msg) # Fill to the end with zeros return encoded_name.ljust(max_name_len, '\x00').encode('latin1') @@ -196,8 +195,8 @@ def decode_value_from_name(encoded_name): value = int(splits[1]) # Decode value. elif len(splits) > 2: # The remaining bytes are not \x00, raising. 
- msg = ("Wrong scalar_name or property_name: '{0}'." - " Unused characters should be \\x00.").format(encoded_name) + msg = (f"Wrong scalar_name or property_name: '{encoded_name}'." + f" Unused characters should be \\x00.") raise HeaderError(msg) return name, value @@ -473,9 +472,9 @@ def save(self, fileobj): # Update field 'property_name' using 'data_per_streamline'. data_for_streamline = first_item.data_for_streamline if len(data_for_streamline) > MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE: - msg = ("Can only store {0} named data_per_streamline (also" - " known as 'properties' in the TRK format)." - ).format(MAX_NB_NAMED_SCALARS_PER_POINT) + msg = (f"Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} named " + f"data_per_streamline (also known as 'properties' in the " + f"TRK format).") raise ValueError(msg) data_for_streamline_keys = sorted(data_for_streamline.keys()) @@ -491,9 +490,9 @@ def save(self, fileobj): # Update field 'scalar_name' using 'tractogram.data_per_point'. data_for_points = first_item.data_for_points if len(data_for_points) > MAX_NB_NAMED_SCALARS_PER_POINT: - msg = ("Can only store {0} named data_per_point (also known" - " as 'scalars' in the TRK format)." - ).format(MAX_NB_NAMED_SCALARS_PER_POINT) + msg = (f"Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} " + f"named data_per_point (also known as 'scalars' in " + f"the TRK format).") raise ValueError(msg) data_for_points_keys = sorted(data_for_points.keys()) @@ -588,9 +587,9 @@ def _read_header(fileobj): # Swap byte order header_rec = header_rec.newbyteorder() if header_rec['hdr_size'] != TrkFile.HEADER_SIZE: - msg = "Invalid hdr_size: {0} instead of {1}" - raise HeaderError(msg.format(header_rec['hdr_size'], - TrkFile.HEADER_SIZE)) + msg = (f"Invalid hdr_size: {header_rec['hdr_size']} " + f"instead of {TrkFile.HEADER_SIZE}") + raise HeaderError(msg) if header_rec['version'] == 1: # There is no 4x4 matrix for voxel to RAS transformation. @@ -616,9 +615,9 @@ def _read_header(fileobj): # able to determine the axis directions. axcodes = aff2axcodes(header[Field.VOXEL_TO_RASMM]) if None in axcodes: - msg = ("The 'vox_to_ras' affine is invalid! Could not" - " determine the axis directions from it.\n{0}" - ).format(header[Field.VOXEL_TO_RASMM]) + msg = (f"The 'vox_to_ras' affine is invalid! Could not" + f" determine the axis directions from it.\n" + f"{header[Field.VOXEL_TO_RASMM]}") raise HeaderError(msg) # By default, the voxel order is LPS. 
diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index a71ba3339f..7dc56f7c68 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -141,7 +141,8 @@ def test_log_checks(self): fhdr, message, raiser = self.log_chk(hdr, 30) assert fhdr['sizeof_hdr'] == self.sizeof_hdr - assert message == 'sizeof_hdr should be {0}; set sizeof_hdr to {0}'.format(self.sizeof_hdr) + assert (message == f'sizeof_hdr should be {self.sizeof_hdr}; ' + f'set sizeof_hdr to {self.sizeof_hdr}') pytest.raises(*raiser) # RGB datatype does not raise error hdr = HC() From fdfe1cdc459fa3edc24b047327fd5be8f505f59c Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Fri, 19 Jun 2020 16:39:27 +0300 Subject: [PATCH 05/13] Limit lines to 100 columns Excluding URLs which cannot be split and test code --- doc/source/conf.py | 3 ++- nibabel/externals/netcdf.py | 4 ++-- nibabel/volumeutils.py | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 9ea5495ef1..1fe335f63f 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -206,7 +206,8 @@ #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -html_sidebars = {'index': ['localtoc.html', 'relations.html', 'sourcelink.html', 'indexsidebar.html', 'searchbox.html', 'reggie.html']} +html_sidebars = {'index': ['localtoc.html', 'relations.html', 'sourcelink.html', + 'indexsidebar.html', 'searchbox.html', 'reggie.html']} # Additional templates that should be rendered to pages, maps page names to # template names. diff --git a/nibabel/externals/netcdf.py b/nibabel/externals/netcdf.py index b2e2c9a868..bcc252de5f 100644 --- a/nibabel/externals/netcdf.py +++ b/nibabel/externals/netcdf.py @@ -398,8 +398,8 @@ def createVariable(self, name, type, dimensions): typecode, size = type.char, type.itemsize if (typecode, size) not in REVERSE: raise ValueError(f"NetCDF 3 does not support type {type}") - - data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3 + # convert to big endian always for NetCDF 3 + data = empty(shape_, dtype=type.newbyteorder("B")) self.variables[name] = netcdf_variable( data, typecode, size, shape, dimensions, maskandscale=self.maskandscale) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index cca3ac7c0b..8d2815318e 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -745,7 +745,8 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, nan_fill = np.clip(nan_fill, both_mn, both_mx) else: raise ValueError(f"nan_fill == {nan_fill}, outside safe int range " - f"({int(both_mn)}-{int(both_mx)}); change scaling or set nan2zero=False?") + f"({int(both_mn)}-{int(both_mx)}); " + f"change scaling or set nan2zero=False?") # Make sure non-nan output clipped to shared range post_mn = np.max([post_mn, both_mn]) post_mx = np.min([post_mx, both_mx]) From 012cf8b7185e2ec135313a706cf5a0cd5a174f80 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Mon, 22 Jun 2020 20:49:05 +0300 Subject: [PATCH 06/13] RV: Revert _version.py and externals/ --- nibabel/_version.py | 39 +++++++++++++++++++------------------ nibabel/externals/netcdf.py | 17 ++++++++-------- nibabel/externals/oset.py | 4 ++-- 3 files changed, 31 insertions(+), 29 deletions(-) diff --git a/nibabel/_version.py b/nibabel/_version.py index 7af21161d8..60031b4d17 100644 --- a/nibabel/_version.py +++ b/nibabel/_version.py @@ -87,20 +87,20 @@ def run_command(commands, args, cwd=None, verbose=False, 
hide_stderr=False, if e.errno == errno.ENOENT: continue if verbose: - print(f"unable to run {dispcmd}") + print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: - print(f"unable to find command, tried {commands}") + print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: - print(f"unable to run {dispcmd} (error)") - print(f"stdout was {stdout}") + print("unable to run %s (error)" % dispcmd) + print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode @@ -125,8 +125,8 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): root = os.path.dirname(root) # up a level if verbose: - print(f"Tried directories {str(rootdirs)} but " - f"none started with prefix {parentdir_prefix}") + print("Tried directories %s but none started with prefix %s" % + (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -201,9 +201,9 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: - print(f"discarding '{','.join(refs - tags)}', no digits") + print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: - print(f"likely tags: {','.join(sorted(tags))}") + print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): @@ -214,7 +214,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if not re.match(r'\d', r): continue if verbose: - print(f"picking {r}") + print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, @@ -243,14 +243,14 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): hide_stderr=True) if rc != 0: if verbose: - print(f"Directory {root} not under git control") + print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", - "--match", f"{tag_prefix}*"], + "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: @@ -283,17 +283,18 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = f"unable to parse git-describe output: '{describe_out}'" + pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: - txt = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" - print(txt) - pieces["error"] = (f"tag '{full_tag}' doesn't start with prefix " - f"'{tag_prefix}'") + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] @@ -383,13 +384,13 @@ def render_pep440_post(pieces): if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) - rendered += f"g{pieces['short']}" + rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" - rendered += f"+g{pieces['short']}" + rendered += "+g%s" % pieces["short"] return rendered @@ -480,7 +481,7 @@ def render(pieces, style): elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: - raise ValueError(f"unknown style '{style}'") + raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, diff --git a/nibabel/externals/netcdf.py b/nibabel/externals/netcdf.py index bcc252de5f..a0099ec6b4 100644 --- a/nibabel/externals/netcdf.py +++ b/nibabel/externals/netcdf.py @@ -258,7 +258,7 @@ def __init__(self, filename, mode='r', mmap=None, version=1, else: # maybe it's a string self.filename = filename omode = 'r+' if mode == 'a' else mode - self.fp = open(self.filename, f'{omode}b') + self.fp = open(self.filename, '%sb' % omode) if mmap is None: # Mmapped files on PyPy cannot be usually closed # before the GC runs, so it's better to use mmap=False @@ -397,9 +397,9 @@ def createVariable(self, name, type, dimensions): type = dtype(type) typecode, size = type.char, type.itemsize if (typecode, size) not in REVERSE: - raise ValueError(f"NetCDF 3 does not support type {type}") - # convert to big endian always for NetCDF 3 - data = empty(shape_, dtype=type.newbyteorder("B")) + raise ValueError("NetCDF 3 does not support type %s" % type) + + data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3 self.variables[name] = netcdf_variable( data, typecode, size, shape, dimensions, maskandscale=self.maskandscale) @@ -589,7 +589,7 @@ def _write_att_values(self, values): break typecode, size = TYPEMAP[nc_type] - dtype_ = f'>{typecode}' + dtype_ = '>%s' % typecode # asarray() dies with bytes and '>c' in py3k. Change to 'S' dtype_ = 'S' if dtype_ == '>c' else dtype_ @@ -614,7 +614,8 @@ def _read(self): # Check magic bytes and version magic = self.fp.read(3) if not magic == b'CDF': - raise TypeError(f"Error: {self.filename} is not a valid NetCDF 3 file") + raise TypeError("Error: %s is not a valid NetCDF 3 file" % + self.filename) self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0] # Read file headers and set data. 
@@ -761,7 +762,7 @@ def _read_var(self): begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() typecode, size = TYPEMAP[nc_type] - dtype_ = f'>{typecode}' + dtype_ = '>%s' % typecode return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize @@ -776,7 +777,7 @@ def _read_att_values(self): self.fp.read(-count % 4) # read padding if typecode != 'c': - values = frombuffer(values, dtype=f'>{typecode}').copy() + values = frombuffer(values, dtype='>%s' % typecode).copy() if values.shape == (1,): values = values[0] else: diff --git a/nibabel/externals/oset.py b/nibabel/externals/oset.py index 3c49f8f856..0a29c661c5 100644 --- a/nibabel/externals/oset.py +++ b/nibabel/externals/oset.py @@ -72,8 +72,8 @@ def pop(self, last=True): def __repr__(self): if not self: - return f'{self.__class__.__name__}()' - return f'{self.__class__.__name__}({list(self)!r})' + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self)) def __eq__(self, other): if isinstance(other, OrderedSet): From e2892bb6ce96f04968ba23265c73648c790fb49f Mon Sep 17 00:00:00 2001 From: Jonathan Daniel <36337649+jond01@users.noreply.github.com> Date: Mon, 22 Jun 2020 20:50:28 +0300 Subject: [PATCH 07/13] Update nibabel/arrayproxy.py Co-authored-by: Chris Markiewicz --- nibabel/arrayproxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 0f169c297e..c8ecc138ff 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -255,7 +255,7 @@ def _should_keep_file_open(self, file_like, keep_file_open): if keep_file_open is None: keep_file_open = KEEP_FILE_OPEN_DEFAULT if keep_file_open not in (True, False): - raise ValueError(f"nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT " + raise ValueError("nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT " f"must be boolean. 
Found: {keep_file_open}") elif keep_file_open not in (True, False): raise ValueError('keep_file_open must be one of {None, True, False}') From 5212c7dad3418549b7c9f2b1c9ddfcd89e47ead8 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Mon, 22 Jun 2020 21:06:01 +0300 Subject: [PATCH 08/13] STY: f-str -> format (readability) --- nibabel/cmdline/dicomfs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index 41a5d98338..6e53be103b 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -196,8 +196,8 @@ def release(self, path, flags, fh): def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage=f"{os.path.basename(sys.argv[0])} [OPTIONS] " - f" ", + usage="{} [OPTIONS] ".format( + os.path.basename(sys.argv[0])), version="%prog " + nib.__version__) p.add_options([ From 092dc9b2375ff780474ee74f71dd2ff62745004d Mon Sep 17 00:00:00 2001 From: Jonathan Daniel <36337649+jond01@users.noreply.github.com> Date: Mon, 22 Jun 2020 21:53:14 +0300 Subject: [PATCH 09/13] STY: Apply suggestions from code review Co-authored-by: Chris Markiewicz --- .../benchmarks/bench_arrayproxy_slicing.py | 6 ++--- nibabel/benchmarks/bench_fileslice.py | 3 +-- nibabel/brikhead.py | 4 ++-- nibabel/casting.py | 3 +-- nibabel/cifti2/cifti2.py | 3 +-- nibabel/cifti2/cifti2_axes.py | 8 +++---- nibabel/cmdline/ls.py | 6 ++--- nibabel/cmdline/parrec2nii.py | 5 ++-- nibabel/cmdline/tck2trk.py | 4 +--- nibabel/cmdline/trk2tck.py | 4 +--- nibabel/cmdline/utils.py | 2 +- nibabel/data.py | 5 ++-- nibabel/dft.py | 4 ++-- nibabel/ecat.py | 4 ++-- nibabel/filebasedimages.py | 2 +- nibabel/freesurfer/io.py | 4 ++-- nibabel/freesurfer/mghformat.py | 6 ++--- nibabel/freesurfer/tests/test_io.py | 8 +++---- nibabel/funcs.py | 3 +-- nibabel/gifti/gifti.py | 2 +- nibabel/gifti/parse_gifti_fast.py | 2 +- nibabel/gifti/tests/test_parse_gifti_fast.py | 2 +- nibabel/keywordonly.py | 3 +-- nibabel/nicom/dicomreaders.py | 4 ++-- nibabel/nifti1.py | 23 ++++++++----------- nibabel/openers.py | 3 +-- nibabel/optpkg.py | 3 +-- nibabel/orientations.py | 3 +-- nibabel/parrec.py | 2 +- nibabel/spatialimages.py | 4 ++-- tools/make_tarball.py | 3 +-- 31 files changed, 58 insertions(+), 80 deletions(-) diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 4059eb9ab2..108e236b5f 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -100,8 +100,7 @@ def fmt_sliceobj(sliceobj): with InTemporaryDirectory(): - print(f'Generating test data... ' - f'({int(round(np.prod(SHAPE) * 4 / 1048576.0))} MB)') + print(f'Generating test data... 
({int(round(np.prod(SHAPE) * 4 / 1048576.0))} MB)') data = np.array(np.random.random(SHAPE), dtype=np.float32) @@ -180,8 +179,7 @@ def testfunc(): data[:, 2] = np.nan data[:, 3] = [r[5] - r[6] for r in results] - rowlbls = [(f'Type {r[0]}, keep_open {r[1]}, ' - f'slice {fmt_sliceobj(r[2])}') for r in results] + rowlbls = [f'Type {r[0]}, keep_open {r[1]}, slice {fmt_sliceobj(r[2])}' for r in results] collbls = ['Time', 'Baseline time', 'Time ratio', 'Memory deviation'] print(rst_table(data, rowlbls, collbls)) diff --git a/nibabel/benchmarks/bench_fileslice.py b/nibabel/benchmarks/bench_fileslice.py index 5d99b939d7..3812538947 100644 --- a/nibabel/benchmarks/bench_fileslice.py +++ b/nibabel/benchmarks/bench_fileslice.py @@ -20,8 +20,7 @@ from ..tmpdirs import InTemporaryDirectory SHAPE = (64, 64, 32, 100) -ROW_NAMES = [f'axis {i}, len {SHAPE[i]}' - for i in range(len(SHAPE))] +ROW_NAMES = [f'axis {i}, len {dim}' for i, dim in enumerate(SHAPE)] COL_NAMES = ['mid int', 'step 1', 'half step 1', diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 3c43b1e0ab..09406c18d7 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -114,7 +114,7 @@ def _unpack_var(var): TEMPLATE_SPACE ORIG """ - err_msg = (f'Please check HEAD file to ensure it is AFNI compliant. ' + err_msg = ('Please check HEAD file to ensure it is AFNI compliant. ' f'Offending attribute:\n{var}') atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) if len(atype) != 1: @@ -127,7 +127,7 @@ def _unpack_var(var): try: attr = [atype(f) for f in attr.split()] except ValueError: - raise AFNIHeaderError(f'Failed to read variable from HEAD file ' + raise AFNIHeaderError('Failed to read variable from HEAD file ' f'due to improper type casting. {err_msg}') else: # AFNI string attributes will always start with open single quote and diff --git a/nibabel/casting.py b/nibabel/casting.py index 163b876c02..146148f9e4 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -296,8 +296,7 @@ def type_info(np_type): maxexp=16384, width=width) else: # don't recognize the type - raise FloatingError(f'We had not expected long double ' - f'type {np_type} with info {info}') + raise FloatingError(f'We had not expected long double type {np_type} with info {info}') return ret diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 95b39d8560..a6020521ac 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -291,8 +291,7 @@ def _to_xml_element(self): v = _float_01(getattr(self, c_)) except ValueError: raise Cifti2HeaderError( - f'Label invalid {c_} needs to be a ' - f'float between 0 and 1. and it is {v}' + f'Label invalid {c_} needs to be a float between 0 and 1. 
and it is {v}' ) lab = xml.Element('Label') diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 2d473e15b3..a3b11630bc 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -538,7 +538,7 @@ def to_cifti_brain_structure_name(name): proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}_{orientation.upper()}' if proposed_name not in cifti2.CIFTI_BRAIN_STRUCTURES: raise ValueError(f'{name} was interpreted as {proposed_name}, which is not ' - f'a valid CIFTI brain structure') + 'a valid CIFTI brain structure') return proposed_name @property @@ -650,7 +650,7 @@ def __add__(self, other): nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): if name in nvertices.keys() and nvertices[name] != value: - raise ValueError(f"Trying to concatenate two BrainModels with " + raise ValueError("Trying to concatenate two BrainModels with " f"inconsistent number of vertices for {name}") nvertices[name] = value return self.__class__( @@ -804,7 +804,7 @@ def from_brain_models(cls, named_brain_models): for name, _, bm_part in bm.iter_structures(): if name in bm.nvertices.keys(): if name in nvertices.keys() and nvertices[name] != bm.nvertices[name]: - raise ValueError(f"Got multiple conflicting number of " + raise ValueError("Got multiple conflicting number of " f"vertices for surface structure {name}") nvertices[name] = bm.nvertices[name] vertices[name] = bm_part.vertex @@ -967,7 +967,7 @@ def __add__(self, other): nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): if name in nvertices.keys() and nvertices[name] != value: - raise ValueError(f"Trying to concatenate two ParcelsAxis with " + raise ValueError("Trying to concatenate two ParcelsAxis with " f"inconsistent number of vertices for {name}") nvertices[name] = value return self.__class__( diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index 08f975bc3d..0488676737 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -116,7 +116,7 @@ def proc_file(f, opts): else: row += [''] except Exception as e: - verbose(2, f"Failed to obtain qform or sform -- {str(e)}") + verbose(2, f"Failed to obtain qform or sform -- {e}") if isinstance(h, nib.AnalyzeHeader): row += [''] else: @@ -136,7 +136,7 @@ def proc_file(f, opts): # just # of elements row += ["@l[%d]" % np.prod(d.shape)] # stats - row += [len(d) and f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' or '-'] + row += [f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' if len(d) else '-'] if opts.counts: items, inv = np.unique(d, return_inverse=True) if len(items) > 1000 and not opts.all_counts: @@ -146,7 +146,7 @@ def proc_file(f, opts): counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq)) row += ["@l" + counts] except IOError as e: - verbose(2, f"Failed to obtain stats/counts -- {str(e)}") + verbose(2, f"Failed to obtain stats/counts -- {e}") row += [_err()] return row diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index 5b235bf81b..176e957bef 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -136,7 +136,7 @@ def get_opt_parser(): def verbose(msg, indent=0): if verbose.switch: - print(f"{' ' * indent}{msg}") + print(' ' * indent + msg) def error(msg, exit_code): @@ -158,8 +158,7 @@ def proc_file(infile, opts): else: outfilename = basefilename + '.nii' if os.path.isfile(outfilename) and not opts.overwrite: - raise IOError(f'Output file "{outfilename}" exists, ' - f'use --overwrite to overwrite it') + raise IOError(f'Output file 
"{outfilename}" exists, use --overwrite to overwrite it') # load the PAR header and data scaling = 'dv' if opts.scaling == 'off' else opts.scaling diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py index 2b9d2321ac..b8d9ce12d8 100644 --- a/nibabel/cmdline/tck2trk.py +++ b/nibabel/cmdline/tck2trk.py @@ -41,9 +41,7 @@ def main(): filename, _ = os.path.splitext(tractogram) output_filename = filename + '.trk' if os.path.isfile(output_filename) and not args.force: - msg = (f"Skipping existing file: '{output_filename}'. " - f"Use -f to overwrite.") - print(msg) + print(f"Skipping existing file: '{output_filename}'. Use -f to overwrite.") continue # Build header using infos from the anatomical image. diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py index 68d0738d73..bddb58c7b1 100644 --- a/nibabel/cmdline/trk2tck.py +++ b/nibabel/cmdline/trk2tck.py @@ -31,9 +31,7 @@ def main(): filename, _ = os.path.splitext(tractogram) output_filename = filename + '.tck' if os.path.isfile(output_filename) and not args.force: - msg = (f"Skipping existing file: '{output_filename}'. " - f"Use -f to overwrite.") - print(msg) + print(f"Skipping existing file: '{output_filename}'. Use -f to overwrite.") continue trk = nib.streamlines.load(tractogram) diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py index 8931beb617..c03f97277d 100644 --- a/nibabel/cmdline/utils.py +++ b/nibabel/cmdline/utils.py @@ -33,7 +33,7 @@ def verbose(thing, msg): """ # TODO: consider using nibabel's logger if thing <= int(verbose_level): - print(f"{' ' * thing}{msg}") + print(' ' * thing + msg) def table2string(table, out=None): diff --git a/nibabel/data.py b/nibabel/data.py index 36eaa9b776..873c5cd6b8 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -293,7 +293,7 @@ def make_datasource(pkg_def, **kwargs): pth = [pjoin(this_data_path, *names) for this_data_path in data_path] pkg_hint = pkg_def.get('install hint', DEFAULT_INSTALL_HINT) - msg = (f'{e}; Is it possible you have not installed a data package?') + msg = f'{e}; Is it possible you have not installed a data package?' 
if 'name' in pkg_def: msg += f"\n\nYou may need the package \"{pkg_def['name']}\"" if pkg_hint is not None: @@ -356,6 +356,5 @@ def datasource_or_bomber(pkg_def, **options): pkg_name = pkg_def['name'] else: pkg_name = 'data at ' + unix_relpath - msg = (f"{pkg_name} is version {ds.version} but we need " - f"version >= {version}\n\n{pkg_hint}") + msg = f"{pkg_name} is version {ds.version} but we need version >= {version}\n\n{pkg_hint}" return Bomber(sys_relpath, DataError(msg)) diff --git a/nibabel/dft.py b/nibabel/dft.py index 2768f1ec59..4caf55da75 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -395,7 +395,7 @@ def _update_file(c, path, fname, studies, series, storage_instances): except AttributeError: study_comments = '' try: - logger.debug(f' storage instance {str(do.SOPInstanceUID)}') + logger.debug(f' storage instance {do.SOPInstanceUID}') if str(do.StudyInstanceUID) not in studies: query = """INSERT INTO study (uid, date, @@ -444,7 +444,7 @@ def _update_file(c, path, fname, studies, series, storage_instances): c.execute(query, params) storage_instances.append(str(do.SOPInstanceUID)) except AttributeError as data: - logger.debug(f' {str(data)}') + logger.debug(f' {data}') return None return str(do.SOPInstanceUID) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index ef3d19656b..a7dad5de0c 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -425,8 +425,8 @@ def get_frame_order(mlist): valid_order = np.argsort(ids) if not all(valid_order == sorted(valid_order)): # raise UserWarning if Frames stored out of order - warnings.warn_explicit(f'Frames stored out of order;true order = {valid_order}\n' - f'frames will be accessed in order STORED, NOT true order', + warnings.warn_explicit(f'Frames stored out of order; true order = {valid_order}\n' + 'frames will be accessed in order STORED, NOT true order', UserWarning, 'ecat', 0) id_dict = {} for i in range(n_valid): diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 248b754af0..006b70d615 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -34,7 +34,7 @@ def from_header(klass, header=None): # different field names if type(header) == klass: return header.copy() - raise NotImplementedError(f"Header class requires a conversion " + raise NotImplementedError("Header class requires a conversion " f"from {klass} to {type(header)}") @classmethod diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 1ac35d81c8..168d131f36 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -619,6 +619,6 @@ def _serialize_volume_info(volume_info): strings.append(f'{key} = {val[0]} {val[1]} {val[2]}\n'.encode('utf-8')) else: val = volume_info[key] - strings.append(f'{key.ljust(6)} = {val[0]:0.10g} {val[1]:0.10g} ' - f'{val[2]:0.10g}\n'.encode('utf-8')) + strings.append( + f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}\n'.encode('utf-8')) return b''.join(strings) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 082524eeb5..7a11cd0f2e 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -278,12 +278,12 @@ def set_zooms(self, zooms): if len(zooms) > ndims: raise HeaderDataError('Expecting %d zoom values' % ndims) if np.any(zooms[:3] <= 0): - raise HeaderDataError(f'Spatial (first three) zooms must be ' - f'positive; got {tuple(zooms[:3])!r}') + raise HeaderDataError('Spatial (first three) zooms must be positive; got ' + f'{tuple(zooms[:3])}') hdr['delta'] = zooms[:3] if len(zooms) == 4: if zooms[3] < 0: - raise 
HeaderDataError(f'TR must be non-negative; got {zooms[3]!r}') + raise HeaderDataError(f'TR must be non-negative; got {zooms[3]}') hdr['tr'] = zooms[3] def get_data_shape(self): diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index ba44c14f13..f449193bbd 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -49,12 +49,12 @@ def _hash_file_content(fname): @freesurfer_test def test_geometry(): """Test IO of .surf""" - surf_path = pjoin(data_path, "surf", f"{'lh'}.{'inflated'}") + surf_path = pjoin(data_path, "surf", "lh.inflated") coords, faces = read_geometry(surf_path) assert 0 == faces.min() assert coords.shape[0] == faces.max() + 1 - surf_path = pjoin(data_path, "surf", f"{'lh'}.{'sphere'}") + surf_path = pjoin(data_path, "surf", "lh.sphere") coords, faces, volume_info, create_stamp = read_geometry( surf_path, read_metadata=True, read_stamp=True) @@ -132,7 +132,7 @@ def test_quad_geometry(): @freesurfer_test def test_morph_data(): """Test IO of morphometry data file (eg. curvature).""" - curv_path = pjoin(data_path, "surf", f"{'lh'}.{'curv'}") + curv_path = pjoin(data_path, "surf", "lh.curv") curv = read_morph_data(curv_path) assert -1.0 < curv.min() < 0 assert 0 < curv.max() < 1.0 @@ -170,7 +170,7 @@ def test_annot(): """Test IO of .annot against freesurfer example data.""" annots = ['aparc', 'aparc.a2005s'] for a in annots: - annot_path = pjoin(data_path, "label", f"{'lh'}.{a}.annot") + annot_path = pjoin(data_path, "label", f"lh.{a}.annot") hash_ = _hash_file_content(annot_path) labels, ctab, names = read_annot(annot_path) diff --git a/nibabel/funcs.py b/nibabel/funcs.py index ee8ad0a482..8e01a06f5b 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -139,8 +139,7 @@ def concat_images(images, check_affines=True, axis=None): raise ValueError(f'shape {img.shape} for image {i} not compatible with ' f'first image shape {shape0} with axis == {axis}') if check_affines and not np.all(img.affine == affine): - raise ValueError(f'Affine for image {i} does not match affine for ' - f'first image') + raise ValueError(f'Affine for image {i} does not match affine for first image') # Do not fill cache in image if it is empty out_data[i] = np.asanyarray(img.dataobj) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index cc7b4b8603..78f77b19ea 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -486,7 +486,7 @@ def to_xml_open(self): \tExternalFileOffset="%d">\n""" di = "" for i, n in enumerate(self.dims): - di = di + f'\tDim{str(i)}="{str(n)}\"\n' + di = di + f'\tDim{i}="{n}"\n' return out % (intent_codes.niistring[self.intent], data_type_codes.niistring[self.datatype], array_index_order_codes.label[self.ind_ord], diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 3b60693478..f803bf8383 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -177,7 +177,7 @@ def StartElementHandler(self, name, attrs): attrs["ArrayIndexingOrder"]] num_dim = int(attrs.get("Dimensionality", 0)) for i in range(num_dim): - di = f"Dim{str(i)}" + di = f"Dim{i}" if di in attrs: self.da.dims.append(int(attrs[di])) # dimensionality has to correspond to the number of DimX given diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index c323ae51df..8847ea1c55 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -106,7 +106,7 @@ def 
assert_default_types(loaded): continue loadedtype = type(getattr(loaded, attr)) assert loadedtype == defaulttype, ( - f"Type mismatch for attribute: {attr} ({loadedtype!s} != {defaulttype!s})") + f"Type mismatch for attribute: {attr} ({loadedtype} != {defaulttype})") def test_default_types(): diff --git a/nibabel/keywordonly.py b/nibabel/keywordonly.py index 2e19444174..9bf679fb14 100644 --- a/nibabel/keywordonly.py +++ b/nibabel/keywordonly.py @@ -19,8 +19,7 @@ def decorator(func): def wrapper(*args, **kwargs): if len(args) > n: raise TypeError( - f"{func.__name__} takes at most {n} positional " - f"argument{'s' if n > 1 else ''}") + f"{func.__name__} takes at most {n} positional argument{'s' if n > 1 else ''}") return func(*args, **kwargs) return wrapper return decorator diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 2d017b581e..cf29f95a1e 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -97,8 +97,8 @@ def read_mosaic_dir(dicom_path, if check_is_dwi: raise DicomReadError( f'Could not find diffusion information reading file "{fname}"; ' - f'is it possible this is not a _raw_ diffusion directory? ' - f'Could it be a processed dataset like ADC etc?') + 'is it possible this is not a _raw_ diffusion directory? ' + 'Could it be a processed dataset like ADC etc?') b = np.nan g = np.ones((3,)) + np.nan else: diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index afae656899..b02a4e38a3 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -438,8 +438,8 @@ def __init__(self, code, content, parent_hdr=None): self._is_implicit_VR = False self._content = pdcm.dataset.Dataset() else: - raise TypeError(f"content must be either a bytestring or a " - f"pydicom Dataset. Got {content.__class__}") + raise TypeError(f"content must be either a bytestring or a pydicom Dataset. " + f"Got {content.__class__}") def _guess_implicit_VR(self): """Try to guess DICOM syntax by checking for valid VRs. @@ -519,8 +519,7 @@ def get_sizeondisk(self): return np.sum([e.get_sizeondisk() for e in self]) def __repr__(self): - s = f"Nifti1Extensions({', '.join(str(e) for e in self)})" - return s + return "Nifti1Extensions(%s)" % ', '.join(str(e) for e in self) def __cmp__(self, other): return cmp(list(self), list(other)) @@ -711,8 +710,7 @@ def write_to(self, fileobj): self._structarr['vox_offset'] = min_vox_offset elif vox_offset < min_vox_offset: raise HeaderDataError( - f'vox offset set to {vox_offset}, but need at least ' - f'{min_vox_offset}') + f'vox offset set to {vox_offset}, but need at least {min_vox_offset}') super(Nifti1Header, self).write_to(fileobj) # Write extensions if len(self.extensions) == 0: @@ -875,8 +873,7 @@ def set_data_shape(self, shape): else: overflow = hdr['glmin'] != shape[0] if overflow: - raise HeaderDataError(f'shape[0] {shape[0]} does ' - f'not fit in glmax datatype') + raise HeaderDataError(f'shape[0] {shape[0]} does not fit in glmax datatype') warnings.warn('Using large vector Freesurfer hack; header will ' 'not be compatible with SPM or FSL', stacklevel=2) shape = (-1, 1, 1) + shape[3:] @@ -1580,12 +1577,11 @@ def set_slice_times(self, slice_times): matching_labels.append(label) if not matching_labels: - raise HeaderDataError(f'slice ordering of {st_order} ' - f'fits with no known scheme') + raise HeaderDataError(f'slice ordering of {st_order} fits with no known scheme') if len(matching_labels) > 1: warnings.warn( f"Multiple slice orders satisfy: {', '.join(matching_labels)}. 
" - f"Choosing the first one") + "Choosing the first one") label = matching_labels[0] # Set values into header hdr['slice_start'] = slice_start @@ -1678,7 +1674,7 @@ def _chk_magic(hdr, fix=False): magic = hdr['magic'].item() if magic in (hdr.pair_magic, hdr.single_magic): return hdr, rep - rep.problem_msg = (f'magic string "{asstr(magic)}" is not valid') + rep.problem_msg = f'magic string "{asstr(magic)}" is not valid' rep.problem_level = 45 if fix: rep.fix_msg = 'leaving as is, but future errors are likely' @@ -1704,8 +1700,7 @@ def _chk_offset(hdr, fix=False): return hdr, rep # SPM uses memory mapping to read the data, and # apparently this has to start on 16 byte boundaries - rep.problem_msg = (f'vox offset (={offset:g}) not divisible ' - f'by 16, not SPM compatible') + rep.problem_msg = f'vox offset (={offset:g}) not divisible by 16, not SPM compatible' rep.problem_level = 30 if fix: rep.fix_msg = 'leaving at current value' diff --git a/nibabel/openers.py b/nibabel/openers.py index 0220db71c1..dbfc005bb3 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -24,8 +24,7 @@ # < 0.7 - no good if StrictVersion(version) < StrictVersion('0.7.0'): - warnings.warn(f'indexed_gzip is present, but too old ' - f'(>= 0.7.0 required): {version})') + warnings.warn(f'indexed_gzip is present, but too old (>= 0.7.0 required): {version})') HAVE_INDEXED_GZIP = False # >= 0.8 SafeIndexedGzipFile renamed to IndexedGzipFile elif StrictVersion(version) < StrictVersion('0.8.0'): diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index 5911454b08..83c157869f 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -101,8 +101,7 @@ def optional_package(name, trip_msg=None, min_version=None): if callable(min_version): trip_msg = f'Package {min_version} fails version check' else: - trip_msg = (f'These functions need {name} ' - f'version >= {min_version}') + trip_msg = f'These functions need {name} version >= {min_version}' if trip_msg is None: trip_msg = (f'We need package {name} for these functions, ' f'but ``import {name}`` raised {exc}') diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 9432cb9e64..1cfb07e55f 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -337,8 +337,7 @@ def axcodes2ornt(axcodes, labels=None): if len(allowed_labels) != len(set(allowed_labels)): raise ValueError(f'Duplicate labels in {allowed_labels}') if not set(axcodes).issubset(allowed_labels): - raise ValueError(f'Not all axis codes {list(axcodes)} ' - f'in label set {allowed_labels}') + raise ValueError(f'Not all axis codes {list(axcodes)} in label set {allowed_labels}') n_axes = len(axcodes) ornt = np.ones((n_axes, 2), dtype=np.int8) * np.nan for code_idx, code in enumerate(axcodes): diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 11c4570c8b..00a841b1ff 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -733,7 +733,7 @@ def __init__(self, info, image_defs, permit_truncated=False, bitpix = self._get_unique_image_prop('image pixel size') if bitpix not in (8, 16): raise PARRECError(f'Only 8- and 16-bit data supported (not {bitpix}) ' - f'please report this to the nibabel developers') + 'please report this to the nibabel developers') # REC data always little endian dt = np.dtype('uint' + str(bitpix)).newbyteorder('<') super(PARRECHeader, self).__init__(data_dtype=dt, diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index fa1e91940e..a602d62d8f 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -518,9 +518,9 @@ def __str__(self): return 
'\n'.join((str(self.__class__), f'data shape {shape}', 'affine: ', - f'{affine}', + str(affine), 'metadata:', - f'{self._header}')) + str(self._header))) def get_data_dtype(self): return self._header.get_data_dtype() diff --git a/tools/make_tarball.py b/tools/make_tarball.py index 69c901d67d..149837aaf5 100755 --- a/tools/make_tarball.py +++ b/tools/make_tarball.py @@ -20,6 +20,5 @@ start_dir = os.getcwd() cd('..') -git_tpl = 'git archive --format=tar --prefix={0}/ HEAD | gzip > {1}' -c(git_tpl.format(base_name, tar_name)) +c(f'git archive --format=tar --prefix={base_name}/ HEAD | gzip > {tar_name}') c(f'mv {tar_name} tools/') From a3f51aa43f8b2e93bb5d3baf45434e0530ab2f31 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Mon, 22 Jun 2020 21:59:39 +0300 Subject: [PATCH 10/13] STY: Use f-strings only where necessary + remove str calls in f-strings, one line if possible --- nibabel/cmdline/ls.py | 2 +- nibabel/funcs.py | 3 +-- nibabel/gifti/gifti.py | 2 +- nibabel/parrec.py | 3 +-- nibabel/streamlines/tck.py | 9 ++++----- nibabel/streamlines/tractogram.py | 9 ++++----- nibabel/streamlines/trk.py | 18 +++++++++--------- nibabel/tests/test_deprecator.py | 8 ++++---- nibabel/tests/test_image_types.py | 2 +- nibabel/tests/test_nifti1.py | 2 +- nibabel/tests/test_scripts.py | 2 +- nibabel/trackvis.py | 7 +++---- nibabel/viewers.py | 2 +- nibabel/volumeutils.py | 2 +- 14 files changed, 33 insertions(+), 38 deletions(-) diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index 0488676737..91e55860f4 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -73,7 +73,7 @@ def proc_file(f, opts): h = vol.header except Exception as e: row += ['failed'] - verbose(2, f"Failed to gather information -- {str(e)}") + verbose(2, f"Failed to gather information -- {e}") return row row += [str(safe_get(h, 'data_dtype')), diff --git a/nibabel/funcs.py b/nibabel/funcs.py index 8e01a06f5b..df5eb0e96f 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -133,8 +133,7 @@ def concat_images(images, check_affines=True, axis=None): masked_shape = np.array(shape0)[idx_mask] for i, img in enumerate(images): if len(img.shape) != n_dim: - raise ValueError( - f'Image {i} has {len(img.shape)} dimensions, image 0 has {n_dim}') + raise ValueError(f'Image {i} has {len(img.shape)} dimensions, image 0 has {n_dim}') if not np.all(np.array(img.shape)[idx_mask] == masked_shape): raise ValueError(f'shape {img.shape} for image {i} not compatible with ' f'first image shape {shape0} with axis == {axis}') diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 78f77b19ea..0f509eaa9d 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -486,7 +486,7 @@ def to_xml_open(self): \tExternalFileOffset="%d">\n""" di = "" for i, n in enumerate(self.dims): - di = di + f'\tDim{i}="{n}"\n' + di = di + f'\tDim{i}="{n}\"\n' return out % (intent_codes.niistring[self.intent], data_type_codes.niistring[self.datatype], array_index_order_codes.label[self.ind_ord], diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 00a841b1ff..9d86b57319 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -429,8 +429,7 @@ def vol_is_full(slice_nos, slice_max, slice_min=1): """ slice_set = set(range(slice_min, slice_max + 1)) if not slice_set.issuperset(slice_nos): - raise ValueError( - f'Slice numbers outside inclusive range {slice_min} to {slice_max}') + raise ValueError(f'Slice numbers outside inclusive range {slice_min} to {slice_max}') vol_nos = np.array(vol_numbers(slice_nos)) slice_nos = np.asarray(slice_nos) 
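# --- [Editor's illustration; not part of the patch.] The rule behind this
# --- commit's subject, "f-strings only where necessary": in an implicitly
# --- concatenated message, only the fragments that contain {...} placeholders
# --- keep the f prefix; the rest stay plain literals. The value is made up.
bitpix = 12
msg = (f'Only 8- and 16-bit data supported (not {bitpix}) '
       'please report this to the nibabel developers')
assert msg.startswith('Only 8- and 16-bit data supported (not 12)')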
is_full = np.ones(slice_nos.shape, dtype=bool) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 6e59d06a8b..5e0172e069 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -211,14 +211,14 @@ def save(self, fileobj): data_for_streamline = first_item.data_for_streamline if len(data_for_streamline) > 0: keys = ", ".join(data_for_streamline.keys()) - msg = (f"TCK format does not support saving additional " + msg = ("TCK format does not support saving additional " f"data alongside streamlines. Dropping: {keys}") warnings.warn(msg, DataWarning) data_for_points = first_item.data_for_points if len(data_for_points) > 0: keys = ", ".join(data_for_points.keys()) - msg = (f"TCK format does not support saving additional " + msg = ("TCK format does not support saving additional " f"data alongside points. Dropping: {keys}") warnings.warn(msg, DataWarning) @@ -330,13 +330,12 @@ def _read_header(fileobj): hdr['datatype'] = "Float32LE" if not hdr['datatype'].startswith('Float32'): - msg = (f"TCK only supports float32 dtype but 'datatype: " + msg = ("TCK only supports float32 dtype but 'datatype: " f"{hdr['datatype']}' was specified in the header.") raise HeaderError(msg) if 'file' not in hdr: - msg = ("Missing 'file' attribute in TCK header." - " Will try to guess it.") + msg = "Missing 'file' attribute in TCK header. Will try to guess it." warnings.warn(msg, HeaderWarning) hdr['file'] = f'. {offset_data}' diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index e4e99473dc..c3814aac37 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -108,9 +108,8 @@ def __setitem__(self, key, value): raise ValueError("data_per_streamline must be a 2D array.") # We make sure there is the right amount of values - if self.n_rows > 0 and len(value) != self.n_rows: - msg = (f"The number of values ({len(value)}) should match " - f"n_elements ({self.n_rows}).") + if 0 < self.n_rows != len(value): + msg = f"The number of values ({len(value)}) should match n_elements ({self.n_rows})." raise ValueError(msg) self.store[key] = value @@ -140,7 +139,7 @@ def extend(self, other): """ if (len(self) > 0 and len(other) > 0 and sorted(self.keys()) != sorted(other.keys())): - msg = (f"Entry mismatched between the two PerArrayDict objects. " + msg = ("Entry mismatched between the two PerArrayDict objects. 
" f"This PerArrayDict contains '{sorted(self.keys())}' " f"whereas the other contains '{sorted(other.keys())}'.") raise ValueError(msg) @@ -364,7 +363,7 @@ def affine_to_rasmm(self, value): if value is not None: value = np.array(value) if value.shape != (4, 4): - msg = (f"Affine matrix has a shape of (4, 4) but a ndarray with" + msg = ("Affine matrix has a shape of (4, 4) but a ndarray with " f"shape {value.shape} was provided instead.") raise ValueError(msg) diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index ba7b356886..2a3a5bc22f 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -160,7 +160,7 @@ def encode_value_in_name(value, name, max_name_len=20): if len(encoded_name) > max_name_len: msg = (f"Data information named '{name}' is too long (need to be less" f" than {max_name_len - (len(str(value)) + 1)} characters " - f"when storing more than one value for a given data information.") + "when storing more than one value for a given data information.") raise ValueError(msg) # Fill to the end with zeros return encoded_name.ljust(max_name_len, '\x00').encode('latin1') @@ -195,8 +195,8 @@ def decode_value_from_name(encoded_name): value = int(splits[1]) # Decode value. elif len(splits) > 2: # The remaining bytes are not \x00, raising. - msg = (f"Wrong scalar_name or property_name: '{encoded_name}'." - f" Unused characters should be \\x00.") + msg = (f"Wrong scalar_name or property_name: '{encoded_name}'. " + "Unused characters should be \\x00.") raise HeaderError(msg) return name, value @@ -473,8 +473,8 @@ def save(self, fileobj): data_for_streamline = first_item.data_for_streamline if len(data_for_streamline) > MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE: msg = (f"Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} named " - f"data_per_streamline (also known as 'properties' in the " - f"TRK format).") + "data_per_streamline (also known as 'properties' in the " + "TRK format).") raise ValueError(msg) data_for_streamline_keys = sorted(data_for_streamline.keys()) @@ -491,8 +491,8 @@ def save(self, fileobj): data_for_points = first_item.data_for_points if len(data_for_points) > MAX_NB_NAMED_SCALARS_PER_POINT: msg = (f"Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} " - f"named data_per_point (also known as 'scalars' in " - f"the TRK format).") + "named data_per_point (also known as 'scalars' in " + "the TRK format).") raise ValueError(msg) data_for_points_keys = sorted(data_for_points.keys()) @@ -615,8 +615,8 @@ def _read_header(fileobj): # able to determine the axis directions. axcodes = aff2axcodes(header[Field.VOXEL_TO_RASMM]) if None in axcodes: - msg = (f"The 'vox_to_ras' affine is invalid! Could not" - f" determine the axis directions from it.\n" + msg = ("The 'vox_to_ras' affine is invalid! 
Could not" + " determine the axis directions from it.\n" f"{header[Field.VOXEL_TO_RASMM]}") raise HeaderError(msg) diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 249c1ac502..76e7c09609 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -95,7 +95,7 @@ def test_dep_func(self): assert len(w) == 1 assert (func.__doc__ == f'foo\n\n* Will raise {ExpiredDeprecationError} ' - f'as of version: 99.4\n') + 'as of version: 99.4\n') func = dec('foo', until='1.8')(func_no_doc) with pytest.raises(ExpiredDeprecationError): func() @@ -105,13 +105,13 @@ def test_dep_func(self): with pytest.raises(ExpiredDeprecationError): func() assert (func.__doc__ == - f'foo\n\n* deprecated from version: 1.2\n* Raises ' + 'foo\n\n* deprecated from version: 1.2\n* Raises ' f'{ExpiredDeprecationError} as of version: 1.8\n') func = dec('foo', '1.2', '1.8')(func_doc_long) assert (func.__doc__ == - f'A docstring\n \n foo\n \n * deprecated from version: 1.2\n ' + 'A docstring\n \n foo\n \n * deprecated from version: 1.2\n ' f'* Raises {ExpiredDeprecationError} as of version: 1.8\n \n' - f' Some text\n') + ' Some text\n') with pytest.raises(ExpiredDeprecationError): func() diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index 5b09fd1d62..50142cfc92 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -98,7 +98,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, # Reuse the sniff... but it will only change for some # sniff_mode values. msg = (f'{expected_img_klass.__name__}/ {sniff_mode}/ ' - f'{str(expect_success)}') + f'{expect_success}') sniff = check_img(img_path, klass, sniff_mode=sniff_mode, sniff=sniff, expect_success=expect_success, msg=msg) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 0e3b87774d..8622b3706d 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -266,7 +266,7 @@ def test_magic_offset_checks(self): assert fhdr['vox_offset'] == bad_spm assert (message == f'vox offset (={bad_spm:g}) not divisible by 16, ' - f'not SPM compatible; leaving at current value') + 'not SPM compatible; leaving at current value') # Check minimum offset (if offset set) hdr['magic'] = hdr.single_magic hdr['vox_offset'] = 10 diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 47dfe987c2..e12599fc85 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -65,7 +65,7 @@ def load_small_file(): def check_nib_ls_example4d(opts=[], hdrs_str="", other_str=""): # test nib-ls script fname = pjoin(DATA_PATH, 'example4d.nii.gz') - expected_re = (f" (int16|[<>]i2) \\[128, 96, 24, 2\\] 2.00x2.00x2.20x2000.00 " + expected_re = (" (int16|[<>]i2) \\[128, 96, 24, 2\\] 2.00x2.00x2.20x2000.00 " f"#exts: 2{hdrs_str} sform{other_str}$") cmd = ['nib-ls'] + opts + [fname] code, stdout, stderr = run_command(cmd) diff --git a/nibabel/trackvis.py b/nibabel/trackvis.py index e58ca6b62e..a18e1cfe6b 100644 --- a/nibabel/trackvis.py +++ b/nibabel/trackvis.py @@ -471,7 +471,7 @@ def _check_hdr_points_space(hdr, points_space): voxel_size = hdr['voxel_size'] if np.any(voxel_size < 0): raise HeaderError(f'Negative voxel sizes {voxel_size} not ' - f'valid for voxel - voxmm conversion') + 'valid for voxel - voxmm conversion') if np.all(voxel_size == 0): raise HeaderError('Cannot convert between voxels and voxmm when ' '"voxel_sizes" all 0') @@ -501,8 +501,7 @@ def 
_check_hdr_points_space(hdr, points_space): raise HeaderError(f'Affine implies voxel_order {aff_order} ' f'but header voxel_order is {voxel_order}') else: - raise ValueError(f'Painfully confusing "points_space" value of ' - f'"{points_space}"') + raise ValueError(f'Painfully confusing "points_space" value of "{points_space}"') def _hdr_from_mapping(hdr=None, mapping=None, endianness=native_code): @@ -669,7 +668,7 @@ def aff_from_hdr(trk_hdr, atleast_v2=True): aff[:, 2] *= -1 exp_order = ''.join(aff2axcodes(aff)) if voxel_order != exp_order: - raise HeaderError(f'Estimate of header affine does not match ' + raise HeaderError('Estimate of header affine does not match ' f'voxel_order of {exp_order}') return aff diff --git a/nibabel/viewers.py b/nibabel/viewers.py index fa21330af8..5d48665780 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -294,7 +294,7 @@ def link_to(self, other): Other viewer to use to link movements. """ if not isinstance(other, self.__class__): - raise TypeError(f'other must be an instance of ' + raise TypeError('other must be an instance of ' f'{self.__class__.__name__}, not {type(other)}') self._link(other, is_primary=True) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 8d2815318e..eddc6a9d51 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -746,7 +746,7 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, else: raise ValueError(f"nan_fill == {nan_fill}, outside safe int range " f"({int(both_mn)}-{int(both_mx)}); " - f"change scaling or set nan2zero=False?") + "change scaling or set nan2zero=False?") # Make sure non-nan output clipped to shared range post_mn = np.max([post_mn, both_mn]) post_mx = np.min([post_mx, both_mx]) From 42fb61d0a016de71d3e6d258546d5b32a19daaa5 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Tue, 23 Jun 2020 08:51:07 +0300 Subject: [PATCH 11/13] STY: continue error in one line --- nibabel/nifti1.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index b02a4e38a3..44c5834070 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -1165,8 +1165,7 @@ def get_slope_inter(self): if slope == 0 or not np.isfinite(slope): return None, None if not np.isfinite(inter): - raise HeaderDataError( - f'Valid slope but invalid intercept {inter}') + raise HeaderDataError(f'Valid slope but invalid intercept {inter}') return slope, inter def set_slope_inter(self, slope, inter=None): From 7689ea855d31ae3ddfe6113af33d81b1d0e2084d Mon Sep 17 00:00:00 2001 From: Jonathan Daniel <36337649+jond01@users.noreply.github.com> Date: Mon, 6 Jul 2020 10:03:13 +0300 Subject: [PATCH 12/13] STY: Apply suggestions from code review Co-authored-by: Chris Markiewicz --- nibabel/arrayproxy.py | 3 +-- nibabel/streamlines/array_sequence.py | 3 +-- nibabel/streamlines/tck.py | 8 +++----- nibabel/streamlines/tractogram.py | 3 +-- nibabel/streamlines/trk.py | 3 +-- nibabel/tests/data/check_parrec_reslice.py | 3 +-- nibabel/tests/test_deprecator.py | 3 +-- nibabel/tests/test_volumeutils.py | 4 ++-- 8 files changed, 11 insertions(+), 19 deletions(-) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index c8ecc138ff..30fd37d153 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -412,8 +412,7 @@ def reshape(self, shape): shape = tuple(unknown_size if e == -1 else e for e in shape) if np.prod(shape) != size: - raise ValueError(f"cannot reshape array of size {size:d} " - f"into shape {shape!s}") + raise ValueError(f"cannot reshape array of size 
{size:d} into shape {shape!s}") return self.__class__(file_like=self.file_like, spec=(shape, self._dtype, self._offset, self._slope, self._inter), diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index 2b2d27c323..382748ee03 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -438,8 +438,7 @@ def __setitem__(self, idx, elements): if is_array_sequence(elements): if len(lengths) != len(elements): - msg = (f"Trying to set {len(lengths)} sequences with " - f"{len(elements)} sequences.") + msg = f"Trying to set {len(lengths)} sequences with {len(elements)} sequences." raise ValueError(msg) if sum(lengths) != elements.total_nb_rows: diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 5e0172e069..ff12bc2322 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -340,9 +340,8 @@ def _read_header(fileobj): hdr['file'] = f'. {offset_data}' if hdr['file'].split()[0] != '.': - msg = (f"TCK only supports single-file - in other words the" - f" filename part must be specified as '.' but " - f"'{hdr['file'].split()[0]}' was specified.") + msg = ("TCK only supports single-file - in other words the filename part must be " + f"specified as '.' but '{hdr['file'].split()[0]}' was specified.") raise HeaderError("Missing 'file' attribute in TCK header.") # Set endianness and _dtype attributes in the header. @@ -453,6 +452,5 @@ def __str__(self): info = "" info += f"\nMAGIC NUMBER: {hdr[Field.MAGIC_NUMBER]}" info += "\n" - info += "\n".join([f"{k}: {v}" - for k, v in hdr.items() if not k.startswith('_')]) + info += "\n".join(f"{k}: {v}" for k, v in hdr.items() if not k.startswith('_')) return info diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index c3814aac37..47ce747268 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -168,8 +168,7 @@ def __setitem__(self, key, value): # We make sure there is the right amount of data. if 0 < self.n_rows != value.total_nb_rows: - msg = (f"The number of values ({value.total_nb_rows}) " - f"should match ({self.n_rows}).") + msg = f"The number of values ({value.total_nb_rows}) should match ({self.n_rows})." raise ValueError(msg) self.store[key] = value diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 2a3a5bc22f..c602937928 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -153,8 +153,7 @@ def encode_value_in_name(value, name, max_name_len=20): `value`, padded with ``\x00`` bytes. 
""" if len(name) > max_name_len: - msg = (f"Data information named '{name}' is too long" - f" (max {max_name_len} characters.)") + msg = f"Data information named '{name}' is too long (max {max_name_len} characters.)" raise ValueError(msg) encoded_name = name if value <= 1 else name + '\x00' + str(value) if len(encoded_name) > max_name_len: diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index 4296e5bedd..dfeca91b45 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -61,8 +61,7 @@ def gmean_norm(data): normal_data = normal_img.get_fdata() normal_normed = gmean_norm(normal_data) - print(f"RMS of standard image {normal_fname:<44}: " - f"{np.sqrt(np.sum(normal_normed ** 2))}") + print(f"RMS of standard image {normal_fname:<44}: {np.sqrt(np.sum(normal_normed ** 2))}") for parfile in glob.glob("*.PAR"): if parfile == normal_fname: diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 76e7c09609..f9d22b1337 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -94,8 +94,7 @@ def test_dep_func(self): assert func() is None assert len(w) == 1 assert (func.__doc__ == - f'foo\n\n* Will raise {ExpiredDeprecationError} ' - 'as of version: 99.4\n') + f'foo\n\n* Will raise {ExpiredDeprecationError} as of version: 99.4\n') func = dec('foo', until='1.8')(func_no_doc) with pytest.raises(ExpiredDeprecationError): func() diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index b9ef7c5bd2..3b6c98edcc 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -1224,8 +1224,8 @@ def read(self, n_bytes): array_from_file(shape, np.int8, NoStringIO()) except IOError as err: message = str(err) - assert message == (f"Expected {11390625000000000000} bytes, got {0} " - f"bytes from {'object'}\n - could the file be damaged?") + assert message == ("Expected 11390625000000000000 bytes, got 0 " + "bytes from object\n - could the file be damaged?") def test__ftype4scaled_finite_warningfilters(): From 0dd06ee8b4cbedf990a257bcb4b3ecd3c0916fbf Mon Sep 17 00:00:00 2001 From: Jonathan Daniel <36337649+jond01@users.noreply.github.com> Date: Mon, 6 Jul 2020 10:08:53 +0300 Subject: [PATCH 13/13] STY: Apply suggestions from code review Co-authored-by: Chris Markiewicz --- nibabel/streamlines/array_sequence.py | 3 +-- nibabel/tests/data/check_parrec_reslice.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index 382748ee03..4eaf31065b 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -442,8 +442,7 @@ def __setitem__(self, idx, elements): raise ValueError(msg) if sum(lengths) != elements.total_nb_rows: - msg = (f"Trying to set {sum(lengths)} points with " - f"{elements.total_nb_rows} points.") + msg = f"Trying to set {sum(lengths)} points with {elements.total_nb_rows} points." 
raise ValueError(msg) for o1, l1, o2, l2 in zip(offsets, lengths, elements._offsets, elements._lengths): diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index dfeca91b45..ffee1f3829 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -70,5 +70,4 @@ def gmean_norm(data): fixed_img = resample_img2img(normal_img, funny_img) fixed_data = fixed_img.get_fdata() difference_data = normal_normed - gmean_norm(fixed_data) - print(f'RMS resliced {parfile:<52} : ' - f'{np.sqrt(np.sum(difference_data ** 2))}') + print(f'RMS resliced {parfile:<52} : {np.sqrt(np.sum(difference_data ** 2))}')
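Editor's closing note, separate from the patch series above: several of the
final hunks lean on f-string format specs to absorb helper calls such as
str.ljust() and printf-style '%.10g'. A minimal runnable sketch, with made-up
names and values:

    name = 'example.PAR'
    assert f'{name:<52}' == name.ljust(52)   # :<52 left-pads to width 52

    key, val = 'xras', (1.0, 0.0, 0.0)
    line = f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}'
    assert line == 'xras   = 1 0 0'          # :6s pads the key; :.10g mirrors '%.10g'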