diff --git a/deepmd/calculator.py b/deepmd/calculator.py
index dca2343ce0..1d8e955de7 100644
--- a/deepmd/calculator.py
+++ b/deepmd/calculator.py
@@ -103,7 +103,7 @@ def __init__(
             self.type_dict = type_dict
         else:
             self.type_dict = dict(
-                zip(self.dp.get_type_map(), range(self.dp.get_ntypes()))
+                zip(self.dp.get_type_map(), range(self.dp.get_ntypes()), strict=True)
             )
 
     def calculate(
diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py
index 3fa1119418..380c300216 100644
--- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py
+++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py
@@ -155,7 +155,7 @@ def get_model_sels(self) -> list[int | list[int]]:
     def _sort_rcuts_sels(self) -> tuple[tuple[Array, Array], list[int]]:
         # sort the pair of rcut and sels in ascending order, first based on sel, then on rcut.
         zipped = sorted(
-            zip(self.get_model_rcuts(), self.get_model_nsels()),
+            zip(self.get_model_rcuts(), self.get_model_nsels(), strict=True),
             key=lambda x: (x[1], x[0]),
         )
         return [p[0] for p in zipped], [p[1] for p in zipped]
@@ -235,12 +235,14 @@ def forward_atomic(
         )
         raw_nlists = [
             nlists[get_multiple_nlist_key(rcut, sel)]
-            for rcut, sel in zip(self.get_model_rcuts(), self.get_model_nsels())
+            for rcut, sel in zip(
+                self.get_model_rcuts(), self.get_model_nsels(), strict=True
+            )
         ]
         nlists_ = [
             nl if mt else nlist_distinguish_types(nl, extended_atype, sel)
             for mt, nl, sel in zip(
-                self.mixed_types_list, raw_nlists, self.get_model_sels()
+                self.mixed_types_list, raw_nlists, self.get_model_sels(), strict=True
             )
         ]
         ener_list = []
diff --git a/deepmd/dpmodel/descriptor/hybrid.py b/deepmd/dpmodel/descriptor/hybrid.py
index 3682e17c4d..2cb8585d77 100644
--- a/deepmd/dpmodel/descriptor/hybrid.py
+++ b/deepmd/dpmodel/descriptor/hybrid.py
@@ -101,7 +101,7 @@ def __init__(
                 start_idx = np.cumsum(np.pad(hybrid_sel, (1, 0), "constant"))[:-1]
                 end_idx = start_idx + np.array(sub_sel)
                 cut_idx = np.concatenate(
-                    [range(ss, ee) for ss, ee in zip(start_idx, end_idx)]
+                    [range(ss, ee) for ss, ee in zip(start_idx, end_idx, strict=True)]
                 )
             nlist_cut_idx.append(cut_idx)
         self.nlist_cut_idx = nlist_cut_idx
@@ -310,7 +310,7 @@ def call(
             )
         else:
             nl_distinguish_types = None
-        for descrpt, nci in zip(self.descrpt_list, self.nlist_cut_idx):
+        for descrpt, nci in zip(self.descrpt_list, self.nlist_cut_idx, strict=True):
             # cut the nlist to the correct length
             if self.mixed_types() == descrpt.mixed_types():
                 nl = xp.take(nlist, nci, axis=2)
diff --git a/deepmd/dpmodel/infer/deep_eval.py b/deepmd/dpmodel/infer/deep_eval.py
index 3bd0f435e8..8088ba1d2f 100644
--- a/deepmd/dpmodel/infer/deep_eval.py
+++ b/deepmd/dpmodel/infer/deep_eval.py
@@ -229,6 +229,7 @@ def eval(
             zip(
                 [x.name for x in request_defs],
                 out,
+                strict=True,
             )
         )
 
diff --git a/deepmd/dpmodel/utils/nlist.py b/deepmd/dpmodel/utils/nlist.py
index eb5320bad1..367d3ae080 100644
--- a/deepmd/dpmodel/utils/nlist.py
+++ b/deepmd/dpmodel/utils/nlist.py
@@ -255,7 +255,7 @@ def build_multiple_neighbor_list(
     rr = xp.where(nlist_mask, xp.full_like(rr, float("inf")), rr)
     nlist0 = nlist
     ret = {}
-    for rc, ns in zip(rcuts[::-1], nsels[::-1]):
+    for rc, ns in zip(rcuts[::-1], nsels[::-1], strict=True):
         tnlist_1 = nlist0[:, :, :ns]
         tnlist_1 = xp.where(rr[:, :, :ns] > rc, xp.full_like(tnlist_1, -1), tnlist_1)
         ret[get_multiple_nlist_key(rc, ns)] = tnlist_1
diff --git a/deepmd/jax/infer/deep_eval.py b/deepmd/jax/infer/deep_eval.py
index 10c4b7d7a5..4008d75a53 100644
--- a/deepmd/jax/infer/deep_eval.py
+++ b/deepmd/jax/infer/deep_eval.py
@@ -249,6 +249,7 @@ def eval(
             zip(
                 [x.name for x in request_defs],
                 out,
+                strict=True,
             )
         )
 
diff --git a/deepmd/tf/descriptor/loc_frame.py b/deepmd/tf/descriptor/loc_frame.py
index 87db875038..187f2a5e31 100644
--- a/deepmd/tf/descriptor/loc_frame.py
+++ b/deepmd/tf/descriptor/loc_frame.py
@@ -187,7 +187,7 @@ def compute_input_stats(
         sumn = []
         sumv2 = []
         for cc, bb, tt, nn, mm in zip(
-            data_coord, data_box, data_atype, natoms_vec, mesh
+            data_coord, data_box, data_atype, natoms_vec, mesh, strict=True
         ):
             sysv, sysv2, sysn = self._compute_dstats_sys_nonsmth(cc, bb, tt, nn, mm)
             sumv.append(sysv)
diff --git a/deepmd/tf/descriptor/se_a.py b/deepmd/tf/descriptor/se_a.py
index 3d2ab39c9f..5feea7fd63 100644
--- a/deepmd/tf/descriptor/se_a.py
+++ b/deepmd/tf/descriptor/se_a.py
@@ -374,7 +374,7 @@ def compute_input_stats(
         sumr2 = []
         suma2 = []
         for cc, bb, tt, nn, mm in zip(
-            data_coord, data_box, data_atype, natoms_vec, mesh
+            data_coord, data_box, data_atype, natoms_vec, mesh, strict=True
         ):
             sysr, sysr2, sysa, sysa2, sysn = self._compute_dstats_sys_smth(
                 cc, bb, tt, nn, mm
@@ -1331,7 +1331,7 @@ def init_variables(
 
         start_index_old[0] = 0
         for nn, oo, ii, jj in zip(
-            n_descpt, n_descpt_old, start_index, start_index_old
+            n_descpt, n_descpt_old, start_index, start_index_old, strict=True
         ):
             if nn < oo:
                 # new size is smaller, copy part of std
diff --git a/deepmd/tf/descriptor/se_a_ef.py b/deepmd/tf/descriptor/se_a_ef.py
index 37aa830431..b2e161938d 100644
--- a/deepmd/tf/descriptor/se_a_ef.py
+++ b/deepmd/tf/descriptor/se_a_ef.py
@@ -419,7 +419,13 @@ def compute_input_stats(
         sumr2 = []
         suma2 = []
         for cc, bb, tt, nn, mm, ee in zip(
-            data_coord, data_box, data_atype, natoms_vec, mesh, data_efield
+            data_coord,
+            data_box,
+            data_atype,
+            natoms_vec,
+            mesh,
+            data_efield,
+            strict=True,
         ):
             sysr, sysr2, sysa, sysa2, sysn = self._compute_dstats_sys_smth(
                 cc, bb, tt, nn, mm, ee
diff --git a/deepmd/tf/descriptor/se_atten.py b/deepmd/tf/descriptor/se_atten.py
index 3978709715..dc30c4088e 100644
--- a/deepmd/tf/descriptor/se_atten.py
+++ b/deepmd/tf/descriptor/se_atten.py
@@ -379,7 +379,13 @@ def compute_input_stats(
         if mixed_type:
             sys_num = 0
             for cc, bb, tt, nn, mm, r_n in zip(
-                data_coord, data_box, data_atype, natoms_vec, mesh, real_natoms_vec
+                data_coord,
+                data_box,
+                data_atype,
+                natoms_vec,
+                mesh,
+                real_natoms_vec,
+                strict=True,
             ):
                 sysr, sysr2, sysa, sysa2, sysn = self._compute_dstats_sys_smth(
                     cc, bb, tt, nn, mm, mixed_type, r_n
@@ -392,7 +398,7 @@ def compute_input_stats(
                 suma2.append(sysa2)
         else:
             for cc, bb, tt, nn, mm in zip(
-                data_coord, data_box, data_atype, natoms_vec, mesh
+                data_coord, data_box, data_atype, natoms_vec, mesh, strict=True
             ):
                 sysr, sysr2, sysa, sysa2, sysn = self._compute_dstats_sys_smth(
                     cc, bb, tt, nn, mm
diff --git a/deepmd/tf/descriptor/se_r.py b/deepmd/tf/descriptor/se_r.py
index c38a13d35a..07452f9ccd 100644
--- a/deepmd/tf/descriptor/se_r.py
+++ b/deepmd/tf/descriptor/se_r.py
@@ -271,7 +271,7 @@ def compute_input_stats(
         sumn = []
         sumr2 = []
         for cc, bb, tt, nn, mm in zip(
-            data_coord, data_box, data_atype, natoms_vec, mesh
+            data_coord, data_box, data_atype, natoms_vec, mesh, strict=True
         ):
             sysr, sysr2, sysn = self._compute_dstats_sys_se_r(cc, bb, tt, nn, mm)
             sumr.append(sysr)
diff --git a/deepmd/tf/descriptor/se_t.py b/deepmd/tf/descriptor/se_t.py
index b03746a9c6..bb7b64bf5f 100644
--- a/deepmd/tf/descriptor/se_t.py
+++ b/deepmd/tf/descriptor/se_t.py
@@ -258,7 +258,7 @@ def compute_input_stats(
         sumr2 = []
         suma2 = []
         for cc, bb, tt, nn, mm in zip(
-            data_coord, data_box, data_atype, natoms_vec, mesh
+            data_coord, data_box, data_atype, natoms_vec, mesh, strict=True
         ):
             sysr, sysr2, sysa, sysa2, sysn = self._compute_dstats_sys_smth(
                 cc, bb, tt, nn, mm
diff --git a/deepmd/tf/infer/deep_eval.py b/deepmd/tf/infer/deep_eval.py
index 20f513cd75..aada3a0976 100644
--- a/deepmd/tf/infer/deep_eval.py
+++ b/deepmd/tf/infer/deep_eval.py
@@ -752,7 +752,8 @@ def eval(
             output = (output,)
 
         output_dict = {
-            odef.name: oo for oo, odef in zip(output, self.output_def.var_defs.values())
+            odef.name: oo
+            for oo, odef in zip(output, self.output_def.var_defs.values(), strict=True)
         }
         # ugly!!
         if self.modifier_type is not None and issubclass(self.model_type, DeepPot):
diff --git a/deepmd/tf/nvnmd/entrypoints/mapt.py b/deepmd/tf/nvnmd/entrypoints/mapt.py
index 121263974a..977069058a 100644
--- a/deepmd/tf/nvnmd/entrypoints/mapt.py
+++ b/deepmd/tf/nvnmd/entrypoints/mapt.py
@@ -439,11 +439,11 @@ def run_u2s(self):
 
         u = N2 * np.reshape(np.arange(0, N + 1) / N, [-1, 1])  # pylint: disable=no-explicit-dtype
         res_lst = run_sess(sess, vals, feed_dict={dic_ph["u"]: u})
-        res_dic = dict(zip(keys, res_lst))
+        res_dic = dict(zip(keys, res_lst, strict=True))
 
         u2 = N2 * np.reshape(np.arange(0, N * 16 + 1) / (N * 16), [-1, 1])  # pylint: disable=no-explicit-dtype
         res_lst2 = run_sess(sess, vals, feed_dict={dic_ph["u"]: u2})
-        res_dic2 = dict(zip(keys, res_lst2))  # reference for compare
+        res_dic2 = dict(zip(keys, res_lst2, strict=True))  # reference for compare
 
         # change value
         for tt in range(ndim):
@@ -536,11 +536,11 @@ def run_s2g(self):
 
         s = N2 * np.reshape(np.arange(0, N + 1) / N, [-1, 1]) + smin_  # pylint: disable=no-explicit-dtype
         res_lst = run_sess(sess, vals, feed_dict={dic_ph["s"]: s})
-        res_dic = dict(zip(keys, res_lst))
+        res_dic = dict(zip(keys, res_lst, strict=True))
 
         s2 = N2 * np.reshape(np.arange(0, N * 16 + 1) / (N * 16), [-1, 1]) + smin_  # pylint: disable=no-explicit-dtype
         res_lst2 = run_sess(sess, vals, feed_dict={dic_ph["s"]: s2})
-        res_dic2 = dict(zip(keys, res_lst2))
+        res_dic2 = dict(zip(keys, res_lst2, strict=True))
 
         sess.close()
         return res_dic, res_dic2
@@ -601,7 +601,7 @@ def run_t2g(self):
         vals = list(dic_ph.values())
         #
         res_lst = run_sess(sess, vals, feed_dict={})
-        res_dic = dict(zip(keys, res_lst))
+        res_dic = dict(zip(keys, res_lst, strict=True))
 
         sess.close()
         return res_dic
diff --git a/deepmd/tf/train/trainer.py b/deepmd/tf/train/trainer.py
index f70c919301..4af59fd290 100644
--- a/deepmd/tf/train/trainer.py
+++ b/deepmd/tf/train/trainer.py
@@ -967,4 +967,4 @@ def get_data_dict(self, batch_list: list[np.ndarray]) -> dict[str, np.ndarray]:
         dict[str, np.ndarray]
             The dict of the loaded data.
         """
-        return dict(zip(self.data_keys, batch_list))
+        return dict(zip(self.data_keys, batch_list, strict=True))
diff --git a/deepmd/utils/batch_size.py b/deepmd/utils/batch_size.py
index 012ffb4260..e701e82ec6 100644
--- a/deepmd/utils/batch_size.py
+++ b/deepmd/utils/batch_size.py
@@ -248,7 +248,7 @@ def concate_result(r: list[Any]) -> Any:
             return ret
 
         if not returned_dict:
-            r_list = [concate_result(r) for r in zip(*results)]
+            r_list = [concate_result(r) for r in zip(*results, strict=True)]
             r = tuple(r_list)
             if len(r) == 1:
                 # avoid returning tuple if callable doesn't return tuple
diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py
index 82ef5ec25d..c9edfc2298 100644
--- a/deepmd/utils/data_system.py
+++ b/deepmd/utils/data_system.py
@@ -172,7 +172,9 @@ def __init__(
                 rule = int(words[1])
                 filtered_data_systems = []
                 filtered_system_dirs = []
-                for sys_dir, data_sys in zip(self.system_dirs, self.data_systems):
+                for sys_dir, data_sys in zip(
+                    self.system_dirs, self.data_systems, strict=True
+                ):
                     if data_sys.get_natoms() <= rule:
                         filtered_data_systems.append(data_sys)
                         filtered_system_dirs.append(sys_dir)
diff --git a/deepmd/utils/model_branch_dict.py b/deepmd/utils/model_branch_dict.py
index 501f5287b6..ccf085cb2b 100644
--- a/deepmd/utils/model_branch_dict.py
+++ b/deepmd/utils/model_branch_dict.py
@@ -169,7 +169,7 @@ def as_table(self) -> str:
         # Step 3: Determine actual width for each column
         # For the first two columns, we already decided the exact widths above.
         col_widths: list[int] = []
-        for idx, col in enumerate(zip(*wrapped_rows)):
+        for idx, col in enumerate(zip(*wrapped_rows, strict=True)):
             if idx == 0:
                 col_widths.append(branch_col_width)
             elif idx == 1:
@@ -187,7 +187,8 @@ def draw_row_line(cells_parts: list[list[str]]) -> str:
             return (
                 "| "
                 + " | ".join(
-                    part.ljust(width) for part, width in zip(cells_parts, col_widths)
+                    part.ljust(width)
+                    for part, width in zip(cells_parts, col_widths, strict=True)
                 )
                 + " |"
             )
diff --git a/deepmd/utils/update_sel.py b/deepmd/utils/update_sel.py
index 616b69c2ca..589b17edac 100644
--- a/deepmd/utils/update_sel.py
+++ b/deepmd/utils/update_sel.py
@@ -43,7 +43,9 @@ def update_one_sel(
             sel = [int(self.wrap_up_4(ii * ratio)) for ii in tmp_sel]
         else:
             # sel is set by user
-            for ii, (tt, dd) in enumerate(zip(tmp_sel, sel)):
+            # TODO: Fix len(tmp_sel) != len(sel) for TF spin models when strict is True
+            # error reported by source/tests/tf/test_init_frz_model_spin.py
+            for ii, (tt, dd) in enumerate(zip(tmp_sel, sel, strict=False)):
                 if dd and tt > dd:
                     # we may skip warning for sel=0, where the user is likely
                     # to exclude such type in the descriptor
diff --git a/pyproject.toml b/pyproject.toml
index 0adba2729a..9c6f213cfd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -383,6 +383,7 @@ select = [
     "TCH", # flake8-type-checking
     "PYI", # flake8-pyi
     "ANN", # type annotations
+    "B905", # zip-without-explicit-strict
 ]
 
 ignore = [
@@ -430,9 +431,9 @@ runtime-evaluated-base-classes = ["torch.nn.Module"]
 "backend/**" = ["ANN"]
 "data/**" = ["ANN"]
 "deepmd/tf/**" = ["TID253", "ANN"]
-"deepmd/pt/**" = ["TID253"]
+"deepmd/pt/**" = ["TID253", "B905"]
 "deepmd/jax/**" = ["TID253"]
-"deepmd/pd/**" = ["TID253", "ANN"]
+"deepmd/pd/**" = ["TID253", "ANN", "B905"]
 "source/**" = ["ANN"]
 
 "source/tests/tf/**" = ["TID253", "ANN"]
diff --git a/source/tests/common/dpmodel/test_pairtab_atomic_model.py b/source/tests/common/dpmodel/test_pairtab_atomic_model.py
index f8145c258a..1e87bb4bd7 100644
--- a/source/tests/common/dpmodel/test_pairtab_atomic_model.py
+++ b/source/tests/common/dpmodel/test_pairtab_atomic_model.py
@@ -158,6 +158,7 @@ def test_extrapolation_nonzero_rmax(self, mock_loadtxt) -> None:
                 0.035,
                 0.025,
             ],
+            strict=True,
         ):
             extended_coord = np.array(
                 [
diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py
index c1e6644be6..88fad4e10b 100644
--- a/source/tests/consistent/common.py
+++ b/source/tests/consistent/common.py
@@ -355,7 +355,7 @@ def test_tf_consistent_with_ref(self) -> None:
            data2.pop("@version")
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             np.testing.assert_allclose(
                 rr1.ravel(), rr2.ravel(), rtol=self.rtol, atol=self.atol
             )
 
@@ -372,7 +372,7 @@ def test_tf_self_consistent(self) -> None:
         obj2 = self.tf_class.deserialize(data1, suffix=self.unique_id)
         ret2, data2 = self.get_tf_ret_serialization_from_cls(obj2)
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
             assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
 
@@ -391,7 +391,7 @@ def test_dp_consistent_with_ref(self) -> None:
         ret2 = self.extract_ret(ret2, self.RefBackend.DP)
         data2 = dp_obj.serialize()
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             if rr1 is SKIP_FLAG or rr2 is SKIP_FLAG:
                 continue
             np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
@@ -407,7 +407,7 @@ def test_dp_self_consistent(self) -> None:
         obj1 = self.dp_class.deserialize(data1)
         ret2, data2 = self.get_dp_ret_serialization_from_cls(obj1)
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray):
                 np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
                 assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
@@ -436,7 +436,7 @@ def test_pt_consistent_with_ref(self) -> None:
            data1.pop("@variables", None)
            data2.pop("@variables", None)
            np.testing.assert_equal(data1, data2)
-           for rr1, rr2 in zip(ret1, ret2):
+           for rr1, rr2 in zip(ret1, ret2, strict=True):
                np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
                assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
 
@@ -449,7 +449,7 @@ def test_pt_self_consistent(self) -> None:
         obj2 = self.pt_class.deserialize(data1)
         ret2, data2 = self.get_pt_ret_serialization_from_cls(obj2)
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray):
                 np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
                 assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
@@ -473,7 +473,7 @@ def test_jax_consistent_with_ref(self) -> None:
            data1.pop("@variables", None)
            data2.pop("@variables", None)
            np.testing.assert_equal(data1, data2)
-           for rr1, rr2 in zip(ret1, ret2):
+           for rr1, rr2 in zip(ret1, ret2, strict=True):
                np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
                assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
 
@@ -486,7 +486,7 @@ def test_jax_self_consistent(self) -> None:
         obj1 = self.jax_class.deserialize(data1)
         ret2, data2 = self.get_jax_ret_serialization_from_cls(obj1)
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray):
                 np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
                 assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
@@ -515,7 +515,7 @@ def test_pd_consistent_with_ref(self):
            data1.pop("@variables", None)
            data2.pop("@variables", None)
            np.testing.assert_equal(data1, data2)
-           for rr1, rr2 in zip(ret1, ret2):
+           for rr1, rr2 in zip(ret1, ret2, strict=True):
                np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
                assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
 
@@ -528,7 +528,7 @@ def test_pd_self_consistent(self):
         obj2 = self.pd_class.deserialize(data1)
         ret2, data2 = self.get_pd_ret_serialization_from_cls(obj2)
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray):
                 np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
                 assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
@@ -550,7 +550,7 @@ def test_array_api_strict_consistent_with_ref(self) -> None:
         ret2 = self.extract_ret(ret2, self.RefBackend.ARRAY_API_STRICT)
         data2 = array_api_strict_obj.serialize()
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
             assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
 
@@ -564,7 +564,7 @@ def test_array_api_strict_self_consistent(self) -> None:
         obj1 = self.array_api_strict_class.deserialize(data1)
         ret2, data2 = self.get_array_api_strict_ret_serialization_from_cls(obj1)
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray):
                 np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
                 assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"
diff --git a/source/tests/consistent/fitting/test_dipole.py b/source/tests/consistent/fitting/test_dipole.py
index 010944d109..a1c48d102d 100644
--- a/source/tests/consistent/fitting/test_dipole.py
+++ b/source/tests/consistent/fitting/test_dipole.py
@@ -285,7 +285,7 @@ def test_tf_consistent_with_ref(self) -> None:
                 if networks is None:
                     data1["nets"]["networks"][ii] = None
         np.testing.assert_equal(data1, data2)
-        for rr1, rr2 in zip(ret1, ret2):
+        for rr1, rr2 in zip(ret1, ret2, strict=True):
             np.testing.assert_allclose(
                 rr1.ravel()[: rr2.size], rr2.ravel(), rtol=self.rtol, atol=self.atol
             )
diff --git a/source/tests/consistent/io/test_io.py b/source/tests/consistent/io/test_io.py
index 7ae9af6891..982d56d8fa 100644
--- a/source/tests/consistent/io/test_io.py
+++ b/source/tests/consistent/io/test_io.py
@@ -140,7 +140,9 @@ def test_deep_eval(self) -> None:
         nframes = self.atype.shape[0]
         prefix = "test_consistent_io_" + self.__class__.__name__.lower()
         rets = []
+        rets_atomic = []
         rets_nopbc = []
+        rets_nopbc_atomic = []
         for backend_name, suffix_idx in (
             # unfortunately, jax2tf cannot work with tf v1 behaviors
            ("jax", 2) if DP_TEST_TF2_ONLY else ("tensorflow", 0),
@@ -183,7 +185,8 @@ def test_deep_eval(self) -> None:
                 aparam=aparam,
                 atomic=True,
             )
-            rets.append(ret)
+            rets.append(ret[:3])
+            rets_atomic.append(ret[3:])
             ret = deep_eval.eval(
                 self.coords,
                 None,
@@ -200,22 +203,24 @@ def test_deep_eval(self) -> None:
                 aparam=aparam,
                 atomic=True,
             )
-            rets_nopbc.append(ret)
-        for ret in rets[1:]:
-            for vv1, vv2 in zip(rets[0], ret):
-                if np.isnan(vv2).all():
-                    # expect all nan if not supported
-                    continue
-                np.testing.assert_allclose(vv1, vv2, rtol=1e-12, atol=1e-12)
+            rets_nopbc.append(ret[:3])
+            rets_nopbc_atomic.append(ret[3:])
 
-        for idx, ret in enumerate(rets_nopbc[1:]):
-            for vv1, vv2 in zip(rets_nopbc[0], ret):
-                if np.isnan(vv2).all():
-                    # expect all nan if not supported
-                    continue
-                np.testing.assert_allclose(
-                    vv1, vv2, rtol=1e-12, atol=1e-12, err_msg=f"backend {idx + 1}"
-                )
+        for rets_idx, rets_x in enumerate(
+            (rets, rets_atomic, rets_nopbc, rets_nopbc_atomic)
+        ):
+            for idx, ret in enumerate(rets_x[1:]):
+                for vv1, vv2 in zip(rets_x[0], ret, strict=True):
+                    if np.isnan(vv2).all():
+                        # expect all nan if not supported
+                        continue
+                    np.testing.assert_allclose(
+                        vv1,
+                        vv2,
+                        rtol=1e-12,
+                        atol=1e-12,
+                        err_msg=f"backend {idx + 1} for rets_idx {rets_idx}",
+                    )
 
 
 class TestDeepPot(unittest.TestCase, IOTest):
diff --git a/source/tests/pd/model/test_nlist.py b/source/tests/pd/model/test_nlist.py
index 0947355ac0..09713c412b 100644
--- a/source/tests/pd/model/test_nlist.py
+++ b/source/tests/pd/model/test_nlist.py
@@ -253,7 +253,9 @@ def test_build_directional_nlist(self):
         ecoord, eatype, mapping = extend_coord_with_ghosts(
             self.coord, self.atype, self.cell, self.rcut
         )
-        for distinguish_types, mysel in zip([True, False], [sum(self.nsel), 300]):
+        for distinguish_types, mysel in zip(
+            [True, False], [sum(self.nsel), 300], strict=True
+        ):
             # full neighbor list
             nlist_full = build_neighbor_list(
                 ecoord,
diff --git a/source/tests/pd/model/test_se_e2_a.py b/source/tests/pd/model/test_se_e2_a.py
index b1e6abe5ae..f512f9e1a1 100644
--- a/source/tests/pd/model/test_se_e2_a.py
+++ b/source/tests/pd/model/test_se_e2_a.py
@@ -96,7 +96,7 @@ def test_consistency(
             self.atype_ext,
             self.nlist,
         )
-        for aa, bb in zip([rd1, gr1, sw1], [rd2, gr2, sw2]):
+        for aa, bb in zip([rd1, gr1, sw1], [rd2, gr2, sw2], strict=True):
             np.testing.assert_allclose(
                 aa.detach().cpu().numpy(),
                 bb,
diff --git a/source/tests/pd/test_utils.py b/source/tests/pd/test_utils.py
index 8d25cff964..39e9dcfe43 100644
--- a/source/tests/pd/test_utils.py
+++ b/source/tests/pd/test_utils.py
@@ -21,6 +21,7 @@ def test_to_numpy(self):
         for ptp, npp in zip(
             [paddle.float16, paddle.float32, paddle.float64],
             [np.float16, np.float32, np.float64],
+            strict=True,
         ):
             foo = foo.astype(npp)
             bar = to_paddle_tensor(foo)
diff --git a/source/tests/pt/model/test_descriptor_se_r.py b/source/tests/pt/model/test_descriptor_se_r.py
index 8c6e2c6776..64811f774c 100644
--- a/source/tests/pt/model/test_descriptor_se_r.py
+++ b/source/tests/pt/model/test_descriptor_se_r.py
@@ -100,7 +100,7 @@ def test_consistency(
             self.atype_ext,
             self.nlist,
         )
-        for aa, bb in zip([rd1, sw1], [rd2, sw2]):
+        for aa, bb in zip([rd1, sw1], [rd2, sw2], strict=True):
             np.testing.assert_allclose(
                 aa.detach().cpu().numpy(),
                 bb,
diff --git a/source/tests/pt/model/test_nlist.py b/source/tests/pt/model/test_nlist.py
index fdcbb813bc..cac5e5294a 100644
--- a/source/tests/pt/model/test_nlist.py
+++ b/source/tests/pt/model/test_nlist.py
@@ -245,7 +245,9 @@ def test_build_directional_nlist(self) -> None:
         ecoord, eatype, mapping = extend_coord_with_ghosts(
             self.coord, self.atype, self.cell, self.rcut
         )
-        for distinguish_types, mysel in zip([True, False], [sum(self.nsel), 300]):
+        for distinguish_types, mysel in zip(
+            [True, False], [sum(self.nsel), 300], strict=True
+        ):
             # full neighbor list
             nlist_full = build_neighbor_list(
                 ecoord,
diff --git a/source/tests/pt/model/test_pairtab_atomic_model.py b/source/tests/pt/model/test_pairtab_atomic_model.py
index 0a6f6ea933..0f324cbf51 100644
--- a/source/tests/pt/model/test_pairtab_atomic_model.py
+++ b/source/tests/pt/model/test_pairtab_atomic_model.py
@@ -219,6 +219,7 @@ def test_extrapolation_nonzero_rmax(self, mock_loadtxt) -> None:
                 0.035,
                 0.025,
             ],
+            strict=True,
         ):
             extended_coord = torch.tensor(
                 [
diff --git a/source/tests/pt/model/test_se_e2_a.py b/source/tests/pt/model/test_se_e2_a.py
index bdedd440cb..bdf7e3b4b9 100644
--- a/source/tests/pt/model/test_se_e2_a.py
+++ b/source/tests/pt/model/test_se_e2_a.py
@@ -96,7 +96,7 @@ def test_consistency(
             self.atype_ext,
             self.nlist,
         )
-        for aa, bb in zip([rd1, gr1, sw1], [rd2, gr2, sw2]):
+        for aa, bb in zip([rd1, gr1, sw1], [rd2, gr2, sw2], strict=True):
             np.testing.assert_allclose(
                 aa.detach().cpu().numpy(),
                 bb,
diff --git a/source/tests/pt/model/test_se_t.py b/source/tests/pt/model/test_se_t.py
index fb60ca011e..8f67d4a0aa 100644
--- a/source/tests/pt/model/test_se_t.py
+++ b/source/tests/pt/model/test_se_t.py
@@ -100,7 +100,7 @@ def test_consistency(
             self.atype_ext,
             self.nlist,
         )
-        for aa, bb in zip([rd1, sw1], [rd2, sw2]):
+        for aa, bb in zip([rd1, sw1], [rd2, sw2], strict=True):
             np.testing.assert_allclose(
                 aa.detach().cpu().numpy(),
                 bb,
diff --git a/source/tests/pt/test_adamuon.py b/source/tests/pt/test_adamuon.py
index 4567833948..074fb67f3b 100644
--- a/source/tests/pt/test_adamuon.py
+++ b/source/tests/pt/test_adamuon.py
@@ -97,7 +97,9 @@ def test_optimizer_step_smoke(self) -> None:
         optimizer.step()
 
         # Verify all parameters with gradients changed
-        for i, (p, init_p) in enumerate(zip(model.parameters(), initial_params)):
+        for i, (p, init_p) in enumerate(
+            zip(model.parameters(), initial_params, strict=True)
+        ):
             if p.grad is not None:
                 self.assertFalse(
                     torch.allclose(p, init_p),
@@ -369,7 +371,7 @@ def test_state_dict_save_load(self) -> None:
 
         params1 = list(optimizer.param_groups[0]["params"])
         params2 = list(optimizer2.param_groups[0]["params"])
-        for p1, p2 in zip(params1, params2):
+        for p1, p2 in zip(params1, params2, strict=True):
             s1 = optimizer.state[p1]
             s2 = optimizer2.state[p2]
             self.assertEqual(set(s1.keys()), set(s2.keys()))
diff --git a/source/tests/pt/test_utils.py b/source/tests/pt/test_utils.py
index 42bf2679a8..64ea5efd21 100644
--- a/source/tests/pt/test_utils.py
+++ b/source/tests/pt/test_utils.py
@@ -21,6 +21,7 @@ def test_to_numpy(self) -> None:
         for ptp, npp in zip(
             [torch.float16, torch.float32, torch.float64],
             [np.float16, np.float32, np.float64],
+            strict=True,
         ):
             foo = foo.astype(npp)
             bar = to_torch_tensor(foo)
diff --git a/source/tests/tf/test_linear_model.py b/source/tests/tf/test_linear_model.py
index e99c1042dd..6743c4658b 100644
--- a/source/tests/tf/test_linear_model.py
+++ b/source/tests/tf/test_linear_model.py
@@ -37,7 +37,7 @@ def setUp(self) -> None:
             os.path.join(infer_path, "deeppot-1.pbtxt"),
         ]
         self.graph_dirs = [pbtxt.replace("pbtxt", "pb") for pbtxt in self.pbtxts]
-        for pbtxt, pb in zip(self.pbtxts, self.graph_dirs):
+        for pbtxt, pb in zip(self.pbtxts, self.graph_dirs, strict=True):
             convert_pbtxt_to_pb(pbtxt, pb)
         self.graphs = [DeepPotential(pb) for pb in self.graph_dirs]
 
diff --git a/source/tests/tf/test_model_devi.py b/source/tests/tf/test_model_devi.py
index be9486cf02..e213c43893 100644
--- a/source/tests/tf/test_model_devi.py
+++ b/source/tests/tf/test_model_devi.py
@@ -41,7 +41,7 @@ def setUp(self) -> None:
             os.path.join(infer_path, "deeppot-1.pbtxt"),
         ]
         self.graph_dirs = [pbtxt.replace("pbtxt", "pb") for pbtxt in self.pbtxts]
-        for pbtxt, pb in zip(self.pbtxts, self.graph_dirs):
+        for pbtxt, pb in zip(self.pbtxts, self.graph_dirs, strict=True):
             convert_pbtxt_to_pb(pbtxt, pb)
         self.graphs = [DeepPotential(pb) for pb in self.graph_dirs]
         self.output = os.path.join(tests_path, "model_devi.out")
@@ -216,7 +216,7 @@ def setUpClass(cls) -> None:
             os.path.join(infer_path, "fparam_aparam.pbtxt"),
         ]
         cls.graph_dirs = [pbtxt.replace("pbtxt", "pb") for pbtxt in cls.pbtxts]
-        for pbtxt, pb in zip(cls.pbtxts, cls.graph_dirs):
+        for pbtxt, pb in zip(cls.pbtxts, cls.graph_dirs, strict=True):
             convert_pbtxt_to_pb(pbtxt, pb)
         cls.graphs = [DeepPotential(pb) for pb in cls.graph_dirs]
 
diff --git a/source/tests/tf/test_model_devi_mix.py b/source/tests/tf/test_model_devi_mix.py
index 4ef9f39cab..2c660d7a22 100644
--- a/source/tests/tf/test_model_devi_mix.py
+++ b/source/tests/tf/test_model_devi_mix.py
@@ -58,7 +58,7 @@ def setUp(self) -> None:
             os.path.join(infer_path, "se_atten_no_atten_2.pbtxt"),
         ]
         self.graph_dirs = [pbtxt.replace("pbtxt", "pb") for pbtxt in self.pbtxts]
-        for pbtxt, pb in zip(self.pbtxts, self.graph_dirs):
+        for pbtxt, pb in zip(self.pbtxts, self.graph_dirs, strict=True):
             convert_pbtxt_to_pb(pbtxt, pb)
         self.graphs = [DeepPotential(pb) for pb in self.graph_dirs]
         self.output = os.path.join(tests_path, "model_devi.out")
diff --git a/source/tests/tf/test_nvnmd_entrypoints.py b/source/tests/tf/test_nvnmd_entrypoints.py
index 6833b4e0f8..4a6877761e 100644
--- a/source/tests/tf/test_nvnmd_entrypoints.py
+++ b/source/tests/tf/test_nvnmd_entrypoints.py
@@ -466,7 +466,7 @@ def test_model_qnn_v0(self) -> None:
             tensordic[key] = graph.get_tensor_by_name(key + ":0")
         # get value
         valuelist = sess.run(list(tensordic.values()), feed_dict=feed_dict)
-        valuedic = dict(zip(tensordic.keys(), valuelist))
+        valuedic = dict(zip(tensordic.keys(), valuelist, strict=True))
         # test
         # o_descriptor
         idx = np.array([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192])
@@ -783,7 +783,7 @@ def test_model_qnn_v1(self) -> None:
             tensordic[key] = graph.get_tensor_by_name(key + ":0")
         # get value
         valuelist = sess.run(list(tensordic.values()), feed_dict=feed_dict)
-        valuedic = dict(zip(tensordic.keys(), valuelist))
+        valuedic = dict(zip(tensordic.keys(), valuelist, strict=True))
         # test
         # o_descriptor
         idx = np.array([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192])
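
Note on the pattern applied throughout this patch: `zip(..., strict=True)` (Python 3.10+) raises `ValueError` when its iterables have different lengths instead of silently truncating to the shortest one, and ruff's B905 rule (zip-without-explicit-strict) flags any `zip()` call that does not pass `strict` explicitly. A minimal sketch of the behavior, using hypothetical lists (not taken from the code above):

    keys = ["energy", "force", "virial"]
    vals = [1.0, 2.0]  # one entry short

    # default zip() silently truncates to the shorter iterable
    assert dict(zip(keys, vals)) == {"energy": 1.0, "force": 2.0}

    # strict=True raises instead, surfacing the length mismatch early
    try:
        dict(zip(keys, vals, strict=True))
    except ValueError as err:
        print(err)  # e.g. "zip() argument 2 is shorter than argument 1"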