@@ -12,13 +12,27 @@
 import subprocess
 import sys
 import threading
-from collections.abc import Callable, Iterable
+from collections.abc import (
+    Callable,
+    Iterable,
+)
 from difflib import SequenceMatcher
 from enum import Enum
-from typing import TYPE_CHECKING, Any, TextIO, TypeVar, Union, cast
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    TextIO,
+    TypeVar,
+    Union,
+    cast,
+)

 from . import constants
-from .argparse_custom import ChoicesProviderFunc, CompleterFunc
+from . import string_utils as su
+from .argparse_custom import (
+    ChoicesProviderFunc,
+    CompleterFunc,
+)

 if TYPE_CHECKING:  # pragma: no cover
     import cmd2  # noqa: F401
@@ -188,9 +202,7 @@ def alphabetical_sort(list_to_sort: Iterable[str]) -> list[str]:
     :param list_to_sort: the list being sorted
     :return: the sorted list
     """
-    from .string_utils import norm_fold
-
-    return sorted(list_to_sort, key=norm_fold)
+    return sorted(list_to_sort, key=su.norm_fold)


 def try_int_or_force_to_lower_case(input_str: str) -> int | str:
@@ -199,12 +211,10 @@ def try_int_or_force_to_lower_case(input_str: str) -> int | str:
     :param input_str: string to convert
     :return: the string as an integer or a lower case version of the string.
     """
-    from .string_utils import norm_fold
-
     try:
         return int(input_str)
     except ValueError:
-        return norm_fold(input_str)
+        return su.norm_fold(input_str)


 def natural_keys(input_str: str) -> list[int | str]:
@@ -238,11 +248,9 @@ def quote_specific_tokens(tokens: list[str], tokens_to_quote: list[str]) -> None
     :param tokens: token list being edited
     :param tokens_to_quote: the tokens, which if present in tokens, to quote
     """
-    from .string_utils import quote
-
     for i, token in enumerate(tokens):
         if token in tokens_to_quote:
-            tokens[i] = quote(token)
+            tokens[i] = su.quote(token)


 def unquote_specific_tokens(tokens: list[str], tokens_to_unquote: list[str]) -> None:
@@ -251,10 +259,8 @@ def unquote_specific_tokens(tokens: list[str], tokens_to_unquote: list[str]) ->
     :param tokens: token list being edited
     :param tokens_to_unquote: the tokens, which if present in tokens, to unquote
     """
-    from .string_utils import strip_quotes
-
     for i, token in enumerate(tokens):
-        unquoted_token = strip_quotes(token)
+        unquoted_token = su.strip_quotes(token)
         if unquoted_token in tokens_to_unquote:
             tokens[i] = unquoted_token

@@ -264,12 +270,10 @@ def expand_user(token: str) -> str:

     :param token: the string to expand
     """
-    from .string_utils import is_quoted, strip_quotes
-
     if token:
-        if is_quoted(token):
+        if su.is_quoted(token):
             quote_char = token[0]
-            token = strip_quotes(token)
+            token = su.strip_quotes(token)
         else:
             quote_char = ''

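
For context, a brief illustrative sketch of how the helpers touched above behave after this refactor. It is not part of the commit itself; it assumes an installed cmd2 whose utils module matches this revision, and the exact quote character chosen by su.quote may vary.

# Illustrative only: exercising the refactored helpers, assuming an
# installed cmd2 whose utils module matches this commit.
from cmd2 import utils

# alphabetical_sort() sorts with su.norm_fold as the key, so the
# ordering is case-insensitive.
print(utils.alphabetical_sort(['banana', 'Apple', 'cherry']))
# -> ['Apple', 'banana', 'cherry']

# try_int_or_force_to_lower_case() returns an int when it can,
# otherwise a case-folded string via su.norm_fold.
print(utils.try_int_or_force_to_lower_case('42'))     # -> 42
print(utils.try_int_or_force_to_lower_case('HeLLo'))  # -> hello

# quote_specific_tokens() edits the token list in place, quoting
# any token that appears in the second argument.
tokens = ['ls', 'my file', 'plain']
utils.quote_specific_tokens(tokens, ['my file'])
print(tokens)  # e.g. ['ls', '"my file"', 'plain']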