
Commit f093008

Author: welli7ngton committed
fix: start using the cv2find.Box type instead of trying to import from the typing module
1 parent: 4b502c5
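In plain terms: `Box` was never part of the `typing` module; it is the namedtuple defined in `botcity/core/cv2find.py`, so the annotations in `bot.py` now reference `cv2find.Box`. A minimal sketch of the pattern, using hypothetical stand-in names (only the Box shape is taken from the diff below):

# Sketch only: stand-in names, not the botcity API. The Box definition mirrors
# the one in botcity/core/cv2find.py; typing itself has no Box type.
import collections
from typing import Union

Box = collections.namedtuple("Box", "left top width height")


def find_stub(found: bool) -> Union[Box, None]:
    # Return a Box on a hit, None otherwise (illustrative behaviour only).
    if found:
        return Box(left=10, top=20, width=100, height=50)
    return None


match = find_stub(True)
if match is not None:
    print(match.left, match.top, match.width, match.height)  # 10 20 100 50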

2 files changed (+46 additions, −28 deletions)


botcity/core/bot.py

Lines changed: 17 additions & 10 deletions
@@ -6,7 +6,9 @@
 import subprocess
 import time
 import webbrowser
-from typing import Union, Tuple, Optional, List, Dict, Box, Generator, Any
+from typing import Union, Tuple, Optional, List, Dict, Generator, Any
+from collections import namedtuple
+
 from numpy import ndarray
 
 import pyperclip
@@ -131,7 +133,7 @@ def add_image(self, label: str, path: str) -> None:
         """
         self.state.map_images[label] = path
 
-    def get_image_from_map(self, label: str) -> None:
+    def get_image_from_map(self, label: str) -> Image.Image:
         """
         Return an image from teh state image map.
 
@@ -257,8 +259,8 @@ def _find_multiple_helper(
         region: Tuple[int, int, int, int],
         confidence: float,
         grayscale: bool,
-        needle: Union[Image.Image, ndarray, str]
-    ) -> Union[Box, None]:
+        needle: Union[Image.Image, ndarray, str],
+    ) -> Union[cv2find.Box, None]:
         ele = cv2find.locate_all_opencv(
             needle, haystack, region=region, confidence=confidence, grayscale=grayscale
         )
@@ -414,7 +416,7 @@ def find_all(
         matching: float = 0.9,
         waiting_time: int = 10000,
         grayscale: bool = False,
-    ) -> Generator[Box, Any, None]:
+    ) -> Generator[cv2find.Box, Any, None]:
         """
         Find all elements defined by label on screen until a timeout happens.
 
@@ -438,7 +440,9 @@ def find_all(
             None if not found.
         """
 
-        def deduplicate(elems):
+        def deduplicate(
+            elems: list[Generator[cv2find.Box, Any, None]]
+        ) -> list[Generator[cv2find.Box, Any, None]]:
             def find_same(item, items):
                 x_start = item.left
                 x_end = item.left + item.width
@@ -554,7 +558,9 @@ def find_text(
            grayscale=True,
        )
 
-    def find_process(self, name: str = None, pid: str = None) -> Process:
+    def find_process(
+        self, name: Optional[str] = None, pid: Optional[str] = None
+    ) -> Union[Process, None]:
         """
         Find a process by name or PID
 
@@ -686,13 +692,14 @@ def save_screenshot(self, path: str) -> None:
         self.screenshot(path)
 
     def get_element_coords(
-        self, label: str,
+        self,
+        label: str,
         x: Optional[int] = None,
         y: Optional[int] = None,
         width: Optional[int] = None,
         height: Optional[int] = None,
         matching: float = 0.9,
-        best: bool = True
+        best: bool = True,
     ) -> Tuple[int, int] | Tuple[None, None]:
         """
         Find an element defined by label on screen and returns its coordinates.
@@ -754,7 +761,7 @@ def get_element_coords_centered(
         width: Optional[int] = None,
         height: Optional[int] = None,
         matching: float = 0.9,
-        best: bool = True
+        best: bool = True,
     ):
         """
         Find an element defined by label on screen and returns its centered coordinates.
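As a usage note on the `Generator[cv2find.Box, Any, None]` annotations above: callers iterate the yielded Box tuples and read `left`, `top`, `width`, and `height`. A rough, hypothetical sketch follows; `bot_find_all_stub` is a stand-in, not `find_all` itself:

# Hypothetical stand-in for a find_all-style generator; only the Box field
# names (left, top, width, height) come from the diff above.
import collections
from typing import Any, Generator

Box = collections.namedtuple("Box", "left top width height")


def bot_find_all_stub() -> Generator[Box, Any, None]:
    # Yield matches lazily, the way the updated annotation describes.
    yield Box(0, 0, 32, 32)
    yield Box(64, 10, 32, 32)


for box in bot_find_all_stub():
    # Center of each match, e.g. for a click target.
    print(box.left + box.width // 2, box.top + box.height // 2)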

botcity/core/cv2find.py

Lines changed: 29 additions & 18 deletions
@@ -30,15 +30,16 @@
 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 """
+
 import collections
 import cv2
 import numpy
 from PIL.Image import Image
 from typing import Union, Tuple, Optional, Generator, Any
 
-RUNNING_CV_2 = cv2.__version__[0] < '3'
+RUNNING_CV_2 = cv2.__version__[0] < "3"
 
-Box = collections.namedtuple('Box', 'left top width height')
+Box = collections.namedtuple("Box", "left top width height")
 
 if RUNNING_CV_2:
     LOAD_COLOR = cv2.CV_LOAD_IMAGE_COLOR
@@ -48,7 +49,9 @@
     LOAD_GRAYSCALE = cv2.IMREAD_GRAYSCALE
 
 
-def _load_cv2(img: Union[Image, numpy.ndarray, str], grayscale: bool = False) -> numpy.ndarray:
+def _load_cv2(
+    img: Union[Image, numpy.ndarray, str], grayscale: bool = False
+) -> numpy.ndarray:
     """
     TODO
     """
@@ -68,23 +71,25 @@ def _load_cv2(img: Union[Image, numpy.ndarray, str], grayscale: bool = False) ->
         else:
             img_cv = cv2.imread(img, LOAD_COLOR)
         if img_cv is None:
-            raise IOError("Failed to read %s because file is missing, "
-                          "has improper permissions, or is an "
-                          "unsupported or invalid format" % img)
+            raise IOError(
+                "Failed to read %s because file is missing, "
+                "has improper permissions, or is an "
+                "unsupported or invalid format" % img
+            )
     elif isinstance(img, numpy.ndarray):
         # don't try to convert an already-gray image to gray
         if grayscale and len(img.shape) == 3:  # and img.shape[2] == 3:
             img_cv = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
         else:
             img_cv = img
-    elif hasattr(img, 'convert'):
+    elif hasattr(img, "convert"):
         # assume its a PIL.Image, convert to cv format
-        img_array = numpy.array(img.convert('RGB'))
+        img_array = numpy.array(img.convert("RGB"))
         img_cv = img_array[:, :, ::-1].copy()  # -1 does RGB -> BGR
         if grayscale:
             img_cv = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
     else:
-        raise TypeError('expected an image filename, OpenCV numpy array, or PIL image')
+        raise TypeError("expected an image filename, OpenCV numpy array, or PIL image")
     return img_cv
 
 
@@ -95,7 +100,7 @@ def locate_all_opencv(
     limit: int = 10000,
     region: Optional[Tuple[int, int, int, int]] = None,
     step: int = 1,
-    confidence: float = 0.999
+    confidence: float = 0.999,
 ) -> Generator[Box, Any, None]:
     """
     TODO - rewrite this
@@ -116,15 +121,19 @@ def locate_all_opencv(
 
     if region:
         haystack_image = haystack_image[
-            region[1]:region[1] + region[3],
-            region[0]:region[0] + region[2]
-            ]
+            region[1] : region[1] + region[3], region[0] : region[0] + region[2]
+        ]
     else:
-        region = (0, 0)  # full image; these values used in the yield statement
-    if (haystack_image.shape[0] < needle_image.shape[0] or
-        haystack_image.shape[1] < needle_image.shape[1]):
+        region = (0, 0, 0, 0)  # full image; these values used in the yield statement
+
+    if (
+        haystack_image.shape[0] < needle_image.shape[0]
+        or haystack_image.shape[1] < needle_image.shape[1]
+    ):
         # avoid semi-cryptic OpenCV error below if bad size
-        raise ValueError('needle dimension(s) exceed the haystack image or region dimensions')
+        raise ValueError(
+            "needle dimension(s) exceed the haystack image or region dimensions"
+        )
 
     if step == 2:
         confidence *= 0.95
@@ -147,6 +156,8 @@ def locate_all_opencv(
     matchy = matches[0] * step + region[1]
 
     # Order results before sending back
-    ordered = sorted(zip(matchx, matchy), key=lambda p: result[p[1]][p[0]], reverse=True)
+    ordered = sorted(
+        zip(matchx, matchy), key=lambda p: result[p[1]][p[0]], reverse=True
+    )
     for x, y in ordered:
         yield Box(x, y, needle_width, needle_height)
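For context on that last hunk, a trimmed sketch of an OpenCV template-matching loop that ends in `yield Box(...)` as above. Region and step handling are omitted and the names are simplified, so this is not the actual `locate_all_opencv` implementation:

# Trimmed sketch, not the real locate_all_opencv: grayscale-only, no region/step.
import collections
import cv2
import numpy

Box = collections.namedtuple("Box", "left top width height")


def locate_all_sketch(needle, haystack, confidence=0.95):
    # needle and haystack are grayscale numpy arrays (H x W, uint8).
    height, width = needle.shape[:2]
    result = cv2.matchTemplate(haystack, needle, cv2.TM_CCOEFF_NORMED)
    ys, xs = numpy.where(result >= confidence)
    # Best scores first, mirroring the sorted(...) call in the diff.
    for y, x in sorted(zip(ys, xs), key=lambda p: result[p[0]][p[1]], reverse=True):
        yield Box(int(x), int(y), width, height)


rng = numpy.random.default_rng(0)
haystack = rng.integers(0, 255, size=(100, 100), dtype=numpy.uint8)
needle = haystack[40:50, 60:70].copy()
print(list(locate_all_sketch(needle, haystack)))  # expect a Box at left=60, top=40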
