diff --git a/SimpleCV/Camera.py b/SimpleCV/Camera.py index fb799def7..b05f2827e 100644 --- a/SimpleCV/Camera.py +++ b/SimpleCV/Camera.py @@ -369,7 +369,7 @@ def live(self): i.dl().text(txt, (10,i.height / 2), color=col) txt = "color: " + str(i.getPixel(d.mouseX,d.mouseY)) i.dl().text(txt, (10,(i.height / 2) + 10), color=col) - print "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY)) + print("coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY))) if elapsed_time > 0 and elapsed_time < 5: @@ -381,7 +381,7 @@ def live(self): i.save(d) if d.mouseRight: - print "Closing Window" + print("Closing Window") d.done = True @@ -483,7 +483,7 @@ def __init__(self, camera_index = -1, prop_set = {}, threaded = True, calibratio if "delay" in prop_set: time.sleep(prop_set['delay']) - if platform.system() == "Linux" and (prop_set.has_key("height") or cv.GrabFrame(self.capture) == False): + if platform.system() == "Linux" and ("height" in prop_set or cv.GrabFrame(self.capture) == False): import pygame.camera pygame.camera.init() threaded = True #pygame must be threaded @@ -491,8 +491,8 @@ def __init__(self, camera_index = -1, prop_set = {}, threaded = True, calibratio camera_index = 0 self.index = camera_index _index.append(camera_index) - print _index - if(prop_set.has_key("height") and prop_set.has_key("width")): + print(_index) + if("height" in prop_set and "width" in prop_set): self.capture = pygame.camera.Camera("/dev/video" + str(camera_index), (prop_set['width'], prop_set['height'])) else: self.capture = pygame.camera.Camera("/dev/video" + str(camera_index)) @@ -517,7 +517,7 @@ def __init__(self, camera_index = -1, prop_set = {}, threaded = True, calibratio return None #set any properties in the constructor - for p in prop_set.keys(): + for p in list(prop_set.keys()): if p in self.prop_map: cv.SetCaptureProperty(self.capture, self.prop_map[p], prop_set[p]) @@ -682,16 +682,16 @@ def __init__(self, s, st, start=1): self.start = start if self.sourcetype not in ["video", "image", "imageset", "directory"]: - print 'Error: In VirtualCamera(), Incorrect Source option. "%s" \nUsage:' % self.sourcetype - print '\tVirtualCamera("filename","video")' - print '\tVirtualCamera("filename","image")' - print '\tVirtualCamera("./path_to_images","imageset")' - print '\tVirtualCamera("./path_to_images","directory")' + print('Error: In VirtualCamera(), Incorrect Source option. "%s" \nUsage:' % self.sourcetype) + print('\tVirtualCamera("filename","video")') + print('\tVirtualCamera("filename","image")') + print('\tVirtualCamera("./path_to_images","imageset")') + print('\tVirtualCamera("./path_to_images","directory")') return None else: if isinstance(self.source,str) and not os.path.exists(self.source): - print 'Error: In VirtualCamera()\n\t"%s" was not found.' % self.source + print('Error: In VirtualCamera()\n\t"%s" was not found.' 
% self.source) return None if (self.sourcetype == "imageset"): @@ -737,7 +737,7 @@ def getImage(self): return Image(self.source, self) elif (self.sourcetype == 'imageset'): - print len(self.source) + print(len(self.source)) img = self.source[self.counter % len(self.source)] self.counter = self.counter + 1 return img @@ -1073,21 +1073,21 @@ def run(self): #thank you missing urllib2 manual #http://www.voidspace.org.uk/python/articles/urllib2.shtml#id5 - password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() + password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm() password_mgr.add_password(None, self.url, user, password) - handler = urllib2.HTTPBasicAuthHandler(password_mgr) - opener = urllib2.build_opener(handler) + handler = urllib.request.HTTPBasicAuthHandler(password_mgr) + opener = urllib.request.build_opener(handler) f = opener.open(self.url) else: - f = urllib2.urlopen(self.url) + f = urllib.request.urlopen(self.url) headers = f.info() - if (headers.has_key("content-type")): + if ("content-type" in headers): headers['Content-type'] = headers['content-type'] #force ucase first char - if not headers.has_key("Content-type"): + if "Content-type" not in headers: logger.warning("Tried to load a JpegStream from " + self.url + ", but didn't find a content-type header!") return @@ -1257,7 +1257,7 @@ def __init__(self, id = 0, properties = { "mode": "color"}): self.max_x = self.device.br_x self.max_y = self.device.br_y #save our extents for later - for k, v in properties.items(): + for k, v in list(properties.items()): setattr(self.device, k, v) def getImage(self): @@ -1335,7 +1335,7 @@ def printProperties(self): """ for prop in self.device.optlist: try: - print self.device[prop] + print(self.device[prop]) except: pass @@ -1589,7 +1589,7 @@ def getImage(self): if self._roi : img = img.crop(self._roi,centered=True) except : - print "Error croping the image. ROI specified is not correct."
+ print("Error cropping the image. ROI specified is not correct.") return None return img @@ -1996,7 +1996,8 @@ def get3DImage(self, Q, method="BM", state=None): import cv2 except ImportError: cv2flag = False - import cv2.cv as cv + #import cv2.cv as cv + import cv2 (r, c) = self.size if method == "BM": sbm = cv.CreateStereoBMState() @@ -2107,12 +2108,12 @@ def get3DImage(self, Q, method="BM", state=None): Q = np.array(Q) if not isinstance(disparity, np.ndarray): disparity = np.array(disparity) - Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.cv.CV_32F) - Image3D_normalize = cv2.normalize(Image3D, alpha=0, beta=255, norm_type=cv2.cv.CV_MINMAX, dtype=cv2.cv.CV_8UC3) + Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.CV_32F) + Image3D_normalize = cv2.normalize(Image3D, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3) retVal = Image(Image3D_normalize, cv2image=True) else: - Image3D = cv.CreateMat(self.LeftImage.size()[1], self.LeftImage.size()[0], cv2.cv.CV_32FC3) - Image3D_normalize = cv.CreateMat(self.LeftImage.size()[1], self.LeftImage.size()[0], cv2.cv.CV_8UC3) + Image3D = cv.CreateMat(self.LeftImage.size()[1], self.LeftImage.size()[0], cv2.CV_32FC3) + Image3D_normalize = cv.CreateMat(self.LeftImage.size()[1], self.LeftImage.size()[0], cv2.CV_8UC3) cv.ReprojectImageTo3D(disparity, Image3D, Q) cv.Normalize(Image3D, Image3D_normalize, 0, 255, cv.CV_MINMAX, CV_8UC3) retVal = Image(Image3D_normalize) @@ -2148,19 +2149,20 @@ def get3DImageFromDisparity(self, disparity, Q): import cv2 except ImportError: cv2flag = False - import cv2.cv as cv + #import cv2.cv as cv + import cv2 if cv2flag: if not isinstance(Q, np.ndarray): Q = np.array(Q) disparity = disparity.getNumpyCv2() - Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.cv.CV_32F) - Image3D_normalize = cv2.normalize(Image3D, alpha=0, beta=255, norm_type=cv2.cv.CV_MINMAX, dtype=cv2.cv.CV_8UC3) + Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.CV_32F) + Image3D_normalize = cv2.normalize(Image3D, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3) retVal = Image(Image3D_normalize, cv2image=True) else: disparity = disparity.getMatrix() - Image3D = cv.CreateMat(self.LeftImage.size()[1], self.LeftImage.size()[0], cv2.cv.CV_32FC3) - Image3D_normalize = cv.CreateMat(self.LeftImage.size()[1], self.LeftImage.size()[0], cv2.cv.CV_8UC3) + Image3D = cv.CreateMat(self.LeftImage.size()[1], self.LeftImage.size()[0], cv2.CV_32FC3) + Image3D_normalize = cv.CreateMat(self.LeftImage.size()[1], self.LeftImage.size()[0], cv2.CV_8UC3) cv.ReprojectImageTo3D(disparity, Image3D, Q) cv.Normalize(Image3D, Image3D_normalize, 0, 255, cv.CV_MINMAX, CV_8UC3) retVal = Image(Image3D_normalize) @@ -2241,7 +2243,7 @@ def stereoCalibration(self,camLeft, camRight, nboards=30, chessboard=(8, 5), gri frameRight = cv.QueryFrame(captureRight) cv.FindChessboardCorners(frameRight, (chessboard)) except : - print "Error Initialising the Left and Right camera" + print("Error Initialising the Left and Right camera") return None imagePoints1 = cv.CreateMat(1, nboards * chessboard[0] * chessboard[1], cv.CV_64FC2) @@ -2284,7 +2286,7 @@ def stereoCalibration(self,camLeft, camRight, nboards=30, chessboard=(8, 5), gri cv.ShowImage(n2, frameRight) if cor1[0] and cor2[0] and k==0x20: - print count + print(count) for i in range(0, len(cor1[1])): cv.Set1D(imagePoints1, count * chessboard[0] * chessboard[1] + i, cv.Scalar(cor1[1][i][0], cor1[1][i][1])) cv.Set1D(imagePoints2, count * chessboard[0] * chessboard[1] + i, cv.Scalar(cor2[1][i][0], cor2[1][i][1])) @@ -2307,19 
+2309,19 @@ def stereoCalibration(self,camLeft, camRight, nboards=30, chessboard=(8, 5), gri cv.Zero(D1) cv.Zero(D2) - print "Running stereo calibration..." + print("Running stereo calibration...") del(camLeft) del(camRight) cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, nPoints, CM1, D1, CM2, D2, WinSize, R, T, E, F, flags=cv.CV_CALIB_SAME_FOCAL_LENGTH | cv.CV_CALIB_ZERO_TANGENT_DIST) - print "Done." + print("Done.") return (CM1, CM2, D1, D2, R, T, E, F) cv.ShowImage(n1, frameLeft) cv.ShowImage(n2, frameRight) if k == 0x1b: - print "ESC pressed. Exiting. WARNING: NOT ENOUGH CHESSBOARDS FOUND YET" + print("ESC pressed. Exiting. WARNING: NOT ENOUGH CHESSBOARDS FOUND YET") cv.DestroyAllWindows() break @@ -2373,7 +2375,7 @@ def saveCalibration(self,calibration=None, fname="Stereo",cdir="."): cv.Save("{0}/{1}".format(cdir, filenames[5]), T) cv.Save("{0}/{1}".format(cdir, filenames[6]), E) cv.Save("{0}/{1}".format(cdir, filenames[7]), F) - print "Calibration parameters written to directory '{0}'.".format(cdir) + print("Calibration parameters written to directory '{0}'.".format(cdir)) return True except : @@ -2421,7 +2423,7 @@ def loadCalibration(self,fname="Stereo",dir="."): T = cv.Load("{0}/{1}".format(dir, filenames[5])) E = cv.Load("{0}/{1}".format(dir, filenames[6])) F = cv.Load("{0}/{1}".format(dir, filenames[7])) - print "Calibration files loaded from dir '{0}'.".format(dir) + print("Calibration files loaded from dir '{0}'.".format(dir)) return (CM1, CM2, D1, D2, R, T, E, F) except : @@ -2469,7 +2471,7 @@ def stereoRectify(self,calib=None,WinSize=(352,288)): P2 = cv.CreateMat(3, 4, cv.CV_64F) Q = cv.CreateMat(4, 4, cv.CV_64F) - print "Running stereo rectification..." + print("Running stereo rectification...") (leftroi, rightroi) = cv.StereoRectify(CM1, CM2, D1, D2, WinSize, R, T, R1, R2, P1, P2, Q) roi = [] @@ -2477,7 +2479,7 @@ def stereoRectify(self,calib=None,WinSize=(352,288)): roi.append(max(leftroi[1], rightroi[1])) roi.append(min(leftroi[2], rightroi[2])) roi.append(min(leftroi[3], rightroi[3])) - print "Done." 
+ print("Done.") return (R1, R2, P1, P2, Q, roi) def getImagesUndistort(self,imgLeft, imgRight, calibration, rectification, WinSize=(352,288)): @@ -2572,7 +2574,8 @@ def get3DImage(self, leftIndex, rightIndex, Q, method="BM", state=None): import cv2 except ImportError: cv2flag = False - import cv2.cv as cv + #import cv2.cv as cv + import cv2 if cv2flag: camLeft = cv2.VideoCapture(leftIndex) camRight = cv2.VideoCapture(rightIndex) @@ -2975,7 +2978,7 @@ def __init__(self, camera_id = -1, properties = {}, threaded = False): camera_id = camlist[camera_id].UniqueId - camera_id = long(camera_id) + camera_id = int(camera_id) self.handle = ct.c_uint() init_count = 0 while self.dll.PvCameraOpen(camera_id,0,ct.byref(self.handle)) != 0: #wait until camera is availble @@ -3143,7 +3146,7 @@ def getAllProperties(self): """ props = {} - for p in self._properties.keys(): + for p in list(self._properties.keys()): props[p] = self.getProperty(p) return props @@ -3206,9 +3209,9 @@ def getImage(self, timeout = 5000): st = time.time() try: pverr( self.dll.PvCaptureWaitForFrameDone(self.handle, ct.byref(self.frame), timeout) ) - except Exception, e: - print "Exception waiting for frame:", e - print "Time taken:",time.time() - st + except Exception as e: + print("Exception waiting for frame:", e) + print("Time taken:",time.time() - st) self.frame = None raise(e) img = self.unbuffer() @@ -3267,13 +3270,13 @@ def _getFrame(self, timeout = 5000): st = time.time() try: pverr( self.dll.PvCaptureWaitForFrameDone(self.handle, ct.byref(frame), timeout) ) - except Exception, e: - print "Exception waiting for frame:", e - print "Time taken:",time.time() - st + except Exception as e: + print("Exception waiting for frame:", e) + print("Time taken:",time.time() - st) raise(e) - except Exception, e: - print "Exception aquiring frame:", e + except Exception as e: + print("Exception acquiring frame:", e) raise(e) return frame @@ -3284,8 +3287,8 @@ def acquire(self): self.runCommand("AcquisitionStart") pverr( self.dll.PvCaptureQueueFrame(self.handle, ct.byref(self.frame), None) ) self.runCommand("AcquisitionStop") - except Exception, e: - print "Exception aquiring frame:", e + except Exception as e: + print("Exception acquiring frame:", e) raise(e) @@ -3298,8 +3301,8 @@ def __init__(self, camera_id = None, properties = {}, threaded = False): try: from gi.repository import Aravis except: - print "GigE is supported by the Aravis library, download and build from https://github.com/sightmachine/aravis" - print "Note that you need to set GI_TYPELIB_PATH=$GI_TYPELIB_PATH:(PATH_TO_ARAVIS)/src for the GObject Introspection" + print("GigE is supported by the Aravis library, download and build from https://github.com/sightmachine/aravis") + print("Note that you need to set GI_TYPELIB_PATH=$GI_TYPELIB_PATH:(PATH_TO_ARAVIS)/src for the GObject Introspection") sys.exit() self._cam = Aravis.Camera.new (None) @@ -3387,17 +3390,17 @@ def getProperty(self, name = None): see function camera.getPropertyList() ''' if name == None: - print "You need to provide a property, available properties are:" - print "" + print("You need to provide a property, available properties are:") + print("") for p in self.getPropertyList(): - print p + print(p) return stringval = "get_{}".format(name) try: return getattr(self._cam, stringval)() except: - print 'Property {} does not appear to exist'.format(name) + print('Property {} does not appear to exist'.format(name)) return None def setProperty(self, 
name = None, *args): ''' if name == None: - print "You need to provide a property, available properties are:" - print "" + print("You need to provide a property, available properties are:") + print("") for p in self.getPropertyList(): - print p + print(p) return if len(args) <= 0: - print "You must provide a value to set" + print("You must provide a value to set") return stringval = "set_{}".format(name) try: return getattr(self._cam, stringval)(*args) except: - print 'Property {} does not appear to exist or value is not in correct format'.format(name) + print('Property {} does not appear to exist or value is not in correct format'.format(name)) return None @@ -3437,7 +3440,7 @@ def getAllProperties(self): ''' for p in self.getPropertyList(): - print "{}: {}".format(p,self.getProperty(p)) + print("{}: {}".format(p,self.getProperty(p))) class VimbaCameraThread(threading.Thread): camera = None @@ -3805,8 +3808,8 @@ def _captureFrame(self, timeout = 5000): c.runFeatureCommand('AcquisitionStop') try: f.waitFrameCapture(timeout) - except Exception, e: - print "Exception waiting for frame: %s: %s" % (e, traceback.format_exc()) + except Exception as e: + print("Exception waiting for frame: %s: %s" % (e, traceback.format_exc())) raise(e) imgData = f.getBufferByteData() @@ -3819,7 +3822,7 @@ def _captureFrame(self, timeout = 5000): return Image(rgb, colorSpace=colorSpace, cv2image=imgData) - except Exception, e: - print "Exception acquiring frame: %s: %s" % (e, traceback.format_exc()) + except Exception as e: + print("Exception acquiring frame: %s: %s" % (e, traceback.format_exc())) raise(e) diff --git a/SimpleCV/Color.py b/SimpleCV/Color.py index 6d6a26a2e..59cc5cf5c 100644 --- a/SimpleCV/Color.py +++ b/SimpleCV/Color.py @@ -213,7 +213,7 @@ def getHueFromBGR(self,color_tuple): """ a = color_tuple - print a + print(a) h_float = colorsys.rgb_to_hsv(*tuple(reversed(color_tuple)))[0] return h_float*180 diff --git a/SimpleCV/ColorModel.py b/SimpleCV/ColorModel.py index 1a5fe625f..d6a1599af 100644 --- a/SimpleCV/ColorModel.py +++ b/SimpleCV/ColorModel.py @@ -69,7 +69,7 @@ def _makeCanonical(self, data): #create a unique set of colors. 
I had to look this one up #create a dict of encoded strings - return dict.fromkeys(map(np.ndarray.tostring, uniques), 1) + return dict.fromkeys(list(map(np.ndarray.tostring, uniques)), 1) def reset(self): """ @@ -169,7 +169,7 @@ def threshold(self, img): b = 0 rs = np.right_shift(img.getNumpy(), self.mBits).reshape(-1, 3) #bitshift down and reshape to Nx3 - mapped = np.array(map(self.mData.has_key, map(np.ndarray.tostring, rs))) #map to True/False based on the model + mapped = np.array(list(map(self.mData.has_key, list(map(np.ndarray.tostring, rs))))) #map to True/False based on the model thresh = np.where(mapped, a, b) #replace True and False with fg and bg return Image(thresh.reshape(img.width, img.height)) @@ -198,7 +198,7 @@ def contains(self, c): """ #reverse the color, cast to uint8, right shift, convert to string, check dict - return self.mData.has_key(np.right_shift(np.cast['uint8'](c[::-1]), self.mBits).tostring()) + return np.right_shift(np.cast['uint8'](c[::-1]), self.mBits).tostring() in self.mData def setIsForeground(self): """ diff --git a/SimpleCV/Display.py b/SimpleCV/Display.py index 47a8b1a8d..b66357798 100644 --- a/SimpleCV/Display.py +++ b/SimpleCV/Display.py @@ -1,7 +1,7 @@ from SimpleCV.base import * import SimpleCV.ImageClass -import Queue -from base import * +import queue +from .base import * PYGAME_INITIALIZED = False diff --git a/SimpleCV/DrawingLayer.py b/SimpleCV/DrawingLayer.py index a1cd9e60e..286cbb401 100644 --- a/SimpleCV/DrawingLayer.py +++ b/SimpleCV/DrawingLayer.py @@ -39,8 +39,9 @@ class DrawingLayer: width = 0 height = 0 - def __init__(self, (width, height)): + def __init__(self, xxx_todo_changeme): #pg.init() + (width, height) = xxx_todo_changeme if( not pg.font.get_init() ): pg.font.init() diff --git a/SimpleCV/EXIF.py b/SimpleCV/EXIF.py index 973696605..94fff9152 100755 --- a/SimpleCV/EXIF.py +++ b/SimpleCV/EXIF.py @@ -1179,7 +1179,7 @@ def s2n_motorola(str): # extract multibyte integer in Intel format (big endian) def s2n_intel(str): x = 0 - y = 0L + y = 0 for c in str: x = x | (ord(c) << y) y = y + 8 @@ -1260,7 +1260,7 @@ def s2n(self, offset, length, signed=0): val=s2n_motorola(slice) # Sign extension ? if signed: - msb=1L << (8*length-1) + msb=1 << (8*length-1) if val & msb: val=val-(msb << 1) return val @@ -1409,8 +1409,8 @@ def dump_IFD(self, ifd, ifd_name, dict=EXIF_TAGS, relative=0, stop_tag='UNDEF'): values, field_offset, count * typelen) if self.debug: - print ' debug: %s: %s' % (tag_name, - repr(self.tags[ifd_name + ' ' + tag_name])) + print(' debug: %s: %s' % (tag_name, + repr(self.tags[ifd_name + ' ' + tag_name]))) if tag_name == stop_tag: break @@ -1510,13 +1510,13 @@ def decode_maker_note(self): if 'NIKON' in make: if note.values[0:7] == [78, 105, 107, 111, 110, 0, 1]: if self.debug: - print "Looks like a type 1 Nikon MakerNote." 
+ print("Looks like a type 1 Nikon MakerNote.") self.dump_IFD(note.field_offset+8, 'MakerNote', dict=MAKERNOTE_NIKON_OLDER_TAGS) elif note.values[0:7] == [78, 105, 107, 111, 110, 0, 2]: if self.debug: - print "Looks like a labeled type 2 Nikon MakerNote" - if note.values[12:14] != [0, 42] and note.values[12:14] != [42L, 0L]: + print("Looks like a labeled type 2 Nikon MakerNote") + if note.values[12:14] != [0, 42] and note.values[12:14] != [42, 0]: raise ValueError("Missing marker tag '42' in MakerNote.") # skip the Makernote label and the TIFF header self.dump_IFD(note.field_offset+10+8, 'MakerNote', @@ -1524,7 +1524,7 @@ def decode_maker_note(self): else: # E99x or D1 if self.debug: - print "Looks like an unlabeled type 2 Nikon MakerNote" + print("Looks like an unlabeled type 2 Nikon MakerNote") self.dump_IFD(note.field_offset, 'MakerNote', dict=MAKERNOTE_NIKON_NEWER_TAGS) return @@ -1581,7 +1581,7 @@ def canon_decode_tag(self, value, dict): for i in range(1, len(value)): x=dict.get(i, ('Unknown', )) if self.debug: - print i, x + print(i, x) name=x[0] if len(x) > 1: val=x[1].get(value[i], 'Unknown') @@ -1632,7 +1632,7 @@ def process_file(f, stop_tag='UNDEF', details=True, strict=False, debug=False): # deal with the EXIF info we found if debug: - print {'I': 'Intel', 'M': 'Motorola'}[endian], 'format' + print({'I': 'Intel', 'M': 'Motorola'}[endian], 'format') hdr = EXIF_header(f, endian, offset, fake_exif, strict, debug) ifd_list = hdr.list_IFDs() ctr = 0 @@ -1645,27 +1645,27 @@ def process_file(f, stop_tag='UNDEF', details=True, strict=False, debug=False): else: IFD_name = 'IFD %d' % ctr if debug: - print ' IFD %d (%s) at offset %d:' % (ctr, IFD_name, i) + print(' IFD %d (%s) at offset %d:' % (ctr, IFD_name, i)) hdr.dump_IFD(i, IFD_name, stop_tag=stop_tag) # EXIF IFD exif_off = hdr.tags.get(IFD_name+' ExifOffset') if exif_off: if debug: - print ' EXIF SubIFD at offset %d:' % exif_off.values[0] + print(' EXIF SubIFD at offset %d:' % exif_off.values[0]) hdr.dump_IFD(exif_off.values[0], 'EXIF', stop_tag=stop_tag) # Interoperability IFD contained in EXIF IFD intr_off = hdr.tags.get('EXIF SubIFD InteroperabilityOffset') if intr_off: if debug: - print ' EXIF Interoperability SubSubIFD at offset %d:' \ - % intr_off.values[0] + print(' EXIF Interoperability SubSubIFD at offset %d:' \ + % intr_off.values[0]) hdr.dump_IFD(intr_off.values[0], 'EXIF Interoperability', dict=INTR_TAGS, stop_tag=stop_tag) # GPS IFD gps_off = hdr.tags.get(IFD_name+' GPSInfo') if gps_off: if debug: - print ' GPS SubIFD at offset %d:' % gps_off.values[0] + print(' GPS SubIFD at offset %d:' % gps_off.values[0]) hdr.dump_IFD(gps_off.values[0], 'GPS', dict=GPS_TAGS, stop_tag=stop_tag) ctr += 1 @@ -1706,7 +1706,7 @@ def usage(exit_status): msg += '-t TAG --stop-tag TAG Stop processing when this tag is retrieved.\n' msg += '-s --strict Run in strict mode (stop on errors).\n' msg += '-d --debug Run in debug mode (display extra info).\n' - print msg + print(msg) sys.exit(exit_status) # library test/debug function (dump given files) @@ -1742,25 +1742,25 @@ def usage(exit_status): try: file=open(filename, 'rb') except: - print "'%s' is unreadable\n"%filename + print("'%s' is unreadable\n"%filename) continue - print filename + ':' + print(filename + ':') # get the tags data = process_file(file, stop_tag=stop_tag, details=detailed, strict=strict, debug=debug) if not data: - print 'No EXIF information found' + print('No EXIF information found') continue - x=data.keys() + x=list(data.keys()) x.sort() for i in x: if i in 
('JPEGThumbnail', 'TIFFThumbnail'): continue try: - print ' %s (%s): %s' % \ - (i, FIELD_TYPES[data[i].field_type][2], data[i].printable) + print(' %s (%s): %s' % \ + (i, FIELD_TYPES[data[i].field_type][2], data[i].printable)) except: - print 'error', i, '"', data[i], '"' + print('error', i, '"', data[i], '"') if 'JPEGThumbnail' in data: - print 'File has JPEG thumbnail' - print + print('File has JPEG thumbnail') + print() diff --git a/SimpleCV/Features/BOFFeatureExtractor.py b/SimpleCV/Features/BOFFeatureExtractor.py index 8cdacd2be..cace65714 100644 --- a/SimpleCV/Features/BOFFeatureExtractor.py +++ b/SimpleCV/Features/BOFFeatureExtractor.py @@ -70,19 +70,19 @@ def generate(self,imgdirs,numcodes=128,sz=(11,11),imgs_per_dir=50,img_layout=(8, for i in range(nimgs): infile = files[i] if verbose: - print(path+" "+str(i)+" of "+str(imgs_per_dir)) - print "Opening file: " + infile + print(path+" "+str(i)+" of "+str(imgs_per_dir)) + print("Opening file: " + infile) img = Image(infile) newFeat = self._getPatches(img,sz) if verbose: - print " Got " + str(len(newFeat)) + " features." + print(" Got " + str(len(newFeat)) + " features.") rawFeatures = np.vstack((rawFeatures,newFeat)) del img rawFeatures = rawFeatures[1:,:] # pop the fake value we put on the top if verbose: - print "==================================" - print "Got " + str(len(rawFeatures)) + " features " - print "Doing K-Means .... this will take a long time" + print("==================================") + print("Got " + str(len(rawFeatures)) + " features ") + print("Doing K-Means .... this will take a long time") self.mCodebook = self._makeCodebook(rawFeatures,self.mNumCodes) self.mCodebookImg = self._codebook2Img(self.mCodebook,self.mPatchSize,self.mNumCodes,self.mLayout,self.mPadding) self.mCodebookImg.save('codebook.png') diff --git a/SimpleCV/Features/Blob.py b/SimpleCV/Features/Blob.py index 4c1666d69..da83ccfa9 100644 --- a/SimpleCV/Features/Blob.py +++ b/SimpleCV/Features/Blob.py @@ -97,7 +97,7 @@ def __init__(self): def __getstate__(self): skip = self.pickle_skip_properties newdict = {} - for k,v in self.__dict__.items(): + for k,v in list(self.__dict__.items()): if k in skip: continue else: @@ -510,21 +510,15 @@ def rotate(self,angle): self.mMask = self.mMask.rotate(angle,mode,point) self.mHullMask = self.mHullMask.rotate(angle,mode,point) - self.mContour = map(lambda x: - (x[0]*np.cos(theta)-x[1]*np.sin(theta), - x[0]*np.sin(theta)+x[1]*np.cos(theta)), - self.mContour) - self.mConvexHull = map(lambda x: - (x[0]*np.cos(theta)-x[1]*np.sin(theta), - x[0]*np.sin(theta)+x[1]*np.cos(theta)), - self.mConvexHull) + self.mContour = [(x[0]*np.cos(theta)-x[1]*np.sin(theta), + x[0]*np.sin(theta)+x[1]*np.cos(theta)) for x in self.mContour] + self.mConvexHull = [(x[0]*np.cos(theta)-x[1]*np.sin(theta), + x[0]*np.sin(theta)+x[1]*np.cos(theta)) for x in self.mConvexHull] if( self.mHoleContour is not None): for h in self.mHoleContour: - h = map(lambda x: - (x[0]*np.cos(theta)-x[1]*np.sin(theta), - x[0]*np.sin(theta)+x[1]*np.cos(theta)), - h) + h = [(x[0]*np.cos(theta)-x[1]*np.sin(theta), + x[0]*np.sin(theta)+x[1]*np.cos(theta)) for x in h] def drawAppx(self, color = Color.HOTPINK,width=-1,alpha=-1,layer=None): diff --git a/SimpleCV/Features/BlobMaker.py b/SimpleCV/Features/BlobMaker.py index 32278c26d..b9bb176df 100644 --- a/SimpleCV/Features/BlobMaker.py +++ b/SimpleCV/Features/BlobMaker.py @@ -97,7 +97,7 @@ def extractFromBinary(self,binaryImg,colorImg, minsize = 5, maxsize = -1,appx_le # note to self # 
http://code.activestate.com/recipes/474088-tail-call-optimization-decorator/ retVal = self._extractFromBinary(seq,False,colorImg,minsize,maxsize,appx_level) - except RuntimeError,e: + except RuntimeError as e: logger.warning("You exceeded the recursion limit. This means you probably have too many blobs in your image. We suggest you do some morphological operations (erode/dilate) to reduce the number of blobs in your image. This function was designed to max out at about 5000 blobs per image.") except e: logger.warning("SimpleCV Find Blobs Failed - This could be an OpenCV python binding issue") diff --git a/SimpleCV/Features/Detection.py b/SimpleCV/Features/Detection.py index e9963075f..47111f23a 100644 --- a/SimpleCV/Features/Detection.py +++ b/SimpleCV/Features/Detection.py @@ -1016,7 +1016,7 @@ def __init__(self, i, at_x, at_y, r): points = [(at_x-r,at_y-r),(at_x+r,at_y-r),(at_x+r,at_y+r),(at_x-r,at_y+r)] super(Circle, self).__init__(i, at_x, at_y, points) segments = 18 - rng = range(1,segments+1) + rng = list(range(1,segments+1)) self.mContour = [] for theta in rng: rp = 2.0*math.pi*float(theta)/float(segments) @@ -1256,7 +1256,7 @@ def __init__(self, i, keypoint, descriptor=None, flavor="SURF" ): super(KeyPoint, self).__init__(i, x, y, points) segments = 18 - rng = range(1,segments+1) + rng = list(range(1,segments+1)) self.points = [] for theta in rng: rp = 2.0*math.pi*float(theta)/float(segments) @@ -2278,7 +2278,7 @@ def CoordTransformPts(self,pts,intype="ROI",output="SRC"): x = self._transform(x,self.image.width,self.w,self.xtl,intype,output) y = self._transform(y,self.image.height,self.h,self.ytl,intype,output) - return zip(x,y) + return list(zip(x,y)) def _transform(self,x,imgsz,roisz,offset,intype,output): @@ -2362,7 +2362,7 @@ def splitX(self,x,unitVals=False,srcVals=False): x.insert(0,self.xtl) x.append(self.xtl+self.w) - for i in xrange(0,len(x)-1): + for i in range(0,len(x)-1): xstart = x[i] xstop = x[i+1] w = xstop-xstart @@ -2416,7 +2416,7 @@ def splitY(self,y,unitVals=False,srcVals=False): y.insert(0,self.ytl) y.append(self.ytl+self.h) - for i in xrange(0,len(y)-1): + for i in range(0,len(y)-1): ystart = y[i] ystop = y[i+1] h = ystop-ystart @@ -2616,7 +2616,7 @@ def _standardize(self,x,y=None,w=None,h=None): h = theFeature.height() # [x,y,w,h] (x,y,w,h) - elif(isinstance(x, (tuple,list)) and len(x) == 4 and isinstance(x[0],(int, long, float)) + elif(isinstance(x, (tuple,list)) and len(x) == 4 and isinstance(x[0],(int, float)) and y == None and w == None and h == None): x,y,w,h = x # x of the form [(x,y),(x1,y1),(x2,y2),(x3,y3)] @@ -2644,7 +2644,7 @@ def _standardize(self,x,y=None,w=None,h=None): elif(isinstance(x, (tuple,list)) and isinstance(y, (tuple,list)) and len(x) > 4 and len(y) > 4 ): - if(isinstance(x[0],(int, long, float)) and isinstance(y[0],(int, long, float))): + if(isinstance(x[0],(int, float)) and isinstance(y[0],(int, float))): xmax = np.max(x) ymax = np.max(y) xmin = np.min(x) @@ -2660,7 +2660,7 @@ def _standardize(self,x,y=None,w=None,h=None): # x of the form [(x,y),(x,y),(x,y),(x,y),(x,y),(x,y)] elif(isinstance(x, (list,tuple)) and len(x) > 4 and len(x[0]) == 2 and y == None and w == None and h == None): - if(isinstance(x[0][0],(int, long, float))): + if(isinstance(x[0][0],(int, float))): xs = [pt[0] for pt in x] ys = [pt[1] for pt in x] xmax = np.max(xs) diff --git a/SimpleCV/Features/FaceRecognizer.py b/SimpleCV/Features/FaceRecognizer.py index 6c4606c26..85e298ccb 100644 --- a/SimpleCV/Features/FaceRecognizer.py +++ b/SimpleCV/Features/FaceRecognizer.py 
@@ -27,7 +27,7 @@ def __init__(self): try: import cv2 self.model = cv2.createFisherFaceRecognizer() - except ImportError, AttributeError: + except (ImportError, AttributeError): self.supported = False warnings.warn("Fisher Recognizer is supported by OpenCV >= 2.4.4") diff --git a/SimpleCV/Features/FeatureExtractorBase.py b/SimpleCV/Features/FeatureExtractorBase.py index 079bc105f..2bd9ec249 100644 --- a/SimpleCV/Features/FeatureExtractorBase.py +++ b/SimpleCV/Features/FeatureExtractorBase.py @@ -3,7 +3,7 @@ from SimpleCV.Color import Color from SimpleCV.ImageClass import Image -class FeatureExtractorBase(object): +class FeatureExtractorBase(object, metaclass=abc.ABCMeta): """ The featureExtractorBase class is a way of abstracting the process of collecting descriptive features within an image. A feature is some description of the image like the mean color, or a histogram of edge lengths. This feature vectors can then be composed together and used within a machine learning algorithm to descriminate between different classes of objects. """ - - __metaclass__ = abc.ABCMeta def load(cls, fname): """ load segmentation settings to file. diff --git a/SimpleCV/Features/FeatureUtils.py b/SimpleCV/Features/FeatureUtils.py index 9324f8f94..74cb4e37f 100644 --- a/SimpleCV/Features/FeatureUtils.py +++ b/SimpleCV/Features/FeatureUtils.py @@ -24,7 +24,7 @@ def GetParallelSets(line_fs,parallel_thresh=2): result = result.reshape(sz,sz) # find the lines that are less than our thresh l1,l2=np.where(result<parallel_thresh) - idxs=zip(l1,l2) + idxs=list(zip(l1,l2)) diff --git a/SimpleCV/ImageClass.py b/SimpleCV/ImageClass.py --- a/SimpleCV/ImageClass.py +++ b/SimpleCV/ImageClass.py @@ -1211,7 +1211,7 @@ def live(self): i.dl().text(txt, (10,(i.height / 2) + 10), color=col) - print "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY)) + print("coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY))) if elapsed_time > 0 and elapsed_time < 5: @@ -1223,7 +1223,7 @@ def live(self): i.save(d) if d.mouseRight: - print "Closing Window" + print("Closing Window") d.done = True @@ -2249,7 +2249,7 @@ def save(self, filehandle_or_filename="", mode="", verbose=False, temp=False, pa self.save(self._tempFiles[-1][0]) return self._tempFiles[-1][0] else : - print "Path does not exist!" + print("Path does not exist!") else : if (filename) : @@ -2269,7 +2269,7 @@ def save(self, filehandle_or_filename="", mode="", verbose=False, temp=False, pa if self._colorSpace != ColorSpace.BGR and self._colorSpace != ColorSpace.GRAY: saveimg = saveimg.toBGR() - if not isinstance(filehandle_or_filename, basestring): + if not isinstance(filehandle_or_filename, str): fh = filehandle_or_filename @@ -2298,7 +2298,7 @@ def save(self, filehandle_or_filename="", mode="", verbose=False, temp=False, pa try: from IPython.core.display import Image as IPImage except ImportError: - print "You need IPython Notebooks to use this display mode" + print("You need IPython Notebooks to use this display mode") return from IPython.core import display as Idisplay @@ -2323,13 +2323,13 @@ def save(self, filehandle_or_filename="", mode="", verbose=False, temp=False, pa self.filehandle = fh #set the filename for future save operations self.filename = "" return 1 - except Exception, e: + except Exception as e: if mode.lower() != 'webp': raise e if verbose: - print self.filename + print(self.filename) if not mode.lower() == 'webp': return 1 @@ -2388,7 +2388,7 @@ def save(self, filehandle_or_filename="", mode="", verbose=False, temp=False, pa return 0 if verbose: - print self.filename + print(self.filename) if temp: return filename @@ -2483,7 +2483,7 @@ def upload(self,dest,api_key=None,api_secret=None, verbose = True): try: import pycurl except ImportError: - print "PycURL Library not installed." 
+ print("PycURL Library not installed.") return response = StringIO() @@ -2499,13 +2499,13 @@ def upload(self,dest,api_key=None,api_secret=None, verbose = True): match = re.search(r'(\w+).*?(\w+).*?(http://[\w.]+/[\w.]+)', response.getvalue() , re.DOTALL) if match: if(verbose): - print "Imgur page: http://imgur.com/" + match.group(1) - print "Original image: " + match.group(3) - print "Delete page: http://imgur.com/delete/" + match.group(2) + print("Imgur page: http://imgur.com/" + match.group(1)) + print("Original image: " + match.group(3)) + print("Delete page: http://imgur.com/delete/" + match.group(2)) return [match.group(1),match.group(3),match.group(2)] else : if(verbose): - print "The API Key given is not valid" + print("The API Key given is not valid") return None elif (dest=='flickr'): @@ -2514,7 +2514,7 @@ def upload(self,dest,api_key=None,api_secret=None, verbose = True): try : import flickrapi except ImportError: - print "Flickr API is not installed. Please install it from http://pypi.python.org/pypi/flickrapi" + print("Flickr API is not installed. Please install it from http://pypi.python.org/pypi/flickrapi") return False try : if (not(api_key==None and api_secret==None)): @@ -2527,16 +2527,16 @@ def upload(self,dest,api_key=None,api_secret=None, verbose = True): self.flickr = flickrapi.FlickrAPI(temp_token[0],temp_token[1],cache=True) self.flickr.authenticate_console('write') except NameError : - print "API key and Secret key are not set." + print("API key and Secret key are not set.") return except : - print "The API Key and Secret Key are not valid" + print("The API Key and Secret Key are not valid") return False if (self.filename) : try : self.flickr.upload(self.filename,self.filehandle) except : - print "Uploading Failed !" + print("Uploading Failed !") return False else : tf = self.save(temp=True) @@ -2550,7 +2550,7 @@ def upload(self,dest,api_key=None,api_secret=None, verbose = True): from dropbox import client, rest, session import webbrowser except ImportError: - print "Dropbox API is not installed. For more info refer : https://www.dropbox.com/developers/start/setup#python " + print("Dropbox API is not installed. For more info refer : https://www.dropbox.com/developers/start/setup#python ") return False try : if ( 'dropbox_token' not in globals() and api_key!=None and api_secret!=None ): @@ -2558,8 +2558,8 @@ def upload(self,dest,api_key=None,api_secret=None, verbose = True): request_token = sess.obtain_request_token() url = sess.build_authorize_url(request_token) webbrowser.open(url) - print "Please visit this website and press the 'Allow' button, then hit 'Enter' here." - raw_input() + print("Please visit this website and press the 'Allow' button, then hit 'Enter' here.") + input() access_token = sess.obtain_access_token(request_token) dropbox_token = client.DropboxClient(sess) else : @@ -2568,14 +2568,14 @@ def upload(self,dest,api_key=None,api_secret=None, verbose = True): else : return None except : - print "The API Key and Secret Key are not valid" + print("The API Key and Secret Key are not valid") return False if (self.filename) : try : f = open(self.filename) dropbox_token.put_file('/SimpleCVImages/'+os.path.split(self.filename)[-1], f) except : - print "Uploading Failed !" 
+ print("Uploading Failed !") return False else : tf = self.save(temp=True) @@ -3094,7 +3094,7 @@ def flipHorizontal(self): """ newimg = self.getEmpty() - cv.Flip(self.getBitmap(), newimg, 1) + cv2.Flip(self.getBitmap(), newimg, 1) return Image(newimg, colorSpace=self._colorSpace) def flipVertical(self): @@ -3125,7 +3125,7 @@ def flipVertical(self): """ newimg = self.getEmpty() - cv.Flip(self.getBitmap(), newimg, 0) + cv2.Flip(self.getBitmap(), newimg, 0) return Image(newimg, colorSpace=self._colorSpace) @@ -3170,10 +3170,10 @@ def stretch(self, thresh_low = 0, thresh_high = 255): """ try: newimg = self.getEmpty(1) - cv.Threshold(self._getGrayscaleBitmap(), newimg, thresh_low, 255, cv.CV_THRESH_TOZERO) - cv.Not(newimg, newimg) - cv.Threshold(newimg, newimg, 255 - thresh_high, 255, cv.CV_THRESH_TOZERO) - cv.Not(newimg, newimg) + cv2.Threshold(self._getGrayscaleBitmap(), newimg, thresh_low, 255, cv2.CV_THRESH_TOZERO) + cv2.Not(newimg, newimg) + cv2.Threshold(newimg, newimg, 255 - thresh_high, 255, cv2.CV_THRESH_TOZERO) + cv2.Not(newimg, newimg) return Image(newimg) except: return None @@ -3269,16 +3269,16 @@ def binarize(self, thresh = -1, maxv = 255, blocksize = 0, p = 5): r = self.getEmpty(1) g = self.getEmpty(1) b = self.getEmpty(1) - cv.Split(self.getBitmap(), b, g, r, None) + cv2.Split(self.getBitmap(), b, g, r, None) - cv.Threshold(r, r, thresh[0], maxv, cv.CV_THRESH_BINARY_INV) - cv.Threshold(g, g, thresh[1], maxv, cv.CV_THRESH_BINARY_INV) - cv.Threshold(b, b, thresh[2], maxv, cv.CV_THRESH_BINARY_INV) + cv2.Threshold(r, r, thresh[0], maxv, cv2.CV_THRESH_BINARY_INV) + cv2.Threshold(g, g, thresh[1], maxv, cv2.CV_THRESH_BINARY_INV) + cv2.Threshold(b, b, thresh[2], maxv, cv2.CV_THRESH_BINARY_INV) - cv.Add(r, g, r) - cv.Add(r, b, r) + cv2.Add(r, g, r) + cv2.Add(r, b, r) return Image(r, colorSpace=self._colorSpace) @@ -3287,15 +3287,15 @@ def binarize(self, thresh = -1, maxv = 255, blocksize = 0, p = 5): elif thresh == -1: newbitmap = self.getEmpty(1) if blocksize: - cv.AdaptiveThreshold(self._getGrayscaleBitmap(), newbitmap, maxv, - cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C, cv.CV_THRESH_BINARY_INV, blocksize, p) + cv2.AdaptiveThreshold(self._getGrayscaleBitmap(), newbitmap, maxv, + cv2.CV_ADAPTIVE_THRESH_GAUSSIAN_C, cv2.CV_THRESH_BINARY_INV, blocksize, p) else: - cv.Threshold(self._getGrayscaleBitmap(), newbitmap, thresh, float(maxv), cv.CV_THRESH_BINARY_INV + cv.CV_THRESH_OTSU) + cv2.Threshold(self._getGrayscaleBitmap(), newbitmap, thresh, float(maxv), cv2.CV_THRESH_BINARY_INV + cv2.CV_THRESH_OTSU) return Image(newbitmap, colorSpace=self._colorSpace) else: newbitmap = self.getEmpty(1) #desaturate the image, and apply the new threshold - cv.Threshold(self._getGrayscaleBitmap(), newbitmap, thresh, float(maxv), cv.CV_THRESH_BINARY_INV) + cv2.Threshold(self._getGrayscaleBitmap(), newbitmap, thresh, float(maxv), cv2.CV_THRESH_BINARY_INV) return Image(newbitmap, colorSpace=self._colorSpace) @@ -3328,34 +3328,34 @@ def meanColor(self, colorSpace = None): """ if colorSpace == None: - return tuple(cv.Avg(self.getBitmap())[0:3]) - + return tuple(cv2.Avg(self.getBitmap())[0:3]) + elif colorSpace == 'BGR': - return tuple(cv.Avg(self.toBGR().getBitmap())[0:3]) + return tuple(cv2.Avg(self.toBGR().getBitmap())[0:3]) elif colorSpace == 'RGB': - return tuple(cv.Avg(self.toRGB().getBitmap())[0:3]) + return tuple(cv2.Avg(self.toRGB().getBitmap())[0:3]) elif colorSpace == 'HSV': - return tuple(cv.Avg(self.toHSV().getBitmap())[0:3]) + return tuple(cv2.Avg(self.toHSV().getBitmap())[0:3]) elif colorSpace == 'XYZ': - 
return tuple(cv.Avg(self.toXYZ().getBitmap())[0:3]) + return tuple(cv2.Avg(self.toXYZ().getBitmap())[0:3]) elif colorSpace == 'Gray': - return (cv.Avg(self._getGrayscaleBitmap())[0]) + return (cv2.Avg(self._getGrayscaleBitmap())[0]) elif colorSpace == 'YCrCb': - return tuple(cv.Avg(self.toYCrCb().getBitmap())[0:3]) + return tuple(cv2.Avg(self.toYCrCb().getBitmap())[0:3]) elif colorSpace == 'HLS': - return tuple(cv.Avg(self.toHLS().getBitmap())[0:3]) + return tuple(cv2.Avg(self.toHLS().getBitmap())[0:3]) else: logger.warning("Image.meanColor: There is no supported conversion to the specified colorspace. Use one of these as argument: 'BGR' , 'RGB' , 'HSV' , 'Gray' , 'XYZ' , 'YCrCb' , 'HLS' .") return None - + def findCorners(self, maxnum = 50, minquality = 0.04, mindistance = 1.0): """ @@ -3404,11 +3404,11 @@ def findCorners(self, maxnum = 50, minquality = 0.04, mindistance = 1.0): """ #initialize buffer frames - eig_image = cv.CreateImage(cv.GetSize(self.getBitmap()), cv.IPL_DEPTH_32F, 1) - temp_image = cv.CreateImage(cv.GetSize(self.getBitmap()), cv.IPL_DEPTH_32F, 1) + eig_image = cv2.CreateImage(cv2.GetSize(self.getBitmap()), cv2.IPL_DEPTH_32F, 1) + temp_image = cv2.CreateImage(cv2.GetSize(self.getBitmap()), cv2.IPL_DEPTH_32F, 1) - corner_coordinates = cv.GoodFeaturesToTrack(self._getGrayscaleBitmap(), eig_image, temp_image, maxnum, minquality, mindistance, None) + corner_coordinates = cv2.GoodFeaturesToTrack(self._getGrayscaleBitmap(), eig_image, temp_image, maxnum, minquality, mindistance, None) corner_features = [] @@ -3582,12 +3582,12 @@ def getSkintoneMask(self, dilate_iter=0): Y_img = YCrCb.getEmpty(1) Cr_img = YCrCb.getEmpty(1) Cb_img = YCrCb.getEmpty(1) - cv.Split(YCrCb.getBitmap(),Y_img,Cr_img,Cb_img,None) - cv.LUT(Y_img,Y_img,cv.fromarray(Y)) - cv.LUT(Cr_img,Cr_img,cv.fromarray(Cr)) - cv.LUT(Cb_img,Cb_img,cv.fromarray(Cb)) + cv2.Split(YCrCb.getBitmap(),Y_img,Cr_img,Cb_img,None) + cv2.LUT(Y_img,Y_img,cv2.fromarray(Y)) + cv2.LUT(Cr_img,Cr_img,cv2.fromarray(Cr)) + cv2.LUT(Cb_img,Cb_img,cv2.fromarray(Cb)) temp = self.getEmpty() - cv.Merge(Y_img,Cr_img,Cb_img,None,temp) + cv2.Merge(Y_img,Cr_img,Cb_img,None,temp) mask=Image(temp,colorSpace = ColorSpace.YCrCb) mask = mask.binarize((128,128,128)) mask = mask.toRGB().binarize() @@ -3665,12 +3665,12 @@ def findHaarFeatures(self, cascade, scale_factor=1.2, min_neighbors=2, use_canny - http://dismagazine.com/dystopia/evolved-lifestyles/8115/anti-surveillance-how-to-hide-from-machines/ """ - storage = cv.CreateMemStorage(0) + storage = cv2.CreateMemStorage(0) #lovely. 
This segfaults if not present from SimpleCV.Features.HaarCascade import HaarCascade - if isinstance(cascade, basestring): + if isinstance(cascade, str): cascade = HaarCascade(cascade) if not cascade.getCascade(): return None @@ -3687,7 +3687,7 @@ def findHaarFeatures(self, cascade, scale_factor=1.2, min_neighbors=2, use_canny cv2flag = True except ImportError: - objects = cv.HaarDetectObjects(self._getEqualizedGrayscaleBitmap(), + objects = cv2.HaarDetectObjects(self._getEqualizedGrayscaleBitmap(), cascade.getCascade(), storage, scale_factor, min_neighbors, use_canny, min_size) cv2flag = False @@ -3798,7 +3798,7 @@ def size(self): """ if self.width and self.height: - return cv.GetSize(self.getBitmap()) + return cv2.GetSize(self.getBitmap()) else: return (0, 0) @@ -3892,7 +3892,7 @@ def splitChannels(self, grayscale = True): r = self.getEmpty(1) g = self.getEmpty(1) b = self.getEmpty(1) - cv.Split(self.getBitmap(), b, g, r, None) + cv2.Split(self.getBitmap(), b, g, r, None) red = self.getEmpty() @@ -3901,13 +3901,13 @@ if (grayscale): - cv.Merge(r, r, r, None, red) - cv.Merge(g, g, g, None, green) - cv.Merge(b, b, b, None, blue) + cv2.Merge(r, r, r, None, red) + cv2.Merge(g, g, g, None, green) + cv2.Merge(b, b, b, None, blue) else: - cv.Merge(None, None, r, None, red) - cv.Merge(None, g, None, None, green) - cv.Merge(b, None, None, None, blue) + cv2.Merge(None, None, r, None, red) + cv2.Merge(None, g, None, None, green) + cv2.Merge(b, None, None, None, blue) return (Image(red), Image(green), Image(blue)) @@ -3951,28 +3951,28 @@ def mergeChannels(self,r=None,g=None,b=None): return None if( r is None ): r = self.getEmpty(1) - cv.Zero(r); + cv2.Zero(r); else: rt = r.getEmpty(1) - cv.Split(r.getBitmap(),rt,rt,rt,None) + cv2.Split(r.getBitmap(),rt,rt,rt,None) r = rt if( g is None ): g = self.getEmpty(1) - cv.Zero(g); + cv2.Zero(g); else: gt = g.getEmpty(1) - cv.Split(g.getBitmap(),gt,gt,gt,None) + cv2.Split(g.getBitmap(),gt,gt,gt,None) g = gt if( b is None ): b = self.getEmpty(1) - cv.Zero(b); + cv2.Zero(b); else: bt = b.getEmpty(1) - cv.Split(b.getBitmap(),bt,bt,bt,None) + cv2.Split(b.getBitmap(),bt,bt,bt,None) b = bt retVal = self.getEmpty() - cv.Merge(b,g,r,None,retVal) + cv2.Merge(b,g,r,None,retVal) return Image(retVal) def applyHLSCurve(self, hCurve, lCurve, sCurve): @@ -4011,19 +4011,19 @@ def applyHLSCurve(self, hCurve, lCurve, sCurve): #TODO CHECK CURVE SIZE #TODO CHECK COLORSPACE #TODO CHECK CURVE SIZE - temp = cv.CreateImage(self.size(), 8, 3) + temp = cv2.CreateImage(self.size(), 8, 3) #Move to HLS space - cv.CvtColor(self._bitmap, temp, cv.CV_RGB2HLS) - tempMat = cv.GetMat(temp) #convert the bitmap to a matrix + cv2.CvtColor(self._bitmap, temp, cv2.CV_RGB2HLS) + tempMat = cv2.GetMat(temp) #convert the bitmap to a matrix #now apply the color curve correction tempMat = np.array(self.getMatrix()).copy() tempMat[:, :, 0] = np.take(hCurve.mCurve, tempMat[:, :, 0]) tempMat[:, :, 1] = np.take(sCurve.mCurve, tempMat[:, :, 1]) tempMat[:, :, 2] = np.take(lCurve.mCurve, tempMat[:, :, 2]) #Now we jimmy the np array into a cvMat - image = cv.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv.IPL_DEPTH_8U, 3) - cv.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1]) - cv.CvtColor(image, image, cv.CV_HLS2RGB) + image = cv2.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv2.IPL_DEPTH_8U, 3) + cv2.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1]) + cv2.CvtColor(image, image, 
cv2.CV_HLS2RGB) return Image(image, colorSpace=self._colorSpace) @@ -4070,8 +4070,8 @@ def applyRGBCurve(self, rCurve, gCurve, bCurve): tempMat[:, :, 1] = np.take(gCurve.mCurve, tempMat[:, :, 1]) tempMat[:, :, 2] = np.take(rCurve.mCurve, tempMat[:, :, 2]) #Now we jimmy the np array into a cvMat - image = cv.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv.IPL_DEPTH_8U, 3) - cv.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1]) + image = cv2.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv2.IPL_DEPTH_8U, 3) + cv2.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1]) return Image(image, colorSpace=self._colorSpace) @@ -4183,7 +4183,7 @@ def hueDistance(self, color = Color.BLACK, minsaturation = 20, minvalue = 20, ma :py:meth:`findBlobsFromMask` """ - if isinstance(color, (float,int,long,complex)): + if isinstance(color, (float,int,complex)): color_hue = color else: color_hue = Color.hsv(color)[0] @@ -4254,8 +4254,8 @@ def erode(self, iterations=1, kernelsize=3): """ retVal = self.getEmpty() - kern = cv.CreateStructuringElementEx(kernelsize,kernelsize, 1, 1, cv.CV_SHAPE_RECT) - cv.Erode(self.getBitmap(), retVal, kern, iterations) + kern = cv2.CreateStructuringElementEx(kernelsize,kernelsize, 1, 1, cv2.CV_SHAPE_RECT) + cv2.Erode(self.getBitmap(), retVal, kern, iterations) return Image(retVal, colorSpace=self._colorSpace) @@ -4302,8 +4302,8 @@ def dilate(self, iterations=1): """ retVal = self.getEmpty() - kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT) - cv.Dilate(self.getBitmap(), retVal, kern, iterations) + kern = cv2.CreateStructuringElementEx(3, 3, 1, 1, cv2.CV_SHAPE_RECT) + cv2.Dilate(self.getBitmap(), retVal, kern, iterations) return Image(retVal, colorSpace=self._colorSpace) @@ -4346,11 +4346,11 @@ def morphOpen(self): """ retVal = self.getEmpty() temp = self.getEmpty() - kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT) + kern = cv2.CreateStructuringElementEx(3, 3, 1, 1, cv2.CV_SHAPE_RECT) try: - cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_OPEN, 1) + cv2.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv2.MORPH_OPEN, 1) except: - cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_OPEN, 1) + cv2.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv2.CV_MOP_OPEN, 1) #OPENCV 2.2 vs 2.3 compatability return( Image(retVal) ) @@ -4396,11 +4396,11 @@ def morphClose(self): retVal = self.getEmpty() temp = self.getEmpty() - kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT) + kern = cv2.CreateStructuringElementEx(3, 3, 1, 1, cv2.CV_SHAPE_RECT) try: - cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_CLOSE, 1) + cv2.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv2.MORPH_CLOSE, 1) except: - cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_CLOSE, 1) + cv2.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv2.CV_MOP_CLOSE, 1) #OPENCV 2.2 vs 2.3 compatability return Image(retVal, colorSpace=self._colorSpace) @@ -4447,11 +4447,11 @@ def morphGradient(self): retVal = self.getEmpty() temp = self.getEmpty() - kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT) + kern = cv2.CreateStructuringElementEx(3, 3, 1, 1, cv2.CV_SHAPE_RECT) try: - cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_GRADIENT, 1) + cv2.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv2.MORPH_GRADIENT, 1) except: - cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_GRADIENT, 1) + 
cv2.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv2.CV_MOP_GRADIENT, 1) return Image(retVal, colorSpace=self._colorSpace ) @@ -4484,7 +4484,7 @@ def histogram(self, numbins = 50): gray = self._getGrayscaleBitmap() - (hist, bin_edges) = np.histogram(np.asarray(cv.GetMat(gray)), bins=numbins) + (hist, bin_edges) = np.histogram(np.asarray(cv2.GetMat(gray)), bins=numbins) return hist.tolist() def hueHistogram(self, bins = 179, dynamicRange=True): @@ -4568,15 +4568,15 @@ def huePeaks(self, bins = 179): length = len(y_axis) if x_axis is None: - x_axis = range(length) + x_axis = list(range(length)) #perform some checks if length != len(x_axis): - raise ValueError, "Input vectors y_axis and x_axis must have same length" + raise ValueError("Input vectors y_axis and x_axis must have same length") if lookahead < 1: - raise ValueError, "Lookahead must be above '1' in value" + raise ValueError("Lookahead must be above '1' in value") if not (np.isscalar(delta) and delta >= 0): - raise ValueError, "delta must be a positive number" + raise ValueError("delta must be a positive number") #needs to be a numpy array y_axis = np.asarray(y_axis) @@ -4639,10 +4639,10 @@ def huePeaks(self, bins = 179): def __getitem__(self, coord): ret = self.getMatrix()[tuple(reversed(coord))] - if (type(ret) == cv.cvmat): - (width, height) = cv.GetSize(ret) - newmat = cv.CreateMat(height, width, ret.type) - cv.Copy(ret, newmat) #this seems to be a bug in opencv + if (type(ret) == cv2.cvmat): + (width, height) = cv2.GetSize(ret) + newmat = cv2.CreateMat(height, width, ret.type) + cv2.Copy(ret, newmat) #this seems to be a bug in opencv #if you don't copy the matrix slice, when you convert to bmp you get #a slice-sized hunk starting at 0, 0 return Image(newmat) @@ -4657,7 +4657,7 @@ def __setitem__(self, coord, value): value = tuple(reversed(value)) #RGB -> BGR if(isinstance(coord[0],slice)): - cv.Set(self.getMatrix()[tuple(reversed(coord))], value) + cv2.Set(self.getMatrix()[tuple(reversed(coord))], value) self._clearBuffers("_matrix") else: self.getMatrix()[tuple(reversed(coord))] = value @@ -4668,64 +4668,64 @@ def __setitem__(self, coord, value): def __sub__(self, other): newbitmap = self.getEmpty() if is_number(other): - cv.SubS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap) + cv2.SubS(self.getBitmap(), cv2.Scalar(other,other,other), newbitmap) else: - cv.Sub(self.getBitmap(), other.getBitmap(), newbitmap) + cv2.Sub(self.getBitmap(), other.getBitmap(), newbitmap) return Image(newbitmap, colorSpace=self._colorSpace) def __add__(self, other): newbitmap = self.getEmpty() if is_number(other): - cv.AddS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap) + cv2.AddS(self.getBitmap(), cv2.Scalar(other,other,other), newbitmap) else: - cv.Add(self.getBitmap(), other.getBitmap(), newbitmap) + cv2.Add(self.getBitmap(), other.getBitmap(), newbitmap) return Image(newbitmap, colorSpace=self._colorSpace) def __and__(self, other): newbitmap = self.getEmpty() if is_number(other): - cv.AndS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap) + cv2.AndS(self.getBitmap(), cv2.Scalar(other,other,other), newbitmap) else: - cv.And(self.getBitmap(), other.getBitmap(), newbitmap) + cv2.And(self.getBitmap(), other.getBitmap(), newbitmap) return Image(newbitmap, colorSpace=self._colorSpace) def __or__(self, other): newbitmap = self.getEmpty() if is_number(other): - cv.OrS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap) + cv2.OrS(self.getBitmap(), cv2.Scalar(other,other,other), newbitmap) else: - 
cv.Or(self.getBitmap(), other.getBitmap(), newbitmap) + cv2.Or(self.getBitmap(), other.getBitmap(), newbitmap) return Image(newbitmap, colorSpace=self._colorSpace) def __div__(self, other): newbitmap = self.getEmpty() if (not is_number(other)): - cv.Div(self.getBitmap(), other.getBitmap(), newbitmap) + cv2.Div(self.getBitmap(), other.getBitmap(), newbitmap) else: - cv.ConvertScale(self.getBitmap(), newbitmap, 1.0/float(other)) + cv2.ConvertScale(self.getBitmap(), newbitmap, 1.0/float(other)) return Image(newbitmap, colorSpace=self._colorSpace) def __mul__(self, other): newbitmap = self.getEmpty() if (not is_number(other)): - cv.Mul(self.getBitmap(), other.getBitmap(), newbitmap) + cv2.Mul(self.getBitmap(), other.getBitmap(), newbitmap) else: - cv.ConvertScale(self.getBitmap(), newbitmap, float(other)) + cv2.ConvertScale(self.getBitmap(), newbitmap, float(other)) return Image(newbitmap, colorSpace=self._colorSpace) def __pow__(self, other): newbitmap = self.getEmpty() - cv.Pow(self.getBitmap(), newbitmap, other) + cv2.Pow(self.getBitmap(), newbitmap, other) return Image(newbitmap, colorSpace=self._colorSpace) def __neg__(self): newbitmap = self.getEmpty() - cv.Not(self.getBitmap(), newbitmap) + cv2.Not(self.getBitmap(), newbitmap) return Image(newbitmap, colorSpace=self._colorSpace) def __invert__(self): @@ -4750,12 +4750,12 @@ def max(self, other): newbitmap = self.getEmpty() if is_number(other): - cv.MaxS(self.getBitmap(), other, newbitmap) + cv2.MaxS(self.getBitmap(), other, newbitmap) else: if self.size() != other.size(): warnings.warn("Both images should have same sizes. Returning None.") return None - cv.Max(self.getBitmap(), other.getBitmap(), newbitmap) + cv2.Max(self.getBitmap(), other.getBitmap(), newbitmap) return Image(newbitmap, colorSpace=self._colorSpace) @@ -4777,17 +4777,17 @@ def min(self, other): newbitmap = self.getEmpty() if is_number(other): - cv.MinS(self.getBitmap(), other, newbitmap) + cv2.MinS(self.getBitmap(), other, newbitmap) else: if self.size() != other.size(): warnings.warn("Both images should have same sizes. 
Returning None.") return None - cv.Min(self.getBitmap(), other.getBitmap(), newbitmap) + cv2.Min(self.getBitmap(), other.getBitmap(), newbitmap) return Image(newbitmap, colorSpace=self._colorSpace) def _clearBuffers(self, clearexcept = "_bitmap"): - for k, v in self._initialized_buffers.items(): + for k, v in list(self._initialized_buffers.items()): if k == clearexcept: continue self.__dict__[k] = v @@ -4972,7 +4972,7 @@ def findLines(self, threshold=80, minlinelength=30, maxlinegap=10, cannyth1=50, linesFS = FeatureSet() if useStandard: - lines = cv.HoughLines2(em, cv.CreateMemStorage(), cv.CV_HOUGH_STANDARD, 1.0, cv.CV_PI/180.0, threshold, minlinelength, maxlinegap) + lines = cv2.HoughLines2(em, cv2.CreateMemStorage(), cv2.CV_HOUGH_STANDARD, 1.0, cv2.CV_PI/180.0, threshold, minlinelength, maxlinegap) if nLines == -1: nLines = len(lines) # All white points (edges) in Canny edge image @@ -5051,7 +5051,7 @@ def findLines(self, threshold=80, minlinelength=30, maxlinegap=10, cannyth1=50, linesFS.append(Line(self, l)) linesFS = linesFS[:nLines] else: - lines = cv.HoughLines2(em, cv.CreateMemStorage(), cv.CV_HOUGH_PROBABILISTIC, 1.0, cv.CV_PI/180.0, threshold, minlinelength, maxlinegap) + lines = cv2.HoughLines2(em, cv2.CreateMemStorage(), cv2.CV_HOUGH_PROBABILISTIC, 1.0, cv2.CV_PI/180.0, threshold, minlinelength, maxlinegap) if nLines == -1: nLines = len(lines) @@ -5092,10 +5092,10 @@ def findChessboard(self, dimensions = (8, 5), subpixel = True): :py:class:`Chessboard` """ - corners = cv.FindChessboardCorners(self._getEqualizedGrayscaleBitmap(), dimensions, cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv.CV_CALIB_CB_NORMALIZE_IMAGE ) + corners = cv2.FindChessboardCorners(self._getEqualizedGrayscaleBitmap(), dimensions, cv2.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.CV_CALIB_CB_NORMALIZE_IMAGE ) if(len(corners[1]) == dimensions[0]*dimensions[1]): if (subpixel): - spCorners = cv.FindCornerSubPix(self.getGrayscaleMatrix(), corners[1], (11, 11), (-1, -1), (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01)) + spCorners = cv2.FindCornerSubPix(self.getGrayscaleMatrix(), corners[1], (11, 11), (-1, -1), (cv2.CV_TERMCRIT_ITER | cv2.CV_TERMCRIT_EPS, 10, 0.01)) else: spCorners = corners[1] return FeatureSet([ Chessboard(self, dimensions, spCorners) ]) @@ -5158,7 +5158,7 @@ def _getEdgeMap(self, t1=50, t2=100): self._edgeMap = self.getEmpty(1) - cv.Canny(self._getGrayscaleBitmap(), self._edgeMap, t1, t2) + cv2.Canny(self._getGrayscaleBitmap(), self._edgeMap, t1, t2) self._cannyparam = (t1, t2) @@ -5209,19 +5209,19 @@ def rotate(self, angle, fixed=True, point=[-1, -1], scale = 1.0): if (fixed): retVal = self.getEmpty() - cv.Zero(retVal) - rotMat = cv.CreateMat(2, 3, cv.CV_32FC1) - cv.GetRotationMatrix2D((float(point[0]), float(point[1])), float(angle), float(scale), rotMat) - cv.WarpAffine(self.getBitmap(), retVal, rotMat) + cv2.Zero(retVal) + rotMat = cv2.CreateMat(2, 3, cv2.CV_32FC1) + cv2.GetRotationMatrix2D((float(point[0]), float(point[1])), float(angle), float(scale), rotMat) + cv2.WarpAffine(self.getBitmap(), retVal, rotMat) return Image(retVal, colorSpace=self._colorSpace) #otherwise, we're expanding the matrix to fit the image at original size - rotMat = cv.CreateMat(2, 3, cv.CV_32FC1) + rotMat = cv2.CreateMat(2, 3, cv2.CV_32FC1) # first we create what we thing the rotation matrix should be - cv.GetRotationMatrix2D((float(point[0]), float(point[1])), float(angle), float(scale), rotMat) + cv2.GetRotationMatrix2D((float(point[0]), float(point[1])), float(angle), float(scale), rotMat) A = np.array([0, 0, 1]) B = 
@@ -5303,8 +5303,8 @@ def transpose(self): """ - retVal = cv.CreateImage((self.height, self.width), cv.IPL_DEPTH_8U, 3) - cv.Transpose(self.getBitmap(), retVal) + retVal = cv2.CreateImage((self.height, self.width), cv2.IPL_DEPTH_8U, 3) + cv2.Transpose(self.getBitmap(), retVal) return(Image(retVal, colorSpace=self._colorSpace)) @@ -5341,9 +5341,9 @@ def shear(self, cornerpoints): """ src = ((0, 0), (self.width-1, 0), (self.width-1, self.height-1)) #set the original points - aWarp = cv.CreateMat(2, 3, cv.CV_32FC1) + aWarp = cv2.CreateMat(2, 3, cv2.CV_32FC1) #create the empty warp matrix - cv.GetAffineTransform(src, cornerpoints, aWarp) + cv2.GetAffineTransform(src, cornerpoints, aWarp) return self.transformAffine(aWarp) @@ -5388,7 +5388,7 @@ def transformAffine(self, rotMatrix): retVal = self.getEmpty() if(type(rotMatrix) == np.ndarray ): rotMatrix = npArray2cvMat(rotMatrix) - cv.WarpAffine(self.getBitmap(), retVal, rotMatrix) + cv2.WarpAffine(self.getBitmap(), retVal, rotMatrix) return Image(retVal, colorSpace=self._colorSpace) @@ -5428,8 +5428,8 @@ def warp(self, cornerpoints): """ #original coordinates src = ((0, 0), (self.width-1, 0), (self.width-1, self.height-1), (0, self.height-1)) - pWarp = cv.CreateMat(3, 3, cv.CV_32FC1) #create an empty 3x3 matrix - cv.GetPerspectiveTransform(src, cornerpoints, pWarp) #figure out the warp matrix + pWarp = cv2.CreateMat(3, 3, cv2.CV_32FC1) #create an empty 3x3 matrix + cv2.GetPerspectiveTransform(src, cornerpoints, pWarp) #figure out the warp matrix return self.transformPerspective(pWarp) @@ -5480,7 +5480,7 @@ def transformPerspective(self, rotMatrix): retVal = self.getEmpty() if(type(rotMatrix) == np.ndarray ): rotMatrix = npArray2cvMat(rotMatrix) - cv.WarpPerspective(self.getBitmap(), retVal, rotMatrix) + cv2.WarpPerspective(self.getBitmap(), retVal, rotMatrix) return Image(retVal, colorSpace=self._colorSpace) def getPixel(self, x, y): @@ -5518,7 +5518,7 @@ def getPixel(self, x, y): elif( y < 0 or y >= self.height ): logger.warning("getRGBPixel: Y value is not valid.") else: - c = cv.Get2D(self.getBitmap(), y, x) + c = cv2.Get2D(self.getBitmap(), y, x) if( self._colorSpace == ColorSpace.BGR ): retVal = (c[2],c[1],c[0]) else: @@ -5562,7 +5562,7 @@ def getGrayPixel(self, x, y): elif( y < 0 or y >= self.height ): logger.warning("getGrayPixel: Y value is not valid.") else: - retVal = cv.Get2D(self._getGrayscaleBitmap(), y, x) + retVal = cv2.Get2D(self._getGrayscaleBitmap(), y, x) retVal = retVal[0] return retVal @@ -5604,7 +5604,7 @@ def getVertScanline(self, column): if( column < 0 or column >= self.width ): logger.warning("getVertRGBScanline: column value is not valid.") else: - retVal = cv.GetCol(self.getBitmap(), column) + retVal = cv2.GetCol(self.getBitmap(), column) retVal = np.array(retVal) retVal = retVal[:, 0, :] return retVal
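# A minimal sketch of the warp()/transformPerspective pair with the cv2 API,
# assuming a numpy-backed image: cv2.getPerspectiveTransform takes two float32
# arrays of four points each and returns the 3x3 matrix that is built by hand above.
import cv2
import numpy as np
img = np.zeros((240, 320, 3), np.uint8)
h, w = img.shape[:2]
src = np.float32([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]])
dst = np.float32([[30, 10], [w - 40, 0], [w - 1, h - 1], [0, h - 31]])  # example corners
p_warp = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, p_warp, (w, h))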
@@ -5646,7 +5646,7 @@ def getHorzScanline(self, row): if( row < 0 or row >= self.height ): logger.warning("getHorzRGBScanline: row value is not valid.") else: - retVal = cv.GetRow(self.getBitmap(), row) + retVal = cv2.GetRow(self.getBitmap(), row) retVal = np.array(retVal) retVal = retVal[0, :, :] return retVal @@ -5688,7 +5688,7 @@ def getVertScanlineGray(self, column): if( column < 0 or column >= self.width ): logger.warning("getHorzRGBScanline: row value is not valid.") else: - retVal = cv.GetCol(self._getGrayscaleBitmap(), column ) + retVal = cv2.GetCol(self._getGrayscaleBitmap(), column ) retVal = np.array(retVal) #retVal = retVal.transpose() return retVal @@ -5731,7 +5731,7 @@ def getHorzScanlineGray(self, row): if( row < 0 or row >= self.height ): logger.warning("getHorzRGBScanline: row value is not valid.") else: - retVal = cv.GetRow(self._getGrayscaleBitmap(), row ) + retVal = cv2.GetRow(self._getGrayscaleBitmap(), row ) retVal = np.array(retVal) retVal = retVal.transpose() return retVal @@ -5829,7 +5829,7 @@ def crop(self, x , y = None, w = None, h = None, centered=False, smart=False): w = theFeature.width() h = theFeature.height() - elif(isinstance(x, (tuple,list)) and len(x) == 4 and isinstance(x[0],(int, long, float)) + elif(isinstance(x, (tuple,list)) and len(x) == 4 and isinstance(x[0],(int, float)) and y == None and w == None and h == None): x,y,w,h = x # x of the form [(x,y),(x1,y1),(x2,y2),(x3,y3)] @@ -5858,7 +5858,7 @@ def crop(self, x , y = None, w = None, h = None, centered=False, smart=False): elif(isinstance(x, (tuple,list)) and isinstance(y, (tuple,list)) and len(x) > 4 and len(y) > 4 ): - if(isinstance(x[0],(int, long, float)) and isinstance(y[0],(int, long, float))): + if(isinstance(x[0],(int, float)) and isinstance(y[0],(int, float))): xmax = np.max(x) ymax = np.max(y) xmin = np.min(x) @@ -5874,7 +5874,7 @@ def crop(self, x , y = None, w = None, h = None, centered=False, smart=False): # x of the form [(x,y),(x,y),(x,y),(x,y),(x,y),(x,y)] elif(isinstance(x, (list,tuple)) and len(x) > 4 and len(x[0]) == 2 and y == None and w == None and h == None): - if(isinstance(x[0][0],(int, long, float))): + if(isinstance(x[0][0],(int, float))): xs = [pt[0] for pt in x] ys = [pt[1] for pt in x] xmax = np.max(xs) @@ -5919,13 +5919,13 @@ def crop(self, x , y = None, w = None, h = None, centered=False, smart=False): if(y == None or w == None or h == None): - print "Please provide an x, y, width, height to function" + print("Please provide an x, y, width, and height to the function") if( w <= 0 or h <= 0 ): logger.warning("Can't do a negative crop!") return None - retVal = cv.CreateImage((int(w),int(h)), cv.IPL_DEPTH_8U, 3) + retVal = cv2.CreateImage((int(w),int(h)), cv2.IPL_DEPTH_8U, 3) if( x < 0 or y < 0 ): logger.warning("Crop will try to help you, but you have a negative crop position, your width and height may not be what you want them to be.") @@ -6015,7 +6015,7 @@ def clear(self): Do not use this method unless you have a particularly compelling reason. """ - cv.SetZero(self._bitmap) + cv2.SetZero(self._bitmap) self._clearBuffers()
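# A minimal sketch, assuming a numpy-backed image: the cv.CreateImage plus
# copy-ROI bookkeeping that crop() performs reduces to a numpy slice (note
# the row-major, y-first index order):
import numpy as np
img = np.zeros((240, 320, 3), np.uint8)
x, y, w, h = 10, 20, 100, 50
cropped = img[y:y + h, x:x + w]  # a view; use .copy() for an independent buffer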
""" - cv.SetZero(self._bitmap) + cv2.SetZero(self._bitmap) self._clearBuffers() def draw(self, features, color=Color.GREEN, width=1, autocolor=False): @@ -6204,7 +6204,7 @@ def show(self, type = 'window'): self.save(d) return d else: - print "Unknown type to show" + print("Unknown type to show") def _surface2Image(self,surface): imgarray = pg.surfarray.array3d(surface) @@ -6392,7 +6392,7 @@ def removeDrawingLayer(self, index = -1): try: return self._mLayers.pop(index) except IndexError: - print 'Not a valid index or No layers to remove!' + print('Not a valid index or No layers to remove!') def getDrawingLayer(self, index = -1): @@ -6444,7 +6444,7 @@ def getDrawingLayer(self, index = -1): try: return self._mLayers[index] except IndexError: - print 'Not a valid index' + print('Not a valid index') def dl(self, index = -1): @@ -7138,7 +7138,9 @@ def _rectOverlapROIs(self,top, bottom, pos): br = (pos[0]+top[0],pos[1]+top[1]) bl = (pos[0],pos[1]+top[1]) # do an overlap test to weed out corner cases and errors - def inBounds((w,h), (x,y)): + def inBounds(xxx_todo_changeme, xxx_todo_changeme1): + (w,h) = xxx_todo_changeme + (x,y) = xxx_todo_changeme1 retVal = True if( x < 0 or y < 0 or x > w or y > h): retVal = False @@ -7422,7 +7424,7 @@ def applyPixelFunction(self, theFunc): #but I can get vectorize to work with the three channels together... have to split them #TODO: benchmark this against vectorize pixels = np.array(self.getNumpy()).reshape(-1,3).tolist() - result = np.array(map(theFunc,pixels),dtype=uint8).reshape(self.width,self.height,3) + result = np.array(list(map(theFunc,pixels)),dtype=uint8).reshape(self.width,self.height,3) return Image(result) @@ -7612,7 +7614,7 @@ def findTemplate(self, template_image = None, threshold = 5, method = "SQR_DIFF_ else: compute = np.where((matches > mean+threshold*sd) ) - mapped = map(tuple, np.column_stack(compute)) + mapped = list(map(tuple, np.column_stack(compute))) fs = FeatureSet() for location in mapped: fs.append(TemplateMatch(self, template_image, (location[1],location[0]), matches[location[0], location[1]])) @@ -7733,7 +7735,7 @@ def findTemplateOnce(self, template_image = None, threshold = 0.2, method = "SQR compute = np.where( matches == np.max(matches) ) else: return [] - mapped = map(tuple, np.column_stack(compute)) + mapped = list(map(tuple, np.column_stack(compute))) fs = FeatureSet() for location in mapped: fs.append(TemplateMatch(self, template_image, (location[1],location[0]), matches[location[0], location[1]])) @@ -10906,7 +10908,7 @@ def applyUnsharpMask(self,boost=1,dia=400,grayscale=False): """ if boost < 0: - print "boost >= 1" + print("boost >= 1") return None lpIm = self.applyGaussianFilter(dia=dia,grayscale=grayscale,highpass=False) @@ -10933,7 +10935,7 @@ def listHaarFeatures(self): features_directory = os.path.join(LAUNCH_PATH, 'Features','HaarCascades') features = os.listdir(features_directory) - print features + print(features) def _CopyAvg(self, src, dst,roi, levels, levels_f, mode): ''' @@ -11183,7 +11185,7 @@ def fillHoles(self): for cnt in contour: cv2.drawContours(des,[cnt],0,255,-1) - print 'yep' + print('yep') gray = cv2.bitwise_not(des) return gray @@ -11254,7 +11256,7 @@ def edgeIntersections(self, pt0, pt1, width=1, canny1=0, canny2=100): cv.Mul(line,edges,line) intersections = uint8(np.array(cv.GetMat(line)).transpose()) (xs,ys) = np.where(intersections==255) - points = zip(xs,ys) + points = list(zip(xs,ys)) if(len(points)==0): return [None,None] A = np.argmin(spsd.cdist(p0p,points,'cityblock')) @@ -11390,7 
@@ -11390,7 +11392,7 @@ def fitEdge(self,guess,window=10,threshold=128, measurements=5, darktolight=True m=0 mo = 0 b = x1 - for i in xrange(0, measurements): + for i in range(0, measurements): s[i][0] = x1 s[i][1] = y1 + i * dy lpstartx[i] = s[i][0] + window @@ -11411,7 +11413,7 @@ def fitEdge(self,guess,window=10,threshold=128, measurements=5, darktolight=True mo = -1/m #slope of orthogonal line segments #obtain points for measurement along the initial guess line - for i in xrange(0, measurements): + for i in range(0, measurements): s[i][0] = x1 + i * dx s[i][1] = y1 + i * dy fx = (math.sqrt(math.pow(window,2))/(1+mo))/2 @@ -11625,7 +11627,7 @@ def fitLines(self,guesses,window=10,threshold=128): # pick the lines above our threshold x,y = np.where(temp>threshold) - pts = zip(x,y) + pts = list(zip(x,y)) gpv = np.array([float(g[0][0]-xminW),float(g[0][1]-yminW)]) gpw = np.array([float(g[1][0]-xminW),float(g[1][1]-yminW)]) def lineSegmentToPoint(p): @@ -11644,7 +11646,7 @@ def lineSegmentToPoint(p): return np.sqrt(np.sum((p-project)**2)) # http://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment - distances = np.array(map(lineSegmentToPoint,pts)) + distances = np.array(list(map(lineSegmentToPoint,pts))) closepoints = np.where(distances<threshold)[0] - print "Tracking is available for OpenCV >= 2.3" + print("Tracking is available for OpenCV >= 2.3") return None if type(img) == list: @@ -12436,7 +12438,7 @@ def getLineScan(self,x=None,y=None,pt1=None,pt2=None,channel = -1): try: img = self.getNumpy()[:,:,channel] except IndexError: - print 'Channel missing!' + print('Channel missing!') return None retVal = None @@ -12448,8 +12450,8 @@ def getLineScan(self,x=None,y=None,pt1=None,pt2=None,channel = -1): retVal.pt2 = (x,self.height) retVal.col = x x = np.ones((1,self.height))[0]*x - y = range(0,self.height,1) - pts = zip(x,y) + y = list(range(0,self.height,1)) + pts = list(zip(x,y)) retVal.pointLoc = pts else: warnings.warn("ImageClass.getLineScan - that is not valid scanline.") @@ -12463,8 +12465,8 @@ def getLineScan(self,x=None,y=None,pt1=None,pt2=None,channel = -1): retVal.pt2 = (self.width,y) retVal.row = y y = np.ones((1,self.width))[0]*y - x = range(0,self.width,1) - pts = zip(x,y) + x = list(range(0,self.width,1)) + pts = list(zip(x,y)) retVal.pointLoc = pts else: @@ -12529,7 +12531,7 @@ def setLineScan(self, linescan,x=None,y=None,pt1=None,pt2=None,channel = -1): try: img = np.copy(self.getNumpy()[:,:,channel]) except IndexError: - print 'Channel missing!' + print('Channel missing!') return None if( x is None and y is None and pt1 is None and pt2 is None): @@ -12643,7 +12645,7 @@ def replaceLineScan(self, linescan, x=None, y=None, pt1=None, pt2=None, channel try: img = np.copy(self.getNumpy()[:,:,linescan.channel]) except IndexError: - print 'Channel missing!' + print('Channel missing!') return None
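# A self-contained restatement of lineSegmentToPoint above (shortest distance
# from point p to the segment v-w, following the linked StackOverflow approach),
# handy for sanity-checking the distances/closepoints computation in fitLines:
import numpy as np
def segment_point_distance(p, v, w):
    p, v, w = (np.asarray(x, dtype=float) for x in (p, v, w))
    l2 = np.sum((w - v) ** 2)            # squared segment length
    if l2 == 0.0:                        # degenerate segment: a single point
        return float(np.sqrt(np.sum((p - v) ** 2)))
    t = max(0.0, min(1.0, float(np.dot(p - v, w - v)) / l2))  # clamped projection
    proj = v + t * (w - v)
    return float(np.sqrt(np.sum((p - proj) ** 2)))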
if linescan.row is not None: @@ -12717,7 +12719,7 @@ def getPixelsOnLine(self,pt1,pt2): return retVal - def bresenham_line(self, (x,y), (x2,y2)): + def bresenham_line(self, pt1, pt2): """ Bresenham line algorithm @@ -12725,6 +12727,8 @@ This is just a helper method """ + (x,y) = pt1 + (x2,y2) = pt2 if (not 0 <= x <= self.width-1 or not 0 <= y <= self.height-1 or not 0 <= x2 <= self.width-1 or not 0 <= y2 <= self.height-1): l = Line(self, ((x, y), (x2, y2))).cropToImageEdges() @@ -12894,7 +12898,7 @@ def findGridLines(self): gridIndex = self.getDrawingLayer(self._gridLayer[0]) if self._gridLayer[0]==-1: - print "Cannot find grid on the image, Try adding a grid first" + print("Cannot find grid on the image, try adding a grid first") lineFS = FeatureSet() try: @@ -12939,12 +12943,12 @@ def logicalAND(self, img, grayscale=True): """ if not self.size() == img.size(): - print "Both images must have same sizes" + print("Both images must have same sizes") return None try: import cv2 except ImportError: - print "This function is available for OpenCV >= 2.3" + print("This function is available for OpenCV >= 2.3") if grayscale: retval = cv2.bitwise_and(self.getGrayNumpyCv2(), img.getGrayNumpyCv2()) else: @@ -12975,12 +12979,12 @@ def logicalNAND(self, img, grayscale=True): """ if not self.size() == img.size(): - print "Both images must have same sizes" + print("Both images must have same sizes") return None try: import cv2 except ImportError: - print "This function is available for OpenCV >= 2.3" + print("This function is available for OpenCV >= 2.3") if grayscale: retval = cv2.bitwise_and(self.getGrayNumpyCv2(), img.getGrayNumpyCv2()) else: @@ -13012,12 +13016,12 @@ def logicalOR(self, img, grayscale=True): """ if not self.size() == img.size(): - print "Both images must have same sizes" + print("Both images must have same sizes") return None try: import cv2 except ImportError: - print "This function is available for OpenCV >= 2.3" + print("This function is available for OpenCV >= 2.3") if grayscale: retval = cv2.bitwise_or(self.getGrayNumpyCv2(), img.getGrayNumpyCv2()) else: @@ -13048,12 +13052,12 @@ def logicalXOR(self, img, grayscale=True): """ if not self.size() == img.size(): - print "Both images must have same sizes" + print("Both images must have same sizes") return None try: import cv2 except ImportError: - print "This function is available for OpenCV >= 2.3" + print("This function is available for OpenCV >= 2.3") if grayscale: retval = cv2.bitwise_xor(self.getGrayNumpyCv2(), img.getGrayNumpyCv2()) else:
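# A minimal sketch of SIFT matching with the current cv2 API (OpenCV >= 4.4,
# where SIFT_create lives in the main module); BFMatcher.knnMatch plus Lowe's
# ratio test replaces the hand-rolled distance sort in matchSIFTKeyPoints
# below. The file names are hypothetical:
import cv2
sift = cv2.SIFT_create()
img1 = cv2.imread("scene.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("template.png", cv2.IMREAD_GRAYSCALE)
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
matches = cv2.BFMatcher().knnMatch(des1, des2, k=2)
good = [m for m, n in matches if m.distance < 0.75 * n.distance]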
@@ -13122,12 +13126,12 @@ def matchSIFTKeyPoints(self, template, quality=200): dist = dist[:,0]/2500.0 dist = dist.reshape(-1,).tolist() idx = idx.reshape(-1).tolist() - indices = range(len(dist)) + indices = list(range(len(dist))) indices.sort(key=lambda i: dist[i]) dist = [dist[i] for i in indices] idx = [idx[i] for i in indices] sfs = [] - for i, dis in itertools.izip(idx, dist): + for i, dis in zip(idx, dist): if dis < quality: sfs.append(KeyPoint(template, skp[i], sd, "SIFT")) else: @@ -13137,12 +13141,12 @@ dist = dist[:,0]/2500.0 dist = dist.reshape(-1,).tolist() idx = idx.reshape(-1).tolist() - indices = range(len(dist)) + indices = list(range(len(dist))) indices.sort(key=lambda i: dist[i]) dist = [dist[i] for i in indices] idx = [idx[i] for i in indices] tfs = [] - for i, dis in itertools.izip(idx, dist): + for i, dis in zip(idx, dist): if dis < quality: tfs.append(KeyPoint(template, tkp[i], td, "SIFT")) else: @@ -13427,7 +13431,7 @@ def watershed(self, mask=None, erode=2,dilate=2, useMyMask=False): m = np.int32(newmask.getGrayNumpyCv2()) cv2.watershed(self.getNumpyCv2(),m) m = cv2.convertScaleAbs(m) - ret,thresh = cv2.threshold(m,0,255,cv2.cv.CV_THRESH_OTSU) + ret,thresh = cv2.threshold(m,0,255,cv2.THRESH_OTSU) retVal = Image(thresh,cv2image=True) return retVal @@ -13502,7 +13506,7 @@ def maxValue(self,locations=False): if(locations): val = np.max(self.getGrayNumpy()) x,y = np.where(self.getGrayNumpy()==val) - locs = zip(x.tolist(),y.tolist()) + locs = list(zip(x.tolist(),y.tolist())) return int(val),locs else: val = np.max(self.getGrayNumpy()) @@ -13536,7 +13540,7 @@ def minValue(self,locations=False): if(locations): val = np.min(self.getGrayNumpy()) x,y = np.where(self.getGrayNumpy()==val) - locs = zip(x.tolist(),y.tolist()) + locs = list(zip(x.tolist(),y.tolist())) return int(val),locs else: val = np.min(self.getGrayNumpy()) @@ -13732,21 +13736,21 @@ def grayPeaks(self, bins = 255, delta = 0, lookahead = 15): # As range() function is exclusive, # hence bins+2 is passed as parameter. - y_axis, x_axis = np.histogram(self.getGrayNumpy(), bins = range(bins+2)) + y_axis, x_axis = np.histogram(self.getGrayNumpy(), bins = list(range(bins+2))) x_axis = x_axis[0:bins+1] maxtab = [] mintab = [] length = len(y_axis) if x_axis is None: - x_axis = range(length) + x_axis = list(range(length)) #perform some checks if length != len(x_axis): - raise ValueError, "Input vectors y_axis and x_axis must have same length" + raise ValueError("Input vectors y_axis and x_axis must have same length") if lookahead < 1: - raise ValueError, "Lookahead must be above '1' in value" + raise ValueError("Lookahead must be above '1' in value") if not (np.isscalar(delta) and delta >= 0): - raise ValueError, "delta must be a positive number" + raise ValueError("delta must be a positive number") #needs to be a numpy array y_axis = np.asarray(y_axis) @@ -13835,7 +13839,7 @@ def tvDenoising(self, gray=False, weight=50, eps=0.0002, max_iter=200, resize=1) img = self.copy() if resize <= 0: - print 'Enter a valid resize value' + print('Enter a valid resize value') return None if resize != 1: @@ -14055,7 +14059,7 @@ def channelMixer(self, channel = 'r', weight = (100,100,100)): warnings.warn('Value of weights can be from -200 to 200%') return None - weight = map(float,weight) + weight = list(map(float,weight)) channel = channel.lower() if channel == 'r': r = r*(weight[0]/100.0) + g*(weight[1]/100.0) + b*(weight[2]/100.0) @@ -14140,7 +14144,7 @@ def edgeSnap(self,pointList,step = 1): #checking that all values are 0 and 255 if( c1 + c2 != imgArray.size): - raise ValueError,"Image must be binary" + raise ValueError("Image must be binary") if(len(pointList) < 2 ): return None @@ -14545,7 +14549,7 @@ def normalize(self, newMin = 0, newMax = 255, minCut = 2, maxCut = 98): #avoiding the effect of odd pixels try: hist = self.getGrayHistogramCounts() - freq, val = zip(*hist) + freq, val = list(zip(*hist)) maxfreq = (freq[0]-freq[-1])* maxCut/100.0 minfreq = (freq[0]-freq[-1])* minCut/100.0 closestMatch = lambda a,l:min(l, key=lambda x:abs(x-a)) diff --git a/SimpleCV/LineScan.py b/SimpleCV/LineScan.py index 5fd84fd5a..f89e923e6 100644 --- a/SimpleCV/LineScan.py +++ b/SimpleCV/LineScan.py @@ -65,7 +65,7 @@ def __init__(self, args, **kwargs): self.channel = kwargs[key] if(self.pointLoc is None): - self.pointLoc = zip(range(0,len(self)),range(0,len(self))) + self.pointLoc = list(zip(list(range(0,len(self))),list(range(0,len(self)))))
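# A quick sanity sketch for the Otsu flag used in watershed(): in the cv2
# bindings the constant is cv2.THRESH_OTSU (there is no cv2.CV_THRESH_OTSU),
# normally combined with a base thresholding mode:
import cv2
import numpy as np
gray = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# ret holds the Otsu-selected threshold, thresh the binarized image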
def __getitem__(self,key): """ Returns a LineScan when sliced. Previously used to return list. Now it is possible to use LineScan member functions on sub-lists """ - if type(key) is types.SliceType: #Or can use 'try:' for speed + if type(key) is slice: #Or can use 'try:' for speed return LineScan(list.__getitem__(self, key)) else: return list.__getitem__(self,key) @@ -90,9 +90,9 @@ def __getslice__(self, i, j): def __sub__(self,other): if len(self) == len(other): - retVal = LineScan(map(operator.sub,self,other)) + retVal = LineScan(list(map(operator.sub,self,other))) else: - print 'Size mismatch' + print('Size mismatch') return None retVal._update(self) return retVal @@ -100,9 +100,9 @@ def __sub__(self,other): def __add__(self,other): if len(self) == len(other): - retVal = LineScan(map(operator.add,self,other)) + retVal = LineScan(list(map(operator.add,self,other))) else: - print 'Size mismatch' + print('Size mismatch') return None retVal._update(self) return retVal @@ -110,9 +110,9 @@ def __add__(self,other): def __mul__(self,other): if len(self) == len(other): - retVal = LineScan(map(operator.mul,self,other)) + retVal = LineScan(list(map(operator.mul,self,other))) else: - print 'Size mismatch' + print('Size mismatch') return None retVal._update(self) @@ -122,12 +122,12 @@ def __div__(self,other): if len(self) == len(other): try: - retVal = LineScan(map(operator.div,self,other)) + retVal = LineScan(list(map(operator.truediv,self,other))) except ZeroDivisionError: - print 'Second LineScan contains zeros' + print('Second LineScan contains zeros') return None else: - print 'Size mismatch' + print('Size mismatch') return None retVal._update(self) @@ -294,7 +294,7 @@ def minima(self): pts = np.array(self.pointLoc) pts = pts[idxs] pts = [(p[0],p[1]) for p in pts] # un numpy this - return zip(idxs,minvalue,pts) + return list(zip(idxs,minvalue,pts)) def maxima(self): """ @@ -330,7 +330,7 @@ def maxima(self): pts = np.array(self.pointLoc) pts = pts[idxs] pts = [(p[0],p[1]) for p in pts] # un numpy - return zip(idxs,maxvalue,pts) + return list(zip(idxs,maxvalue,pts)) def derivative(self): """ @@ -394,7 +394,7 @@ def localMaxima(self): pts = np.array(self.pointLoc) pts = pts[idx] pts = [(p[0],p[1]) for p in pts] # un numpy - return zip(idx,values,pts) + return list(zip(idx,values,pts)) def localMinima(self): @@ -429,7 +429,7 @@ def localMinima(self): pts = np.array(self.pointLoc) pts = pts[idx] pts = [(p[0],p[1]) for p in pts] # un numpy - return zip(idx,values,pts) + return list(zip(idx,values,pts)) def resample(self,n=100): """ @@ -464,7 +464,7 @@ def resample(self,n=100): # so we can totally do this better manually x = linspace(pts[0,0],pts[-1,0],n) y = linspace(pts[0,1],pts[-1,1],n) - pts = zip(x,y) + pts = list(zip(x,y)) retVal = LineScan(list(signal),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2) retVal._update(self) return retVal @@ -511,7 +511,7 @@ def fitToModel(self,f,p0=None): """ yvals = np.array(self,dtype='float32') - xvals = range(0,len(yvals),1) + xvals = list(range(0,len(yvals),1)) popt,pcov = spo.curve_fit(f,xvals,yvals,p0=p0) yvals = f(xvals,*popt) retVal = LineScan(list(yvals),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2) @@ -550,7 +550,7 @@ def getModelParameters(self,f,p0=None): """ yvals = np.array(self,dtype='float32') - xvals = range(0,len(yvals),1) + xvals = list(range(0,len(yvals),1)) popt,pcov = spo.curve_fit(f,xvals,yvals,p0=p0) return popt @@ -1083,7 +1083,7 @@ def 
medianFilter(self, kernel_size=5): return None if kernel_size % 2 == 0: kernel_size-=1 - print "Kernel Size should be odd. New kernel size =" , (kernel_size) + print("Kernel Size should be odd. New kernel size =" , (kernel_size)) medfilt_array = medfilt(np.asarray(self[:]), kernel_size) retVal = LineScan(medfilt_array.astype("uint8").tolist(), image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2, x=self.col, y=self.row) @@ -1158,7 +1158,7 @@ def runningAverage(self, diameter=3, algo="uniform"): r=float(diameter)/2 for i in range(-int(r),int(r)+1): kernel.append(np.exp(-i**2/(2*(r/3)**2))/(np.sqrt(2*np.pi)*(r/3))) - retVal = LineScan(map(int,self.convolve(kernel))) + retVal = LineScan(list(map(int,self.convolve(kernel)))) retVal._update(self) return retVal diff --git a/SimpleCV/MachineLearning/KNNClassifier.py b/SimpleCV/MachineLearning/KNNClassifier.py index f250009c3..591dbdf33 100644 --- a/SimpleCV/MachineLearning/KNNClassifier.py +++ b/SimpleCV/MachineLearning/KNNClassifier.py @@ -105,7 +105,7 @@ def __setstate__(self, mydict): colNames = [] for extractor in self.mFeatureExtractors: colNames.extend(extractor.getFieldNames()) - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) self.mDataSetOrange = orange.ExampleTable(self.mOrangeDomain,self.mDataSetRaw) @@ -150,7 +150,7 @@ def _trainPath(self,path,className,subset,disp,verbose): for i in range(nfiles): infile = files[i] if verbose: - print "Opening file: " + infile + print("Opening file: " + infile) img = Image(infile) featureVector = [] for extractor in self.mFeatureExtractors: @@ -179,7 +179,7 @@ def _trainImageSet(self,imageset,className,subset,disp,verbose): imageset = imageset[0:subset] for img in imageset: if verbose: - print "Opening file: " + img.filename + print("Opening file: " + img.filename) featureVector = [] for extractor in self.mFeatureExtractors: feats = extractor.extract(img) @@ -239,7 +239,7 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) return None # push our data into an orange example table - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) self.mDataSetOrange = orange.ExampleTable(self.mOrangeDomain,self.mDataSetRaw) if(savedata is not None): orange.saveTabDelimited (savedata, self.mDataSetOrange) @@ -255,7 +255,7 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) c = self.mClassifier(self.mDataSetOrange[i]) test = self.mDataSetOrange[i].getclass() if verbose: - print "original", test, "classified as", c + print("original", test, "classified as", c) if(test==c): correct = correct + 1 else: @@ -270,12 +270,12 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) confusion = orngStat.confusionMatrices(crossValidator)[0] if verbose: - print("Correct: "+str(good)) - print("Incorrect: "+str(bad)) + print(("Correct: "+str(good))) + print(("Incorrect: "+str(bad))) classes = self.mDataSetOrange.domain.classVar.values - print "\t"+"\t".join(classes) + print("\t"+"\t".join(classes)) for className, classConfusions in zip(classes, confusion): - print ("%s" + ("\t%i" * len(classes))) % ((className, ) + 
tuple(classConfusions)) + print(("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions))) return [good, bad, confusion] @@ -308,7 +308,7 @@ def test(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True): colNames = [] for extractor in self.mFeatureExtractors: colNames.extend(extractor.getFieldNames()) - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) dataset = [] for i in range(len(classNames)): @@ -335,12 +335,12 @@ def test(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True): good = 100*(float(correct)/float(count)) bad = 100*(float(count-correct)/float(count)) if verbose: - print("Correct: "+str(good)) - print("Incorrect: "+str(bad)) + print(("Correct: "+str(good))) + print(("Incorrect: "+str(bad))) classes = self.mDataSetOrange.domain.classVar.values - print "\t"+"\t".join(classes) + print("\t"+"\t".join(classes)) for className, classConfusions in zip(classes, confusion): - print ("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions)) + print(("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions))) return [good, bad, confusion] @@ -358,7 +358,7 @@ def _testPath(self,path,className,dataset,subset,disp,verbose): for i in range(nfiles): infile = files[i] if verbose: - print "Opening file: " + infile + print("Opening file: " + infile) img = Image(infile) featureVector = [] for extractor in self.mFeatureExtractors: @@ -396,7 +396,7 @@ def _testImageSet(self,imageset,className,dataset,subset,disp,verbose): imageset = imageset[0:subset] for img in imageset: if verbose: - print "Opening file: " + img.filename + print("Opening file: " + img.filename) featureVector = [] for extractor in self.mFeatureExtractors: feats = extractor.extract(img) diff --git a/SimpleCV/MachineLearning/MLTestSuite.py b/SimpleCV/MachineLearning/MLTestSuite.py index 6ff8bddea..b968aa90f 100644 --- a/SimpleCV/MachineLearning/MLTestSuite.py +++ b/SimpleCV/MachineLearning/MLTestSuite.py @@ -1,18 +1,18 @@ from SimpleCV import * -print "" -print "This program runs a list of test for machine learning on" -print "the SimpleCV library. Not all scores will be high, this" -print "is just to ensure that the libraries are functioning correctly" -print "on your system" -print "" -print "***** WARNING *****" -print "This program is about to download a large data set to run it's test" +print("") +print("This program runs a list of tests for machine learning on") +print("the SimpleCV library. 
Not all scores will be high, this") +print("is just to ensure that the libraries are functioning correctly") +print("on your system") +print("") +print("***** WARNING *****") +print("This program is about to download a large data set to run its tests") -inp = raw_input("Do you want to continue [Y/n]") +inp = input("Do you want to continue [Y/n]") if not (inp == "" or inp.lower() == "y"): - print "Exiting the program" + print("Exiting the program") sys.exit() @@ -81,7 +81,7 @@ for i in range(10): img = Image(files[i]) cname = classifierSVMP.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) classifierSVMP.save('PolySVM.pkl') print('Reloading from file') testSVM = SVMClassifier.load('PolySVM.pkl') @@ -90,7 +90,7 @@ for i in range(10): img = Image(files[i]) cname = testSVM.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) print('###############################################################################') print('SVMRBF ') @@ -120,7 +120,7 @@ for i in range(10): img = Image(files[i]) cname = classifierSVMRBF.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) classifierSVMRBF.save('RBFSVM.pkl') print('Reloading from file') testSVMRBF = SVMClassifier.load('RBFSVM.pkl') @@ -129,7 +129,7 @@ for i in range(10): img = Image(files[i]) cname = testSVMRBF.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) print('###############################################################################') @@ -146,7 +146,7 @@ for i in range(10): img = Image(files[i]) cname = classifierBayes.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) classifierBayes.save('Bayes.pkl') print('Reloading from file') testBayes = NaiveBayesClassifier.load('Bayes.pkl') @@ -155,7 +155,7 @@ for i in range(10): img = Image(files[i]) cname = testBayes.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) print('###############################################################################') @@ -174,7 +174,7 @@ for i in range(10): img = Image(files[i]) cname = classifierForest.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) classifierForest.save('forest.pkl') print('Reloading from file') @@ -184,7 +184,7 @@ for i in range(10): img = Image(files[i]) cname = testForest.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) print('###############################################################################') print('Bagged Tree') @@ -200,7 +200,7 @@ for i in range(10): img = Image(files[i]) cname = classifierBagTree.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) classifierBagTree.save('bagtree.pkl') print('Reloading from file') @@ -210,7 +210,7 @@ for i in range(10): img = Image(files[i]) cname = testBagTree.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) @@ -228,7 +228,7 @@ for i in range(10): img = Image(files[i]) cname = classifierTree.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) print('Reloading from file') classifierTree.save('tree.pkl') testTree = TreeClassifier.load('tree.pkl') @@ -236,7 +236,7 @@ for i in range(10): img = Image(files[i]) cname = testTree.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) print('###############################################################################') print('Boosted Tree') @@ -252,7 +252,7 @@ for i in range(10): img = Image(files[i]) cname = 
classifierBTree.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) classifierBTree.save('btree.pkl') print('Reloading from file') @@ -263,7 +263,7 @@ for i in range(10): img = Image(files[i]) cname = testBoostTree.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) @@ -282,7 +282,7 @@ for i in range(10): img = Image(files[i]) cname = classifierKNN.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) classifierKNN.save('knn.pkl') print('Reloading from file') @@ -292,8 +292,8 @@ for i in range(10): img = Image(files[i]) cname = testKNN.classify(img) - print(files[i]+' -> '+cname) + print((files[i]+' -> '+cname)) -print "" -print "All the machine learning test have ran correctly" +print("") +print("All the machine learning tests have run correctly") diff --git a/SimpleCV/MachineLearning/NaiveBayesClassifier.py b/SimpleCV/MachineLearning/NaiveBayesClassifier.py index ae821d553..0f5b1fbb6 100644 --- a/SimpleCV/MachineLearning/NaiveBayesClassifier.py +++ b/SimpleCV/MachineLearning/NaiveBayesClassifier.py @@ -72,7 +72,7 @@ def __setstate__(self, mydict): colNames = [] for extractor in self.mFeatureExtractors: colNames.extend(extractor.getFieldNames()) - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) self.mDataSetOrange = orange.ExampleTable(self.mOrangeDomain,self.mDataSetRaw) @@ -117,7 +117,7 @@ def _trainPath(self,path,className,subset,disp,verbose): for i in range(nfiles): infile = files[i] if verbose: - print "Opening file: " + infile + print("Opening file: " + infile) img = Image(infile) featureVector = [] for extractor in self.mFeatureExtractors: @@ -146,7 +146,7 @@ def _trainImageSet(self,imageset,className,subset,disp,verbose): imageset = imageset[0:subset] for img in imageset: if verbose: - print "Opening file: " + img.filename + print("Opening file: " + img.filename) featureVector = [] for extractor in self.mFeatureExtractors: feats = extractor.extract(img) @@ -206,7 +206,7 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) return None # push our data into an orange example table - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) self.mDataSetOrange = orange.ExampleTable(self.mOrangeDomain,self.mDataSetRaw) if(savedata is not None): orange.saveTabDelimited (savedata, self.mDataSetOrange) @@ -218,7 +218,7 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) c = self.mClassifier(self.mDataSetOrange[i]) test = self.mDataSetOrange[i].getclass() if verbose: - print "original", test, "classified as", c + print("original", test, "classified as", c) if(test==c): correct = correct + 1 else: @@ -233,12 +233,12 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) confusion = orngStat.confusionMatrices(crossValidator)[0] if verbose: - print("Correct: "+str(good)) - print("Incorrect: "+str(bad)) + print(("Correct: "+str(good))) + print(("Incorrect: "+str(bad))) classes = self.mDataSetOrange.domain.classVar.values - print "\t"+"\t".join(classes) + print("\t"+"\t".join(classes)) for className, classConfusions in zip(classes, 
confusion): - print ("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions)) + print(("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions))) return [good, bad, confusion] @@ -272,7 +272,7 @@ def test(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True): colNames = [] for extractor in self.mFeatureExtractors: colNames.extend(extractor.getFieldNames()) - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) dataset = [] for i in range(len(classNames)): @@ -299,12 +299,12 @@ def test(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True): good = 100*(float(correct)/float(count)) bad = 100*(float(count-correct)/float(count)) if verbose: - print("Correct: "+str(good)) - print("Incorrect: "+str(bad)) + print(("Correct: "+str(good))) + print(("Incorrect: "+str(bad))) classes = self.mDataSetOrange.domain.classVar.values - print "\t"+"\t".join(classes) + print("\t"+"\t".join(classes)) for className, classConfusions in zip(classes, confusion): - print ("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions)) + print(("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions))) return [good, bad, confusion] @@ -322,7 +322,7 @@ def _testPath(self,path,className,dataset,subset,disp,verbose): for i in range(nfiles): infile = files[i] if verbose: - print "Opening file: " + infile + print("Opening file: " + infile) img = Image(infile) featureVector = [] for extractor in self.mFeatureExtractors: @@ -360,7 +360,7 @@ def _testImageSet(self,imageset,className,dataset,subset,disp,verbose): imageset = imageset[0:subset] for img in imageset: if verbose: - print "Opening file: " + img.filename + print("Opening file: " + img.filename) featureVector = [] for extractor in self.mFeatureExtractors: feats = extractor.extract(img) diff --git a/SimpleCV/MachineLearning/SVMClassifier.py b/SimpleCV/MachineLearning/SVMClassifier.py index 447034ead..3dc0ba690 100644 --- a/SimpleCV/MachineLearning/SVMClassifier.py +++ b/SimpleCV/MachineLearning/SVMClassifier.py @@ -132,7 +132,7 @@ def __setstate__(self, mydict): colNames = [] for extractor in self.mFeatureExtractors: colNames.extend(extractor.getFieldNames()) - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) self.mDataSetOrange = orange.ExampleTable(self.mOrangeDomain,self.mDataSetRaw) self.mClassifier = self.mSVMPrototype(self.mDataSetOrange) @@ -178,7 +178,7 @@ def _trainPath(self,path,className,subset,disp,verbose): for i in range(nfiles): infile = files[i] if verbose: - print "Opening file: " + infile + print("Opening file: " + infile) img = Image(infile) featureVector = [] for extractor in self.mFeatureExtractors: @@ -207,7 +207,7 @@ def _trainImageSet(self,imageset,className,subset,disp,verbose): imageset = imageset[0:subset] for img in imageset: if verbose: - print "Opening file: " + img.filename + print("Opening file: " + img.filename) featureVector = [] for extractor in self.mFeatureExtractors: feats = extractor.extract(img) @@ -267,7 +267,7 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) return None # push our data into an 
orange example table - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) self.mDataSetOrange = orange.ExampleTable(self.mOrangeDomain,self.mDataSetRaw) if(savedata is not None): orange.saveTabDelimited (savedata, self.mDataSetOrange) @@ -279,7 +279,7 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) c = self.mClassifier(self.mDataSetOrange[i]) test = self.mDataSetOrange[i].getclass() if verbose: - print "original", test, "classified as", c + print("original", test, "classified as", c) if(test==c): correct = correct + 1 else: @@ -294,10 +294,10 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) confusion = orngStat.confusionMatrices(crossValidator)[0] if verbose: - print("Correct: "+str(good)) - print("Incorrect: "+str(bad)) + print(("Correct: "+str(good))) + print(("Incorrect: "+str(bad))) classes = self.mDataSetOrange.domain.classVar.values - print confusion + print(confusion) #print "\t"+"\t".join(classes) #for className, classConfusions in zip(classes, confusion): # print ("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions)) @@ -334,7 +334,7 @@ def test(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True): colNames = [] for extractor in self.mFeatureExtractors: colNames.extend(extractor.getFieldNames()) - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) dataset = [] for i in range(len(classNames)): @@ -361,12 +361,12 @@ def test(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True): good = 100*(float(correct)/float(count)) bad = 100*(float(count-correct)/float(count)) if verbose: - print("Correct: "+str(good)) - print("Incorrect: "+str(bad)) + print(("Correct: "+str(good))) + print(("Incorrect: "+str(bad))) classes = self.mDataSetOrange.domain.classVar.values - print "\t"+"\t".join(classes) + print("\t"+"\t".join(classes)) for className, classConfusions in zip(classes, confusion): - print ("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions)) + print(("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions))) return [good, bad, confusion] @@ -384,7 +384,7 @@ def _testPath(self,path,className,dataset,subset,disp,verbose): for i in range(nfiles): infile = files[i] if verbose: - print "Opening file: " + infile + print("Opening file: " + infile) img = Image(infile) featureVector = [] for extractor in self.mFeatureExtractors: @@ -422,7 +422,7 @@ def _testImageSet(self,imageset,className,dataset,subset,disp,verbose): imageset = imageset[0:subset] for img in imageset: if verbose: - print "Opening file: " + img.filename + print("Opening file: " + img.filename) featureVector = [] for extractor in self.mFeatureExtractors: feats = extractor.extract(img) diff --git a/SimpleCV/MachineLearning/ShapeContextClassifier.py b/SimpleCV/MachineLearning/ShapeContextClassifier.py index 8f89921d4..e93007a46 100644 --- a/SimpleCV/MachineLearning/ShapeContextClassifier.py +++ b/SimpleCV/MachineLearning/ShapeContextClassifier.py @@ -25,7 +25,7 @@ def __init__(self,images,labels): try: from sklearn import neighbors except: - print "Need scikits learn 
installed" + print("Need scikits learn installed") self.imgMap = {} self.ptMap = {} @@ -37,7 +37,7 @@ def __init__(self,images,labels): import warnings warnings.simplefilter("ignore") for i in range(0,len(images)): - print "precomputing " + images[i].filename + print("precomputing " + images[i].filename) self.imgMap[labels[i]] = images[i] pts,desc,count = self._image2FeatureVector(images[i]) @@ -45,7 +45,7 @@ def __init__(self,images,labels): self.ptMap[labels[i]] = pts self.descMap[labels[i]] = desc knn = neighbors.KNeighborsClassifier() - knn.fit(desc,range(0,len(pts))) + knn.fit(desc,list(range(0,len(pts)))) self.knnMap[labels[i]] = knn def _image2FeatureVector(self,img): @@ -87,7 +87,7 @@ def _doMatching(self,model_name,test_scd): temp = np.sqrt(np.sum(((sample-scd)**2))) #temp = 0.5*np.sum((sample-scd)**2)/np.sum((sample+scd)) if( math.isnan(temp) ): - temp = sys.maxint + temp = sys.maxsize distance.append(temp) return [otherIdx,distance] @@ -112,7 +112,7 @@ def _buildMatchDict(self,image, countBlobs): points,descriptors,count = self._image2FeatureVector(image) matchDict = {} matchStd = {} - for key,value in self.descMap.items(): + for key,value in list(self.descMap.items()): if( countBlobs and self.blobCount[key] == count ): # only do matching for similar number of blobs #need to hold on to correspondences correspondence, distances = self._doMatching(key,descriptors) @@ -137,9 +137,9 @@ def classify(self,image, blobFilter=True): and match quality. """ points,descriptors,count,matchDict,matchStd = self._buildMatchDict(image, blobFilter) - best = sys.maxint + best = sys.maxsize best_name = "No Match" - for k,v in matchDict.items(): + for k,v in list(matchDict.items()): if ( v < best ): best = v best_name = k diff --git a/SimpleCV/MachineLearning/TemporalColorTracker.py b/SimpleCV/MachineLearning/TemporalColorTracker.py index 4f5f5326a..73a22ef45 100644 --- a/SimpleCV/MachineLearning/TemporalColorTracker.py +++ b/SimpleCV/MachineLearning/TemporalColorTracker.py @@ -108,18 +108,18 @@ def train(self,src,roi=None, extractor=None, doCorr=False, maxFrames=1000, self._extractSignalInfo(forceChannel) self._buildSignalProfile() if verbose: - for key in self.data.keys(): - print 30*'-' - print "Channel: {0}".format(key) - print "Data Points: {0}".format(len(self.data[key])) - print "Steady State: {0}+/-{1}".format(self._steadyState[key][0],self._steadyState[key][1]) - print "Peaks: {0}".format(self.peaks[key]) - print "Valleys: {0}".format(self.valleys[key]) - print "Use Peaks: {0}".format(self.doPeaks[key]) - print 30*'-' - print "BEST SIGNAL: {0}".format(self._bestKey) - print "BEST WINDOW: {0}".format(self._window) - print "BEST CUTOFF: {0}".format(self._cutoff) + for key in list(self.data.keys()): + print(30*'-') + print("Channel: {0}".format(key)) + print("Data Points: {0}".format(len(self.data[key]))) + print("Steady State: {0}+/-{1}".format(self._steadyState[key][0],self._steadyState[key][1])) + print("Peaks: {0}".format(self.peaks[key])) + print("Valleys: {0}".format(self.valleys[key])) + print("Use Peaks: {0}".format(self.doPeaks[key])) + print(30*'-') + print("BEST SIGNAL: {0}".format(self._bestKey)) + print("BEST WINDOW: {0}".format(self._window)) + print("BEST CUTOFF: {0}".format(self._cutoff)) def _getDataFromImg(self,img): """ @@ -150,7 +150,7 @@ def _extract(self,src,maxFrames,verbose): img = src.getImage() count = count + 1 if( verbose ): - print "Got Frame {0}".format(count) + print("Got Frame {0}".format(count)) if( isinstance(src,Camera) ): time.sleep(0.05) # let the camera 
sleep if( img is None ): @@ -170,7 +170,7 @@ def _findSteadyState(self,windowSzPrct=0.05): # save the mean and sd of this value # as a tuple in the steadyStateDict self._steadyState = {} - for key in self.data.keys(): + for key in list(self.data.keys()): wndwSz = int(np.floor(windowSzPrct*len(self.data[key]))) signal = self.data[key] # slide the window and get the std @@ -188,7 +188,7 @@ def _findPeaks(self,pkWndw,pkDelta): """ self.peaks = {} self.valleys = {} - for key in self.data.keys(): + for key in list(self.data.keys()): ls = LineScan(self.data[key]) # need to automagically adjust the window # to make sure we get a minimum number of @@ -206,7 +206,7 @@ def _extractSignalInfo(self,forceChannel): bestSpread = 0.00 bestDoPeaks = None bestKey = None - for key in self.data.keys(): + for key in list(self.data.keys()): #Look at which signal has a bigger distance from #the steady state behavior if( len(self.peaks[key]) > 0 ): @@ -233,7 +233,7 @@ def _extractSignalInfo(self,forceChannel): # Now we know which signal has the most spread # and what direction we are looking for. if( forceChannel is not None ): - if(self.data.has_key(forceChannel)): + if(forceChannel in self.data): self._bestKey = forceChannel else: raise Exception('That is not a valid data channel') @@ -299,7 +299,7 @@ def _doCorr(self): self._template = sig / len(self.corrTemplates) self._template /= np.max(self._template) corrVals = [np.correlate(peak/np.max(peak),self._template) for peak in self.corrTemplates] - print corrVals + print(corrVals) self.corrThresh = (np.mean(corrVals),np.std(corrVals)) def _getBestValue(self,img): diff --git a/SimpleCV/MachineLearning/TestTemporalColorTracker.py b/SimpleCV/MachineLearning/TestTemporalColorTracker.py index 4e04a62be..c206ecdbe 100644 --- a/SimpleCV/MachineLearning/TestTemporalColorTracker.py +++ b/SimpleCV/MachineLearning/TestTemporalColorTracker.py @@ -9,7 +9,7 @@ # Matplot Lib example plotting plotc = {'r':'r','g':'g','b':'b','i':'m','h':'y'} -for key in tct.data.keys(): +for key in list(tct.data.keys()): plt.plot(tct.data[key],plotc[key]) for pt in tct.peaks[key]: plt.plot(pt[0],pt[1],'r*') diff --git a/SimpleCV/MachineLearning/TreeClassifier.py b/SimpleCV/MachineLearning/TreeClassifier.py index 9cec21f57..dc43444c8 100644 --- a/SimpleCV/MachineLearning/TreeClassifier.py +++ b/SimpleCV/MachineLearning/TreeClassifier.py @@ -127,7 +127,7 @@ def __setstate__(self, mydict): colNames = [] for extractor in self.mFeatureExtractors: colNames.extend(extractor.getFieldNames()) - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) self.mDataSetOrange = orange.ExampleTable(self.mOrangeDomain,self.mDataSetRaw) if(self.mFlavor == 0): self.mLearner = orange.TreeLearner() @@ -187,7 +187,7 @@ def _trainPath(self,path,className,subset,disp,verbose): for i in range(nfiles): infile = files[i] if verbose: - print "Opening file: " + infile + print("Opening file: " + infile) img = Image(infile) featureVector = [] for extractor in self.mFeatureExtractors: @@ -216,7 +216,7 @@ def _trainImageSet(self,imageset,className,subset,disp,verbose): imageset = imageset[0:subset] for img in imageset: if verbose: - print "Opening file: " + img.filename + print("Opening file: " + img.filename) featureVector = [] for extractor in self.mFeatureExtractors: feats = extractor.extract(img) @@ -278,7 +278,7 @@ def 
train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) logger.warning("No features extracted - bailing") return None - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) self.mDataSetOrange = orange.ExampleTable(self.mOrangeDomain,self.mDataSetRaw) if(savedata is not None): orange.saveTabDelimited (savedata, self.mDataSetOrange) @@ -305,7 +305,7 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) c = self.mClassifier(self.mDataSetOrange[i]) test = self.mDataSetOrange[i].getclass() if verbose: - print "original", test, "classified as", c + print("original", test, "classified as", c) if(test==c): correct = correct + 1 else: @@ -320,13 +320,13 @@ def train(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True) confusion = orngStat.confusionMatrices(crossValidator)[0] if verbose: - print("Correct: "+str(good)) - print("Incorrect: "+str(bad)) + print(("Correct: "+str(good))) + print(("Incorrect: "+str(bad))) if( confusion != 0 ): classes = self.mDataSetOrange.domain.classVar.values - print "\t"+"\t".join(classes) + print("\t"+"\t".join(classes)) for className, classConfusions in zip(classes, confusion): - print ("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple( classConfusions)) + print(("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple( classConfusions))) if(self.mFlavor == 0): self._PrintTree(self.mClassifier) @@ -364,7 +364,7 @@ def test(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True): for extractor in self.mFeatureExtractors: colNames.extend(extractor.getFieldNames()) if(self.mOrangeDomain is None): - self.mOrangeDomain = orange.Domain(map(orange.FloatVariable,colNames),orange.EnumVariable("type",values=self.mClassNames)) + self.mOrangeDomain = orange.Domain(list(map(orange.FloatVariable,colNames)),orange.EnumVariable("type",values=self.mClassNames)) dataset = [] for i in range(len(classNames)): @@ -391,13 +391,13 @@ def test(self,images,classNames,disp=None,subset=-1,savedata=None,verbose=True): good = 100*(float(correct)/float(count)) bad = 100*(float(count-correct)/float(count)) if verbose: - print("Correct: "+str(good)) - print("Incorrect: "+str(bad)) + print(("Correct: "+str(good))) + print(("Incorrect: "+str(bad))) if( confusion != 0 ): classes = self.mDataSetOrange.domain.classVar.values - print "\t"+"\t".join(classes) + print("\t"+"\t".join(classes)) for className, classConfusions in zip(classes, confusion): - print ("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple( classConfusions)) + print(("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple( classConfusions))) return [good, bad, confusion] def _testPath(self,path,className,dataset,subset,disp,verbose): @@ -414,7 +414,7 @@ def _testPath(self,path,className,dataset,subset,disp,verbose): for i in range(nfiles): infile = files[i] if verbose: - print "Opening file: " + infile + print("Opening file: " + infile) img = Image(infile) featureVector = [] for extractor in self.mFeatureExtractors: @@ -452,7 +452,7 @@ def _testImageSet(self,imageset,className,dataset,subset,disp,verbose): imageset = imageset[0:subset] for img in imageset: if verbose: - print "Opening file: " + img.filename + print("Opening file: " + img.filename) featureVector = [] for extractor in self.mFeatureExtractors: feats = extractor.extract(img) @@ 
-499,22 +499,22 @@ def _PrintTree(self,x): elif type(x) == orange.TreeNode: self._PrintTree0(x, 0) else: - raise TypeError, "invalid parameter" + raise TypeError("invalid parameter") def _PrintTree0(self,node,level): #adapted from the orange documentation if not node: - print " "*level + "" + print(" "*level + "") return if node.branchSelector: nodeDesc = node.branchSelector.classVar.name nodeCont = node.distribution - print "\n" + " "*level + "%s (%s)" % (nodeDesc, nodeCont), + print("\n" + " "*level + "%s (%s)" % (nodeDesc, nodeCont), end=' ') for i in range(len(node.branches)): - print "\n" + " "*level + ": %s" % node.branchDescriptions[i], + print("\n" + " "*level + ": %s" % node.branchDescriptions[i], end=' ') self._PrintTree0(node.branches[i], level+1) else: nodeCont = node.distribution majorClass = node.nodeClassifier.defaultValue - print "--> %s (%s) " % (majorClass, nodeCont) + print("--> %s (%s) " % (majorClass, nodeCont)) diff --git a/SimpleCV/MachineLearning/TurkingModule.py b/SimpleCV/MachineLearning/TurkingModule.py index 906ad63f9..047540da8 100644 --- a/SimpleCV/MachineLearning/TurkingModule.py +++ b/SimpleCV/MachineLearning/TurkingModule.py @@ -67,7 +67,7 @@ def __init__(self,source_paths,out_path,classList,key_bindings,preprocess=None, self.out_path = out_path self.keyMap = {} if( len(classList)!=len(key_bindings)): - print "Must have a key for each class." + print("Must have a key for each class.") raise Exception("Must have a key for each class.") for key,cls in zip(key_bindings,classList): self.keyMap[key] = cls @@ -92,9 +92,9 @@ def fakePostProcess(img): self.srcImgs = source_path else: for sp in source_paths: - print "Loading " + sp + print("Loading " + sp) imgSet = ImageSet(sp) - print "Loaded " + str(len(imgSet)) + print("Loaded " + str(len(imgSet))) self.srcImgs += imgSet if( not osp.exists(out_path) ): @@ -114,7 +114,7 @@ def fakePostProcess(img): def _saveIt(self,img,classType): img.clearLayers() path = self.out_path + classType + "/" + classType+str(self.countMap[classType])+".png" - print "Saving: " + path + print("Saving: " + path) img = self.postProcess(img) self.classMap[classType].append(img) img.save(path) @@ -149,7 +149,7 @@ def _drawControls(self,img,font_size,color,spacing ): img.drawText("space - skip",10,spacing,fontsize=font_size,color=color) img.drawText("esc - exit",10,2*spacing,fontsize=font_size,color=color) y = 3*spacing - for k,cls in self.keyMap.items(): + for k,cls in list(self.keyMap.items()): str = k + " - " + cls img.drawText(str,10,y,fontsize=font_size,color=color) y = y + spacing @@ -200,7 +200,7 @@ def turk(self,saveOriginal=False,disp_size=(800,600),showKeys=True,font_size=16, disp = Display(disp_size) bail = False for img in self.srcImgs: - print img.filename + print(img.filename) samples = self.preProcess(img) for sample in samples: if( showKeys ): diff --git a/SimpleCV/MachineLearning/TurkingModuleExample.py b/SimpleCV/MachineLearning/TurkingModuleExample.py index 480e6bd23..e78466e3e 100644 --- a/SimpleCV/MachineLearning/TurkingModuleExample.py +++ b/SimpleCV/MachineLearning/TurkingModuleExample.py @@ -30,11 +30,11 @@ def postprocess(img): turker.turk(font_size=16, color = Color.BLUE, spacing=18) # show what we got -print "="*30 -print "TURKING DONE!" 
+print("="*30) +print("TURKING DONE!") for c in classes: - print "="*30 - print "Showing " + c + print("="*30) + print("Showing " + c) iset = turker.getClass(c) iset.show(0.1) diff --git a/SimpleCV/MachineLearning/query_imgs/flickrapi2.py b/SimpleCV/MachineLearning/query_imgs/flickrapi2.py index efe93c46d..53e5d8a37 100644 --- a/SimpleCV/MachineLearning/query_imgs/flickrapi2.py +++ b/SimpleCV/MachineLearning/query_imgs/flickrapi2.py @@ -47,10 +47,10 @@ import sys import md5 import string -import urllib -import urllib2 +import urllib.request, urllib.parse, urllib.error +import urllib.request, urllib.error, urllib.parse import mimetools -import httplib +import http.client import os.path import xml.dom.minidom @@ -192,7 +192,7 @@ def __sign(self, data): """ dataName = self.secret - keys = data.keys() + keys = list(data.keys()) keys.sort() for a in keys: dataName += (a + data[a]) #print dataName @@ -222,19 +222,19 @@ def __getattr__(self, method, **arg): """ - if not self.__handlerCache.has_key(method): + if method not in self.__handlerCache: def handler(_self = self, _method = method, **arg): _method = "flickr." + _method.replace("_", ".") url = "http://" + FlickrAPI.flickrHost + \ FlickrAPI.flickrRESTForm arg["method"] = _method - postData = urllib.urlencode(arg) + "&api_sig=" + \ + postData = urllib.parse.urlencode(arg) + "&api_sig=" + \ _self.__sign(arg) #print "--url---------------------------------------------" #print url #print "--postData----------------------------------------" #print postData - f = urllib.urlopen(url, postData) + f = urllib.request.urlopen(url, postData) data = f.read() #print "--response----------------------------------------" #print data @@ -260,7 +260,7 @@ def __getAuthURL(self, perms, frob): data = {"api_key": self.apiKey, "frob": frob, "perms": perms} data["api_sig"] = self.__sign(data) return "http://%s%s?%s" % (FlickrAPI.flickrHost, \ - FlickrAPI.flickrAuthForm, urllib.urlencode(data)) + FlickrAPI.flickrAuthForm, urllib.parse.urlencode(data)) #------------------------------------------------------------------- def upload(self, filename=None, jpegData=None, **arg): @@ -294,7 +294,7 @@ def upload(self, filename=None, jpegData=None, **arg): raise UploadException("filename OR jpegData must be specified") # verify key names - for a in arg.keys(): + for a in list(arg.keys()): if a != "api_key" and a != "auth_token" and a != "title" and \ a != "description" and a != "tags" and a != "is_public" and \ a != "is_friend" and a != "is_family": @@ -319,7 +319,7 @@ def upload(self, filename=None, jpegData=None, **arg): for a in ('title', 'description', 'tags', 'is_public', \ 'is_friend', 'is_family'): - if arg.has_key(a): + if a in arg: body += "--%s\r\n" % (boundary) body += "Content-Disposition: form-data; name=\""+a+"\"\r\n\r\n" body += "%s\r\n" % (arg[a]) @@ -341,11 +341,11 @@ def upload(self, filename=None, jpegData=None, **arg): postData = body.encode("utf_8") + data + \ ("--%s--" % (boundary)).encode("utf_8") - request = urllib2.Request(url) + request = urllib.request.Request(url) request.add_data(postData) request.add_header("Content-Type", \ "multipart/form-data; boundary=%s" % boundary) - response = urllib2.urlopen(request) + response = urllib.request.urlopen(request) rspXML = response.read() return XMLNode.parseXML(rspXML) @@ -515,7 +515,7 @@ def main(argv): # and print them for a in rsp.photos[0].photo: - print "%10s: %s" % (a['id'], a['title'].encode("ascii", "replace")) + print("%10s: %s" % (a['id'], a['title'].encode("ascii", "replace"))) # upload the file 
foo.jpg #rsp = fapi.upload(filename="foo.jpg", \ diff --git a/SimpleCV/MachineLearning/query_imgs/get_imgs_geo_gps_search.py b/SimpleCV/MachineLearning/query_imgs/get_imgs_geo_gps_search.py index bba10c7fb..901433691 100644 --- a/SimpleCV/MachineLearning/query_imgs/get_imgs_geo_gps_search.py +++ b/SimpleCV/MachineLearning/query_imgs/get_imgs_geo_gps_search.py @@ -55,8 +55,8 @@ def DoSearch(fapi,query_string,desired_photos): - print datetime.fromtimestamp(mintime) - print datetime.fromtimestamp(endtime) + print(datetime.fromtimestamp(mintime)) + print(datetime.fromtimestamp(endtime)) while (maxtime < endtime): @@ -69,9 +69,9 @@ def DoSearch(fapi,query_string,desired_photos): upper_bound = mintime + timeskip * 20 #upper bound of the upper time limit maxtime = .95 * lower_bound + .05 * upper_bound - print '\nBinary search on time range upper bound' - print 'Lower bound is ' + str(datetime.fromtimestamp(lower_bound)) - print 'Upper bound is ' + str(datetime.fromtimestamp(upper_bound)) + print('\nBinary search on time range upper bound') + print('Lower bound is ' + str(datetime.fromtimestamp(lower_bound))) + print('Upper bound is ' + str(datetime.fromtimestamp(upper_bound))) keep_going = 6 #search stops after a fixed number of iterations while( keep_going > 0 and maxtime < endtime): @@ -94,21 +94,21 @@ def DoSearch(fapi,query_string,desired_photos): null_test = int(total_images); #want to make sure this won't crash later on for some reason null_test = float(total_images); - print '\nnumimgs: ' + total_images - print 'mintime: ' + str(mintime) + ' maxtime: ' + str(maxtime) + ' timeskip: ' + str(maxtime - mintime) + print('\nnumimgs: ' + total_images) + print('mintime: ' + str(mintime) + ' maxtime: ' + str(maxtime) + ' timeskip: ' + str(maxtime - mintime)) if( int(total_images) > desired_photos ): - print 'too many photos in block, reducing maxtime' + print('too many photos in block, reducing maxtime') upper_bound = maxtime maxtime = (lower_bound + maxtime) / 2 #midpoint between current value and lower bound. if( int(total_images) < desired_photos): - print 'too few photos in block, increasing maxtime' + print('too few photos in block, increasing maxtime') lower_bound = maxtime maxtime = (upper_bound + maxtime) / 2 - print 'Lower bound is ' + str(datetime.fromtimestamp(lower_bound)) - print 'Upper bound is ' + str(datetime.fromtimestamp(upper_bound)) + print('Lower bound is ' + str(datetime.fromtimestamp(lower_bound))) + print('Upper bound is ' + str(datetime.fromtimestamp(upper_bound))) if( int(total_images) > 0): #only if we're not in a degenerate case keep_going = keep_going - 1 @@ -119,14 +119,14 @@ def DoSearch(fapi,query_string,desired_photos): print('Keyboard exception while querying for images, exiting\n') raise except: - print sys.exc_info()[0] + print(sys.exc_info()[0]) #print type(inst) # the exception instance #print inst.args # arguments stored in .args #print inst # __str__ allows args to printed directly print ('Exception encountered while querying for images\n') #end of while binary search - print 'finished binary search' + print('finished binary search') return([mintime,maxtime,total_images,rsp]) @@ -154,7 +154,7 @@ def DoSearch(fapi,query_string,desired_photos): for line in query_file: if line[0] != '#' and len(line) > 1: #line end character is 2 long? 
-            print line[0:len(line)-1]
+            print(line[0:len(line)-1])
             if line[0] != '-':
                 pos_queries = pos_queries + [line[0:len(line)-1]]
                 num_queries = num_queries + 1
@@ -162,10 +162,10 @@ def DoSearch(fapi,query_string,desired_photos):
             neg_queries = neg_queries + ' ' + line[0:len(line)-1]
 
 query_file.close()
-print 'positive queries: '
-print pos_queries
-print 'negative queries: ' + neg_queries
-print 'num_queries = ' + str(num_queries)
+print('positive queries: ')
+print(pos_queries)
+print('negative queries: ' + neg_queries)
+print('num_queries = ' + str(num_queries))
 
 #this is the desired number of photos in each block
 
@@ -193,27 +193,27 @@ def DoSearch(fapi,query_string,desired_photos):
 
     #form the query string.
     query_string = pos_queries[current_tag] + ' ' + neg_queries
-    print '\n\nquery_string is ' + query_string
+    print('\n\nquery_string is ' + query_string)
 
     total_images_queried = 0;
 
     [mintime,maxtime,total_images,rsp] = DoSearch(fapi,query_string,desired_photos)
-    print('GETTING TOTATL IMAGES:'+str(total_images))
+    print('GETTING TOTAL IMAGES:'+str(total_images))
 
     s = '\nmintime: ' + str(mintime) + ' maxtime: ' + str(maxtime)
-    print s
+    print(s)
     out_file.write(s + '\n')
 
     i = getattr(rsp,'photos',None)
     if i:
 
         s = 'numimgs: ' + total_images
-        print s
+        print(s)
         out_file.write(s + '\n')
 
         current_image_num = 1;
 
         num = 4 # CHANGE THIS BACK int(rsp.photos[0]['pages'])
         s = 'total pages: ' + str(num)
-        print s
+        print(s)
         out_file.write(s + '\n')
 
         #only visit 16 pages max, to try and avoid the dreaded duplicate bug
@@ -222,7 +222,7 @@ def DoSearch(fapi,query_string,desired_photos):
         num_visit_pages = min(16,num)
 
         s = 'visiting only ' + str(num_visit_pages) + ' pages ( up to ' + str(num_visit_pages * 250) + ' images)'
-        print s
+        print(s)
         out_file.write(s + '\n')
 
         total_images_queried = total_images_queried + min((num_visit_pages * 250), int(total_images))
@@ -235,7 +235,7 @@ def DoSearch(fapi,query_string,desired_photos):
         while( pagenum <= num_visit_pages ):
         #for pagenum in range(1, num_visit_pages + 1):  #page one is searched twice
-            print ' page number ' + str(pagenum)
+            print(' page number ' + str(pagenum))
 
             try:
                 print("PAGE")
@@ -273,7 +273,7 @@ def DoSearch(fapi,query_string,desired_photos):
                 print('Keyboard exception while querying for images, exiting\n')
                 raise
             except:
-                print sys.exc_info()[0]
+                print(sys.exc_info()[0])
                 #print type(inst)     # the exception instance
                 #print inst.args      # arguments stored in .args
                 #print inst           # __str__ allows args to printed directly
diff --git a/SimpleCV/Segmentation/SegmentationBase.py b/SimpleCV/Segmentation/SegmentationBase.py
index 7af44b6ae..8d7597cca 100644
--- a/SimpleCV/Segmentation/SegmentationBase.py
+++ b/SimpleCV/Segmentation/SegmentationBase.py
@@ -3,7 +3,7 @@
 from SimpleCV.Color import Color
 from SimpleCV.ImageClass import Image
 
-class SegmentationBase(object):
+class SegmentationBase(object, metaclass=abc.ABCMeta):
     """
     Right now I am going to keep this class as brain dead and single threaded as possible just so I
     can get the hang of abc in python. The idea behind a segmentation
@@ -13,8 +13,6 @@ class SegmentationBase(object):
     specific image processing events.
     """
 
-    __metaclass__ = abc.ABCMeta
-
     def load(cls, fname):
         """
         load segmentation settings to file. 
diff --git a/SimpleCV/Shell/Example.py b/SimpleCV/Shell/Example.py index 229d65036..76fcdbfc8 100644 --- a/SimpleCV/Shell/Example.py +++ b/SimpleCV/Shell/Example.py @@ -26,37 +26,37 @@ def magic_examples(self, arg): if isinstance(arg, str) and arg == "": counter = 0 - print "Available Examples:" - print "--------------------------------------------" + print("Available Examples:") + print("--------------------------------------------") for file in file_names: - print "[",counter,"]:",file + print("[",counter,"]:",file) counter += 1 - print "Just type example #, to run the example on the list" - print "for instance: example 1" - print "" - print "Close the window or press ctrl+c to stop the example" + print("Just type example #, to run the example on the list") + print("for instance: example 1") + print("") + print("Close the window or press ctrl+c to stop the example") elif isinstance(iarg, int): - print "running example:", files[iarg] + print("running example:", files[iarg]) try: call([sys.executable, files[iarg]]) except: - print "Couldn't run example:", files[iarg] + print("Couldn't run example:", files[iarg]) elif isinstance(arg, str) and arg.lower() == "joshua": - print "GREETINGS PROFESSOR FALKEN" - print "" - print "HELLO" - print "" - print "A STRANGE GAME." - print "THE ONLY WINNING MOVE IS" - print "NOT TO PLAY." - print "" - print "HOW ABOUT A NICE GAME OF CHESS?" - print "" + print("GREETINGS PROFESSOR FALKEN") + print("") + print("HELLO") + print("") + print("A STRANGE GAME.") + print("THE ONLY WINNING MOVE IS") + print("NOT TO PLAY.") + print("") + print("HOW ABOUT A NICE GAME OF CHESS?") + print("") else: - print "Example: " + arg + " does not exist, or an error occurred" + print("Example: " + arg + " does not exist, or an error occurred") diff --git a/SimpleCV/Shell/Shell.py b/SimpleCV/Shell/Shell.py index 569dae1b9..ab9ad1db7 100755 --- a/SimpleCV/Shell/Shell.py +++ b/SimpleCV/Shell/Shell.py @@ -58,8 +58,8 @@ def plot(arg): logger.warning("Matplotlib is not installed and required") return - print "args", arg - print "type", type(arg) + print("args", arg) + print("type", type(arg)) plt.plot(arg) plt.show() @@ -230,7 +230,7 @@ def main(*args): sys.exit() elif flag == 'update': - print "Updating SimpleCV....." + print("Updating SimpleCV.....") self_update() if flag in ['--headless', 'headless']: diff --git a/SimpleCV/Shell/Tutorial.py b/SimpleCV/Shell/Tutorial.py index 944ef2930..40d6412cb 100644 --- a/SimpleCV/Shell/Tutorial.py +++ b/SimpleCV/Shell/Tutorial.py @@ -32,10 +32,10 @@ def attempt(variable_name, desired_class): if isinstance(variable,desired_class): if desired_class == Image: if variable.isEmpty(): - print lb - print "Although you can create empty Images on SimpleCV, let's not" - print "play with that now!" - print lb + print(lb) + print("Although you can create empty Images on SimpleCV, let's not") + print("play with that now!") + print(lb) return False return True @@ -44,7 +44,7 @@ def attempt(variable_name, desired_class): def prompt_and_run(): - command = raw_input("SimpleCV:> ") + command = input("SimpleCV:> ") tutorial_interpreter.runsource(command) return command @@ -54,112 +54,112 @@ def request_show_command(): return def end_tutorial(): - print lb - print "Type 'quit' to leave the tutorials, or press Enter to move on!" 
-    command = raw_input("SimpleCV:> ")
+    print(lb)
+    print("Type 'quit' to leave the tutorials, or press Enter to move on!")
+    command = input("SimpleCV:> ")
     return command.lower() == 'quit'
 
 def end_of_tutorial():
-    print lb
-    print "This is the end of our tutorial!"
-    print lb
-    print "For more help, go to www.simplecv.org, and don't forget about the"
-    print "help function!"
-    print lb
+    print(lb)
+    print("This is the end of our tutorial!")
+    print(lb)
+    print("For more help, go to www.simplecv.org, and don't forget about the")
+    print("help function!")
+    print(lb)
 
 def command_loop(command, desired_tuple):
     while True:
-        print command
-        print lb
+        print(command)
+        print(lb)
 
         if attempt(desired_tuple[0], desired_tuple[1]):
             return
 
-        print lb
-        print "Oops! %s is still not %s" % (desired_tuple[0], str(desired_tuple[1]))
+        print(lb)
+        print("Oops! %s is still not %s" % (desired_tuple[0], str(desired_tuple[1])))
 
 def tutorial_image():
     shellclear()
-    print "SimpleCV Image tutorial"
-    print "-----------------------"
-    print lb
-    print "Using images is simple, in SimpleCV."
-    print lb
-    print "First thing we are going to do is load an image. Try it yourself:"
-    print lb
+    print("SimpleCV Image tutorial")
+    print("-----------------------")
+    print(lb)
+    print("Using images is simple, in SimpleCV.")
+    print(lb)
+    print("First thing we are going to do is load an image. Try it yourself:")
+    print(lb)
 
     cmd = "logo = Image(\"simplecv\")"
     desired_tuple = ('logo', Image)
     command_loop(cmd, desired_tuple)
 
-    print lb
-    print "Correct! You just loaded SimpleCV logo into memory."
-    print "Let's try it to use one of your images. There are different ways to"
-    print "do that. You can try, for example:"
-    print lb
-    print "img = Image(URL_TO_MY_PICTURE) or img = Image(PATH_TO_MY_PICTURE)"
-    print lb
+    print(lb)
+    print("Correct! You just loaded the SimpleCV logo into memory.")
+    print("Let's try to use one of your images. There are different ways to")
+    print("do that. You can try, for example:")
+    print(lb)
+    print("img = Image(URL_TO_MY_PICTURE) or img = Image(PATH_TO_MY_PICTURE)")
+    print(lb)
 
     cmd = "Example: img = Image('http://simplecv.org/logo.jpg')"
     desired_tuple = ('img', Image)
     command_loop(cmd, desired_tuple)
 
-    print lb
-    print "Perfect! Now we want to see it:"
-    print lb
+    print(lb)
+    print("Perfect! Now we want to see it:")
+    print(lb)
 
     cmd = "img.show()"
-    print cmd
-    print lb
+    print(cmd)
+    print(lb)
 
     request_show_command()
 
-    print lb
-    print "Alright! This was tutorial 1/6."
-    print "Next tutorial: Saving Images"
+    print(lb)
+    print("Alright! This was tutorial 1/6.")
+    print("Next tutorial: Saving Images")
 
     if not end_tutorial():
         tutorial_save()
     return
 
 def tutorial_save():
     shellclear()
-    print "Saving Images"
-    print lb
-    print "Once you have an Image Object loaded in memory you can"
-    print "now save it to disk."
-    print lb
-    raw_input("[Press enter to continue]")
-    print lb
-    print "Saving an image is very simple, pardon the pun. 
Once it's loaded" - print "into memory, it's literally just:" - print "img.save()" - print lb - print "This will save the image back to the location it was loaded from" - print "so if you did img = Image('/tmp/test.jpg'), then it would save" - print "it back there, otherwise you can do:" - print "img.save('/any/path/you/want')" - print lb - print "So try it now and save an image somewhere on your system" - print lb + print("Saving Images") + print(lb) + print("Once you have an Image Object loaded in memory you can") + print("now save it to disk.") + print(lb) + input("[Press enter to continue]") + print(lb) + print("Saving an image is very simple, pardon the pun. Once it's loaded") + print("into memory, it's literally just:") + print("img.save()") + print(lb) + print("This will save the image back to the location it was loaded from") + print("so if you did img = Image('/tmp/test.jpg'), then it would save") + print("it back there, otherwise you can do:") + print("img.save('/any/path/you/want')") + print(lb) + print("So try it now and save an image somewhere on your system") + print(lb) if platform.system() == "Windows": - print "img.save('C:/myimg.jpg')" + print("img.save('C:/myimg.jpg')") else: - print "img.save('/tmp/new.jpg')" + print("img.save('/tmp/new.jpg')") - print lb + print(lb) while True: if prompt_and_run().startswith('img.save'): break - print "Please try to save img!" - print lb + print("Please try to save img!") + print(lb) - print "Correct, you just saved a new copy of your image!" - print "As you can see in SimpleCV most of the functions are intuitive." + print("Correct, you just saved a new copy of your image!") + print("As you can see in SimpleCV most of the functions are intuitive.") - print lb - print "Alright! This was tutorial 2/6." - print "Next tutorial: Camera" + print(lb) + print("Alright! This was tutorial 2/6.") + print("Next tutorial: Camera") if not end_tutorial(): tutorial_camera() return @@ -167,56 +167,56 @@ def tutorial_save(): def tutorial_camera(): shellclear() - print "Camera" - print lb - print "As long as your camera driver is supported then you shouldn't have a" - print "problem. Type 'skip' to skip the camera tutorial, or press Enter to" - print "continue." - print lb - - command = raw_input("SimpleCV:> ") + print("Camera") + print(lb) + print("As long as your camera driver is supported then you shouldn't have a") + print("problem. Type 'skip' to skip the camera tutorial, or press Enter to") + print("continue.") + print(lb) + + command = input("SimpleCV:> ") if command.lower() != 'skip': - print lb - print "To load the camera, just type:" - print lb + print(lb) + print("To load the camera, just type:") + print(lb) cmd = "cam = Camera()" desired_tuple = ('cam', Camera) command_loop(cmd, desired_tuple) - print lb - print "Next, to grab an image from the Camera we type:" + print(lb) + print("Next, to grab an image from the Camera we type:") cmd = "img = cam.getImage()" tutorial_interpreter.runsource("del(img)") desired_tuple = ('img', Image) command_loop(cmd, desired_tuple) - print "Just as before, if we want to display it, we just type:" - print lb - print "img.show()" - print lb + print("Just as before, if we want to display it, we just type:") + print(lb) + print("img.show()") + print(lb) request_show_command() - print lb - print "Alright! This was tutorial 3/6." - print "Next tutorial: Copying Images" + print(lb) + print("Alright! 
This was tutorial 3/6.")
+    print("Next tutorial: Copying Images")
 
     if not end_tutorial():
         tutorial_copy()
     return
 
 def tutorial_copy():
     shellclear()
-    print "Copying Images"
-    print lb
-    print "If you need a copy of an image, this is also very simple:"
-    print "Let's try to clone img, which we already have."
+    print("Copying Images")
+    print(lb)
+    print("If you need a copy of an image, this is also very simple:")
+    print("Let's try to clone img, which we already have.")
 
     global img
     if not img:
         img = Image("lenna")
 
-    print lb
+    print(lb)
 
     cmd = "clone = img.copy()"
     desired_tuple = ('clone', Image)
@@ -225,37 +225,37 @@ def tutorial_copy():
         if clone != img: #Returns False if they have different addresses.
             break
 
-        print "You have to use the copy() function!"
+        print("You have to use the copy() function!")
 
-    print lb
-    print "Correct, you just cloned an image into memory."
-    print "You need to be careful when using this method though as using as a"
-    print "reference vs. a copy. For instance, if you just typed:"
-    print lb
-    print "clone = img"
-    print lb
-    print "clone would actually point at the same thing in memory as img."
+    print(lb)
+    print("Correct, you just cloned an image into memory.")
+    print("You need to be careful when using this method, though, as using a")
+    print("reference vs. a copy. For instance, if you just typed:")
+    print(lb)
+    print("clone = img")
+    print(lb)
+    print("clone would actually point at the same thing in memory as img.")
 
-    print lb
-    print "Alright! This was tutorial 4/6."
-    print "Next tutorial: Manipulating Images"
+    print(lb)
+    print("Alright! This was tutorial 4/6.")
+    print("Next tutorial: Manipulating Images")
 
     if not end_tutorial():
         tutorial_manipulation()
     return
 
 def tutorial_manipulation():
     shellclear()
-    print "Manipulating Images"
-    print lb
-    print "Now we can easily load and save images. It's time to start doing some"
-    print "image processing with them. Let's make img, which we already have, a"
-    print "90x90 thumbnail:"
+    print("Manipulating Images")
+    print(lb)
+    print("Now we can easily load and save images. It's time to start doing some")
+    print("image processing with them. Let's make img, which we already have, a")
+    print("90x90 thumbnail:")
 
     global img
     if not img:
         img = Image("lenna")
 
-    print lb
+    print(lb)
 
     cmd = "thumb = img.scale(90,90)"
     desired_tuple = ('thumb', Image)
@@ -264,45 +264,45 @@ def tutorial_manipulation():
         if thumb.size() == (90,90):
             break
 
-        print "Your thumbnail's size isn't 90x90! Try again!"
+        print("Your thumbnail's size isn't 90x90! Try again!")
 
-    print lb
-    print "Now display it with thumb.show()"
-    print lb
+    print(lb)
+    print("Now display it with thumb.show()")
+    print(lb)
 
     request_show_command()
 
-    print lb
-    print "Now let's erode the picture some:"
-    print lb
+    print(lb)
+    print("Now let's erode the picture some:")
+    print(lb)
 
     cmd = "eroded = img.erode()"
     desired_tuple = ('eroded', Image)
     command_loop(cmd, desired_tuple)
 
-    print lb
-    print "Display it with eroded.show(). It should look almost as if the image"
-    print "was made if ink and had water spoiled on it."
-    print lb
+    print(lb)
+    print("Display it with eroded.show(). 
It should look almost as if the image")
+    print("was made of ink and had water spilled on it.")
+    print(lb)
 
     request_show_command()
 
-    print lb
-    print "Last but not least, let's crop a section of the image out:"
-    print lb
+    print(lb)
+    print("Last but not least, let's crop a section of the image out:")
+    print(lb)
 
     cmd = "cropped = img.crop(100, 100, 50, 50)"
     desired_tuple = ('cropped', Image)
     command_loop(cmd, desired_tuple)
 
-    print lb
-    print "Use cropped.show() to display it."
-    print lb
+    print(lb)
+    print("Use cropped.show() to display it.")
+    print(lb)
 
     request_show_command()
 
-    print lb
-    print "That went from the coordinate in (X,Y), which is (0,0) and is the"
-    print "top left corner of the picture, to coordinates (100,100) in the"
-    print "(X,Y) and cropped a picture from that which is 50 pixels by 50 pixels."
-
-    print lb
-    print "Alright! This was tutorial 5/6."
-    print "Next tutorial: Features"
+    print(lb)
+    print("That went from the coordinate in (X,Y), which is (0,0) and is the")
+    print("top left corner of the picture, to coordinates (100,100) in the")
+    print("(X,Y) and cropped a picture from that which is 50 pixels by 50 pixels.")
+
+    print(lb)
+    print("Alright! This was tutorial 5/6.")
+    print("Next tutorial: Features")
 
     if not end_tutorial():
         tutorial_features()
     return
@@ -311,127 +311,127 @@ def tutorial_manipulation():
 
 def tutorial_slicing():
     shellclear()
-    print "Slicing Images"
-    print lb
-    print "Slicing is sort of a new paradigm to access parts of an image."
-    print "Typically in vision a region of interest (ROI) is given. "
-    print "In this case, slicing is a very powerful way to access parts"
-    print "of an image, or basically any matrix in SimpleCV in general."
-    print lb
-    print "This is done by using:"
-    print "section = img[1:10,1:10]"
-    print lb
-    print "What is returned is an image object with that window."
-    print "the slicing basically acts like a ROI but returns an image"
-    print "so if you wanted to say run edge detection on a 20x20 box"
-    print "in the picture that started at x=5,y=10 you use:"
-    print "foundedges = img[5:25,10:30].edges()"
-    print lb
-    raw_input("[Press enter to continue]")
+    print("Slicing Images")
+    print(lb)
+    print("Slicing is sort of a new paradigm to access parts of an image.")
+    print("Typically in vision a region of interest (ROI) is given. 
") + print("In this case, slicing is a very powerful way to access parts") + print("of an image, or basically any matrix in SimpleCV in general.") + print(lb) + print("This is done by using:") + print("section = img[1:10,1:10]") + print(lb) + print("What is returned is an image object with that window.") + print("the slicing basically acts like a ROI but returns an image") + print("so if you wanted to say run edge detection on a 20x20 box") + print("in the picture that started at x=5,y=10 you use:") + print("foundedges = img[5:25,10:30].edges()") + print(lb) + input("[Press enter to continue]") shellclear() in_text = "" shouldbe = "ROI = img[1:6,1:6]" - print "Please type this now:" - print shouldbe - print lb + print("Please type this now:") + print(shouldbe) + print(lb) while (in_text != shouldbe): - in_text = raw_input("SimpleCV:>") + in_text = input("SimpleCV:>") if(in_text != shouldbe): - print "sorry, that is incorrect" - print "please type:" - print shouldbe + print("sorry, that is incorrect") + print("please type:") + print(shouldbe) shellclear() - print "Correct, you just returned a 5 pixel by 5 pixel image object" - print lb + print("Correct, you just returned a 5 pixel by 5 pixel image object") + print(lb) return def tutorial_features(): shellclear() - print "Features" - print lb - print "Features are things you are looking for in the picture. They can be" - print "blobs, corners, lines, etc. Features are sometimes referred to as a" - print "fiducial in computer vision. These features are something that is" - print "measurable, and something that makes images unique. Features are" - print "something like when comparing things like fruit. In this case the" - print "features could be the shape and the color, amongst others." - print lb - print "What features are in SimpleCV is an abstract representation of that." - print "You take your image, then perform a function on it, and get back" - print "features or another image with them applied. The crop example is" - print "a case where an image is returned after we perform something." - print lb - print "In a simple example we will use the famous 'lenna' image, and find" - print "corners in the picture." - print lb + print("Features") + print(lb) + print("Features are things you are looking for in the picture. They can be") + print("blobs, corners, lines, etc. Features are sometimes referred to as a") + print("fiducial in computer vision. These features are something that is") + print("measurable, and something that makes images unique. Features are") + print("something like when comparing things like fruit. In this case the") + print("features could be the shape and the color, amongst others.") + print(lb) + print("What features are in SimpleCV is an abstract representation of that.") + print("You take your image, then perform a function on it, and get back") + print("features or another image with them applied. 
The crop example is") + print("a case where an image is returned after we perform something.") + print(lb) + print("In a simple example we will use the famous 'lenna' image, and find") + print("corners in the picture.") + print(lb) tutorial_interpreter.runsource("img = Image('lenna')") - print "img = Image('lenna') (already done for you)" - print lb - print "Try it yourself:" - print lb + print("img = Image('lenna') (already done for you)") + print(lb) + print("Try it yourself:") + print(lb) cmd = "corners = img.findCorners()" desired_tuple = ('corners', FeatureSet) command_loop(cmd, desired_tuple) - print lb - print "Correct, you just got a featureset object which contains" - print "feature objects. These feature objects contain data from the" - print "found corners" - print lb + print(lb) + print("Correct, you just got a featureset object which contains") + print("feature objects. These feature objects contain data from the") + print("found corners") + print(lb) - print "Tip: If your are unsure what parameters to pass, you can always use" - print "the built in help support by typing help(Image.findCorners). Keep in" - print "mind that this help works for all of the functions available in" - print "SimpleCV" - print lb + print("Tip: If your are unsure what parameters to pass, you can always use") + print("the built in help support by typing help(Image.findCorners). Keep in") + print("mind that this help works for all of the functions available in") + print("SimpleCV") + print(lb) - print "We can also do that with blobs. Try it:" - print lb + print("We can also do that with blobs. Try it:") + print(lb) cmd = "blobs = img.findBlobs()" desired_tuple = ('blobs', FeatureSet) command_loop(cmd, desired_tuple) - print lb - print "Great, but..." - print "When we show the image we won't notice anything different. This" - print "is because we have to actually tell the blobs to draw themselves" - print "on the image:" - print lb - print "blobs.draw()" - print lb + print(lb) + print("Great, but...") + print("When we show the image we won't notice anything different. This") + print("is because we have to actually tell the blobs to draw themselves") + print("on the image:") + print(lb) + print("blobs.draw()") + print(lb) while True: if prompt_and_run().endswith('.draw()'): break - print "No blobs have been drawn!" - print lb + print("No blobs have been drawn!") + print(lb) - print "Now use img.show() to see the changes!" - print lb + print("Now use img.show() to see the changes!") + print(lb) request_show_command() - print lb - raw_input("[Press enter to continue]") + print(lb) + input("[Press enter to continue]") - print lb - print lb - print "There's also a small trick built into SimpleCV to do this even faster" - print lb + print(lb) + print(lb) + print("There's also a small trick built into SimpleCV to do this even faster") + print(lb) tutorial_interpreter.runsource("img = Image('lenna')") - print "img = Image('lenna') (already done for you)" - print lb + print("img = Image('lenna') (already done for you)") + print(lb) while True: - print "img.findBlobs().show()" - print lb + print("img.findBlobs().show()") + print(lb) if prompt_and_run().endswith('.show()'): break - print "Nothing has been shown!" - print lb + print("Nothing has been shown!") + print(lb) - print lb - print "Alright! This was tutorial 6/6." + print(lb) + print("Alright! This was tutorial 6/6.") #print "Next tutorial: ..." 
     return
 
@@ -443,18 +443,18 @@ def magic_tutorial(self,arg):
 
     if (arg == ""):
         shellclear()
-        print "+--------------------------------+"
-        print " Welcome to the SimpleCV tutorial "
-        print "+--------------------------------+"
-        print lb
-        print "At anytime on the SimpleCV Interactive Shell you can type tutorial,"
-        print "then press the tab key and it will autocomplete any tutorial that"
-        print "is currently available."
-        print lb
-        print "Let's start off with Loading and Saving images!"
-        print lb
-        print lb
-        raw_input("[Press enter to continue]")
+        print("+--------------------------------+")
+        print(" Welcome to the SimpleCV tutorial ")
+        print("+--------------------------------+")
+        print(lb)
+        print("At any time on the SimpleCV Interactive Shell you can type tutorial,")
+        print("then press the tab key and it will autocomplete any tutorial that")
+        print("is currently available.")
+        print(lb)
+        print("Let's start off with Loading and Saving images!")
+        print(lb)
+        print(lb)
+        input("[Press enter to continue]")
         tutorial_image()
         end_of_tutorial()
         return
@@ -462,4 +462,4 @@ def magic_tutorial(self,arg):
     if arg in tutorials_dict:
         tutorials_dict[arg]()
     else:
-        print "%s is not a tutorial!" % arg
+        print("%s is not a tutorial!" % arg)
diff --git a/SimpleCV/Shell/__init__.py b/SimpleCV/Shell/__init__.py
index b7f24e3d7..bb7aa574c 100644
--- a/SimpleCV/Shell/__init__.py
+++ b/SimpleCV/Shell/__init__.py
@@ -1 +1 @@
-from Shell import *
+from .Shell import *
diff --git a/SimpleCV/Stream.py b/SimpleCV/Stream.py
index 7c8fd4b6d..d59153ec2 100644
--- a/SimpleCV/Stream.py
+++ b/SimpleCV/Stream.py
@@ -59,9 +59,9 @@ def do_GET(self):
                 self.end_headers()
                 self.wfile.write(_jpegstreamers[port].jpgdata.getvalue() + "\r\n")
                 lasttimeserved = time.time()
-            except socket.error, e:
+            except socket.error as e:
                 return
-            except IOError, e:
+            except IOError as e:
                 return
             count = count + 1
 
@@ -118,7 +118,7 @@ def __init__(self, hostandport = 8080, st=0.1 ):
         if (type(hostandport) == int):
             self.port = hostandport
             self.host = "localhost"
-        elif (isinstance(hostandport, basestring) and re.search(":", hostandport)):
+        elif (isinstance(hostandport, str) and re.search(":", hostandport)):
             (self.host, self.port) = hostandport.split(":")
             self.port = int(self.port)
         elif (type(hostandport) == tuple):
diff --git a/SimpleCV/Tracking/CAMShiftTracker.py b/SimpleCV/Tracking/CAMShiftTracker.py
index f1fba652f..c92f1cf1e 100644
--- a/SimpleCV/Tracking/CAMShiftTracker.py
+++ b/SimpleCV/Tracking/CAMShiftTracker.py
@@ -84,7 +84,7 @@ def camshiftTracker(img, bb, ts, **kwargs):
         elif key == 'num_frames':
             num_frames = kwargs[key]
 
-    hsv = cv2.cvtColor(img.getNumpyCv2(), cv2.cv.CV_BGR2HSV)
+    hsv = cv2.cvtColor(img.getNumpyCv2(), cv2.COLOR_BGR2HSV)
 
     if mask is None:
         mask = cv2.inRange(hsv, lower, upper)
diff --git a/SimpleCV/Tracking/LKTracker.py b/SimpleCV/Tracking/LKTracker.py
index fa3a81ddd..9c8362c81 100644
--- a/SimpleCV/Tracking/LKTracker.py
+++ b/SimpleCV/Tracking/LKTracker.py
@@ -102,11 +102,11 @@ def lkTracker(img, bb, ts, oldimg, **kwargs):
     pt = cv2.goodFeaturesToTrack(g, maxCorners = maxCorners, qualityLevel = qualityLevel, minDistance = minDistance, blockSize = blockSize)
 
     if type(pt) == type(None):
-        print "no points"
+        print("no points")
         track = LK(img, bb, pt)
         return track
 
-    for i in xrange(len(pt)):
+    for i in range(len(pt)):
         pt[i][0][0] = pt[i][0][0]+bb[0]
         pt[i][0][1] = pt[i][0][1]+bb[1]
 
@@ -123,7 +123,7 @@ def lkTracker(img, bb, ts, oldimg, **kwargs):
     d = abs(p0-p0r).reshape(-1, 2).max(-1)
     good = d < 1
     new_pts=[]
-    for pts, val in 
itertools.izip(p1, good):
+    for pts, val in zip(p1, good):
         if val:
             new_pts.append([pts[0][0], pts[0][1]])
     if ts[-1:]:
@@ -134,7 +134,7 @@ def lkTracker(img, bb, ts, oldimg, **kwargs):
     old_pts = new_pts
     dx=[]
     dy=[]
-    for p1, p2 in itertools.izip(old_pts, new_pts):
+    for p1, p2 in zip(old_pts, new_pts):
         dx.append(p2[0]-p1[0])
         dy.append(p2[1]-p1[1])
 
diff --git a/SimpleCV/Tracking/MFTracker.py b/SimpleCV/Tracking/MFTracker.py
index 8bfb1a7b4..f5ae29c5a 100644
--- a/SimpleCV/Tracking/MFTracker.py
+++ b/SimpleCV/Tracking/MFTracker.py
@@ -156,7 +156,7 @@ def fbtrack(imgI, imgJ, bb, numM=10, numN=10,margin=5,winsize_ncc=10, winsize_lk
     #print newBB, "fbtrack passing newBB"
     return (newBB, scaleshift)
 
-def lktrack(img1, img2, ptsI, nPtsI, winsize_ncc=10, win_size_lk=4, method=cv2.cv.CV_TM_CCOEFF_NORMED):
+def lktrack(img1, img2, ptsI, nPtsI, winsize_ncc=10, win_size_lk=4, method=cv2.TM_CCOEFF_NORMED):
     """
     **SUMMARY**
     (Dev Zone)
@@ -454,7 +454,7 @@ def euclideanDistance(point1,point2):
     match = ((point1[:,0]-point2[:,0])**2+(point1[:,1]-point2[:,1])**2)**0.5
     return match
 
-def normCrossCorrelation(img1, img2, pt0, pt1, status, winsize, method=cv2.cv.CV_TM_CCOEFF_NORMED):
+def normCrossCorrelation(img1, img2, pt0, pt1, status, winsize, method=cv2.TM_CCOEFF_NORMED):
     """
     **SUMMARY**
     (Dev Zone)
diff --git a/SimpleCV/Tracking/SURFTracker.py b/SimpleCV/Tracking/SURFTracker.py
index f14fe7fc2..00cc0c322 100644
--- a/SimpleCV/Tracking/SURFTracker.py
+++ b/SimpleCV/Tracking/SURFTracker.py
@@ -105,7 +105,7 @@ def surfTracker(img, bb, ts, **kwargs):
     skp, sd = descriptor.compute(newimg, skp)
 
     if td is None:
-        print "Descriptors are Empty"
+        print("Descriptors are Empty")
         return None
 
     if sd is None:
@@ -121,7 +121,7 @@ def surfTracker(img, bb, ts, **kwargs):
     # filter points using distnace criteria
     dist = (dist[:,0]/2500.0).reshape(-1,).tolist()
     idx = idx.reshape(-1).tolist()
-    indices = sorted(range(len(dist)), key=lambda i: dist[i])
+    indices = sorted(list(range(len(dist))), key=lambda i: dist[i])
     dist = [dist[i] for i in indices]
     idx = [idx[i] for i in indices]
 
@@ -129,7 +129,7 @@ def surfTracker(img, bb, ts, **kwargs):
     skp_final_labelled=[]
     data_cluster=[]
 
-    for i, dis in itertools.izip(idx, dist):
+    for i, dis in zip(idx, dist):
         if dis < distance:
             skp_final.append(skp[i])
             data_cluster.append((skp[i].pt[0], skp[i].pt[1]))
@@ -142,7 +142,7 @@ def surfTracker(img, bb, ts, **kwargs):
     db = DBSCAN(eps=eps_val, min_samples=min_samples).fit(S)
     core_samples = db.core_sample_indices_
     labels = db.labels_
-    for label, i in zip(labels, range(len(labels))):
+    for label, i in zip(labels, list(range(len(labels)))):
         if label==0:
             skp_final_labelled.append(skp_final[i])
 
diff --git a/SimpleCV/base.py b/SimpleCV/base.py
index 972e31e65..ddc1859a5 100644
--- a/SimpleCV/base.py
+++ b/SimpleCV/base.py
@@ -7,9 +7,9 @@
 import time
 import socket
 import re
-import urllib2
+import urllib.request, urllib.error, urllib.parse
 import types
-import SocketServer
+import socketserver
 import threading
 import tempfile
 import zipfile
@@ -41,18 +41,19 @@
 from copy import copy
 from math import *
 from pkg_resources import load_entry_point
-from SimpleHTTPServer import SimpleHTTPRequestHandler
-from types import IntType, LongType, FloatType, InstanceType
-from cStringIO import StringIO
+from http.server import SimpleHTTPRequestHandler
+#from types import IntType, LongType, FloatType, InstanceType
+from io import StringIO
 
 from numpy import int32
 from numpy import uint8
 
-from EXIF import *
+from .EXIF import *
 from pygame import gfxdraw
 from pickle 
import * # SimpleCV library includes try: - import cv2.cv as cv + #import cv2.cv as cv + import cv2 except ImportError: try: import cv @@ -161,7 +162,8 @@ def is_number(n): Returns: Type """ - return type(n) in (IntType, LongType, FloatType) + #return type(n) in (IntType, LongType, FloatType) + return type(n) in (int, float) def is_tuple(n): """ @@ -195,7 +197,7 @@ def test(): This function is meant to run builtin unittests """ - print 'unit test' + print('unit test') def download_and_extract(URL): @@ -210,14 +212,14 @@ def download_and_extract(URL): tmpdir = tempfile.mkdtemp() filename = os.path.basename(URL) path = tmpdir + "/" + filename - zdata = urllib2.urlopen(URL) + zdata = urllib.request.urlopen(URL) - print "Saving file to disk please wait...." + print("Saving file to disk please wait....") with open(path, "wb") as local_file: local_file.write(zdata.read()) zfile = zipfile.ZipFile(path) - print "Extracting zipfile" + print("Extracting zipfile") try: zfile.extractall(tmpdir) except: @@ -309,7 +311,7 @@ def read_logging_level(log_level): if log_level in levels_dict: return levels_dict[log_level] else: - print "The logging level given is not valid" + print("The logging level given is not valid") return None def get_logging_level(): @@ -324,7 +326,7 @@ def get_logging_level(): 50: "CRITICAL" } - print "The current logging level is:", levels_dict[logger.getEffectiveLevel()] + print("The current logging level is:", levels_dict[logger.getEffectiveLevel()]) def set_logging(log_level,myfilename = None): """ @@ -361,9 +363,9 @@ def set_logging(log_level,myfilename = None): fileHandler.setFormatter(formatter) logger.addHandler(fileHandler) logger.removeHandler(consoleHandler) #Console logging is disabled. - print "Now logging to",myfilename,"with level",log_level + print("Now logging to",myfilename,"with level",log_level) elif level: - print "Now logging with level",log_level + print("Now logging with level",log_level) logger.setLevel(level) @@ -391,37 +393,37 @@ def system(): """ try : import platform - print "System : ", platform.system() - print "OS version : ", platform.version() - print "Python version :", platform.python_version() + print("System : ", platform.system()) + print("OS version : ", platform.version()) + print("Python version :", platform.python_version()) try : from cv2 import __version__ - print "Open CV version : " + __version__ + print("Open CV version : " + __version__) except ImportError : - print "Open CV2 version : " + "2.1" + print("Open CV2 version : " + "2.1") if (PIL_ENABLED) : - print "PIL version : ", pil.VERSION + print("PIL version : ", pil.VERSION) else : - print "PIL module not installed" + print("PIL module not installed") if (ORANGE_ENABLED) : - print "Orange Version : " + orange.version + print("Orange Version : " + orange.version) else : - print "Orange module not installed" + print("Orange module not installed") try : import pygame as pg - print "PyGame Version : " + pg.__version__ + print("PyGame Version : " + pg.__version__) except ImportError: - print "PyGame module not installed" + print("PyGame module not installed") try : import pickle - print "Pickle Version : " + pickle.__version__ + print("Pickle Version : " + pickle.__version__) except : - print "Pickle module not installed" + print("Pickle module not installed") except ImportError : - print "You need to install Platform to use this function" - print "to install you can use:" - print "easy_install platform" + print("You need to install Platform to use this function") + print("to install you 
can use:") + print("easy_install platform") return class LazyProperty(object): diff --git a/SimpleCV/examples/arduino/CannyStream-arduino.py b/SimpleCV/examples/arduino/CannyStream-arduino.py index 1d4d458ac..49570b467 100644 --- a/SimpleCV/examples/arduino/CannyStream-arduino.py +++ b/SimpleCV/examples/arduino/CannyStream-arduino.py @@ -16,7 +16,7 @@ but the Arduino IDE should tell you where you should mount the Arduino from. """ -print __doc__ +print(__doc__) import time from SimpleCV import Camera @@ -49,7 +49,7 @@ else: t2 *= multiplier - print "t1 " + str(t1) + ", t2 " + str(t2) + ", b13 " + str(b13) + print("t1 " + str(t1) + ", t2 " + str(t2) + ", b13 " + str(b13)) img = cam.getImage().flipHorizontal() edged_img = img.edges(int(t1), int(t2)).invert().smooth() edged_img.show() diff --git a/SimpleCV/examples/detection/CannyCam.py b/SimpleCV/examples/detection/CannyCam.py index 52ee3042d..d95e09854 100644 --- a/SimpleCV/examples/detection/CannyCam.py +++ b/SimpleCV/examples/detection/CannyCam.py @@ -5,7 +5,7 @@ max_threshold and threshhold_step values and run the program you will see it change over time ''' -print __doc__ +print(__doc__) from SimpleCV import * diff --git a/SimpleCV/examples/detection/CoinDetector.py b/SimpleCV/examples/detection/CoinDetector.py index 0bf4e477f..3c615c9e8 100644 --- a/SimpleCV/examples/detection/CoinDetector.py +++ b/SimpleCV/examples/detection/CoinDetector.py @@ -13,7 +13,7 @@ ''' -print __doc__ +print(__doc__) from SimpleCV import * # A quarter is 24.26mm or 0.955in diff --git a/SimpleCV/examples/detection/EdgeSnap.py b/SimpleCV/examples/detection/EdgeSnap.py index 806a356d5..60019f682 100644 --- a/SimpleCV/examples/detection/EdgeSnap.py +++ b/SimpleCV/examples/detection/EdgeSnap.py @@ -5,7 +5,7 @@ Right-click to start the process, Detected Edge points will be shown in Red ''' -print __doc__ +print(__doc__) from SimpleCV import * diff --git a/SimpleCV/examples/detection/FeatureDetection.py b/SimpleCV/examples/detection/FeatureDetection.py index 79739c2e8..d813038c6 100644 --- a/SimpleCV/examples/detection/FeatureDetection.py +++ b/SimpleCV/examples/detection/FeatureDetection.py @@ -12,7 +12,7 @@ ''' -print __doc__ +print(__doc__) import time from SimpleCV import Color, Image, np, Camera diff --git a/SimpleCV/examples/detection/FisherFaceRecognizer.py b/SimpleCV/examples/detection/FisherFaceRecognizer.py index f710ba1f2..4d6f03986 100644 --- a/SimpleCV/examples/detection/FisherFaceRecognizer.py +++ b/SimpleCV/examples/detection/FisherFaceRecognizer.py @@ -20,7 +20,7 @@ def identifyGender(): w, h = f.imageSize crop_image = crop_image.resize(w, h) label, confidence = f.predict(crop_image) - print label + print(label) if label == 0: img.drawText("Female", fontsize=48) diff --git a/SimpleCV/examples/detection/Least-Squares-Circle.py b/SimpleCV/examples/detection/Least-Squares-Circle.py index bf8819a23..e5bc674d2 100644 --- a/SimpleCV/examples/detection/Least-Squares-Circle.py +++ b/SimpleCV/examples/detection/Least-Squares-Circle.py @@ -5,7 +5,7 @@ The program basically takes in a shape and tries to find the size of it. 
''' -print __doc__ +print(__doc__) from SimpleCV import * from scipy import optimize @@ -31,8 +31,8 @@ def f_2(c): R_2 = Ri_2.mean() residu_2 = sum((Ri_2 - R_2)**2) -print xc_2,yc_2 -print R_2 +print(xc_2,yc_2) +print(R_2) img.drawCircle((xc_2,yc_2),R_2,color=Color.RED,thickness=3) img.show() time.sleep(10) diff --git a/SimpleCV/examples/detection/MotionTracker.py b/SimpleCV/examples/detection/MotionTracker.py index ed25aa2e2..945612672 100644 --- a/SimpleCV/examples/detection/MotionTracker.py +++ b/SimpleCV/examples/detection/MotionTracker.py @@ -40,7 +40,7 @@ #Draw the message on the screen if(draw_message): newImg.drawText(message_text, 5,5) - print message_text + print(message_text) lastImg = newImg # update the image diff --git a/SimpleCV/examples/detection/TemplateMatching.py b/SimpleCV/examples/detection/TemplateMatching.py index 7f5b8052b..0fe6a4f1d 100644 --- a/SimpleCV/examples/detection/TemplateMatching.py +++ b/SimpleCV/examples/detection/TemplateMatching.py @@ -6,7 +6,7 @@ matching SimpleCV offers. If you are looking for something more complex you will probably want to look into img.findKeypoints() ''' -print __doc__ +print(__doc__) from SimpleCV import * @@ -18,7 +18,7 @@ methods = ["SQR_DIFF","SQR_DIFF_NORM","CCOEFF","CCOEFF_NORM","CCORR","CCORR_NORM"] # the various types of template matching available for m in methods: - print "current method:", m # print the method being used + print("current method:", m) # print the method being used result = Image("templatetest.png", sample=True) dl = DrawingLayer((source.width,source.height)) fs = source.findTemplate(template,threshold=t,method=m) diff --git a/SimpleCV/examples/detection/TrainFacialRecognition.py b/SimpleCV/examples/detection/TrainFacialRecognition.py index 0251dee47..8b2b639e6 100644 --- a/SimpleCV/examples/detection/TrainFacialRecognition.py +++ b/SimpleCV/examples/detection/TrainFacialRecognition.py @@ -44,7 +44,7 @@ def getFaceSet(cam,myStr=""): # Create, train, and save the recognizer. 
 f = FaceRecognizer()
-print f.train(imgs, labels)
+print(f.train(imgs, labels))
 f.save(outfile)
 # Now show us the results
 disp = Display((640,480))
diff --git a/SimpleCV/examples/detection/balltrack.py b/SimpleCV/examples/detection/balltrack.py
index 8b29d340d..aa92094a0 100644
--- a/SimpleCV/examples/detection/balltrack.py
+++ b/SimpleCV/examples/detection/balltrack.py
@@ -7,7 +7,7 @@
 The demo video can be found at:
 
 '''
-print __doc__
+print(__doc__)
 
 import SimpleCV
 
@@ -19,7 +19,7 @@
     if display.mouseRight: # if right mouse clicked, change mode
         normaldisplay = not(normaldisplay)
-        print "Display Mode:", "Normal" if normaldisplay else "Segmented"
+        print("Display Mode:", "Normal" if normaldisplay else "Segmented")
 
     img = cam.getImage().flipHorizontal() # grab image from camera
     dist = img.colorDistance(SimpleCV.Color.BLACK).dilate(2) # try to separate colors in image
diff --git a/SimpleCV/examples/detection/barcode_reader.py b/SimpleCV/examples/detection/barcode_reader.py
index 8ad8f80a7..b7a22f934 100644
--- a/SimpleCV/examples/detection/barcode_reader.py
+++ b/SimpleCV/examples/detection/barcode_reader.py
@@ -12,7 +12,7 @@
 the program to try and read the barcode
 
 '''
-print __doc__
+print(__doc__)
 
 import time
 
@@ -36,8 +36,8 @@
         barcode = img.findBarcode()
         if barcode: # if we have a barcode
             data = str(barcode.data)
-            print data
-            if mydict.has_key(data):
+            print(data)
+            if data in mydict:
                 mydict[data] = mydict[data] + 1
             else:
                 mydict[data] = 1
@@ -50,6 +50,6 @@
-target= open( myfile, "wb" )
+target= open( myfile, "w" )
 wtr= csv.writer( target )
 wtr.writerow( ["item","count"])
-for d in mydict.items():
+for d in list(mydict.items()):
     wtr.writerow(d)
 target.close()
diff --git a/SimpleCV/examples/detection/dealwithit.py b/SimpleCV/examples/detection/dealwithit.py
index 6781a465a..7f069c7cf 100755
--- a/SimpleCV/examples/detection/dealwithit.py
+++ b/SimpleCV/examples/detection/dealwithit.py
@@ -21,7 +21,8 @@ def process_eyes(image, eyes):
     return (None, None, None)
 
 
-def draw_glasses(image, (dx, dy, right_eye), glasses):
+def draw_glasses(image, eye_info, glasses):
+    (dx, dy, right_eye) = eye_info
     rotation = 0.5*dy
     try:
         new_glasses = glasses.scale(int(2.75*dx), right_eye.height())
diff --git a/SimpleCV/examples/detection/face-substition.py b/SimpleCV/examples/detection/face-substition.py
index 81cb9afc8..f43c85fd9 100644
--- a/SimpleCV/examples/detection/face-substition.py
+++ b/SimpleCV/examples/detection/face-substition.py
@@ -5,7 +5,7 @@
 All this example does is find a face and replace it with another image.
 The image should auto scale to match the size of the face.
 
 """
-print __doc__
+print(__doc__)
 from SimpleCV import Camera, Display, HaarCascade, Image
 
diff --git a/SimpleCV/examples/detection/facetrack.py b/SimpleCV/examples/detection/facetrack.py
index 7627eed48..3cd8e53a6 100644
--- a/SimpleCV/examples/detection/facetrack.py
+++ b/SimpleCV/examples/detection/facetrack.py
@@ -4,7 +4,7 @@
 """
 This program basically does face detection an blurs the face out. 
""" -print __doc__ +print(__doc__) from SimpleCV import Camera, Display, HaarCascade diff --git a/SimpleCV/examples/detection/pills.py b/SimpleCV/examples/detection/pills.py index b9dc55b3e..a0a5bd9cd 100644 --- a/SimpleCV/examples/detection/pills.py +++ b/SimpleCV/examples/detection/pills.py @@ -29,7 +29,7 @@ pillcount = len(pills) if pillcount != expected_pillcount: - print "pack at %d, %d had %d pills" % (packblobs[idx].x, packblobs[idx].y, pillcount) + print("pack at %d, %d had %d pills" % (packblobs[idx].x, packblobs[idx].y, pillcount)) i.drawText("Pills Found: " + str(pillcount), 10, 10, fontsize = 20) i.drawText("Pills Expected: " + str(expected_pillcount), 10, 30, fontsize = 20) for p in pills: diff --git a/SimpleCV/examples/detection/x-ray.py b/SimpleCV/examples/detection/x-ray.py index 55e10ce75..1b979739c 100644 --- a/SimpleCV/examples/detection/x-ray.py +++ b/SimpleCV/examples/detection/x-ray.py @@ -6,7 +6,7 @@ illusion of X-ray vision. It is mearly meant to show how to perform a basic image operation and overlay back onto the original image. """ -print __doc__ +print(__doc__) from SimpleCV import Camera, Display diff --git a/SimpleCV/examples/display/RenderExample.py b/SimpleCV/examples/display/RenderExample.py index 471b608f6..622d7c64d 100644 --- a/SimpleCV/examples/display/RenderExample.py +++ b/SimpleCV/examples/display/RenderExample.py @@ -17,7 +17,7 @@ lineL.line(b,d,width=5) img.addDrawingLayer(lineL) temp = img.applyLayers() -print "line: %s" % temp.save(temp=True) +print("line: %s" % temp.save(temp=True)) img.clearLayers() linesL = DrawingLayer((img.width,img.height)) @@ -28,14 +28,14 @@ pts = (a,b,c,d,a) linesL.lines(pts,alpha=128) #translate over and down 10 -pts = map(lambda x: ((x[0]+10),(x[1]+10)),pts) +pts = [((x[0]+10),(x[1]+10)) for x in pts] linesL.lines(pts,color=Color.BEIGE,width=10) #translate over and down 10 -pts = map(lambda x: ((x[0]+10),(x[1]+10)),pts) +pts = [((x[0]+10),(x[1]+10)) for x in pts] linesL.lines(pts,antialias=True) img.addDrawingLayer(linesL) temp = img.applyLayers() -print "lines: %s" % temp.save(temp=True) +print("lines: %s" % temp.save(temp=True)) img.clearLayers() rectTR = DrawingLayer((img.width,img.height)) @@ -50,7 +50,7 @@ rectTR.rectangle(tr,wh,color=Color.GREEN,filled=True) img.addDrawingLayer(rectTR) temp = img.applyLayers() -print "rectTR: %s" % temp.save(temp=True) +print("rectTR: %s" % temp.save(temp=True)) img.clearLayers() rectC = DrawingLayer((img.width,img.height)) @@ -65,7 +65,7 @@ rectC.centeredRectangle(cxy,wh,color=Color.GREEN,filled=True) img.addDrawingLayer(rectC) temp = img.applyLayers() -print "rectC: %s" % temp.save(temp=True) +print("rectC: %s" % temp.save(temp=True)) img.clearLayers() polyL = DrawingLayer((img.width,img.height)) @@ -74,17 +74,17 @@ c = (150,50) pts = (a,b,c) polyL.polygon(pts,alpha=128) -pts = map(lambda x: ((x[0]+10),(x[1]+10)),pts) +pts = [((x[0]+10),(x[1]+10)) for x in pts] polyL.polygon(pts,antialias=True,width=3,alpha=210,filled=True,color=Color.LIME) #translate over and down 10 -pts = map(lambda x: ((x[0]+10),(x[1]+10)),pts) +pts = [((x[0]+10),(x[1]+10)) for x in pts] polyL.polygon(pts,color=Color.BEIGE,width=10) #translate over and down 10 -pts = map(lambda x: ((x[0]+10),(x[1]+10)),pts) +pts = [((x[0]+10),(x[1]+10)) for x in pts] polyL.polygon(pts,antialias=True,width=3,alpha=210) img.addDrawingLayer(polyL) temp = img.applyLayers() -print "poly: %s" % temp.save(temp=True) +print("poly: %s" % temp.save(temp=True)) img.clearLayers() circleL = DrawingLayer((img.width,img.height)) @@ -101,7 
+101,7 @@ circleL.circle(c,r,color=Color.BLUE,alpha=172) img.addDrawingLayer(circleL) temp = img.applyLayers() -print "circle: %s" % temp.save(temp=True) +print("circle: %s" % temp.save(temp=True)) img.clearLayers() ellipseL = DrawingLayer((img.width,img.height)) @@ -116,7 +116,7 @@ ellipseL.ellipse(cxy,wh,color=Color.GREEN,filled=True) img.addDrawingLayer(ellipseL) temp = img.applyLayers() -print "ellipse: %s" % temp.save(temp=True) +print("ellipse: %s" % temp.save(temp=True)) img.clearLayers() bez = DrawingLayer((img.width,img.height)) @@ -129,14 +129,14 @@ bez.bezier(pts,30) img.addDrawingLayer(bez) #translate over and down 10 -pts = map(lambda x: ((x[0]+10),(x[1]+10)),pts) +pts = [((x[0]+10),(x[1]+10)) for x in pts] bez.bezier(pts,5,color=Color.RED) img.addDrawingLayer(bez) -pts = map(lambda x: ((x[0]+10),(x[1]+10)),pts) +pts = [((x[0]+10),(x[1]+10)) for x in pts] bez.bezier(pts,30,color=Color.GREEN, alpha=128) img.addDrawingLayer(bez) temp = img.applyLayers() -print "bez: %s" % temp.save(temp=True) +print("bez: %s" % temp.save(temp=True)) img.clearLayers() words = DrawingLayer((img.width,img.height)) @@ -170,7 +170,7 @@ words.ezViewText("Can you read this better?",pos) img.addDrawingLayer(words) temp = img.applyLayers() -print "words: %s" % temp.save(temp=True) +print("words: %s" % temp.save(temp=True)) img.clearLayers() #Now lets do some layer stuff @@ -179,7 +179,7 @@ img.addDrawingLayer(bez) img.addDrawingLayer(words) temp = img.applyLayers([0,2,3]) -print "layers: %s" % temp.save(temp=True) +print("layers: %s" % temp.save(temp=True)) img.clearLayers() #now lets do some blanket alpha work @@ -192,7 +192,7 @@ img.addDrawingLayer(bez) img.addDrawingLayer(words) temp = img.applyLayers() -print "flatlayers: %s" % temp.save(temp=True) +print("flatlayers: %s" % temp.save(temp=True)) img.clearLayers() sprites = DrawingLayer((img.width,img.height)) @@ -203,5 +203,5 @@ sprites.sprite(mySprite,(0,200), rot=45,scale=1) img.addDrawingLayer(sprites) temp = img.applyLayers() -print "sprites: %s" % temp.save(temp=True) +print("sprites: %s" % temp.save(temp=True)) img.clearLayers() diff --git a/SimpleCV/examples/display/gtk-example-camera.py b/SimpleCV/examples/display/gtk-example-camera.py index 21f1513f8..d4c8e52dd 100644 --- a/SimpleCV/examples/display/gtk-example-camera.py +++ b/SimpleCV/examples/display/gtk-example-camera.py @@ -10,7 +10,7 @@ ''' -print __doc__ +print(__doc__) import gtk import SimpleCV diff --git a/SimpleCV/examples/display/gtk-example.py b/SimpleCV/examples/display/gtk-example.py index fd9acc650..68d2b00e2 100644 --- a/SimpleCV/examples/display/gtk-example.py +++ b/SimpleCV/examples/display/gtk-example.py @@ -8,7 +8,7 @@ ''' -print __doc__ +print(__doc__) import gtk import SimpleCV diff --git a/SimpleCV/examples/display/simplecam.py b/SimpleCV/examples/display/simplecam.py index 4dfe5d285..f83bd000f 100644 --- a/SimpleCV/examples/display/simplecam.py +++ b/SimpleCV/examples/display/simplecam.py @@ -3,7 +3,7 @@ This program is basically the hello world in SimpleCV all it does is grab an image from the camera and display it ''' -print __doc__ +print(__doc__) from SimpleCV import * cam = Camera() diff --git a/SimpleCV/examples/display/tkinter-example.py b/SimpleCV/examples/display/tkinter-example.py index b815b4b0e..b9c4c2881 100644 --- a/SimpleCV/examples/display/tkinter-example.py +++ b/SimpleCV/examples/display/tkinter-example.py @@ -1,13 +1,13 @@ import SimpleCV import ImageTk #This has to be installed from the system repos -import Tkinter +import tkinter import time 
diff --git a/SimpleCV/examples/display/tkinter-example.py b/SimpleCV/examples/display/tkinter-example.py
index b815b4b0e..b9c4c2881 100644
--- a/SimpleCV/examples/display/tkinter-example.py
+++ b/SimpleCV/examples/display/tkinter-example.py
@@ -1,13 +1,13 @@
 import SimpleCV
 import ImageTk #This has to be installed from the system repos
-import Tkinter
+import tkinter
 import time
-Tkinter.Tk()
+tkinter.Tk()
 image = SimpleCV.Image('http://i.imgur.com/FTKqh.jpg') #load the simplecv logo from the web
 photo = ImageTk.PhotoImage(image.getPIL())
-label = Tkinter.Label(image=photo)
+label = tkinter.Label(image=photo)
 label.image = photo # keep a reference!
 label.pack() #show the image
 time.sleep(5)
diff --git a/SimpleCV/examples/kinect/kinect-motion-blur.py b/SimpleCV/examples/kinect/kinect-motion-blur.py
index f44a2e777..1267ddffa 100644
--- a/SimpleCV/examples/kinect/kinect-motion-blur.py
+++ b/SimpleCV/examples/kinect/kinect-motion-blur.py
@@ -3,6 +3,7 @@
 from operator import add
 from SimpleCV import *
 from SimpleCV.Display import Display
+from functools import reduce
 d = Display(flags = pg.FULLSCREEN)
 #create video streams
diff --git a/SimpleCV/examples/machine-learning/color_cluster.py b/SimpleCV/examples/machine-learning/color_cluster.py
index 43e88d1d9..127b212b0 100644
--- a/SimpleCV/examples/machine-learning/color_cluster.py
+++ b/SimpleCV/examples/machine-learning/color_cluster.py
@@ -2,7 +2,7 @@
 This program tries to extract the color palette from an image
 it could be used in machine learning as a color classifier
 '''
-print __doc__
+print(__doc__)
 from SimpleCV import *
 disp = Display((640,528))
diff --git a/SimpleCV/examples/machine-learning/machine-learning_nuts-vs-bolts.py b/SimpleCV/examples/machine-learning/machine-learning_nuts-vs-bolts.py
index f70d0bb29..da37dc5ba 100644
--- a/SimpleCV/examples/machine-learning/machine-learning_nuts-vs-bolts.py
+++ b/SimpleCV/examples/machine-learning/machine-learning_nuts-vs-bolts.py
@@ -9,7 +9,7 @@
 The data set should auto download, if not you can get it from:
 https://github.com/downloads/sightmachine/SimpleCV/nuts_bolts.zip
 '''
-print __doc__
+print(__doc__)
 from SimpleCV import *
 from sklearn.svm import LinearSVC
 from sklearn.linear_model import LogisticRegression
@@ -18,12 +18,12 @@
 #Download the dataset
 machine_learning_data_set = 'https://github.com/downloads/sightmachine/SimpleCV/nuts_bolts.zip'
 data_path = download_and_extract(machine_learning_data_set)
-print 'Test Images Downloaded at:', data_path
+print('Test Images Downloaded at:', data_path)
 display = Display((800,600)) #Display to show the images
 target_names = ['bolt', 'nut']
-print 'Loading Bolts for Training'
+print('Loading Bolts for Training')
 bolts = ImageSet(data_path + '/data/supervised/bolts') #Load Bolts for training
 bolt_blobs = [b.findBlobs()[0] for b in bolts] #extract the blobs for our features
 tmp_data = [] #array to store data features
@@ -33,7 +33,7 @@
     tmp_data.append([b.area(), b.height(), b.width()])
     tmp_target.append(0)
-print 'Loading Nuts for Training'
+print('Loading Nuts for Training')
 nuts = ImageSet(data_path + '/data/supervised/nuts')
 nut_blobs = [n.invert().findBlobs()[0] for n in nuts]
 for n in nut_blobs:
@@ -43,12 +43,12 @@
 dataset = np.array(tmp_data)
 targets = np.array(tmp_target)
-print 'Training Machine Learning'
+print('Training Machine Learning')
 clf = LinearSVC()
 clf = clf.fit(dataset, targets)
 clf2 = LogisticRegression().fit(dataset, targets)
-print 'Running prediction on bolts now'
+print('Running prediction on bolts now')
 untrained_bolts = ImageSet(data_path + '/data/unsupervised/bolts')
 unbolt_blobs = [b.findBlobs()[0] for b in untrained_bolts]
 for b in unbolt_blobs:
@@ -58,9 +58,9 @@
     img = b.image
     img.drawText(name)
     img.save(display)
-    print "Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1]
+    print("Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1])
-print 'Running prediction on nuts now'
+print('Running prediction on nuts now')
 untrained_nuts = ImageSet(data_path + '/data/unsupervised/nuts')
 unnut_blobs = [n.invert().findBlobs()[0] for n in untrained_nuts]
 for n in unnut_blobs:
@@ -70,4 +70,4 @@
     img = n.image
     img.drawText(name)
     img.save(display)
-    print "Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1]
+    print("Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1])
diff --git a/SimpleCV/examples/manipulation/GreenScreen.py b/SimpleCV/examples/manipulation/GreenScreen.py
index ea94a0927..3b320257c 100644
--- a/SimpleCV/examples/manipulation/GreenScreen.py
+++ b/SimpleCV/examples/manipulation/GreenScreen.py
@@ -5,7 +5,7 @@
 this should even work with a camera if the user is standing
 in front of a green background
 '''
-print __doc__
+print(__doc__)
 from SimpleCV import *
diff --git a/SimpleCV/examples/manipulation/ImageMotionBlur.py b/SimpleCV/examples/manipulation/ImageMotionBlur.py
index 720ab3421..7ad4ddcde 100644
--- a/SimpleCV/examples/manipulation/ImageMotionBlur.py
+++ b/SimpleCV/examples/manipulation/ImageMotionBlur.py
@@ -3,7 +3,7 @@
 Use Up/Down Arrow keys to change power
 Use Left/Right Arrow keys to change angle
 """
-print __doc__
+print(__doc__)
 from SimpleCV import *
 import pygame
diff --git a/SimpleCV/examples/manipulation/MorphologyExample.py b/SimpleCV/examples/manipulation/MorphologyExample.py
index 4dea8bc49..d106d05a7 100644
--- a/SimpleCV/examples/manipulation/MorphologyExample.py
+++ b/SimpleCV/examples/manipulation/MorphologyExample.py
@@ -5,7 +5,7 @@
 for more information see:
 http://en.wikipedia.org/wiki/Mathematical_morphology
 '''
-print __doc__
+print(__doc__)
 from SimpleCV import *
 display = Display(resolution = (800, 600)) #create a new display to draw images on
diff --git a/SimpleCV/examples/manipulation/Partycam.py b/SimpleCV/examples/manipulation/Partycam.py
index 0d313ddb7..bacd8c86b 100644
--- a/SimpleCV/examples/manipulation/Partycam.py
+++ b/SimpleCV/examples/manipulation/Partycam.py
@@ -2,7 +2,7 @@
 '''
 This program basically simulates some kind of 80's music video.
 '''
-print __doc__
+print(__doc__)
 import sys, time, socket
 from SimpleCV import *
diff --git a/SimpleCV/examples/manipulation/RotationExample.py b/SimpleCV/examples/manipulation/RotationExample.py
index 825a7ccdf..9ce00dcca 100644
--- a/SimpleCV/examples/manipulation/RotationExample.py
+++ b/SimpleCV/examples/manipulation/RotationExample.py
@@ -3,7 +3,7 @@
 This example shows how to perform various rotations and
 warps on images and put them back into a display.
 '''
-print __doc__
+print(__doc__)
 from SimpleCV import *
diff --git a/SimpleCV/examples/manipulation/colorsegmentation.py b/SimpleCV/examples/manipulation/colorsegmentation.py
index 506e5936e..8542d4f02 100644
--- a/SimpleCV/examples/manipulation/colorsegmentation.py
+++ b/SimpleCV/examples/manipulation/colorsegmentation.py
@@ -2,7 +2,7 @@
 '''
 This program uses a Color model to try and do segmentation based on color
 '''
-print __doc__
+print(__doc__)
 import time
 from SimpleCV import *
@@ -20,6 +20,6 @@
     time.sleep(0.01)
     ticks = ticks + 1
     if (int(time.time()) > t):
-        print str(ticks) + " fps"
+        print(str(ticks) + " fps")
         ticks = 0
         t = int(time.time())
diff --git a/SimpleCV/examples/manipulation/motionblur.py b/SimpleCV/examples/manipulation/motionblur.py
index da303d3c8..d8202752c 100644
--- a/SimpleCV/examples/manipulation/motionblur.py
+++ b/SimpleCV/examples/manipulation/motionblur.py
@@ -3,9 +3,10 @@
 This program does basic motion blurring. It averages the number
 of maxframes that are set using some basic image math
 '''
-print __doc__
+print(__doc__)
 from operator import add
 from SimpleCV import *
+from functools import reduce
 js = JpegStreamer(8080) #create JPEG streamers
diff --git a/SimpleCV/examples/manipulation/threedee.py b/SimpleCV/examples/manipulation/threedee.py
index fdb2c97ff..4e04713cd 100644
--- a/SimpleCV/examples/manipulation/threedee.py
+++ b/SimpleCV/examples/manipulation/threedee.py
@@ -17,8 +17,8 @@ def threedee_me(left, right, offset):
     output = output.crop(offset[0],y=offset[1],w=left.width-offset[0],h=left.height-offset[1])
     return output
-print "Taking pictures. Please move your camera slightly to its right"
-print "after every picture."
+print("Taking pictures. Please move your camera slightly to its right")
+print("after every picture.")
 c = Camera()
 time.sleep(1)
@@ -26,7 +26,7 @@ def threedee_me(left, right, offset):
 for i in range(5):
     images.append(c.getImage())
-    print "Picture %d taken" % (i + 1)
+    print("Picture %d taken" % (i + 1))
     time.sleep(1)
 offset = (0,0)
@@ -35,6 +35,6 @@ def threedee_me(left, right, offset):
     left = images[i]
     right = images[i+1]
     output = threedee_me(left, right, offset)
-    print output.save(temp = True)
+    print(output.save(temp = True))
     output.show()
     time.sleep(2)
diff --git a/SimpleCV/examples/manipulation/tvexample.py b/SimpleCV/examples/manipulation/tvexample.py
index 9525ef0aa..3df6fcf08 100644
--- a/SimpleCV/examples/manipulation/tvexample.py
+++ b/SimpleCV/examples/manipulation/tvexample.py
@@ -1,7 +1,7 @@
 '''
 This program superimposes the camera onto the television in the picture
 '''
-print __doc__
+print(__doc__)
 from SimpleCV import Camera, Image, Display
diff --git a/SimpleCV/examples/tracking/camshift.py b/SimpleCV/examples/tracking/camshift.py
index 1376639a2..7ced7ff05 100644
--- a/SimpleCV/examples/tracking/camshift.py
+++ b/SimpleCV/examples/tracking/camshift.py
@@ -21,9 +21,9 @@ def camshift():
             fs1.showPixelVelocityRT()
             img1.show()
         except KeyboardInterrupt:
-            print "Total number of frames tracked",
-            print fs1.trackLength()
-            print fs1.processTrack(foo)
+            print("Total number of frames tracked", end=' ')
+            print(fs1.trackLength())
+            print(fs1.processTrack(foo))
             break
 def getBBFromUser(cam, d):
@@ -46,7 +46,7 @@ def getBBFromUser(cam, d):
             time.sleep(0.05)
         except KeyboardInterrupt:
             break
-    print p1,p2
+    print(p1,p2)
     if not p1 or not p2:
         return None
@@ -54,7 +54,7 @@ def getBBFromUser(cam, d):
     xmin = np.min((p1[0],p2[0]))
     ymax = np.max((p1[1],p2[1]))
     ymin = np.min((p1[1],p2[1]))
-    print xmin,ymin,xmax,ymax
+    print(xmin,ymin,xmax,ymax)
     return (xmin,ymin,xmax-xmin,ymax-ymin)
 camshift()
diff --git a/SimpleCV/examples/tracking/lk.py b/SimpleCV/examples/tracking/lk.py
index cc3d09d07..858076653 100644
--- a/SimpleCV/examples/tracking/lk.py
+++ b/SimpleCV/examples/tracking/lk.py
@@ -15,7 +15,7 @@ def lktest():
             img1 = cam.getImage()
             fs1 = img1.track("lk",fs1,img,bb1, maxCorners = 5000, qualityLevel = 0.08, winSize = (15, 15))
             fs1.drawBB(color=Color.RED)
-            print fs1[-1].getBB()
+            print(fs1[-1].getBB())
             img1.show()
         except KeyboardInterrupt:
             break
diff --git a/SimpleCV/examples/tracking/mftrack.py b/SimpleCV/examples/tracking/mftrack.py
index cd64ae04d..badef5843 100644
--- a/SimpleCV/examples/tracking/mftrack.py
+++ b/SimpleCV/examples/tracking/mftrack.py
@@ -14,7 +14,7 @@ def mftest():
         try:
             img1 = cam.getImage()
             fs1 = img1.track("mftrack",fs1,img,bb1, numM=10, numN=10, winsize=10)
-            print fs1[-1].shift, "shift"
+            print(fs1[-1].shift, "shift")
             fs1.drawBB(color=(255,0,0))
             fs1.drawPath()
             img1.show()
@@ -41,7 +41,7 @@ def getBBFromUser(cam, d):
             time.sleep(0.05)
         except KeyboardInterrupt:
             break
-    print p1,p2
+    print(p1,p2)
     if not p1 or not p2:
         return None
@@ -49,7 +49,7 @@ def getBBFromUser(cam, d):
     xmin = np.min((p1[0],p2[0]))
     ymax = np.max((p1[1],p2[1]))
     ymin = np.min((p1[1],p2[1]))
-    print xmin,ymin,xmax,ymax
+    print(xmin,ymin,xmax,ymax)
     return (xmin,ymin,xmax-xmin,ymax-ymin)
 mftest()
diff --git a/SimpleCV/examples/tracking/surftest.py b/SimpleCV/examples/tracking/surftest.py
index e585adb3c..118e5889d 100644
--- a/SimpleCV/examples/tracking/surftest.py
+++ b/SimpleCV/examples/tracking/surftest.py
@@ -16,7 +16,7 @@ def surftest():
             fs1 = img1.track("surf",fs1,img,bb1, eps_val=0.8, dist=200, nframes=100)
             fs1.drawBB(color=Color.RED)
             fs1[-1].drawTrackerPoints()
-            print fs1[-1].getBB()
+            print(fs1[-1].getBB())
             img1.show()
         except KeyboardInterrupt:
             break
diff --git a/SimpleCV/examples/util/CaptureEncodeUpload.py b/SimpleCV/examples/util/CaptureEncodeUpload.py
index cef27a7d9..89b352bfc 100644
--- a/SimpleCV/examples/util/CaptureEncodeUpload.py
+++ b/SimpleCV/examples/util/CaptureEncodeUpload.py
@@ -52,6 +52,6 @@
 call('ffmpeg'+params,shell=True)
 # construct the command line arguments for google command line
 params = "{0} --title \"{1}\" --tags \"{2}\" --category \"Education\" --summary \"{3}\" --access \"{4}\" ".format(outname,title,tags,summary,access)
-print params
+print(params)
 # call the command line
 call('google youtube post '+params,shell=True)
diff --git a/SimpleCV/examples/web-based/cloudanimator/cloudanimator.py b/SimpleCV/examples/web-based/cloudanimator/cloudanimator.py
index 7569d06da..2e14ba453 100644
--- a/SimpleCV/examples/web-based/cloudanimator/cloudanimator.py
+++ b/SimpleCV/examples/web-based/cloudanimator/cloudanimator.py
@@ -5,7 +5,7 @@
 #
 # Using jpegcam as flash webcam library:
 # http://code.google.com/p/jpegcam/
-import os, tempfile, webbrowser, urllib, cherrypy, socket
+import os, tempfile, webbrowser, urllib.request, urllib.parse, urllib.error, cherrypy, socket
 from SimpleCV import *
 from images2gif import writeGif
 import pdb
@@ -18,7 +18,7 @@ class CloudAnimator(object):
     def index(self):
-        f = urllib.urlopen("index.html") # load the default website
+        f = open("index.html") # load the default website; urllib.request.urlopen() rejects bare file paths
         s = f.read() # read the file
         f.close()
         return s
@@ -26,7 +26,7 @@ def index(self):
     def update(self):
         #update the animation
-        print "update animation"
+        print("update animation")
     update.exposed = True
@@ -62,7 +62,7 @@ def reset(self):
         self.giffile = filepath
         self.gifname = tmpname
         self.imageset = []
-        print "reset animation"
+        print("reset animation")
     reset.exposed = True
diff --git a/SimpleCV/examples/web-based/cloudanimator/images2gif.py b/SimpleCV/examples/web-based/cloudanimator/images2gif.py
index 5c503edbd..1cc981d52 100644
--- a/SimpleCV/examples/web-based/cloudanimator/images2gif.py
+++ b/SimpleCV/examples/web-based/cloudanimator/images2gif.py
@@ -201,7 +201,7 @@ def writeGif(filename, images, duration=0.1, loops=0, dither=1):
     # write
     try:
         n = _writeGifToFile(fp, images2, durations, loops)
-        print n, 'frames written'
+        print(n, 'frames written')
     finally:
         fp.close()
diff --git a/SimpleCV/examples/web-based/cloudcam/cloudcam.py b/SimpleCV/examples/web-based/cloudcam/cloudcam.py
index ebd6394a6..42906e6ba 100644
--- a/SimpleCV/examples/web-based/cloudcam/cloudcam.py
+++ b/SimpleCV/examples/web-based/cloudcam/cloudcam.py
@@ -5,14 +5,14 @@
 #
 # Using jpegcam as flash webcam library:
 # http://code.google.com/p/jpegcam/
-import os, tempfile, webbrowser, urllib, cherrypy, socket
+import os, tempfile, webbrowser, urllib.request, urllib.parse, urllib.error, cherrypy, socket
 from SimpleCV import *
 class CloudCam(object):
     def index(self):
-        f = urllib.urlopen("index.html") # load the default website
+        f = open("index.html") # load the default website; urllib.request.urlopen() rejects bare file paths
         s = f.read() # read the file
         f.close()
         return s
@@ -27,8 +27,8 @@ def upload(self):
         outfile.close() # close the temporary file
         self.process(filepath) #Use SimpleCV to process the image
-        print "url:" + cherrypy.url()
-        print "socket:" + socket.gethostbyname(socket.gethostname())
+        print("url:" + cherrypy.url())
+        print("socket:" + socket.gethostbyname(socket.gethostname()))
         #~ return "http://localhost:8000/" + tmpname #return the image path via ajax request
         return tmpname
diff --git a/SimpleCV/examples/web-based/webdisplay/flask-server.py b/SimpleCV/examples/web-based/webdisplay/flask-server.py
index 1aee7e0cf..a0b151209 100644
--- a/SimpleCV/examples/web-based/webdisplay/flask-server.py
+++ b/SimpleCV/examples/web-based/webdisplay/flask-server.py
@@ -13,7 +13,7 @@
 '''
-print __doc__
+print(__doc__)
 from flask import Flask, jsonify, render_template, request
@@ -46,8 +46,8 @@ def snapshot():
     loc = 'static/' + tf.name.split('/')[-1]
     tf.close()
     img.save(loc)
-    print "location",loc
-    print "json", json.dumps(loc)
+    print("location",loc)
+    print("json", json.dumps(loc))
     return json.dumps(loc)
 if __name__ == '__main__':
diff --git a/SimpleCV/tests/ShapeContext/test_c.py b/SimpleCV/tests/ShapeContext/test_c.py
index d98287664..b6863d94d 100644
--- a/SimpleCV/tests/ShapeContext/test_c.py
+++ b/SimpleCV/tests/ShapeContext/test_c.py
@@ -29,15 +29,15 @@
     fname = "SanityCheckExample"+str(i)+".png"
     i = i+ 1
     result.save(fname)
-    print "------------------------------"
-    print metric
+    print("------------------------------")
+    print(metric)
     confuse.append(metric)
-print confuse
+print(confuse)
 confuse = np.array(confuse)
-print confuse.reshape(4,4)
+print(confuse.reshape(4,4))
 time.sleep(10)
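# The ShapeContext test below persists its classifier with
# pickle.dump(scc, open(fname, "wb")), which runs unchanged on Python 3 as
# long as the file stays in binary mode. A minimal sketch of the same round
# trip using context managers so the handles are always closed (fname and
# scc are stand-ins here, not the test's real objects):

import pickle

fname = "scc.pkl"                 # stand-in for the test's pickle path
scc = {"example": "classifier"}   # stand-in for the ShapeContextClassifier
with open(fname, "wb") as fp:     # pickle requires a binary-mode file
    pickle.dump(scc, fp)
with open(fname, "rb") as fp:
    scc = pickle.load(fp)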
diff --git a/SimpleCV/tests/ShapeContext/test_multi.py b/SimpleCV/tests/ShapeContext/test_multi.py
index 61f962526..73d18bce2 100644
--- a/SimpleCV/tests/ShapeContext/test_multi.py
+++ b/SimpleCV/tests/ShapeContext/test_multi.py
@@ -17,7 +17,7 @@
 for i in iset:
     names.append(i.filename)
-print names
+print(names)
 names = names[0:subset]
 scc = None
@@ -29,20 +29,20 @@
     scc = ShapeContextClassifier(iset,names) #this needs to be pickled.
     pickle.dump(scc, open( fname, "wb" ) )
-print "--------------------------"
-print "--------------------------"
-print "Performing Analysis!"
-print "--------------------------"
-print "--------------------------"
+print("--------------------------")
+print("--------------------------")
+print("Performing Analysis!")
+print("--------------------------")
+print("--------------------------")
 classifications = []
 i = 0
 for test in testset:
-    print "--------------------------"
+    print("--------------------------")
     best, value, result = scc.classify(test)
-    print "Total points in result " + str(len(scc.ptMap[best]))
-    print "Testing: " + test.filename
-    print "Best Result: " + best
-    words = string.split(best,'/')
-    words2 = string.split(test.filename,'/')
+    print("Total points in result " + str(len(scc.ptMap[best])))
+    print("Testing: " + test.filename)
+    print("Best Result: " + best)
+    words = best.split('/')
+    words2 = test.filename.split('/')
     test = test.resize(h=400)
@@ -61,5 +61,5 @@
     i = i + 1
     matchImg.save(fname)
     classifications.append((test.filename,result))
-    print result
+    print(result)
 pickle.dump(classifications, open( "classifications.pkl", "wb" ) )
diff --git a/SimpleCV/tests/YCrCbtests.py b/SimpleCV/tests/YCrCbtests.py
index b4738dad8..1a9b21088 100644
--- a/SimpleCV/tests/YCrCbtests.py
+++ b/SimpleCV/tests/YCrCbtests.py
@@ -3,54 +3,54 @@
 img1 = img.toYCrCb()
 if (img1.isYCrCb()):
-    print "Converted to YCrCb\n"
+    print("Converted to YCrCb\n")
 img1 = img.toBGR()
 img2 = img1.toYCrCb()
 if (img2.isYCrCb()):
-    print "Converted BGR to YCrCb\n"
+    print("Converted BGR to YCrCb\n")
 img1 = img.toHLS()
 img2 = img1.toYCrCb()
 if (img2.isYCrCb()):
-    print "Converted HLS to YCrCb\n"
+    print("Converted HLS to YCrCb\n")
 img1 = img.toHSV()
 img2 = img1.toYCrCb()
 if (img2.isYCrCb()):
-    print "Converted HSV to YCrCb\n"
+    print("Converted HSV to YCrCb\n")
 img1 = img.toXYZ()
 img2 = img1.toYCrCb()
 if (img2.isYCrCb()):
-    print "Converted XYZ to YCrCb\n"
+    print("Converted XYZ to YCrCb\n")
 img1 = img.toYCrCb()
 img2 = img1.toRGB()
 if (img2.isYCrCb()):
-    print "Converted from YCrCb to RGB\n"
+    print("Converted from YCrCb to RGB\n")
 img1 = img.toYCrCb()
 img2 = img1.toBGR()
 if (img2.isRGB()):
-    print "Converted from YCrCb to RGB\n"
+    print("Converted from YCrCb to RGB\n")
 img1 = img.toYCrCb()
 img2 = img1.toHLS()
 if (img2.isHLS()):
-    print "Converted from YCrCb to HLS\n"
+    print("Converted from YCrCb to HLS\n")
 img1 = img.toYCrCb()
 img2 = img1.toHSV()
 if (img2.isHSV()):
-    print "Converted from YCrCb to HSV\n"
+    print("Converted from YCrCb to HSV\n")
 img1 = img.toYCrCb()
 img2 = img1.toXYZ()
 if (img2.isXYZ()):
-    print "Converted from YCrCb to XYZ\n"
+    print("Converted from YCrCb to XYZ\n")
 img1 = img.toGray()
 img2 = img1.toGray()
 if (img2.isGray()):
-    print "Converted from Gray to Gray\n"
+    print("Converted from Gray to Gray\n")
diff --git a/SimpleCV/tests/test_cameras.py b/SimpleCV/tests/test_cameras.py
index 7e7440b53..1e8f61be1 100644
--- a/SimpleCV/tests/test_cameras.py
+++ b/SimpleCV/tests/test_cameras.py
@@ -13,8 +13,8 @@ def test_virtual_camera_constructor():
     props = mycam.getAllProperties()
-    for i in props.keys():
-        print str(i) + ": " + str(props[i]) + "\n"
+    for i in list(props.keys()):
+        print(str(i) + ": " + str(props[i]) + "\n")
     pass
diff --git a/SimpleCV/tests/test_display.py b/SimpleCV/tests/test_display.py
index d28648775..e3314432d 100644
--- a/SimpleCV/tests/test_display.py
+++ b/SimpleCV/tests/test_display.py
@@ -77,7 +77,7 @@ def imgDiffs(test_imgs,name_stem,tolerance,path):
         diff = (lhs-rhs)
         val = np.average(diff.getNumpy())
         if( val > tolerance ):
-            print val
+            print(val)
             return True
     return False
@@ -783,7 +783,7 @@ def test_blob_maker():
     img = Image("../sampleimages/blockhead.png")
     blobber = BlobMaker()
     results = blobber.extract(img)
-    print(len(results))
+    print((len(results)))
     if( len(results) != 7 ):
         assert False
@@ -920,7 +920,8 @@ def test_applyBinaryMask():
 def test_applyPixelFunc():
     img = Image(logo)
-    def myFunc((r,g,b)):
+    def myFunc(rgb):
+        (r,g,b) = rgb
         return( (b,g,r) )
     img = img.applyPixelFunction(myFunc)
@@ -1284,7 +1285,7 @@ def test_keypoint_match():
     fs0 = match0.findKeypointMatch(template)#test zero
     fs1 = match1.findKeypointMatch(template,quality=300.00,minDist=0.5,minMatch=0.2)
     fs3 = match3.findKeypointMatch(template,quality=300.00,minDist=0.5,minMatch=0.2)
-    print "This should fail"
+    print("This should fail")
     fs2 = match2.findKeypointMatch(template,quality=500.00,minDist=0.2,minMatch=0.1)
     if( fs0 is not None and fs1 is not None and fs2 is None and fs3 is not None):
         fs0.draw()
@@ -1619,7 +1620,7 @@ def test_minrect_blobs():
     results = []
     for i in range(-10,10):
         ang = float(i*18.00)
-        print ang
+        print(ang)
         t = img.rotate(ang)
         b = t.findBlobs(threshval=128)
         b[-1].drawMinRect(color=Color.RED,width=5)
diff --git a/SimpleCV/tests/test_optional.py b/SimpleCV/tests/test_optional.py
index 3c0af7ff2..22a4abaa9 100644
--- a/SimpleCV/tests/test_optional.py
+++ b/SimpleCV/tests/test_optional.py
@@ -88,7 +88,7 @@ def test_detection_ocr():
     img = Image(ocrimage)
     foundtext = img.readText()
-    print foundtext
+    print(foundtext)
     if(len(foundtext) <= 1):
         assert False
     else:
diff --git a/SimpleCV/tests/test_stereovision.py b/SimpleCV/tests/test_stereovision.py
index 11c2201b0..c36b77902 100644
--- a/SimpleCV/tests/test_stereovision.py
+++ b/SimpleCV/tests/test_stereovision.py
@@ -51,7 +51,7 @@ def imgDiffs(test_imgs,name_stem,tolerance,path):
         diff = (lhs-rhs)
         val = np.average(diff.getNumpy())
         if( val > tolerance ):
-            print val
+            print(val)
             return True
     return False
diff --git a/SimpleCV/tests/test_vimba.py b/SimpleCV/tests/test_vimba.py
index 56c85fb46..6cfde4f2f 100644
--- a/SimpleCV/tests/test_vimba.py
+++ b/SimpleCV/tests/test_vimba.py
@@ -5,58 +5,58 @@
 def printPrettyHeader(msg):
-    print "*"*80 + "\n* %s *\n" % msg + "*"*80
+    print("*"*80 + "\n* %s *\n" % msg + "*"*80)
 def _getProperty(c):
     printPrettyHeader("Test getProperty")
     prop = "ExposureMode"
-    print "%s=%s" % (prop, c.getProperty(prop))
+    print("%s=%s" % (prop, c.getProperty(prop)))
 def _getAllProperties(c):
     printPrettyHeader("Test getAllProperties")
     allprops = c.getAllProperties()
-    for k in sorted(allprops.iterkeys()) :
-        print "%s=%s" % (k,allprops[k])
+    for k in sorted(allprops.keys()) :
+        print("%s=%s" % (k,allprops[k]))
 def _setProperty(c):
     printPrettyHeader("Test setProperty (toggle AcquisitionMode)")
     prop = "AcquisitionMode"
     val = c.getProperty(prop)
-    print "BEFORE: %s=%s" % (prop, val)
+    print("BEFORE: %s=%s" % (prop, val))
     newval = "Continuous" if val == "SingleFrame" else "SingleFrame"
-    print "newval=%s" % newval
+    print("newval=%s" % newval)
     c.setProperty(prop, "Continuous")
     time.sleep(0.2)
     val = c.getProperty(prop)
-    print "AFTER: %s=%s" % (prop, val)
+    print("AFTER: %s=%s" % (prop, val))
 def _setupASyncMode(c):
     printPrettyHeader("Test setupASyncMode (toggle TriggerSource)")
     prop1 = 'AcquisitionMode'
     prop2 = 'TriggerSource'
-    print 'BEFORE: %s=%s, %s=%s' % (prop1, c.getProperty(prop1), prop2, c.getProperty(prop2))
+    print('BEFORE: %s=%s, %s=%s' % (prop1, c.getProperty(prop1), prop2, c.getProperty(prop2)))
     c.setupASyncMode()
-    print 'AFTER: %s=%s, %s=%s' % (prop1, c.getProperty(prop1), prop2, c.getProperty(prop2))
+    print('AFTER: %s=%s, %s=%s' % (prop1, c.getProperty(prop1), prop2, c.getProperty(prop2)))
 def _setupSyncMode(c):
     printPrettyHeader("Test setupSyncMode (toggle TriggerSource)")
     prop1 = 'AcquisitionMode'
     prop2 = 'TriggerSource'
-    print 'BEFORE: %s=%s, %s=%s' % (prop1, c.getProperty(prop1), prop2, c.getProperty(prop2))
+    print('BEFORE: %s=%s, %s=%s' % (prop1, c.getProperty(prop1), prop2, c.getProperty(prop2)))
     c.setupSyncMode()
-    print 'AFTER: %s=%s, %s=%s' % (prop1, c.getProperty(prop1), prop2, c.getProperty(prop2))
+    print('AFTER: %s=%s, %s=%s' % (prop1, c.getProperty(prop1), prop2, c.getProperty(prop2)))
 def _getImage(c):
     printPrettyHeader("Test getImage")
     img = c.getImage()
     img.save("test_getImage_scv.png")
-    print "test_getImage_scv.png saved"
+    print("test_getImage_scv.png saved")
 def _runCommand(c):
     printPrettyHeader("Test runCommand")
@@ -75,14 +75,14 @@ def _runCommand(c):
     rgb = cv2.cvtColor(moreUsefulImgData, cv2.COLOR_BAYER_RG2RGB)
     vimbacam.endCapture()
     cv2.imwrite('test_runCommand.png', rgb)
-    print "test_runCommand.png saved"
+    print("test_runCommand.png saved")
 def _listAllCameras(c):
     printPrettyHeader("Test listAllCameras")
     l = c.listAllCameras()
     for i in l:
-        print 'Camera Id=%s' % i.cameraIdString
+        print('Camera Id=%s' % i.cameraIdString)
 def test_all():
     c = VimbaCamera()
diff --git a/SimpleCV/tests/test_vimba_async.py b/SimpleCV/tests/test_vimba_async.py
index 40afc027f..cdef8baed 100644
--- a/SimpleCV/tests/test_vimba_async.py
+++ b/SimpleCV/tests/test_vimba_async.py
@@ -6,7 +6,7 @@
 from pymba import Vimba
 def printPrettyHeader(msg):
-    print "*"*80 + "\n* %s *\n" % msg + "*"*80
+    print("*"*80 + "\n* %s *\n" % msg + "*"*80)
 def _takeShots(cam, numPics, filename):
     start = time.time()
@@ -16,7 +16,7 @@
         img.save("%s_%d.png" % (filename, i))
     end = time.time()
     elapsed = end - start
-    print "Took %f seconds" % elapsed
+    print("Took %f seconds" % elapsed)
 '''
 def test_AVT_threaded_getImage():
diff --git a/SimpleCV/tests/test_vimba_manyshots.py b/SimpleCV/tests/test_vimba_manyshots.py
index 9661750a0..b2de5d36c 100644
--- a/SimpleCV/tests/test_vimba_manyshots.py
+++ b/SimpleCV/tests/test_vimba_manyshots.py
@@ -8,7 +8,7 @@
 #time.sleep(0.2)
 def printPrettyHeader(msg):
-    print "*"*80 + "\n* %s *\n" % msg + "*"*80
+    print("*"*80 + "\n* %s *\n" % msg + "*"*80)
 def _takeShots(cam, numPics, filename):
     start = time.time()
@@ -18,17 +18,17 @@
         img.save("%s_%d.png" % (filename, i))
     end = time.time()
     elapsed = end - start
-    print "Took %f seconds" % elapsed
+    print("Took %f seconds" % elapsed)
 def _takeManyVimbaShots(idx):
     c = VimbaCamera()
-    print "_takeManyVimbaShots %d" % idx
+    print("_takeManyVimbaShots %d" % idx)
     _takeShots(c, 10, "cam_vimba%d" % idx)
 def _takeAVTManyShots(idx):
     c = AVTCamera()
-    print "_takeAVTManyShots %d" % idx
+    print("_takeAVTManyShots %d" % idx)
     _takeShots(c, 10, "cam_avtnative%d" % idx)
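# The tests.py hunks below show the most common Python 3 change in this
# patch after print(): map(), zip(), and dict views become lazy iterators,
# so results that are indexed, reused, or measured with len() get wrapped
# in list(), and dict.has_key(k) becomes "k in d". A minimal sketch with
# made-up values (not taken from the patch):

props = {"width": 640, "height": 480}
if "height" in props:                                 # replaces props.has_key("height")
    pairs = list(zip(props.keys(), props.values()))   # realize the zip iterator
    labels = list(map(str, pairs))                    # same for map()
    print(len(pairs), labels)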
diff --git a/SimpleCV/tests/tests.py b/SimpleCV/tests/tests.py
index b90455238..52153cb6a 100644
--- a/SimpleCV/tests/tests.py
+++ b/SimpleCV/tests/tests.py
@@ -71,7 +71,7 @@ def imgDiffs(test_imgs,name_stem,tolerance,path):
         diff = (lhs-rhs)
         val = np.average(diff.getNumpy())
         if( val > tolerance ):
-            print val
+            print(val)
             return True
     return False
@@ -975,7 +975,7 @@ def test_image_crop():
     xs = [10,10,10,20,20,20,30,30,40,40,40,50,50,50]
     ys = [10,20,50,20,30,40,30,10,40,50,10,50,10,42]
-    lots = zip(xs,ys)
+    lots = list(zip(xs,ys))
     tests.append(img.crop(xs,ys)) # 10
     tests.append(img.crop(lots)) # 11
@@ -986,7 +986,7 @@
     failed = False
     for img in tests:
         if( img is None or img.width != 40 and img.height != 40 ):
-            print "FAILED CROP TEST " + str(i) + " " + str(img)
+            print("FAILED CROP TEST " + str(i) + " " + str(img))
             failed = True
         i = i + 1
@@ -1247,7 +1247,7 @@ def test_blob_maker():
     img = Image("../sampleimages/blockhead.png")
     blobber = BlobMaker()
     results = blobber.extract(img)
-    print(len(results))
+    print((len(results)))
     if( len(results) != 7 ):
         assert False
@@ -1388,7 +1388,7 @@ def test_detection_ocr():
     img = Image(ocrimage)
     foundtext = img.readText()
-    print foundtext
+    print(foundtext)
     if(len(foundtext) <= 1):
         assert False
     else:
@@ -1532,7 +1532,8 @@ def test_applyBinaryMask():
 def test_applyPixelFunc():
     img = Image(logo)
-    def myFunc((r,g,b)):
+    def myFunc(rgb):
+        (r,g,b) = rgb
         return( (b,g,r) )
     img = img.applyPixelFunction(myFunc)
@@ -1811,12 +1812,12 @@ def test_findKeypoints():
     flavors = ['SURF','STAR','FAST','MSER','ORB','BRISK','FREAK','SIFT','Dense']
     for flavor in flavors:
         try:
-            print "trying to find " + flavor + " keypoints."
+            print("trying to find " + flavor + " keypoints.")
             kp = img.findKeypoints(flavor=flavor)
         except:
             continue
         if( kp is not None ):
-            print "Found: " + str(len(kp))
+            print("Found: " + str(len(kp)))
             for k in kp:
                 k.getObject()
                 k.descriptor()
@@ -1836,7 +1837,7 @@ def test_findKeypoints():
                 k.crop()
             kp.draw()
         else:
-            print "Found None."
+            print("Found None.")
     results = [img]
     name_stem = "test_findKeypoints"
     #~ perform_diff(results,name_stem)
@@ -1949,7 +1950,7 @@ def test_keypoint_match():
     fs0 = match0.findKeypointMatch(template)#test zero
     fs1 = match1.findKeypointMatch(template,quality=300.00,minDist=0.5,minMatch=0.2)
     fs3 = match3.findKeypointMatch(template,quality=300.00,minDist=0.5,minMatch=0.2)
-    print "This should fail"
+    print("This should fail")
     fs2 = match2.findKeypointMatch(template,quality=500.00,minDist=0.2,minMatch=0.1)
     if( fs0 is not None and fs1 is not None and fs2 is None and fs3 is not None):
         fs0.draw()
@@ -2188,15 +2189,15 @@ def test_detection_spatial_relationships():
     feats = [blobFS,lineFS,cornFS,tempFS,moveFS]
     for f in feats:
-        print str(len(f))
+        print(str(len(f)))
     for f in feats:
         for g in feats:
             sample = f[0]
             sample2 = f[1]
-            print type(f[0])
-            print type(g[0])
+            print(type(f[0]))
+            print(type(g[0]))
             g.above(sample)
             g.below(sample)
@@ -2243,8 +2244,8 @@ def test_get_raw_dft():
         raw1[0].height != img.height or
         raw3[0].height != img.height or
         raw3[0].width != img.width or
-        raw1[0].depth != 64L or
-        raw3[0].depth != 64L or
+        raw1[0].depth != 64 or
+        raw3[0].depth != 64 or
         raw3[0].channels != 2 or
         raw3[0].channels != 2 ):
         assert False
@@ -2464,7 +2465,7 @@ def test_blob_spatial_relationships():
     #please see the image
     blobs = img.findBlobs(threshval=1)
     blobs = blobs.sortArea()
-    print(len(blobs))
+    print((len(blobs)))
     center = blobs[-1]
     top = blobs[-2]
@@ -2725,7 +2726,7 @@ def test_minrect_blobs():
     results = []
     for i in range(-10,10):
         ang = float(i*18.00)
-        print ang
+        print(ang)
         t = img.rotate(ang)
         b = t.findBlobs(threshval=128)
         b[-1].drawMinRect(color=Color.RED,width=5)
@@ -2978,7 +2979,7 @@ def test_findKeypoints_all():
     img = Image(testimage2)
     methods = ["ORB", "SIFT", "SURF","FAST", "STAR", "MSER", "Dense"]
     for i in methods :
-        print i
+        print(i)
         try:
             kp = img.findKeypoints(flavor = i)
         except:
@@ -3446,7 +3447,7 @@ def subtest(data,effect):
     i = 0
     for d in data:
         e = effect(d)
-        print (i,e)
+        print((i,e))
         i = i + 1
         if( first != e ):
             broke = True
@@ -3458,7 +3459,7 @@
     roiList.append(ROI(x=x,y=y,image=img))
     roiList.append(ROI(x=list(x),y=list(y),image=img))
     roiList.append(ROI(x=tuple(x),y=tuple(y),image=img))
-    roiList.append(ROI(zip(x,y),image=img))
+    roiList.append(ROI(list(zip(x,y)),image=img))
     roiList.append(ROI((xmin,ymin),(xmax,ymax),image=img))
     roiList.append(ROI(xmin,ymin,w,h,image=img))
     roiList.append(ROI([(xmin,ymin),(xmax,ymin),(xmax,ymax),(xmin,ymax)],image=img))
@@ -3479,10 +3480,10 @@ def toXYWH( roi ):
     broi.toUnitTLAndBR()
     broi.toUnitPoints()
     roiList[0].crop()
-    newROI=ROI(zip(x,y),image=mask)
+    newROI=ROI(list(zip(x,y)),image=mask)
     test = newROI.crop()
     xroi,yroi = np.where(test.getGrayNumpy()>128)
-    roiPts = zip(xroi,yroi)
+    roiPts = list(zip(xroi,yroi))
     realPts = newROI.CoordTransformPts(roiPts)
     unitROI = newROI.CoordTransformPts(roiPts,output="ROI_UNIT")
     unitSRC = newROI.CoordTransformPts(roiPts,output="SRC_UNIT")
@@ -3926,7 +3927,7 @@ def test_smartRotate():
     st2 = img.rotate(27,fixed = False).resize(500,500)
     diff = np.average((st1-st2).getNumpy())
     if (diff > 1.7):
-        print diff
+        print(diff)
         assert False
     else:
         assert True
diff --git a/SimpleCV/tests/vcamera_tests.py b/SimpleCV/tests/vcamera_tests.py
index 8895cef01..241d2c2ad 100644
--- a/SimpleCV/tests/vcamera_tests.py
+++ b/SimpleCV/tests/vcamera_tests.py
@@ -34,8 +34,8 @@ def test_camera_constructor():
     mycam = VirtualCamera(testimage, "image")
     props = mycam.getAllProperties()
-    for i in props.keys():
-        print str(i) + ": " + str(props[i]) + "\n"
+    for i in list(props.keys()):
+        print(str(i) + ": " + str(props[i]) + "\n")
     pass
diff --git a/SimpleCV/tools/Calibrate.py b/SimpleCV/tools/Calibrate.py
index 96eb6f431..d343487e9 100644
--- a/SimpleCV/tools/Calibrate.py
+++ b/SimpleCV/tools/Calibrate.py
@@ -86,7 +86,7 @@ def verticalTilt(cb):
     #ratio between the 0, 1 and 2,3 point pairs
     return distance_ratio
 def introMessage():
-    print """
+    print("""
 This tool will help you calibrate your camera to help remove the effects
 of lens distortion and give you more accurate measurement. You will need:
@@ -97,7 +97,7 @@
 To begin, please put your chessboard close to the camera so the long side is horizontal
 and it fills most of the screen. Keep it parallel to the camera so it appears
 within the rectangle.
-    """
+    """)
 def findLargeFlat(cb, i, calibration_set, dims):
     drawline(i, (10, 10), (i.width - 10, 10))
diff --git a/doc/conf.py b/doc/conf.py
index abc321eb3..f9fe334f3 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -42,8 +42,8 @@
 master_doc = 'index'
 # General information about the project.
-project = u'SimpleCV'
-copyright = u'2011, Sight Machine'
+project = 'SimpleCV'
+copyright = '2011, Sight Machine'
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -186,8 +186,8 @@
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'SimpleCV.tex', u'SimpleCV Documentation',
-   u'Ingeuitas', 'manual'),
+  ('index', 'SimpleCV.tex', 'SimpleCV Documentation',
+   'Ingeuitas', 'manual'),
 ]
 # The name of an image file (relative to this directory) to place at the top of
@@ -216,8 +216,8 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'simplecv', u'SimpleCV Documentation',
-     [u'Ingeuitas'], 1)
+    ('index', 'simplecv', 'SimpleCV Documentation',
+     ['Ingeuitas'], 1)
 ]
 # If true, show URL addresses after external links.
@@ -230,7 +230,7 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('index', 'SimpleCV', u'SimpleCV Documentation', u'Ingeuitas',
+  ('index', 'SimpleCV', 'SimpleCV Documentation', 'Ingeuitas',
    'SimpleCV', 'One line description of project.', 'Miscellaneous'),
 ]
@@ -247,10 +247,10 @@
 # -- Options for Epub output ---------------------------------------------------
 # Bibliographic Dublin Core info.
-epub_title = u'SimpleCV'
-epub_author = u'Ingeuitas'
-epub_publisher = u'Ingeuitas'
-epub_copyright = u'2011, Ingeuitas'
+epub_title = 'SimpleCV'
+epub_author = 'Ingeuitas'
+epub_publisher = 'Ingeuitas'
+epub_copyright = '2011, Ingeuitas'
 # The language of the text. It defaults to the language option
 # or en if the language is not set.
diff --git a/requirements.txt b/requirements.txt
index 192b357e7..a610c5b70 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,4 +3,4 @@ scipy
 PIL
 ipython
 svgwrite
-pygame==1.9.1release
+pygame
diff --git a/scripts/install/mac/findmods.py b/scripts/install/mac/findmods.py
index e2bded277..c65ec4da8 100644
--- a/scripts/install/mac/findmods.py
+++ b/scripts/install/mac/findmods.py
@@ -10,7 +10,7 @@
 easy_installed_pkgs = dict()
 easy_installed_path = "/Library/Python/2.6/site-packages";
-for k in sys.modules.keys():
+for k in list(sys.modules.keys()):
     if type(sys.modules[k]) == NoneType:
         continue
@@ -26,5 +26,5 @@
         easy_installed_pkgs[dirs[1]] = 1
-for egg in easy_installed_pkgs.keys():
-    print easy_installed_path + "/" + egg
+for egg in list(easy_installed_pkgs.keys()):
+    print(easy_installed_path + "/" + egg)
diff --git a/scripts/install/win/OpenKinect/freenect-examples/demo_cv_sync_multi.py b/scripts/install/win/OpenKinect/freenect-examples/demo_cv_sync_multi.py
index 51de85d4e..977551adb 100644
--- a/scripts/install/win/OpenKinect/freenect-examples/demo_cv_sync_multi.py
+++ b/scripts/install/win/OpenKinect/freenect-examples/demo_cv_sync_multi.py
@@ -12,7 +12,7 @@
 cv.NamedWindow('Depth')
 cv.NamedWindow('Video')
 ind = 0
-print('%s\nPress ESC to stop' % __doc__)
+print(('%s\nPress ESC to stop' % __doc__))
 def get_depth(ind):
diff --git a/scripts/install/win/OpenKinect/freenect-examples/demo_cv_thresh_sweep.py b/scripts/install/win/OpenKinect/freenect-examples/demo_cv_thresh_sweep.py
index ec85246b2..052d36adf 100644
--- a/scripts/install/win/OpenKinect/freenect-examples/demo_cv_thresh_sweep.py
+++ b/scripts/install/win/OpenKinect/freenect-examples/demo_cv_thresh_sweep.py
@@ -25,7 +25,7 @@ def disp_thresh(lower, upper):
 upper = 100
 max_upper = 2048
 while upper < max_upper:
-    print('%d < depth < %d' % (lower, upper))
+    print(('%d < depth < %d' % (lower, upper)))
     disp_thresh(lower, upper)
     time.sleep(.1)
     lower += 20
diff --git a/scripts/install/win/OpenKinect/freenect-examples/demo_tilt.py b/scripts/install/win/OpenKinect/freenect-examples/demo_tilt.py
index ede190436..b752e7530 100644
--- a/scripts/install/win/OpenKinect/freenect-examples/demo_tilt.py
+++ b/scripts/install/win/OpenKinect/freenect-examples/demo_tilt.py
@@ -19,7 +19,7 @@ def body(dev, ctx):
     tilt = random.randint(0, 30)
     freenect.set_led(dev, led)
     freenect.set_tilt_degs(dev, tilt)
-    print('led[%d] tilt[%d] accel[%s]' % (led, tilt, freenect.get_accel(dev)))
+    print(('led[%d] tilt[%d] accel[%s]' % (led, tilt, freenect.get_accel(dev))))
 def handler(signum, frame):
diff --git a/scripts/install/win/OpenKinect/freenect-examples/setup.py b/scripts/install/win/OpenKinect/freenect-examples/setup.py
index fa4126acd..517951d30 100644
--- a/scripts/install/win/OpenKinect/freenect-examples/setup.py
+++ b/scripts/install/win/OpenKinect/freenect-examples/setup.py
@@ -16,7 +16,7 @@ def get_cython_version():
     match = re.search('^([0-9]+)\.([0-9]+)', Cython.Compiler.Main.Version.version)
     try:
-        return map(int, match.groups())
+        return list(map(int, match.groups()))
     except AttributeError:
         raise ImportError
diff --git a/scripts/mkvirt.py b/scripts/mkvirt.py
index 6b392f877..b42936811 100644
--- a/scripts/mkvirt.py
+++ b/scripts/mkvirt.py
@@ -4,7 +4,7 @@
 here = os.path.dirname(os.path.abspath(__file__))
 base_dir = os.path.dirname(here)
-print "Creating SimpleCV Bootstrap Install Script: simplecv-bootstrap.py"
+print("Creating SimpleCV Bootstrap Install Script: simplecv-bootstrap.py")
 output = virtualenv.create_bootstrap_script(textwrap.dedent("""
 import os, subprocess