ValueError: too many values to unpack (expected 4) during ORB detection

Issue

I’m detecting Unicode words on a starscape background. They all look a little like the example images below. I have a folder with 183 images, if any of those images are detected I want to click a button on the screen. If no images are detected, I want to repeat the process.

enter image description here
enter image description here
enter image description here

So far I’ve been having the most success with Template Matching. I load the images into an array, loop through the entire array and if anything returns a >90% match I click the button.

This however is returning a large number of false positives. To improve my detection I’ve tried;

  • Canny Edge detection
  • HSV Thresholding
  • MatchTemplate
  • SIFT and SURF
  • and, Brute-Force Matching with ORB Descriptors

The best results by a long shot have been with ORB, hands down, not even close.

I’ve followed this tutorial and all of the tutorials on opencv.org however I’m getting the following error what seems like fairly randomly. Usually it’s when the background application image changes significantly but I’ve no idea why that would matter.

    Traceback (most recent call last):
      File "c:\Users\keypoint_detection_test1.py", line 63, in <module>
        keypoint_detection(ships_to_avoid)
      File "c:\Users\keypoint_detection_test1.py", line 39, in keypoint_detection
        kp1, kp2, matches, match_points = objectToFind.match_keypoints(keypoint_image)
    ValueError: too many values to unpack (expected 4)

What does this error mean and how do I fix it?

import cv2 as cv
import os
import glob
    
# Change the working directory to the folder this script is in.
os.chdir('C:\\Users\\')

avoid = glob.glob(r"C:\Users\*.png")

def loadImages(directory):
    # Intialise empty array
    image_list = []
    # Add images to array
    for i in directory:
        img = cv.imread(i, cv.IMREAD_UNCHANGED)
        image_list.append((img, i))
    return image_list

# initialize the WindowCapture class
wincap = WindowCapture()

def keypoint_detection(image_list):

    for i in image_list:
        needle_img = i[0]
        needle_name = i[1]

        # load image to find
        objectToFind = Vision(needle_img)
        # get an updated image of the screen
        keypoint_image = wincap.get_haystack()
        # crop the image
        x, w, y, h = [600,700,20,50]
        keypoint_image = keypoint_image[y:y+h, x:x+w]

        kp1, kp2, matches, match_points = objectToFind.match_keypoints(keypoint_image)
        match_image = cv.drawMatches(objectToFind.needle_img, kp1, keypoint_image, kp2, matches, None)

        if match_points:
            # find the center point of all the matched features
            center_point = objectToFind.centeroid(match_points)
            # account for the width of the needle image that appears on the left
            center_point[0] += objectToFind.needle_w
            # drawn the found center point on the output image
            match_image = objectToFind.draw_crosshairs(match_image, [center_point])

            # move somewhere/do something
            #py.moveTo(center_point)

        # display the processed image
        cv.imshow('Keypoint Search', match_image)

        # press 'q' with the output window focused to exit.
        if cv.waitKey(1) == ord('q'):
            cv.destroyAllWindows()
    

while(True):
    ships_to_avoid = loadImages(avoid)
    keypoint_detection(ships_to_avoid)
class Vision:

    # properties
    needle_img = None
    needle_w = 0
    needle_h = 0

    # constructor
    def __init__(self, needle_img_path):
        self.needle_img = needle_img_path 

        # Save the dimensions of the needle image
        self.needle_w = self.needle_img.shape[1]
        self.needle_h = self.needle_img.shape[0]

    def match_keypoints(self, original_image, patch_size=32):
        min_match_count = 35

        orb = cv.ORB_create(edgeThreshold=0, patchSize=patch_size)
        keypoints_needle, descriptors_needle = orb.detectAndCompute(self.needle_img, None)
        orb2 = cv.ORB_create(edgeThreshold=0, patchSize=patch_size, nfeatures=2000)
        keypoints_haystack, descriptors_haystack = orb2.detectAndCompute(original_image, None)

        FLANN_INDEX_LSH = 6
        index_params = dict(algorithm=FLANN_INDEX_LSH, table_number=6, key_size=12, multi_probe_level=1)
        search_params = dict(checks=50)

        try:
            flann = cv.FlannBasedMatcher(index_params, search_params)
            matches = flann.knnMatch(descriptors_needle, descriptors_haystack, k=2)
        except cv.error:
            return None, None, [], [], None

        # store all the good matches as per Lowe's ratio test.
        good = []
        points = []

        for pair in matches:
            if len(pair) == 2:
                if pair[0].distance < 0.7*pair[1].distance:
                    good.append(pair[0])

        if len(good) > min_match_count:
            print('match %03d, kp %03d' % (len(good), len(keypoints_needle)))
            for match in good:
                points.append(keypoints_haystack[match.trainIdx].pt)
        
        return keypoints_needle, keypoints_haystack, good, points
class WindowCapture:

    # properties
    w = 0
    h = 0
    hwnd = None
    cropped_x = 0
    cropped_y = 0
    offset_x = 0
    offset_y = 0

    # constructor
    def __init__(self, window_name=None):
        # find the handle for the window we want to capture.
        # if no window name is given, capture the entire screen
        if window_name is None:
            self.hwnd = win32gui.GetDesktopWindow()
        else:
            self.hwnd = win32gui.FindWindow(None, window_name)
            if not self.hwnd:
                raise Exception('Window not found: {}'.format(window_name))

        # get the window size
        window_rect = win32gui.GetWindowRect(self.hwnd)
        self.w = window_rect[2] - window_rect[0]
        self.h = window_rect[3] - window_rect[1]

        # account for the window border and titlebar and cut them off
        border_pixels = 0
        titlebar_pixels = 0
        self.w = self.w - (border_pixels * 2)
        self.h = self.h - titlebar_pixels - border_pixels
        self.cropped_x = border_pixels
        self.cropped_y = titlebar_pixels

        self.offset_x = window_rect[0] + self.cropped_x
        self.offset_y = window_rect[1] + self.cropped_y

    def get_haystack(self):

        # get the window image data
        wDC = win32gui.GetWindowDC(self.hwnd)
        dcObj = win32ui.CreateDCFromHandle(wDC)
        cDC = dcObj.CreateCompatibleDC()
        dataBitMap = win32ui.CreateBitmap()
        dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
        cDC.SelectObject(dataBitMap)
        cDC.BitBlt((0, 0), (self.w, self.h), dcObj, (self.cropped_x, self.cropped_y), win32con.SRCCOPY)

        signedIntsArray = dataBitMap.GetBitmapBits(True)
        img = np.fromstring(signedIntsArray, dtype='uint8')
        img.shape = (self.h, self.w, 4)

        # free resources
        dcObj.DeleteDC()
        cDC.DeleteDC()
        win32gui.ReleaseDC(self.hwnd, wDC)
        win32gui.DeleteObject(dataBitMap.GetHandle())
        img = img[...,:3]
        img = np.ascontiguousarray(img)
        return img

Solution

In match_keypoints you have (at least) two return statements.

One of them, in the except block returns 5 elements , None, None, [], [], None.

The other returns 4 elements, return keypoints_needle, keypoints_haystack, good, points

Thus, whenever match_keypoints encounters cv.error in that try block, it will return 5 elements, which is one more than you attempt to dereference in the line that is failing: kp1, kp2, matches, match_points = objectToFind.match_keypoints(keypoint_image)

Too many values to unpack is the error that occurs when the returned tuple has more elements than the number of variables on the LHS of the assignment.

Answered By – Ben

Answer Checked By – Clifford M. (AngularFixing Volunteer)

Leave a Reply

Your email address will not be published.