ValueError: not enough values to unpack (expected 3, got 2) #6

Open
momo1986 opened this issue Mar 22, 2019 · 3 comments

Comments

@momo1986

Hello, and thank you for sharing this project. I want to build a detection-and-tracking model for a different kind of object.
I already have a pre-trained image classification model.
Then, when I load the video and run the image erosion step, an error is raised.
This is the error log from running on Windows:

D:\PythonSpace\Hand_Tracking>python track_finger.py --file car.flv
Using TensorFlow backend.
car.flv
D:\PythonSpace\Hand_Tracking
D:\PythonSpace\Hand_Tracking\car.flv
<class 'cv2.VideoCapture'>
2019-03-22 11:51:22.024142: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
C:\Python\Python37\lib\site-packages\keras\engine\saving.py:292: UserWarning: No training configuration found in save file: the model was not compiled. Compile it manually.
warnings.warn('No training configuration found in save file: '
[[0 0 0 ... 0 0 0]
[0 0 0 ... 0 0 0]
[0 0 0 ... 0 0 0]
...
[0 0 0 ... 0 0 0]
[0 0 0 ... 0 0 0]
[0 0 0 ... 0 0 0]]
Traceback (most recent call last):
File "track_finger.py", line 157, in
if name == 'main':
File "track_finger.py", line 44, in main
args = parser.parse_args()
File "track_finger.py", line 109, in track
dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 3)), iterations=2)
ValueError: not enough values to unpack (expected 3, got 2)

I barely changed your original code; the main change is swapping in a different classification model:

"""
Date : 7th Jan 2018

Author : xiaochus
Email : [email protected]
Affiliation : School of Computer Science and Communication Engineering
                - Jiangsu University - China

License : MIT

Status : Under Active Development

Description :
OpenCV 3 & Keras implementation of the vehicle tracking.
"""

import sys
import copy
import argparse
import cv2
import numpy as np
from keras.models import load_model
import os

from utils.entity import Entity


def main(argv):
    parser = argparse.ArgumentParser()
    # Required arguments.
    parser.add_argument(
        "--file",
        help="Input video file.",
    )
    # Optional arguments.
    parser.add_argument(
        "--iou",
        default=0.2,
        help="threshold for tracking",
    )
    args = parser.parse_args()
    track(args.file, args.iou)


def overlap(box1, box2):
    """
    Check the overlap of two boxes
    """
    endx = max(box1[0] + box1[2], box2[0] + box2[2])
    startx = min(box1[0], box2[0])
    width = box1[2] + box2[2] - (endx - startx)

    endy = max(box1[1] + box1[3], box2[1] + box2[3])
    starty = min(box1[1], box2[1])
    height = box1[3] + box2[3] - (endy - starty)

    if (width <= 0 or height <= 0):
        return 0
    else:
        Area = width * height
        Area1 = box1[2] * box1[3]
        Area2 = box2[2] * box2[3]
        ratio = Area / (Area1 + Area2 - Area)

        return ratio


def track(video, iou):
    print(video)
    print(os.getcwd())
    print(os.path.join(os.getcwd(), video))
    camera = cv2.VideoCapture(video)
    print(type(camera))
    res, frame = camera.read()
    y_size = frame.shape[0]
    x_size = frame.shape[1]
    

    # Load CNN classification model
    model = load_model('brightness3ch.h5')
    # Definition of MOG2 Background Subtraction
    bs = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
    history = 20
    frames = 0
    counter = 0

    track_list = []
    cv2.namedWindow("detection", cv2.WINDOW_NORMAL)
    while True:
        res, frame = camera.read()
        y_size = frame.shape[0]
        x_size = frame.shape[1]
        cv2.imwrite("result.png", frame)
        if res is False:
            break
        # Train the MOG2 with first frames frame
        fg_mask = bs.apply(frame)

        if frames < history:
            frames += 1
            continue
        # Expansion and denoising the original frame
        th = cv2.threshold(fg_mask.copy(), 244, 255, cv2.THRESH_BINARY)[1]
        print(th)
        th = cv2.erode(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=2)
        dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 3)), iterations=2)
        image, contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Check the bounding boxes
        for c in contours:
            x, y, w, h = cv2.boundingRect(c)
            if cv2.contourArea(c) > 3000:
                # Extract roi
                img = frame[y: y + h, x: x + w, :]
                rimg = cv2.resize(img, (64, 64), interpolation=cv2.INTER_CUBIC)
                image_data = np.array(rimg, dtype='float32')
                image_data /= 255.
                roi = np.expand_dims(image_data, axis=0)
                flag = model.predict(roi)
                print(flag)

                if flag[0][0] >= 0 and flag[0][0] <=9:
                    e = Entity(counter, (x, y, w, h), frame)

                    # Exclude existing targets in the tracking list
                    if track_list:
                        count = 0
                        num = len(track_list)
                        for p in track_list:
                            if overlap((x, y, w, h), p.windows) < iou:
                                count += 1
                        if count == num:
                            track_list.append(e)
                    else:
                        track_list.append(e)
                    counter += 1

        # Check and update goals
        if track_list:
            tlist = copy.copy(track_list)
            for e in tlist:
                x, y = e.center
                if 10 < x < x_size - 10 and 10 < y < y_size - 10:
                    e.update(frame)
                else:
                    track_list.remove(e)
        frames += 1
        cv2.imshow("detection", frame)
        if cv2.waitKey(110) & 0xff == 27:
            break
    camera.release()


if __name__ == '__main__':
    main(sys.argv)

The error happens during the dilation/erosion step, and I get the same error log with both your original video and my own video.
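For reference, cv2.erode and cv2.dilate each return a single ndarray and are assigned to a single name in the script, so they cannot raise this unpack error on their own; "expected 3, got 2" can only come from a statement that unpacks a return value into three names, such as the image, contours, hier = cv2.findContours(...) line right after the dilate call (the source lines shown in the traceback may simply be out of sync with the file that was actually run). A small sanity check using only a dummy mask:

import cv2
import numpy as np

mask = np.zeros((50, 50), dtype=np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 3))

dilated = cv2.dilate(mask, kernel, iterations=2)
print(type(dilated))  # a single numpy.ndarray -- nothing here is unpacked

# The unpack error appears when a call that returns a 2-tuple is split into three names,
# e.g. image, contours, hier = cv2.findContours(...) under OpenCV 4.x,
# which returns only (contours, hierarchy).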

@xiaochus
Owner

@momo1986 Could this be an OpenCV version issue? The code is based on OpenCV 3.2.
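A quick way to check this hypothesis: cv2.findContours returns three values (image, contours, hierarchy) in OpenCV 3.x but only two (contours, hierarchy) in 4.x, which matches the "expected 3, got 2" message exactly. A minimal, version-agnostic sketch, with a synthetic mask standing in for the dilated frame:

import cv2
import numpy as np

print(cv2.__version__)  # 3.x -> three return values, 4.x -> two

# Synthetic binary mask with one filled rectangle as a stand-in for `dilated`.
mask = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(mask, (20, 20), (60, 60), 255, -1)

# Contours and hierarchy are always the last two items, whichever version is installed.
ret = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours, hier = ret[-2], ret[-1]
print(len(contours))  # 1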

@momo1986
Author

momo1986 commented Mar 22, 2019

@momo1986 Could this be an OpenCV version issue? The code is based on OpenCV 3.2.

The error is raised at the cv2.dilate call.
Also, why does threshold keep giving me an all-zero array the whole way through? I don't see any image pop up either, only a black area. Yet I can still save the frame out as an image.
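On the all-zero threshold output: with detectShadows=True, MOG2 labels shadow pixels as 127 and definite foreground as 255, and the THRESH_BINARY cut at 244 keeps only the 255 pixels, so th is expected to be mostly zeros while the background model is still warming up or while nothing is moving in the scene. A small probe, assuming the same car.flv clip (any video path works):

import cv2
import numpy as np

cap = cv2.VideoCapture("car.flv")  # hypothetical path; substitute your own clip
bs = cv2.createBackgroundSubtractorMOG2(detectShadows=True)

for i in range(100):
    ok, frame = cap.read()
    if not ok:
        break
    fg = bs.apply(frame)
    # Shadows are written as 127, definite foreground as 255;
    # thresholding at 244 keeps only the definite-foreground pixels.
    th = cv2.threshold(fg, 244, 255, cv2.THRESH_BINARY)[1]
    print(i, np.count_nonzero(th))  # stays 0 until the model has seen enough motion

cap.release()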

@ShidiDaisy

I ran into the same problem running the original code. Has anyone solved it?
