Saturday, January 9, 2021

Feature Detection Using Python GUI (PyQt) Part 4

This content is powered by Balige Publishing, in collaboration with Rismon Hasiholan Sianipar. See also: PART 1, PART 2, PART 3.

In this tutorial, you will learn how to use OpenCV, NumPy, and other libraries to perform feature detection with a Python GUI (PyQt). The feature detection techniques covered in this series are Harris Corner Detection, Shi-Tomasi Corner Detector, Scale-Invariant Feature Transform (SIFT), Speeded-Up Robust Features (SURF), Features from Accelerated Segment Test (FAST), Binary Robust Independent Elementary Features (BRIEF), and Oriented FAST and Rotated BRIEF (ORB).



Tutorial Steps To Detect Features Using Features from Accelerated Segment Test (FAST)

The FAST (Features from Accelerated Segment Test) algorithm was proposed by Edward Rosten and Tom Drummond in their 2006 paper "Machine learning for high-speed corner detection" (and later revised in 2010).
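
The core idea is the segment test: a pixel p is a corner if, on a Bresenham circle of 16 pixels around it, at least n contiguous pixels are all brighter than p plus a threshold or all darker than p minus the threshold (n = 12 in the original paper; the widely used FAST-9 variant takes n = 9). For intuition, below is a minimal, unoptimized sketch of this test in plain Python, assuming an 8-bit grayscale NumPy array; OpenCV's real implementation adds the high-speed pretest, a machine-learned decision tree, and non-maximum suppression.

import numpy as np

# Offsets of the 16-pixel Bresenham circle of radius 3 used by FAST,
# listed clockwise starting from the top.
CIRCLE_16 = [(0,-3),(1,-3),(2,-2),(3,-1),(3,0),(3,1),(2,2),(1,3),
             (0,3),(-1,3),(-2,2),(-3,1),(-3,0),(-3,-1),(-2,-2),(-1,-3)]

def is_fast_corner(gray, x, y, threshold=10, n=9):
    # Plain segment test: True if n contiguous circle pixels are all
    # brighter than center+threshold or all darker than center-threshold.
    # (x, y) must be at least 3 pixels away from the image border.
    center = int(gray[y, x])
    states = []
    for dx, dy in CIRCLE_16:
        v = int(gray[y + dy, x + dx])
        states.append(1 if v > center + threshold
                      else (-1 if v < center - threshold else 0))
    # Search for a run of n equal non-zero states; the list is doubled
    # so that runs wrapping around the circle are also found.
    run, prev = 0, 0
    for s in states + states:
        run = run + 1 if (s != 0 and s == prev) else (1 if s != 0 else 0)
        prev = s
        if run >= n:
            return True
    return False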

In OpenCV, cv.FastFeatureDetector_create() is available to detect features using FAST. Optionally, you can specify the threshold, whether non-maximum suppression should be applied, the neighborhood type to be used, and so on.

For the neighborhood, three flags are defined; in each name, the suffix N_M means that at least N contiguous pixels on a circle of M pixels must all be brighter or darker than the center:

cv.FAST_FEATURE_DETECTOR_TYPE_5_8
cv.FAST_FEATURE_DETECTOR_TYPE_7_12
cv.FAST_FEATURE_DETECTOR_TYPE_9_16
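
A quick way to see how the neighborhood choice affects sensitivity is to run the same image through all three types. This is a sketch that assumes an 8-bit grayscale image gray is already loaded:

import cv2 as cv

# Compare how the neighborhood type affects the keypoint count
# (assumes `gray` is an 8-bit grayscale image already loaded)
for t in (cv.FAST_FEATURE_DETECTOR_TYPE_5_8,
          cv.FAST_FEATURE_DETECTOR_TYPE_7_12,
          cv.FAST_FEATURE_DETECTOR_TYPE_9_16):
    fast = cv.FastFeatureDetector_create(threshold=10, type=t)
    print(t, len(fast.detect(gray, None)))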

Below is a simple script that detects and draws FAST feature points.

#fast.py
import numpy as np
import cv2 as cv
img = cv.imread('chessboard.png')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

# Initiate FAST object
fast = cv.FastFeatureDetector_create(threshold = 10, nonmaxSuppression = True, type=cv.FAST_FEATURE_DETECTOR_TYPE_7_12)

# find and draw the keypoints
kp = fast.detect(gray, None)
img2 = cv.drawKeypoints(img, kp, None, color=(0,0,255))

# Print the parameters the detector was created with
print( "Threshold: {}".format(fast.getThreshold()) )
print( "nonmaxSuppression: {}".format(fast.getNonmaxSuppression()) )
print( "neighborhood: {}".format(fast.getType()) )
print( "Total Keypoints with nonmaxSuppression: {}".format(len(kp)) )

cv.imshow('FAST', img2)

if cv.waitKey(0) & 0xff == 27:
    cv.destroyAllWindows()

Run fast.py to see the result, as shown in the figure below.
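
To see what non-maximum suppression does, you can disable it on the same detector and detect again; without suppression, clusters of adjacent pixels around every corner all pass the test, so the keypoint count grows considerably. A short follow-up sketch, reusing fast, img, and gray from fast.py:

# Disable non-maximum suppression and re-detect
fast.setNonmaxSuppression(0)
kp = fast.detect(gray, None)
print( "Total Keypoints without nonmaxSuppression: {}".format(len(kp)) )
img3 = cv.drawKeypoints(img, kp, None, color=(0,0,255))
cv.imshow('FAST without NMS', img3)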

Now, you will modify feature_detection.ui to implement Features from Accelerated Segment Test (FAST). Add a Group Box widget and set its objectName property to gbFAST.

Then, add a Spin Box widget inside the group box. Set its objectName property to sbThresh, its value property to 10, its minimum property to 0, and its maximum property to 100. Also place a Check Box widget inside the group box and set its objectName property to cbNonMax.

Then, place three Radio Button widgets inside the group box. Set their text properties to TYPE_5_8, TYPE_7_12, and TYPE_9_16 and their objectName properties to rb58, rb712, and rb916. If you prefer building these controls in code rather than Qt Designer, a rough equivalent is sketched below.
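
This sketch mirrors the Designer steps; the layout used and where gbFAST is added to the form depend on your own .ui structure:

from PyQt5.QtWidgets import (QGroupBox, QSpinBox, QCheckBox,
                             QRadioButton, QVBoxLayout)

# Programmatic equivalent of the Designer steps above (a sketch)
gbFAST = QGroupBox("FAST")
layout = QVBoxLayout(gbFAST)

sbThresh = QSpinBox()
sbThresh.setRange(0, 100)   # minimum/maximum properties
sbThresh.setValue(10)       # value property
layout.addWidget(sbThresh)

cbNonMax = QCheckBox("Non-max suppression")
layout.addWidget(cbNonMax)

rb58 = QRadioButton("TYPE_5_8")
rb712 = QRadioButton("TYPE_7_12")
rb916 = QRadioButton("TYPE_9_16")
for rb in (rb58, rb712, rb916):
    layout.addWidget(rb)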

Now, the form looks as shown in the figure below.


Modify initialization() to include the gbFAST widget as follows:

def initialization(self,state):
    self.cboFeature.setEnabled(state)
    self.gbHarris.setEnabled(state)
    self.gbShiTomasi.setEnabled(state)
    self.gbSIFT.setEnabled(state)
    self.gbFAST.setEnabled(state)

Define a new method, set_type(), to determine the neighborhood flag chosen with the radio buttons:

def set_type(self):
    fast_type = 0
    if self.rb58.isChecked():
        fast_type = cv2.FAST_FEATURE_DETECTOR_TYPE_5_8
    if self.rb712.isChecked():
        fast_type = cv2.FAST_FEATURE_DETECTOR_TYPE_7_12
    if self.rb916.isChecked():
        fast_type = cv2.FAST_FEATURE_DETECTOR_TYPE_9_16

    return fast_type
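
Note that when no radio button is checked yet, set_type() returns 0, which is also the value of cv2.FAST_FEATURE_DETECTOR_TYPE_5_8, so detection silently falls back to the 5_8 neighborhood. If you would rather fall back to TYPE_9_16, the default of cv2.FastFeatureDetector_create(), a sketch:

def set_type(self):
    # Fall back to TYPE_9_16, OpenCV's own default, when no
    # radio button is checked yet
    fast_type = cv2.FAST_FEATURE_DETECTOR_TYPE_9_16
    if self.rb58.isChecked():
        fast_type = cv2.FAST_FEATURE_DETECTOR_TYPE_5_8
    elif self.rb712.isChecked():
        fast_type = cv2.FAST_FEATURE_DETECTOR_TYPE_7_12
    return fast_type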
 

Define a new method, fast_detection(), which invokes set_type(), to implement FAST as follows:

def fast_detection(self):
    self.test_im = self.img.copy()

    # Read parameters from the widgets
    thresh = self.sbThresh.value()
    cbVal = self.cbNonMax.isChecked()
    fast_type = self.set_type()

    gray = cv2.cvtColor(self.test_im, cv2.COLOR_BGR2GRAY)

    # Initiate the FAST object with the chosen parameters
    fast = cv2.FastFeatureDetector_create(threshold = thresh, \
        nonmaxSuppression = cbVal, type=fast_type)

    # Find and draw the keypoints
    kp = fast.detect(gray, None)
    self.test_im = cv2.drawKeypoints(self.test_im, \
        kp, None, color=(255,255,255))

    # Convert BGR to RGB in place and display the result
    cv2.cvtColor(self.test_im, cv2.COLOR_BGR2RGB, self.test_im)
    self.display_image(self.test_im, self.labelResult)
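
Because fast_detection() is driven entirely by widget signals, it assumes self.img already exists. The widgets stay disabled until an image is read, so this holds during normal use, but disabled widgets still emit signals on programmatic changes (for example, setValue() in code), so a small guard at the top of the method (a sketch) avoids an AttributeError:

def fast_detection(self):
    # Guard (a sketch): bail out until read_image() has set self.img
    if not hasattr(self, 'img'):
        return
    self.test_im = self.img.copy()
    # ... (rest of the method as above)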

Modify choose_feature() so that when the user chooses Features from Accelerated Segment Test (FAST) from the combo box, it invokes fast_detection() as follows:

def choose_feature(self):        
    strCB = self.cboFeature.currentText()
        
    if strCB == 'Harris Corner Detection':
        self.gbHarris.setEnabled(True)
        self.gbShiTomasi.setEnabled(False)
        self.gbSIFT.setEnabled(False)
        self.gbFAST.setEnabled(False)
        self.harris_detection()
            
    if strCB == 'Shi-Tomasi Corner Detector':
        self.gbHarris.setEnabled(False)
        self.gbShiTomasi.setEnabled(True)
        self.gbSIFT.setEnabled(False)
        self.gbFAST.setEnabled(False)
        self.shi_tomasi_detection()

    if strCB == 'Scale-Invariant Feature Transform (SIFT)':
        self.gbHarris.setEnabled(False)
        self.gbShiTomasi.setEnabled(False)
        self.gbSIFT.setEnabled(True)
        self.gbFAST.setEnabled(False)
        self.sift_detection()

    if strCB == 'Features from Accelerated Segment Test (FAST)':
        self.gbHarris.setEnabled(False)
        self.gbShiTomasi.setEnabled(False)
        self.gbSIFT.setEnabled(False)
        self.gbFAST.setEnabled(True)
        self.fast_detection()
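
As later parts add more detectors (SURF, BRIEF, ORB), this if-chain keeps growing. An equivalent table-driven sketch, assuming the same widget and method names, keeps the enabling and dispatch in one place:

def choose_feature(self):
    strCB = self.cboFeature.currentText()
    # Map each combo-box entry to its group box and handler (a sketch)
    handlers = {
        'Harris Corner Detection': (self.gbHarris, self.harris_detection),
        'Shi-Tomasi Corner Detector': (self.gbShiTomasi, self.shi_tomasi_detection),
        'Scale-Invariant Feature Transform (SIFT)': (self.gbSIFT, self.sift_detection),
        'Features from Accelerated Segment Test (FAST)': (self.gbFAST, self.fast_detection),
    }
    for gb, _ in handlers.values():
        gb.setEnabled(False)
    if strCB in handlers:
        gb, handler = handlers[strCB]
        gb.setEnabled(True)
        handler()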

Connect the valueChanged() signal of sbThresh, the stateChanged() signal of cbNonMax, and the toggled() signals of the three radio buttons to fast_detection(), placing the connections inside the __init__() method as follows:

def __init__(self):
    QMainWindow.__init__(self)
    loadUi("feature_detection.ui",self)
    self.setWindowTitle("Feature Detection")
    self.pbReadImage.clicked.connect(self.read_image)
    self.initialization(False)
    self.cboFeature.currentIndexChanged.connect(self.choose_feature)
        
    self.hsBlockSize.valueChanged.connect(self.set_hsBlockSize)
    self.hsKSize.valueChanged.connect(self.set_hsKSize)
    self.hsK.valueChanged.connect(self.set_hsK)
    self.hsThreshold.valueChanged.connect(self.set_hsThreshold)
        
    self.sbCorner.valueChanged.connect(self.shi_tomasi_detection)
    self.sbEuclidean.valueChanged.connect(self.shi_tomasi_detection)
    self.dsbQuality.valueChanged.connect(self.shi_tomasi_detection)
        
    self.sbFeature.valueChanged.connect(self.sift_detection)
    self.sbOctave.valueChanged.connect(self.sift_detection)
    self.dsbContrast.valueChanged.connect(self.sift_detection)
    self.sbEdge.valueChanged.connect(self.sift_detection)
    self.dsbSigma.valueChanged.connect(self.sift_detection)
        
    self.sbThresh.valueChanged.connect(self.fast_detection)
    self.cbNonMax.stateChanged.connect(self.fast_detection)
    self.rb58.toggled.connect(self.fast_detection)
    self.rb712.toggled.connect(self.fast_detection)
    self.rb916.toggled.connect(self.fast_detection)
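
One detail to be aware of: toggled() is emitted both by the radio button that becomes checked and by the one that becomes unchecked, so switching the neighborhood type runs fast_detection() twice. If you want to avoid the redundant run, connect the three radio buttons to a small guard slot instead (a sketch):

def on_type_toggled(self, checked):
    # toggled(bool) fires for both the newly checked and the newly
    # unchecked button; only re-run detection for the one turning on
    if checked:
        self.fast_detection()

Then, in __init__(), use self.rb58.toggled.connect(self.on_type_toggled), and likewise for rb712 and rb916.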

Run feature_detection.py, open an image, and choose Features from Accelerated Segment Test (FAST) from the combo box. Change the parameters; the results are shown in the figures below.

Below is the full script of feature_detection.py so far:

#feature_detection.py
import sys
import cv2
import numpy as np
from PyQt5.QtWidgets import *
from PyQt5 import QtGui, QtCore
from PyQt5.uic import loadUi
from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar)
from PyQt5.QtWidgets import QDialog, QFileDialog
from PyQt5.QtGui import QIcon, QPixmap, QImage

class FormFeatureDetection(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        loadUi("feature_detection.ui",self)
        self.setWindowTitle("Feature Detection")
        self.pbReadImage.clicked.connect(self.read_image)
        self.initialization(False)
        self.cboFeature.currentIndexChanged.connect(self.choose_feature)
        
        self.hsBlockSize.valueChanged.connect(self.set_hsBlockSize)
        self.hsKSize.valueChanged.connect(self.set_hsKSize)
        self.hsK.valueChanged.connect(self.set_hsK)
        self.hsThreshold.valueChanged.connect(self.set_hsThreshold)
        
        self.sbCorner.valueChanged.connect(self.shi_tomasi_detection)
        self.sbEuclidean.valueChanged.connect(self.shi_tomasi_detection)
        self.dsbQuality.valueChanged.connect(self.shi_tomasi_detection)
        
        self.sbFeature.valueChanged.connect(self.sift_detection)
        self.sbOctave.valueChanged.connect(self.sift_detection)
        self.dsbContrast.valueChanged.connect(self.sift_detection)
        self.sbEdge.valueChanged.connect(self.sift_detection)
        self.dsbSigma.valueChanged.connect(self.sift_detection)
        
        self.sbThresh.valueChanged.connect(self.fast_detection)
        self.cbNonMax.stateChanged.connect(self.fast_detection)
        self.rb58.toggled.connect(self.fast_detection)
        self.rb712.toggled.connect(self.fast_detection)
        self.rb916.toggled.connect(self.fast_detection)

    def read_image(self):
        self.fname = QFileDialog.getOpenFileName(self, 'Open file', \
            'd:\\',"Image Files (*.jpg *.gif *.bmp *.png)")
        self.pixmap = QPixmap(self.fname[0])        
        self.labelImage.setPixmap(self.pixmap)
        self.labelImage.setScaledContents(True)
        self.img = cv2.imread(self.fname[0], cv2.IMREAD_COLOR)  
        self.cboFeature.setEnabled(True)

    def initialization(self,state):
        self.cboFeature.setEnabled(state)
        self.gbHarris.setEnabled(state)
        self.gbShiTomasi.setEnabled(state)
        self.gbSIFT.setEnabled(state)
        self.gbFAST.setEnabled(state)

    def set_hsBlockSize(self, value):
        self.leBlockSize.setText(str(value))
        self.harris_detection()
        
    def set_hsKSize(self, value):
        self.leKSize.setText(str(value))
        self.harris_detection()
        
    def set_hsK(self, value):
        self.leK.setText(str(round((value/100),2)))
        self.harris_detection()

    def set_hsThreshold(self, value):
        self.leThreshold.setText(str(round((value/100),2)))
        self.harris_detection()

    def choose_feature(self):        
        strCB = self.cboFeature.currentText()
        
        if strCB == 'Harris Corner Detection':
            self.gbHarris.setEnabled(True)
            self.gbShiTomasi.setEnabled(False)
            self.gbSIFT.setEnabled(False)
            self.gbFAST.setEnabled(False)
            self.harris_detection()
            
        if strCB == 'Shi-Tomasi Corner Detector':
            self.gbHarris.setEnabled(False)
            self.gbShiTomasi.setEnabled(True)
            self.gbSIFT.setEnabled(False)
            self.gbFAST.setEnabled(False)
            self.shi_tomasi_detection()

        if strCB == 'Scale-Invariant Feature Transform (SIFT)':
            self.gbHarris.setEnabled(False)
            self.gbShiTomasi.setEnabled(False)
            self.gbSIFT.setEnabled(True)
            self.gbFAST.setEnabled(False)
            self.sift_detection()

        if strCB == 'Features from Accelerated Segment Test (FAST)':
            self.gbHarris.setEnabled(False)
            self.gbShiTomasi.setEnabled(False)
            self.gbSIFT.setEnabled(False)
            self.gbFAST.setEnabled(True)
            self.fast_detection()
            
    def harris_detection(self):
        self.test_im = self.img.copy()
        gray = cv2.cvtColor(self.test_im,cv2.COLOR_BGR2GRAY) 
        gray = np.float32(gray)
        blockSize = int(self.leBlockSize.text())
        kSize = int(self.leKSize.text())
        K = float(self.leK.text())
        dst = cv2.cornerHarris(gray,blockSize,kSize,K)

        #dilated for marking the corners
        dst = cv2.dilate(dst,None)

        # Threshold for an optimal value, it may vary depending on the image.
        Thresh = float(self.leThreshold.text())
        self.test_im[dst>Thresh*dst.max()]=[0,0,255] 
        cv2.cvtColor(self.test_im, cv2.COLOR_BGR2RGB, self.test_im)
        self.display_image(self.test_im, self.labelResult)

    def display_image(self, img, label):
        height, width, channel = img.shape
        bytesPerLine = 3 * width  
        
        qImg = QImage(img, width, height, \
                      bytesPerLine, QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(qImg)
        label.setPixmap(pixmap)
        label.setScaledContents(True)
    
    def shi_tomasi_detection(self):
        self.test_im = self.img.copy()
        number_corners = self.sbCorner.value()
        euclidean_dist = self.sbEuclidean.value()
        min_quality = self.dsbQuality.value()
        gray = cv2.cvtColor(self.test_im,cv2.COLOR_BGR2GRAY)
        corners = cv2.goodFeaturesToTrack(gray,number_corners,\
            min_quality,euclidean_dist)
        corners = np.intp(corners)  # np.int0 is an alias of np.intp, removed in NumPy 2.0
        for i in corners:
            x,y = i.ravel()
            cv2.circle(self.test_im,(x,y),5,[0,255,0],-1)
            
        cv2.cvtColor(self.test_im, cv2.COLOR_BGR2RGB, self.test_im)    
        self.display_image(self.test_im, self.labelResult)

    def sift_detection(self):
        self.test_im = self.img.copy()
        nfeatures = self.sbFeature.value()
        nOctaveLayers = self.sbOctave.value()
        contrastThreshold = self.dsbContrast.value()
        edgeThreshold = self.sbEdge.value()
        sigma = self.dsbSigma.value()
        
        gray= cv2.cvtColor(self.test_im,cv2.COLOR_BGR2GRAY)
        sift = cv2.SIFT_create(nfeatures, nOctaveLayers, \
            contrastThreshold, edgeThreshold, sigma)
        kp = sift.detect(gray,None)
        self.test_im=cv2.drawKeypoints(self.test_im,kp,\
            self.test_im,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            
        cv2.cvtColor(self.test_im, cv2.COLOR_BGR2RGB, self.test_im)    
        self.display_image(self.test_im, self.labelResult)

    def fast_detection(self):
        self.test_im = self.img.copy()

        # Read parameters from the widgets
        thresh = self.sbThresh.value()
        cbVal = self.cbNonMax.isChecked()
        fast_type = self.set_type()

        gray = cv2.cvtColor(self.test_im, cv2.COLOR_BGR2GRAY)

        # Initiate the FAST object with the chosen parameters
        fast = cv2.FastFeatureDetector_create(threshold = thresh, \
            nonmaxSuppression = cbVal, type=fast_type)

        # Find and draw the keypoints
        kp = fast.detect(gray, None)
        self.test_im = cv2.drawKeypoints(self.test_im, \
            kp, None, color=(255,255,255))

        # Convert BGR to RGB in place and display the result
        cv2.cvtColor(self.test_im, cv2.COLOR_BGR2RGB, self.test_im)
        self.display_image(self.test_im, self.labelResult)
        
    def set_type(self):
        fast_type = 0
        if self.rb58.isChecked():
            fast_type = cv2.FAST_FEATURE_DETECTOR_TYPE_5_8
        if self.rb712.isChecked():
            fast_type = cv2.FAST_FEATURE_DETECTOR_TYPE_7_12
        if self.rb916.isChecked():
            fast_type = cv2.FAST_FEATURE_DETECTOR_TYPE_9_16

        return fast_type

if __name__=="__main__":
    app = QApplication(sys.argv)    
    w = FormFeatureDetection()
    w.show()
    sys.exit(app.exec_())



