Using cv2 to solve a slider verification code

Preface
Due to the needs of work, I started studying slider verification codes; after all, I work with data, not image recognition. Following my usual approach, I first went to GitHub to look for related projects and found a write-up by an expert. Since it was written quite a while ago, I made some simple changes and discovered a new Python module along the way. "Life is short, I use Python" really doesn't lie!
GitHub address
Analysis
This mainly relies on OpenCV-Python, which is a fairly large library; fully understanding it would take at least a month. Final decision: just make it work.
The train of thought is as follows:
1. Initialize the browser

# Imports used by the snippets below (the methods all belong to one class;
# the imports are shown once here)
import time
import random

import cv2 as cv
import requests
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


def __init__(self, url, username, password):
    """
    Initialize the browser configuration and set the constants

    :param url : target url
    :param username : username
    :param password : password
    """
    # Headless browser: you can enable it if needed. I want to watch the
    # sliding process, so I am not using headless mode for now.
    # profile = webdriver.FirefoxProfile()
    self.browser = webdriver.Chrome()
    self.wait = WebDriverWait(self.browser, 30)
    self.url = url
    self.username = username
    self.password = password
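
The commented-out line above refers to a Firefox profile, but the driver actually used is Chrome. If you do want headless mode later, it would look roughly like this, replacing the self.browser line in __init__ (a sketch, not part of the original code):

options = webdriver.ChromeOptions()
options.add_argument('--headless')
self.browser = webdriver.Chrome(options=options)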

2. Save the image

@staticmethod
def save_img(block_img_url):
    """
    Save the image

    :param block_img_url: image url
    :return: True on success, False otherwise
    """
    try:
        img = requests.get(block_img_url).content
        with open('block.jpeg', 'wb') as f:
            f.write(img)
        return True
    except Exception:
        return False

3. Identify the gap; this is where OpenCV-Python comes in

@staticmethod
def get_gap():
    """
    Identify the gap in the background image

    :return: x coordinate of the gap (in page pixels), or 0 if not found
    """
    img = cv.imread('block.jpeg')
    # Gaussian blur to suppress noise
    g_blur = cv.GaussianBlur(img, (5, 5), 0)
    # Canny edge detection; the thresholds can be tuned
    canny = cv.Canny(g_blur, 150, 400)
    # Contour detection (OpenCV 4.x returns contours and hierarchy)
    contours, hierarchy = cv.findContours(
        canny, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

    # TODO: this part is still a bit convoluted and I have not fully digested it;
    # the parameters are hard to tune. The goal is simply to locate the contour
    # of the gap.
    for i, contour in enumerate(contours):
        m = cv.moments(contour)
        print(m)
        if m['m00'] == 0:
            cx = cy = 0
        else:
            cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']
        if 6000 < cv.contourArea(contour) < 8000 and 370 < cv.arcLength(contour, True) < 390:
            if cx < 400:
                continue

            # Bounding rectangle of the contour
            x, y, w, h = cv.boundingRect(contour)
            cv.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # Display the recognition result
            cv.imshow('img', img)
            cv.waitKey(1)  # needed for the window to actually render

            # The downloaded image is larger than the one rendered on the page
            # (roughly 2x), so halve the x coordinate
            print('【Notch identification】 {x}px'.format(x=x / 2))
            return x / 2
    return 0
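
The contour thresholds above (area between 6000 and 8000, perimeter between 370 and 390) are fragile and site-specific. If the puzzle-piece image can also be downloaded, a common and usually more robust alternative is template matching. The sketch below assumes a hypothetical piece.png exists alongside block.jpeg, which is not the case in the code above:

import cv2 as cv

def get_gap_by_template(bg_path='block.jpeg', piece_path='piece.png'):
    # Hypothetical alternative, not part of the original post
    bg = cv.imread(bg_path, cv.IMREAD_GRAYSCALE)
    piece = cv.imread(piece_path, cv.IMREAD_GRAYSCALE)
    # Matching on edge maps makes the result less sensitive to colour/brightness
    bg_edges = cv.Canny(bg, 100, 200)
    piece_edges = cv.Canny(piece, 100, 200)
    res = cv.matchTemplate(bg_edges, piece_edges, cv.TM_CCOEFF_NORMED)
    _, _, _, max_loc = cv.minMaxLoc(res)
    # max_loc is the top-left corner of the best match; its x is the gap position
    return max_loc[0]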

4. Simulate the sliding trajectory

With initial velocity v0, displacement x, time t, and acceleration a, the motion satisfies:
x = v0 * t + 1/2 * a * t^2
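
Taking the first 0.2 s slice as an example, with v0 = 0 and a = 3 (the values used in the code below):

v0, a, t = 0, 3, 0.2
s = v0 * t + 0.5 * a * t ** 2   # 0.06 px moved in this slice
v = v0 + a * t                  # 0.6 px/s carried over as the next slice's v0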

@staticmethod
def trajectory(distance):
    """
    Simulate a sliding trajectory

    :param distance: distance to slide, in px
    :return: list of displacements, one per 0.2 s slice
    """
    # Account for the slider's starting position
    distance -= 50
    # Initial velocity
    v = 0
    # The trajectory is computed in 0.2 s slices; each track value is the
    # displacement within one slice
    t = 0.2
    # List of displacements/tracks; one element is the displacement in 0.2 s
    tracks = []
    # Displacement covered so far
    current = 0
    # Once the mid point is reached, start decelerating
    mid = distance * 4 / 5
    # Overshoot a little first, then slide back the other way at the end
    distance += 10
    # a = random.randint(1, 3)
    while current < distance:
        if current < mid:
            # The smaller the acceleration, the smaller the displacement per
            # slice and the more fine-grained the simulated trajectory
            # a = random.randint(2, 4)  # acceleration phase
            a = 3
        else:
            # a = -random.randint(3, 5)  # deceleration phase
            a = -2
        # Initial velocity for this slice
        v0 = v
        # Displacement within this 0.2 s slice
        s = v0 * t + 0.5 * a * (t ** 2)
        # Current position
        current += s
        # Add to the track list
        tracks.append(round(s))

        # The velocity reached now becomes the initial velocity of the next slice
        v = v0 + a * t

    # TODO: added to improve the pass rate; not sure whether this is the right approach
    # Slide backwards to land near the exact position
    for i in range(4):
        tracks.append(-random.randint(2, 3))
    for i in range(4):
        tracks.append(-random.randint(1, 3))
    return tracks
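
As a quick sanity check (hypothetical usage, assuming the class is called SliderCaptcha; the post does not name it), the generated track can be printed and inspected:

tracks = SliderCaptcha.trajectory(200)
print(tracks)
# The forward segments sum to roughly 200 - 50 + 10 px; the trailing negative
# segments then pull the slider back toward the target position.
print('total displacement:', sum(tracks))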
 

5. Main function

def tx_main(self):
    """
    Main function

    :return:
    """
    # Routine for moving the slider
    WebDriverWait(self.browser, 20, 1).until(
        EC.presence_of_element_located((By.ID, 'tcaptcha_iframe'))
    )
    self.browser.switch_to.frame(
        self.browser.find_element_by_id('tcaptcha_iframe'))  # Switch into the captcha iframe
    time.sleep(0.5)
    block_img_url = self.browser.find_element_by_xpath(
        '//img[@id="slideBg"]').get_attribute('src')
    # Check whether the src already contains the URL prefix
    print(block_img_url)

    if self.save_img(block_img_url):
        getGap = self.get_gap()
        if getGap:
            track_list = self.trajectory(getGap)
            time.sleep(0.5)
            # Locate the slider
            attribute_ing = self.browser.find_element_by_xpath(
                '//div[@id="tcaptcha_drag_thumb"]'
            )
            # Press and hold the mouse button
            ActionChains(self.browser).click_and_hold(
                on_element=attribute_ing
            ).perform()
            time.sleep(0.5)
            print(track_list)

            # Replay the simulated trajectory segment by segment
            for track in track_list:
                ActionChains(self.browser).move_by_offset(
                    xoffset=track, yoffset=0
                ).perform()
            time.sleep(1.5)
            ActionChains(self.browser).release(
                on_element=attribute_ing
            ).perform()
            time.sleep(1.5)

            return True
        else:
            # TODO: retry on failure; a retry counter could be added here
            self.re_start()
    else:
        print('Failed to get image')
        return False

From here on, you need to build a subclass to implement the actual QQ account/password login.
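
A minimal sketch of that idea, assuming the base class above is called SliderCaptcha and that the login form exposes the element ids shown (both are assumptions; the post gives neither):

class QQLogin(SliderCaptcha):
    def login(self):
        self.browser.get(self.url)
        # Element ids below are hypothetical placeholders for the real login form
        self.browser.find_element_by_id('username').send_keys(self.username)
        self.browser.find_element_by_id('password').send_keys(self.password)
        self.browser.find_element_by_id('login_button').click()
        # Once the slider captcha pops up, hand over to the solving routine
        return self.tx_main()


if __name__ == '__main__':
    QQLogin('https://example.com/login', 'my_qq_number', 'my_password').login()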
