Tags: javascript, python, selenium, web-crawler, pastebin

Python Selenium can't go through links. Pastebin Crawling


Hello, I am trying to extract all of the links across the 10 result pages returned for the search "ssh".

I can extract the first 10 links from the first page once the JavaScript has loaded, and after one click I can extract the next 10 links from the second page, but when I try to go to the third page, I get an error.

This is my code:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import requests
import re

links = []
driver = webdriver.Firefox()
driver.get("http://pastebin.com/search?q=ssh")

# wait for the search results to be loaded
wait = WebDriverWait(driver, 10)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".gsc-result-info")))
for link in driver.find_elements_by_xpath("//div[@class='gs-title']/a[@class='gs-title']"):
    if link.get_attribute("href") is not None:
        print link.get_attribute("href")
# get all search results links
for page in driver.find_elements_by_xpath("//div[@class='gsc-cursor-page']"):
    driver.implicitly_wait(10) # seconds
    page.click()

    for link in driver.find_elements_by_xpath("//div[@class='gs-title']/a[@class='gs-title']"):
        if link.get_attribute("href") is not None:
            print link.get_attribute("href")

This is the output I get, along with the error:

python pastebinselenium.py 
http://pastebin.com/u/ssh
http://pastebin.com/gsQWBEZP
http://pastebin.com/gfA12TWk
http://pastebin.com/udWMWdPR
http://pastebin.com/J55238CB
http://pastebin.com/DN2aHvRr
http://pastebin.com/f0rh66kU
http://pastebin.com/3zvY3DSm
http://pastebin.com/fqHVJGEm
http://pastebin.com/3aB7h0fm
http://pastebin.com/3uBAxXu3
http://pastebin.com/cxjRqeSh
http://pastebin.com/5nJPNr3Q
http://pastebin.com/qV0rPNfP
http://pastebin.com/zubt2Yc7
http://pastebin.com/jFrjWYpE
http://pastebin.com/DU7yqjQ1
http://pastebin.com/AFtWHmtE
http://pastebin.com/UVP5behK
http://pastebin.com/hP7XTyv1
Traceback (most recent call last):
  File "pastebinselenium.py", line 21, in <module>
    page.click()
  File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webelement.py", line 74, in click
    self._execute(Command.CLICK_ELEMENT)
  File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webelement.py", line 457, in _execute
    return self._parent.execute(command, params)
  File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webdriver.py", line 233, in execute
    self.error_handler.check_response(response)
  File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/errorhandler.py", line 194, in check_response
    raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.StaleElementReferenceException: Message: Element not found in the cache - perhaps the page has changed since it was looked up
Stacktrace:
    at fxdriver.cache.getElementAt (resource://fxdriver/modules/web-element-cache.js:9454)
    at Utils.getElementAt (file:///tmp/tmpzhZSEC/extensions/[email protected]/components/command-processor.js:9039)
    at fxdriver.preconditions.visible (file:///tmp/tmpzhZSEC/extensions/[email protected]/components/command-processor.js:10090)
    at DelayedCommand.prototype.checkPreconditions_ (file:///tmp/tmpzhZSEC/extensions/[email protected]/components/command-processor.js:12644)
    at DelayedCommand.prototype.executeInternal_/h (file:///tmp/tmpzhZSEC/extensions/[email protected]/components/command-processor.js:12661)
    at fxdriver.Timer.prototype.setTimeout/<.notify (file:///tmp/tmpzhZSEC/extensions/[email protected]/components/command-processor.js:625)

I want to take the 10 links from each of the 10 pages (100 in total), but I am only able to extract 20 =(

I have also tried this:

wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".gsc-cursor-box")))

right before the click, but without success.


Solution

  • The idea is to click the pagination links in a loop, waiting for the next page number to become active and collecting links along the way. Implementation:

    from pprint import pprint
    
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    
    
    driver = webdriver.Firefox()
    driver.get("http://pastebin.com/search?q=ssh")
    
    # wait for the search results to be loaded
    wait = WebDriverWait(driver, 10)
    wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".gsc-result-info")))
    
    # grab the result links from the first page
    links = [link.get_attribute("href") for link in driver.find_elements_by_css_selector(".gsc-results .gs-result > .gsc-thumbnail-inside > .gs-title > a.gs-title")]
    for page_number in range(2, 11):
        # locate the pagination element for this page number fresh on each iteration
        driver.find_element_by_xpath("//div[@class='gsc-cursor-page' and . = '%d']" % page_number).click()

        # wait for the clicked page number to become the active page
        wait.until(EC.visibility_of_element_located((By.XPATH, "//div[contains(@class, 'gsc-cursor-current-page') and . = '%d']" % page_number)))

        # collect the result links from the newly rendered page
        links.extend([link.get_attribute("href") for link in driver.find_elements_by_css_selector(".gsc-results .gs-result > .gsc-thumbnail-inside > .gs-title > a.gs-title")])
    
    print(len(links))
    pprint(links)
    

    Prints:

    100
    ['http://pastebin.com/u/ssh',
     'http://pastebin.com/gsQWBEZP',
      ...
     'http://pastebin.com/vtBgrndi',
     'http://pastebin.com/WgXrebLq',
     'http://pastebin.com/Nxui56Gh',
     'http://pastebin.com/Qef0LZPR',
     'http://pastebin.com/yNUh1fRe',
     'http://pastebin.com/2j0d8FzL',
     'http://pastebin.com/g92A2jAq']
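
  • For reference, the original code fails with a StaleElementReferenceException because the WebElement objects returned by find_elements_by_xpath() point into the DOM that Google Custom Search throws away and re-renders every time a pagination link is clicked, so iterating over the pre-fetched list ends up dereferencing elements that no longer exist. This is also why waiting for the visibility of .gsc-cursor-box right before the click does not help: the wait succeeds, but the reference being clicked is already stale. The implementation above avoids the problem by re-locating the page link by its number on each iteration. Here is a minimal sketch of that re-locate-before-click pattern, reusing the wait object defined above (click_page is a hypothetical helper name, not part of the original answer):

    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC


    def click_page(wait, page_number):
        # Re-locate the pagination element on every call so that no element
        # reference is held across a DOM re-render, then click it once it is
        # both visible and enabled.
        page = wait.until(EC.element_to_be_clickable(
            (By.XPATH, "//div[@class='gsc-cursor-page' and . = '%d']" % page_number)))
        page.click()

    Using element_to_be_clickable instead of a plain visibility check additionally guards against clicking while the pagination widget is still re-rendering.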