【发布时间】:2014-08-22 23:21:59
【问题描述】:
我很难让“def get_next_page”遍历所有搜索结果页面。到目前为止,它只能翻到第二页。
此代码的主要功能是从所有页面获取一般信息(公司、产品、位置等)。
import unittest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import time
import re
def openPage():
    """Open the search-result page, scrape page 1, then walk the remaining pages.

    Fixes over the original: the browser is always released via ``driver.quit()``
    in a ``finally`` block (the original leaked a Firefox process on every run
    and on any exception), and the unused ``elem`` local is removed.
    """
    driver = webdriver.Firefox()
    try:
        driver.get("http://www.made-in-china.com/companysearch.do?subaction=hunt&order=0&style=b&code=0&word=aerator")
        # Block until the result list is actually rendered before scraping.
        WebDriverWait(driver, 60).until(
            EC.presence_of_element_located((By.CLASS_NAME, 'search-list')))
        analyzePage(driver)
        get_next_page(driver)
    finally:
        # Always close the browser, even if scraping raised.
        driver.quit()
def analyzePage(driver):
center = driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[1]/div/div[4]')
companyBox=center.find_elements_by_class_name('list-node')
for items in companyBox:
companyName = items.find_element_by_tag_name('h2').text.encode('utf-8')
print 'companyName: ' ,companyName
companyLink= items.find_element_by_tag_name('h2').find_element_by_tag_name('a').get_attribute('href')
print 'companyLink: ', companyLink
companyInfo=items.find_elements_by_tag_name('tr')
companyType=companyInfo[0].text.encode('utf-8')
print companyType
companyProduct=companyInfo[1].text.encode('utf-8')
print companyProduct
def get_next_page(driver):
page = driver.find_element_by_xpath ("/html/body/div[6]/div[1]/div[1]/div/div[6]/div[1]/div")
start_link = page.find_elements_by_tag_name('a')
for item in start_link:
href = item.get_attribute('href')
print href
print"==========================================================="
driver.execute_script(href)
analyzePage(driver)
return driver
# Script entry point: launch the browser and crawl all result pages.
if __name__ == "__main__":
    openPage()
提前致谢。
编辑:仍然很难过,有什么建议吗?
编辑:再次顶帖(bump)。
【问题讨论】:
标签: python selenium web web-crawler