baker95935

python实现列表页数据的批量抓取,练手的,下回带分页的

#!/usr/bin/env python
# coding=utf-8

import requests
from bs4 import BeautifulSoup
import pymysql

import sys, io
# Re-wrap stdout so non-ASCII (Chinese) text prints correctly on consoles
# whose default encoding is not UTF-8.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')

print('连接到mysql服务器...')
# NOTE(review): positional connect() arguments are deprecated in recent
# PyMySQL releases; keyword form passes the same host/user/password/db.
db = pymysql.connect(host="localhost", user="root", password="root", database="python")
print('连接上了!')
cursor = db.cursor()

# Minimal browser-like User-Agent so the site does not reject the request.
hdrs = {'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)'}

url = "http://www.zztez.com/tezgl/"

# Fetch the article list page; the site serves GBK, so decode explicitly,
# dropping any undecodable bytes.
r = requests.get(url, headers=hdrs)
soup = BeautifulSoup(r.content.decode('gbk', 'ignore'), 'lxml')


def has_class_but_no_id(tag):
    """Predicate for BeautifulSoup.find_all: True for tags that carry both
    a 'title' and an 'href' attribute but no 'target' attribute.

    NOTE(review): the name is misleading -- it checks title/href/target,
    not class/id -- but is kept unchanged for caller compatibility.
    """
    return tag.has_attr('title') and tag.has_attr('href') and not tag.has_attr('target')

# Walk every matching link on the list page, fetch each article and insert
# it into MySQL unless an article with the same title is already stored.
for link in soup.find_all(has_class_but_no_id):
    # Build the absolute article URL and fetch the detail page.
    # (Using fresh names here instead of re-binding url/r/soup: the
    # find_all iterator is already bound, but shadowing was confusing.)
    article_url = "http://www.zztez.com" + link.get('href')
    page = requests.get(article_url, headers=hdrs)
    article = BeautifulSoup(page.content.decode('gbk', 'ignore'), 'lxml')

    title = article.find("h1").string.encode("utf-8")
    rintro = article.select(".intro")[0].string.encode("utf-8")
    rcontent = article.select(".content")[0].encode("utf-8")

    # Dedup check: count rows whose title matches (no wildcards in the
    # parameter, so LIKE behaves as an exact match here).
    sql = "SELECT count(*) as total FROM article WHERE title like %s"
    # Parameters must be a sequence; (title) was just a parenthesized
    # expression, not a one-element tuple.
    cursor.execute(sql, (title,))
    one = cursor.fetchone()

    if one == (0,):
        insert = "INSERT INTO article(title,intro,content) VALUES(%s,%s,%s)"
        cursor.execute(insert, (title, rintro, rcontent))
        db.commit()

print('爬取数据并插入mysql数据库完成...')

 

分类:

技术点:

相关文章:

  • 2021-06-28
  • 2022-12-23
  • 2021-09-12
  • 2021-11-11
  • 2022-01-03
  • 2022-12-23
  • 2021-05-16
  • 2021-04-18
猜你喜欢
  • 2022-02-02
  • 2022-12-23
  • 2022-12-23
  • 2021-04-17
  • 2022-12-23
  • 2021-12-14
  • 2021-07-31
相关资源
相似解决方案