twoheads

 

 

 

test_urllib2.py

import http.cookiejar
from urllib import request

# Demo: three ways to fetch a page with urllib.request.
url = "http://www.baidu.com"
print('第一种方法')

# Method 1: plain urlopen. Use a context manager so the HTTP
# response is always closed (the original leaked the connection).
with request.urlopen(url) as response1:
    print(response1.getcode())       # HTTP status code, e.g. 200
    print(len(response1.read()))     # size of the body in bytes

print("第二种方法")
# Method 2: build a Request object so extra headers (here a
# browser-like User-Agent) can be attached before sending.
req = request.Request(url)
req.add_header("user-agent", "Mozilla/5.0")
with request.urlopen(req) as response2:
    print(response2.getcode())
    print(len(response2.read()))

print('第三种方法')

# Method 3: install a global opener wired to a CookieJar so cookies
# set by the server are captured and replayed on later requests.
cj = http.cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cj))
request.install_opener(opener)
with request.urlopen(url) as response3:
    print(response3.getcode())
    print(cj)                        # cookies collected during the request
    print(response3.read())

 

test_bs4.py

import re
from bs4 import BeautifulSoup

# Sample HTML document used for all the lookups below.
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

soup = BeautifulSoup(html_doc, 'html.parser')
# Python 3 strings are already Unicode, so a from_encoding='utf8'
# argument would be ignored (and warned about); it is omitted here.

# Method 1: find_all returns every matching <a> tag.
print('获取所有的链接')
links = soup.find_all('a')
for link in links:
    print(link.name, link['href'], link.get_text())


# Method 2: find returns the first tag matching an exact href.
print('获取lacie的链接')
link_node = soup.find('a', href='http://example.com/lacie')
print(link_node.name, link_node['href'], link_node.get_text())


# Method 3: href can also be matched with a compiled regex.
print('正则匹配')
link_node = soup.find('a', href=re.compile(r'ill'))
print(link_node.name, link_node['href'], link_node.get_text())


# Method 4: match on CSS class; 'class_' avoids clashing with the
# Python 'class' keyword.
print('获取p段落文字')
p_node = soup.find('p', class_='title')
print(p_node.name, p_node.get_text())

 

 

分类:

技术点:

相关文章: