This article introduces three example Python crawler projects built on the urllib2 library. The example code is explained in detail and has reference value for study or work; readers who need it are welcome to consult it.

Python crawler code example sharing
1. Crawling jokes (story paragraphs). Recommended learning: Python video tutorial.
Note: Some of the code can no longer run normally, but it still has reference value.
#encoding=utf-8
import urllib2
import re
class neihanba():
    def spider(self):
        """Main scheduler of the crawler.

        Fetches one listing page at a time and asks the user on the
        console whether to continue with the next page.
        """
        isflow = True  # whether to go on to the next page
        page = 1
        while isflow:
            url = "http://www.neihanpa.com/article/list_5_" + str(page) + ".html"
            html = self.load(url)
            self.deal(html, page)
            # Interactive prompt (Python 2 raw_input): "continue? (y/n)"
            panduan = raw_input("是否继续(y/n)!")
            if panduan == "y":
                isflow = True
                page += 1
            else:
                isflow = False

    def load(self, url):
        """Download the whole page at *url*.

        :param url: the URL to fetch
        :return: the raw response body
        """
        header = {
            "User-Agent": " Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
        }
        request = urllib2.Request(url, headers=header)
        response = urllib2.urlopen(request)
        html = response.read()
        return html

    def deal(self, html, page):
        """Regex-match the fetched HTML, extracting titles and bodies.

        :param html: the HTML fetched earlier
        :param page: the page number currently being crawled
        """
        # One <li class="piclistN"> element per item on the listing page.
        parrten = re.compile(r'<li class="piclist\d+">(.*?)</li>', re.S)
        titleList = parrten.findall(html)
        for title in titleList:
            parrten1 = re.compile(r'<a href="/article/\d+.html" rel="external nofollow" >(.*)</a>')
            ti1 = parrten1.findall(title)
            parrten2 = re.compile(r'<div class="f18 mb20">(.*?)</div>', re.S)
            til2 = parrten2.findall(title)
            for t in ti1:
                # Strip bold markup from the title text.
                tr = t.replace("<b>", "").replace("</b>", "")
                self.writeData(tr, page)
            for t in til2:
                # Strip paragraph/line-break tags and un-escape curly quotes.
                tr = t.replace("<p>", "").replace("</p>", "").replace("<br>", "").replace("<br />", "").replace("&ldquo", "\"").replace("&rdquo", "\"")
                self.writeData(tr, page)

    def writeData(self, context, page):
        """Append one extracted item to the per-page output file.

        :param context: the matched content
        :param page: the current page number (used in the file name)
        """
        fileName = "di" + str(page) + "yehtml.txt"
        # Renamed the handle from `file` to avoid shadowing the builtin.
        with open(fileName, "a") as out:
            out.writelines(context + "\n")
# Script entry point: build the crawler and start the main scheduler.
if __name__ == '__main__':
    n = neihanba()
    n.spider()

2. Crawl Zhilian:
#encoding=utf-8
import urllib
import urllib2
import re
class zhiLian():
    def spider(self, position, workPlace):
        """Main scheduler of the crawler.

        :param position: job title to search for
        :param workPlace: work location to search in
        """
        url = "http://sou.zhaopin.com/jobs/searchresult.ashx?"
        url += urllib.urlencode({"jl": workPlace})
        url += "&"
        url += urllib.urlencode({"kw": position})
        isflow = True  # whether to crawl the next page
        page = 1
        while isflow:
            # Bug fix: build the per-page URL from the base URL each time.
            # The original did `url += "&" + str(page)`, so every later
            # request still carried all previous page suffixes.
            pageUrl = url + "&" + str(page)
            html = self.load(pageUrl)
            self.deal1(html, page)
            # Interactive prompt: "continue crawling the next page? (y/n)"
            panduan = raw_input("是否继续爬虫下一页(y/n)!")
            if panduan == "y":
                isflow = True
                page += 1
            else:
                isflow = False

    def load(self, url):
        """Download the whole page at *url*.

        :param url: the URL to fetch
        :return: the raw response body
        """
        header = {
            "User-Agent": " Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
        }
        request = urllib2.Request(url, headers=header)
        response = urllib2.urlopen(request)
        html = response.read()
        return html

    def deal1(self, html, page):
        """Regex-match the search-result HTML for the link of each position.

        :param html: the HTML fetched earlier
        :param page: the page number currently being crawled
        """
        parrten = re.compile(r'<a\s+style="font-weight:\s+bold"\s+par="ssidkey=y&ss=\d+&ff=\d+&sg=\w+&so=\d+"\s+href="(.*?)" rel="external nofollow" target="_blank">.*?</a>', re.S)
        til = parrten.findall(html)  # links to the detail pages
        for t in til:
            self.deal2(t, page)

    def deal2(self, t, page):
        """Second-level crawl: on the detail page, match company, salary
        and required work experience.

        :param t: URL of the detail page
        :param page: the page number currently being processed
        """
        html = self.load(t)  # content of the second-level page
        parrten1 = re.compile(r'<a\s+onclick=".*?"\s+href=".*?" rel="external nofollow" \s+target="_blank">(.*?)\s+.*?<img class=".*? lazy" src="/static/imghwm/default1.png" data-src=".*?" \s+\s+\s+border="\d+"\s+vinfo=".*?" alt="Python crawler code sample sharing" ></a>', re.S)
        parrten2 = re.compile(r'<li><span>职位月薪:</span><strong>(.*?) <a.*?>.*?</a></strong></li>', re.S)
        parrent3 = re.compile(r'<li><span>工作经验:</span><strong>(.*?)</strong></li>', re.S)
        til1 = parrten1.findall(html)
        til2 = parrten2.findall(html)
        til3 = parrent3.findall(html)
        # Renamed from `str` to avoid shadowing the builtin.
        record = ""
        for t in til1:
            t = t.replace('<img src="/static/imghwm/default1.png" data-src="//img03.zhaopin.cn/2012/img/jobs/icon.png" class="lazy" title="专属页面" border="0" / alt="Python crawler code sample sharing" >', "")
            record += t
            record += "\t"
        for t in til2:
            record += t
            record += "\t"
        for t in til3:
            record += t
        self.writeData(record, page)

    def writeData(self, context, page):
        """Append one extracted record to the per-page output file.

        :param context: the matched content
        :param page: the current page number (used in the file name)
        """
        fileName = "di" + str(page) + "yehtml.txt"
        # Renamed the handle from `file` to avoid shadowing the builtin.
        with open(fileName, "a") as out:
            out.writelines(context + "\n")
# Script entry point: read the job title and location from the console,
# then build the crawler.
if __name__ == '__main__':
    position = raw_input("请输入职位:")
    workPlace = raw_input("请输入工作地点:")
    z = zhiLian()
    z.spider(position, workPlace)

3. Crawl Tieba:
#encoding=utf-8
import urllib
import urllib2
import re
class teiba():
    def spider(self, name, startPage, endPage):
        """Crawl Tieba result pages for *name* from startPage to endPage
        (inclusive), writing the thread titles to per-page files.
        """
        url = "http://tieba.baidu.com/f?ie=utf-8&"
        url += urllib.urlencode({"kw": name})
        for page in range(startPage, endPage + 1):
            # Offset parameter: 50 entries per page, judging by the arithmetic.
            pn = 50 * (page - 1)
            urlFull = url + "&" + urllib.urlencode({"pn": pn})
            # Bug fix: fetch the page-specific URL. The original passed the
            # base `url`, so every iteration re-fetched the first page and
            # `urlFull` was never used.
            html = self.loadPage(urlFull)
            self.dealPage(html, page)

    def loadPage(self, url):
        """Download the whole page at *url* and return the raw body."""
        header = {
            "User-Agent": " Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
        }
        request = urllib2.Request(url, headers=header)
        response = urllib2.urlopen(request)
        html = response.read()
        return html

    def dealPage(self, html, page):
        """Extract thread titles from the HTML and write them out.

        :param html: the HTML fetched earlier
        :param page: the page number currently being crawled
        """
        partten = re.compile(r'<a\s+rel="noreferrer"\s+href="/p/\d+" rel="external nofollow" \s+title=".*?"\s+target="_blank" class="j_th_tit\s+">(.*?)</a>', re.S)
        titleList = partten.findall(html)
        # Inline "#topic#" tag spans are stripped from each title.
        rstr = r'<span\s+class="topic-tag"\s+data-name=".*?">#(.*?)#</span>'
        for title in titleList:
            title = re.sub(rstr, "", title)
            self.writePage(title, page)

    def writePage(self, context, page):
        """Append one title to the per-page output file."""
        fileName = "di" + str(page) + "yehtml.txt"
        # Renamed the handle from `file` to avoid shadowing the builtin.
        with open(fileName, "a") as out:
            out.writelines(context + "\n")
# Script entry point: read the Tieba name and the start/end page numbers
# from the console, then build the crawler.
if __name__ == '__main__':
    name = raw_input("请输入贴吧名:")
    startPage = raw_input("请输入起始页:")
    endPage = raw_input("请输入终止页:")
    t = teiba()
    t.spider(name, int(startPage), int(endPage))

For more related tutorials, please follow the Python Tutorial column.
The above is the detailed content of Python crawler code sample sharing. For more information, please follow other related articles on the PHP Chinese website!
The Main Purpose of Python: Flexibility and Ease of UseApr 17, 2025 am 12:14 AMPython's flexibility is reflected in multi-paradigm support and dynamic type systems, while ease of use comes from a simple syntax and rich standard library. 1. Flexibility: Supports object-oriented, functional and procedural programming, and dynamic type systems improve development efficiency. 2. Ease of use: The grammar is close to natural language, the standard library covers a wide range of functions, and simplifies the development process.
Python: The Power of Versatile ProgrammingApr 17, 2025 am 12:09 AMPython is highly favored for its simplicity and power, suitable for all needs from beginners to advanced developers. Its versatility is reflected in: 1) Easy to learn and use, simple syntax; 2) Rich libraries and frameworks, such as NumPy, Pandas, etc.; 3) Cross-platform support, which can be run on a variety of operating systems; 4) Suitable for scripting and automation tasks to improve work efficiency.
Learning Python in 2 Hours a Day: A Practical GuideApr 17, 2025 am 12:05 AMYes, learn Python in two hours a day. 1. Develop a reasonable study plan, 2. Select the right learning resources, 3. Consolidate the knowledge learned through practice. These steps can help you master Python in a short time.
Python vs. C : Pros and Cons for DevelopersApr 17, 2025 am 12:04 AMPython is suitable for rapid development and data processing, while C is suitable for high performance and underlying control. 1) Python is easy to use, with concise syntax, and is suitable for data science and web development. 2) C has high performance and accurate control, and is often used in gaming and system programming.
Python: Time Commitment and Learning PaceApr 17, 2025 am 12:03 AMThe time required to learn Python varies from person to person, mainly influenced by previous programming experience, learning motivation, learning resources and methods, and learning rhythm. Set realistic learning goals and learn best through practical projects.
Python: Automation, Scripting, and Task ManagementApr 16, 2025 am 12:14 AMPython excels in automation, scripting, and task management. 1) Automation: File backup is realized through standard libraries such as os and shutil. 2) Script writing: Use the psutil library to monitor system resources. 3) Task management: Use the schedule library to schedule tasks. Python's ease of use and rich library support makes it the preferred tool in these areas.
Python and Time: Making the Most of Your Study TimeApr 14, 2025 am 12:02 AMTo maximize the efficiency of learning Python in a limited time, you can use Python's datetime, time, and schedule modules. 1. The datetime module is used to record and plan learning time. 2. The time module helps to set study and rest time. 3. The schedule module automatically arranges weekly learning tasks.
Python: Games, GUIs, and MoreApr 13, 2025 am 12:14 AMPython excels in gaming and GUI development. 1) Game development uses Pygame, providing drawing, audio and other functions, which are suitable for creating 2D games. 2) GUI development can choose Tkinter or PyQt. Tkinter is simple and easy to use, PyQt has rich functions and is suitable for professional development.


Hot AI Tools

Undresser.AI Undress
AI-powered app for creating realistic nude photos

AI Clothes Remover
Online AI tool for removing clothes from photos.

Undress AI Tool
Undress images for free

Clothoff.io
AI clothes remover

AI Hentai Generator
Generate AI Hentai for free.

Hot Article

Hot Tools

SublimeText3 English version
Recommended: Win version, supports code prompts!

mPDF
mPDF is a PHP library that can generate PDF files from UTF-8 encoded HTML. The original author, Ian Back, wrote mPDF to output PDF files "on the fly" from his website and handle different languages. It is slower than original scripts like HTML2FPDF and produces larger files when using Unicode fonts, but supports CSS styles etc. and has a lot of enhancements. Supports almost all languages, including RTL (Arabic and Hebrew) and CJK (Chinese, Japanese and Korean). Supports nested block-level elements (such as P, DIV),

MinGW - Minimalist GNU for Windows
This project is in the process of being migrated to osdn.net/projects/mingw, you can continue to follow us there. MinGW: A native Windows port of the GNU Compiler Collection (GCC), freely distributable import libraries and header files for building native Windows applications; includes extensions to the MSVC runtime to support C99 functionality. All MinGW software can run on 64-bit Windows platforms.

SublimeText3 Chinese version
Chinese version, very easy to use

SublimeText3 Mac version
God-level code editing software (SublimeText3)






