Over the past two days I have been learning how to use Python 3 to crawl web resources. I came across quite a few approaches along the way, so here are my notes on ten of them.
1. The simplest way
import urllib.request

response = urllib.request.urlopen('http://python.org/')
html = response.read()
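As a side note, urlopen() returns an http.client.HTTPResponse object and read() returns raw bytes, so it is often worth checking the status and headers before decoding. A minimal sketch, reusing the URL from above:

import urllib.request

response = urllib.request.urlopen('http://python.org/')
print(response.status)                      # e.g. 200
print(response.getheader('Content-Type'))   # e.g. text/html; charset=utf-8
html = response.read().decode('utf-8')      # read() gives bytes, so decode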
2. Using Request
import urllib.request

req = urllib.request.Request('http://python.org/')
response = urllib.request.urlopen(req)
the_page = response.read()
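The advantage of going through a Request object is that it can carry extra information about the request. A small sketch, assuming you only want the response headers of the page (the header value and the HEAD method here are just examples; the method argument exists since Python 3.3):

import urllib.request

req = urllib.request.Request(
    'http://python.org/',
    headers={'User-Agent': 'Mozilla/5.0'},
    method='HEAD',  # fetch only the response headers
)
response = urllib.request.urlopen(req)
print(response.status, response.getheader('Content-Type'))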
3. Sending data
#! /usr/bin/env python3
import urllib.parse
import urllib.request

url = 'http://localhost/login.php'
values = {
    'act': 'login',
    'login[email]': 'yzhang@i9i8.com',
    'login[password]': '123456'
}
# urlencode() returns a str; in Python 3 the request body must be bytes
data = urllib.parse.urlencode(values).encode('utf-8')
req = urllib.request.Request(url, data)
req.add_header('Referer', 'http://www.python.org/')
response = urllib.request.urlopen(req)
the_page = response.read()
print(the_page.decode('utf8'))
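Passing data this way sends a POST request. For a GET request the encoded values go into the URL itself instead of the body; a minimal sketch, with a made-up endpoint and parameters:

import urllib.parse
import urllib.request

params = urllib.parse.urlencode({'q': 'python', 'page': 1})
url = 'http://localhost/search.php?' + params  # hypothetical endpoint
with urllib.request.urlopen(url) as response:
    print(response.read().decode('utf8'))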
4. Sending data and headers
#! /usr/bin/env python3
import urllib.parse
import urllib.request

url = 'http://localhost/login.php'
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
values = {
    'act': 'login',
    'login[email]': 'yzhang@i9i8.com',
    'login[password]': '123456'
}
headers = {'User-Agent': user_agent}
# urlencode() returns a str; in Python 3 the request body must be bytes
data = urllib.parse.urlencode(values).encode('utf-8')
req = urllib.request.Request(url, data, headers)
response = urllib.request.urlopen(req)
the_page = response.read()
print(the_page.decode('utf8'))
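Passing a headers dict to Request is equivalent to calling req.add_header() for each entry, so this style and the add_header() style from the previous example can be mixed freely.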
5. HTTP errors
#! /usr/bin/env python3
import urllib.error
import urllib.request

req = urllib.request.Request('http://www.python.org/fish.html')
try:
    urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
    print(e.code)
    print(e.read().decode('utf8'))

6. Exception handling (1)
#! /usr/bin/env python3
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError

req = Request('http://twitter.com/')
try:
    response = urlopen(req)
except HTTPError as e:
    print("The server couldn't fulfill the request.")
    print('Error code: ', e.code)
except URLError as e:
    print('We failed to reach a server.')
    print('Reason: ', e.reason)
else:
    print('good!')
    print(response.read().decode('utf8'))

7. Exception handling (2)
#! /usr/bin/env python3
from urllib.request import Request, urlopen
from urllib.error import URLError

req = Request('http://twitter.com/')
try:
    response = urlopen(req)
except URLError as e:
    # In Python 3 an HTTPError has both .code and .reason, so test
    # for .code first to tell the two cases apart reliably.
    if hasattr(e, 'code'):
        print("The server couldn't fulfill the request.")
        print('Error code: ', e.code)
    elif hasattr(e, 'reason'):
        print('We failed to reach a server.')
        print('Reason: ', e.reason)
else:
    print('good!')
    print(response.read().decode('utf8'))
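This single except clause works because HTTPError is a subclass of URLError; the hasattr() checks then distinguish an HTTP error response from a failure to reach the server at all.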
8. HTTP authentication

#! /usr/bin/env python3
import urllib.request

# create a password manager
password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()

# Add the username and password.
# If we knew the realm, we could use it instead of None.
top_level_url = 'https://cms.tetx.com/'
password_mgr.add_password(None, top_level_url, 'yzhang', 'cccddd')

handler = urllib.request.HTTPBasicAuthHandler(password_mgr)

# create "opener" (OpenerDirector instance)
opener = urllib.request.build_opener(handler)

# use the opener to fetch a URL
a_url = 'https://cms.tetx.com/'
x = opener.open(a_url)
print(x.read())

# Install the opener.
# Now all calls to urllib.request.urlopen use our opener.
urllib.request.install_opener(opener)
a = urllib.request.urlopen(a_url).read().decode('utf8')
print(a)
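One caveat: HTTPBasicAuthHandler only sends the credentials after the server has answered with a 401 challenge. If a server expects the Authorization header up front, you can set it yourself; a sketch using the same placeholder URL and credentials as above:

import base64
import urllib.request

# Build the Basic auth header by hand (credentials are placeholders)
credentials = base64.b64encode(b'yzhang:cccddd').decode('ascii')
req = urllib.request.Request('https://cms.tetx.com/')
req.add_header('Authorization', 'Basic ' + credentials)
response = urllib.request.urlopen(req)
print(response.read().decode('utf8'))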
9. Using a proxy

#! /usr/bin/env python3
import urllib.request

# ProxyHandler keys are URL schemes; urllib.request only supports
# HTTP(S) proxies natively, so the original 'sock5' key would have
# been silently ignored.
proxy_support = urllib.request.ProxyHandler({'http': 'http://localhost:1080'})
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)

a = urllib.request.urlopen('http://g.cn').read().decode('utf8')
print(a)
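If the proxy on localhost:1080 really is a SOCKS5 proxy, as the original key suggests, urllib cannot talk to it on its own. A sketch using the third-party PySocks package (pip install PySocks) to route all sockets through it:

import socket

import socks  # third-party: pip install PySocks
import urllib.request

socks.set_default_proxy(socks.SOCKS5, 'localhost', 1080)
socket.socket = socks.socksocket  # route all new sockets via the proxy

print(urllib.request.urlopen('http://g.cn').read().decode('utf8'))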
10. Timeout

#! /usr/bin/env python3
import socket
import urllib.request

# timeout in seconds
timeout = 2
socket.setdefaulttimeout(timeout)

# this call to urllib.request.urlopen now uses the default timeout
# we have set in the socket module
req = urllib.request.Request('http://twitter.com/')
a = urllib.request.urlopen(req).read()
print(a)
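Changing the socket default affects every socket in the process. urlopen() also takes a per-call timeout argument, which is usually the cleaner option; a minimal sketch:

import socket
import urllib.request
from urllib.error import URLError

try:
    a = urllib.request.urlopen('http://twitter.com/', timeout=2).read()
    print(a)
except URLError as e:
    print('request failed:', e.reason)  # a connect timeout ends up here
except socket.timeout:
    print('read timed out')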