Python crawler: obtain, parse, and store data, explained in detail
- 2021-12-04 19:23:43
- OfStack
Catalog
1. Obtain the data
2. Parse the data
3. Save data in CSV format and store it in a database
Summary
1. Obtain the data
import requests

def drg(url):
    try:
        head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/'
                              '537.36 (KHTML, like Gecko) Chrome/'
                              '91.0.4472.164 Safari/537.36'}
        r = requests.get(url, headers=head)
        r.raise_for_status()  # raise an HTTPError if the status code is not 200
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        return "An exception occurred"

url = "https://www.ip138.com/mobile.asp?mobile=13018305773&action=mobile"
print(drg(url))
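Because apparent_encoding only guesses the character encoding from the response body, it can help to compare what the server declared with what was detected before trusting the decoded text. A minimal sketch, reusing the URL and User-Agent from the example above:

import requests

url = "https://www.ip138.com/mobile.asp?mobile=13018305773&action=mobile"
head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36'}
r = requests.get(url, headers=head, timeout=10)
print(r.status_code)        # 200 if the request succeeded
print(r.encoding)           # encoding declared in the HTTP headers
print(r.apparent_encoding)  # encoding guessed from the response body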
2. Parse the data
import requests

def login():
    try:
        # URL of the login endpoint
        urllogin = "http://www.cqooc.com/user/login?username=12608199000635&password=48C032612C2A6777D28A969307B52127E198D59AA78522943C1B283CF7B89E69&nonce=6BA36BBB1F623279&cnonce=8257070573EFE28F"
        s = requests.Session()
        r = s.post(urllogin, data=Form, headers=headers)
        r.encoding = r.apparent_encoding
        r.raise_for_status()
        return s
    except Exception as error:
        print(error)

def get_html(s, url):
    try:
        r = s.get(url, headers=headers)
        r.encoding = r.apparent_encoding
        r.raise_for_status()
        return r.text
    except Exception as error:
        print(error)

if __name__ == "__main__":
    # User-Agent sent with every request
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36",
    }
    # replace these with your own credentials
    Form = {
        "username": "12608199000635",
        "password": "48C032612C2A6777D28A969307B52127E198D59AA78522943C1B283CF7B89E69",
        "nonce": "6BA36BBB1F623279",
        "cnonce": "8257070573EFE28F"
    }
    lin = login()
    # URL of the personal center page
    url = "http://www.cqooc.com/my/learn"
    html = get_html(lin, url)
    print(html)
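An easy way to confirm the login actually took effect, before requesting the personal center page, is to look at the cookies the server set on the session. A minimal sketch, assuming lin is the session object returned by login() above; an empty dictionary usually means the POST did not log in:

# inspect the cookies the server stored on the session during login
print(lin.cookies.get_dict())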
3. Save data in CSV format and store it in a database
Save as CSV
import requests
from lxml import etree
import csv

# get the data
def get_html(url, time=30):
    try:
        r = requests.get(url, timeout=time)
        r.encoding = r.apparent_encoding
        r.raise_for_status()
        return r.text
    except Exception as error:
        print(error)

def parser(html):  # parsing function
    doc = etree.HTML(html)  # parse the HTML into an element tree
    out_list = []  # list of rows output by the parsing function
    # two-level search: locate each book node first, then pick out its fields
    for row in doc.xpath("//*[@class='book-img-text']//li/*[@class='book-mid-info']"):
        row_data = [
            row.xpath("h4/a/text()")[0],  # book title
            row.xpath("p[@class='author']/a/text()")[0],  # author
            row.xpath("p[2]/text()")[0].strip(),  # introduction
            row.xpath("p[@class='update']/span/text()")[0]  # update date
        ]
        out_list.append(row_data)  # append each parsed row to the output list
    return out_list

def save_csv(item, path):  # data store: write the list to a file without garbled characters
    with open(path, "a+", newline='', encoding="utf-8") as f:  # create a UTF-8 encoded file
        csv_write = csv.writer(f)  # create a writer object
        csv_write.writerows(item)  # write multiple rows in one call

if __name__ == "__main__":
    for i in range(1, 6):
        url = "https://www.qidian.com/rank/fengyun?style=1&page={0}".format(i)
        html = get_html(url)  # fetch the web page
        out_list = parser(html)  # parse the page into list data
        save_csv(out_list, "d:\\book.csv")  # store the data
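Since save_csv opens the file in append mode ("a+"), rerunning the script keeps adding rows to the same file. A quick way to verify what was written is to read the file back with the same csv module; a minimal sketch, assuming the d:\book.csv path used above:

import csv

with open("d:\\book.csv", newline='', encoding="utf-8") as f:
    for title, author, intro, update in csv.reader(f):
        print(title, author, update)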
Save to the database
import pymysql
import requests
from lxml import etree

def get_html(url, time=3000):
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31"
        }
        r = requests.get(url, timeout=time, headers=headers)
        r.encoding = r.apparent_encoding
        r.raise_for_status()
        return r.text
    except Exception as err:
        print(err)

result = []

def parse_html(html):
    html = etree.HTML(html)
    for row in html.xpath('//*[@id="content"]/div/div[1]/ul/li'):
        name = row.xpath("div[2]/h2/a/text()")[0].strip()  # book title
        score = row.xpath("div[2]/p[2]/span[2]/text()")[0].strip()  # rating
        # the info line reads like "author / publisher / date / price"; split it on "/"
        parts = row.xpath("div[2]/p[1]/text()")[0].strip().split("/")
        price = parts[0]
        content = parts[1]
        a = parts[2]
        b = parts[-1]
        detail = [name, score, price, content, a, b]
        result.append(detail)

def join_all(sql_insert, vals, **dbinfo):
    try:
        connect = pymysql.connect(**dbinfo)
        cursor = connect.cursor()
        cursor.executemany(sql_insert, vals)
        connect.commit()
        print('Inserted successfully!')
    except Exception as err:
        print(err)
        connect.rollback()
    finally:
        cursor.close()
        connect.close()

if __name__ == "__main__":
    parms = {
        "host": "127.0.0.1",
        "port": 3306,
        "user": "root",
        "passwd": "123456",
        "db": "db",
        "charset": "utf8"
    }
    for page in range(1, 16):
        url = "https://book.douban.com/latest?subcat=%E5%85%A8%E9%83%A8&p={0}".format(str(page))
        html = get_html(url)
        parse_html(html)
    # insert the accumulated rows once, after every page has been parsed,
    # so the same rows are not written to the table repeatedly
    sql_insert = "INSERT INTO db(name, score, price, content, a, b) \
                  VALUES(%s, %s, %s, %s, %s, %s)"
    join_all(sql_insert, result, **parms)
    print(result)
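The INSERT statement assumes a table named db with those six columns already exists in the database; the script never creates it. A minimal sketch of setting the table up through the same pymysql connection parameters (the VARCHAR types and lengths are assumptions, not taken from the original):

import pymysql

parms = {"host": "127.0.0.1", "port": 3306, "user": "root",
         "passwd": "123456", "db": "db", "charset": "utf8"}
connect = pymysql.connect(**parms)
cursor = connect.cursor()
# column names match the INSERT statement; the types are assumed
cursor.execute("""
    CREATE TABLE IF NOT EXISTS db (
        name    VARCHAR(255),
        score   VARCHAR(32),
        price   VARCHAR(64),
        content VARCHAR(255),
        a       VARCHAR(255),
        b       VARCHAR(255)
    )
""")
connect.commit()
cursor.close()
connect.close()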
Summary
That's all for this article. I hope it helps you, and I hope you'll keep an eye on this site for more content!