Example of python crawling news portal site
- 2021-11-02 01:22:28
- OfStack
Contents: project address · how to use · sample code
Project address:
https://github.com/Python3Spiders/AllNewsSpider
How to use
The code under each folder is the news crawler of the corresponding platform
Each platform's crawler is shipped as a compiled pyd file rather than a plain py file — for example pengpai_news_spider.pyd. Download the pyd file locally, create a new project, and put the pyd file into it.
Create a new runner.py in the project's root directory, write the following code into it, and run it to start crawling:
import pengpai_news_spider
pengpai_news_spider.main()
Sample code
Baidu News
# -*- coding: utf-8 -*-
# NOTE: if a result link cannot be opened directly, open it through Baidu search in a browser first
import requests
from datetime import datetime, timedelta
from lxml import etree
import csv
import os
from time import sleep
from random import randint
def parseTime(unformatedTime):
    """Normalize a Baidu News timestamp to an absolute one.

    Relative strings such as "5 Minutes ago" or "2 Hours ago" are turned
    into an absolute '%Y-%m-%d %H:%M' string computed from the current
    time; any other string is returned unchanged.
    """
    # Each marker maps to the timedelta keyword used to build the offset.
    for marker, unit in ((' Minutes ', 'minutes'), (' Hours ', 'hours')):
        if marker in unformatedTime:
            amount = int(unformatedTime[:unformatedTime.find(marker)])
            offset = timedelta(**{unit: amount})
            return (datetime.now() - offset).strftime('%Y-%m-%d %H:%M')
    return unformatedTime
def dealHtml(html):
    """Scrape one Baidu News result page.

    Pulls title / source / time / summary out of every result container,
    echoes each record to stdout, and appends the rows to the CSV file
    named by the module-level global ``fileName``.
    """
    rows = []
    # ./ matches direct children only; .// matches direct or indirect descendants.
    for node in html.xpath('//div[@class="result-op c-container xpath-log new-pmd"]'):
        title = node.xpath('.//h3/a')[0].xpath('string(.)').strip()
        summary = node.xpath(
            './/span[@class="c-font-normal c-color-text"]')[0].xpath('string(.)').strip()
        infos = node.xpath('.//div[@class="news-source"]')[0]
        # The last two <span>s of the source block are "source" then "time".
        source = infos.xpath(".//span[last()-1]/text()")[0]
        dateTime = parseTime(infos.xpath(".//span[last()]/text()")[0])

        print(' Title ', title)
        print(' Source ', source)
        print(' Time ', dateTime)
        print(' Summary ', summary)
        print('\n')

        rows.append({
            'title': title,
            'source': source,
            'time': dateTime,
            'summary': summary
        })

    # utf-8-sig keeps the BOM so Excel opens the CSV with correct encoding.
    with open(fileName, 'a+', encoding='utf-8-sig', newline='') as f:
        writer = csv.writer(f)
        for row in rows:
            writer.writerow([row['title'], row['source'], row['time'], row['summary']])
# Request headers: a desktop Chrome User-Agent plus a Baidu-search Referer,
# so Baidu serves the regular news-results markup instead of a block page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
    'Referer': 'https://www.baidu.com/s?rtt=1&bsst=1&cl=2&tn=news&word=%B0%D9%B6%C8%D0%C2%CE%C5&fr=zhidao'
}
# Baidu search endpoint shared by every request.
url = 'https://www.baidu.com/s'
# Fixed query-string parameters for the news vertical; doSpider fills in
# 'wd' (the keyword) and 'pn' (the result offset) at run time.
params = {
    'ie': 'utf-8',
    'medium': 0,
    # rtt=4 Sort by time rtt=1 Sort by Focus
    'rtt': 1,
    'bsst': 1,
    'rsv_dl': 'news_t_sk',
    'cl': 2,
    'tn': 'news',
    'rsv_bp': 1,
    'oq': '',
    'rsv_btype': 't',
    'f': 8,
}
def doSpider(keyword, sortBy = 'focus'):
    '''
    Crawl Baidu News search results for *keyword* and append every result
    (title, source, time, summary) to "<keyword>.csv".

    :param keyword: Search keywords
    :param sortBy: Collation, optional: focus( Sort by focus), time( Sort by time), the default focus
    :return: None
    '''
    # dealHtml reads this global to know which CSV file to append to.
    global fileName
    fileName = '{}.csv'.format(keyword)
    # Write the header row only the first time this keyword is crawled.
    if not os.path.exists(fileName):
        with open(fileName, 'w+', encoding='utf-8-sig', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['title', 'source', 'time', 'summary'])

    params['wd'] = keyword
    if sortBy == 'time':
        # rtt=4 sorts by time; the preset rtt=1 sorts by focus.
        params['rtt'] = 4

    # First page (pn=0) is fetched outside the pagination loop.
    response = requests.get(url=url, params=params, headers=headers)
    html = etree.HTML(response.text)
    dealHtml(html)

    # Total hit count is embedded in the header bar text, e.g. "...1,234...".
    totalNodes = html.xpath('//div[@id="header_top_bar"]/span/text()')
    if not totalNodes:
        # Banner missing (zero hits or an anti-bot page): only page 1 exists.
        return
    total = totalNodes[0].replace(',', '')
    # Strip the fixed-width surrounding text, leaving just the number.
    total = int(total[7:-1])
    # Round up so the final partial page is fetched too (the original
    # floor division silently dropped up to 9 trailing results).
    pageNum = (total + 9) // 10

    for page in range(1, pageNum):
        print(' No. 1 {} Page \n\n'.format(page))
        headers['Referer'] = response.url
        params['pn'] = page * 10  # Baidu paginates by result offset, 10 per page.
        response = requests.get(url=url, headers=headers, params=params)
        html = etree.HTML(response.text)
        dealHtml(html)
        # Random pause between pages to reduce the chance of being blocked.
        sleep(randint(2, 4))
if __name__ == "__main__":
    # Demo run: crawl Baidu News for the sample keyword, sorted by focus.
    doSpider(keyword = ' Ma Baoguo ', sortBy='focus')
That concludes this worked example of crawling a news portal with Python. For more on the topic, please see the other related articles on this site.