Example of grabbing Tianya post content with Python multithreading

  • 2020-04-02 13:34:21
  • OfStack

This example uses re, urllib, and threading to grab the content of a Tianya post with multiple threads. Set url to the address of the first page of the post to be grabbed, and file_name to the name of the file the text is saved to.
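Tianya encodes the page number at the end of the URL, so every page's address can be derived from the first page's address. main() below does this by stripping the trailing '1.shtml' (seven characters) and substituting the desired page number:

url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
page3 = '%s%s.shtml' % (url[:-7], 3)
# page3 == 'http://bbs.tianya.cn/post-16-996521-3.shtml'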


#coding:utf-8
import urllib
import re
import threading
import os
class Down_Tianya(threading.Thread):
    """ Multithreaded download """
    def __init__(self, url, num, dt):
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt
    def run(self):
        print 'downloading from %s' % self.url
        self.down_text()
    def down_text(self):
        """ According to incoming url Grab the contents of each page, press the number of pages as the key to save the dictionary """
        html_content = urllib.urlopen(self.url).read()
        # One match per reply: capture the post time and the post body
        text_pattern = re.compile(r'<div class="atl-item".*?<span>Time: (.*?)</span>.*?<!-- <div class="host-ico">The original poster</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join
 


def page(url):
    """ Fetch the total number of pages based on the first page address """
    html_page = urllib.urlopen(url).read()
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">On the next page</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        page_num = int(page_result.group(1))
        return page_num
    # Posts with a single page have no pager link
    return 1
 
def write_text(page_dict, fn):
    """ Write the dictionary to text: each key is a page number, each value is the list of posts on that page """
    tx_file = open(fn, 'w+')
    pn = len(page_dict)
    for i in range(1, pn+1):
        tx_list = page_dict[i]
        for tx in tx_list:
            # Turn HTML line breaks into real line breaks and drop non-breaking spaces
            tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace('&nbsp;', '')
            tx_file.write(tx.strip() + '\r\n'*4)
    tx_file.close()

def main():
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}
    print 'page num is: %s' % my_page
    threads = []

    """ Structure by number of pages urls Multi-threaded download """
    for num in range(1, my_page+1):
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)

    """ Check that the download is complete before writing """
    for t in threads:
        t.join()
    write_text(my_dict, file_name)
    print 'All downloads finished. Saved file in directory: %s' % os.getcwd()

if __name__ == '__main__':
    main()
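The script targets Python 2 (urllib.urlopen and print statements). A minimal Python 3 sketch of the download thread, assuming the same page structure, could look like this; the simplified pattern that captures only the reply bodies is an assumption, not the article's original regex:

import re
import threading
import urllib.request

class DownTianya(threading.Thread):
    """ Python 3 sketch: urllib.urlopen moved to urllib.request.urlopen,
    print is a function, and the response bytes must be decoded """
    def __init__(self, url, num, dt):
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print('downloading from %s' % self.url)
        html = urllib.request.urlopen(self.url).read().decode('utf-8', 'ignore')
        # Assumption: capture only the reply bodies, not the timestamps
        pattern = re.compile(r'<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        self.txt_dict[self.num] = pattern.findall(html)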

The original script is saved as Down_tianya.py; running it writes the post text to abc.txt in the current directory.
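Note that main() starts one thread per page, which is unbounded for long posts. As a sketch (not part of the original article), Python 3's concurrent.futures can cap the number of simultaneous downloads; fetch_page here is a hypothetical stand-in for the scraping done in down_text():

import urllib.request
from concurrent.futures import ThreadPoolExecutor

def fetch_page(num, base='http://bbs.tianya.cn/post-16-996521-'):
    # Hypothetical helper: download one page, return (page number, html)
    url = '%s%s.shtml' % (base, num)
    return num, urllib.request.urlopen(url).read().decode('utf-8', 'ignore')

def fetch_all(page_count, workers=8):
    # map() yields results in input order, so pages come back as 1..page_count
    with ThreadPoolExecutor(max_workers=workers) as pool:
        return dict(pool.map(fetch_page, range(1, page_count + 1)))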

