实战案例

实战1:图片爬取

网站:https://www.mzitu.com/mm

import requests
from pyquery import PyQuery as pq

def get_page(page):
    """Fetch one listing page of the gallery site and return its HTML text.

    Args:
        page: 1-based listing page number.

    Returns:
        The response body as text, or None when the status is not 200.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
    }
    listing_url = 'https://www.mzitu.com/mm/page/' + str(page)
    response = requests.get(listing_url, headers=headers)
    if response.status_code != 200:
        return None
    return response.text

def parse_inpage(html):
    """Extract the main image's src URL from a photo-detail page's HTML."""
    image_node = pq(html)('.main .content .main-image img')
    return image_node.attr('src')

def get_inpage(item):
    """Walk the first 10 inner pages of one photo set and yield one dict per image.

    Args:
        item: dict with 'image' (detail-page base URL) and 'title' (set title).

    Yields:
        dict with 'image' (direct image URL), 'img_name' (short name sliced
        from the URL) and 'title'.
    """
    headers = {
        'Referer': 'https://www.mzitu.com',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
    }
    base_url = item.get('image')
    title = item.get('title')
    # Resolve image URLs from the first 10 inner pages of the set.
    for page_no in range(1, 11):
        response = requests.get(base_url + '/' + str(page_no), headers=headers)
        if response.status_code != 200:
            continue
        in_url = parse_inpage(response.text)
        yield {
            'image': in_url,
            # e.g. '.../12345.jpg' -> '12345' (chars -9..-4 of the URL)
            'img_name': str(in_url)[-9:-4],
            'title': title
        }
def page_parese(html):
    """Parse one listing page and yield a dict per photo set it links to.

    Args:
        html: listing-page HTML, or None when the fetch failed.

    Yields:
        dict with 'image' (detail-page URL from the anchor's href) and
        'title' (the thumbnail's alt text). Yields nothing for None input.
    """
    # Guard clause instead of `html != None`; PEP 8 mandates `is not None`.
    if html is None:
        return
    doc = pq(html)
    # NOTE: pq(...) never returns None, so the original `doc != None`
    # check was dead code and is dropped.
    for item in doc('.main .main-content .postlist ul li').items():
        yield {
            'image': item.find('a').attr('href'),
            'title': item.find('img').attr('alt')
        }

#保存图片
import os
def save_images(item):
    """Download one image described by *item* and save it under img/<title>/.

    Args:
        item: dict with 'image' (image URL), 'title' (photo-set directory
              name) and 'img_name' (file name stem, saved as .jpg).
    """
    # makedirs creates the missing 'img' parent directory too; the original
    # os.mkdir raised FileNotFoundError on the very first run because the
    # top-level 'img' directory did not exist yet.
    os.makedirs('img/' + item.get('title'), exist_ok=True)
    try:
        headers = {
            'Referer': 'https://www.mzitu.com',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
        }
        response = requests.get(item.get('image'), headers=headers)
        if response.status_code == 200:
            file_path = 'img/{0}/{1}.{2}'.format(item.get('title'), item.get('img_name'), 'jpg')
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(response.content)
            else:
                print('Already Downloaded', file_path)
    except requests.ConnectionError as e:
        print(e.args)
        # typo fix: was 'Filed to save images'
        print('Failed to save images')

def main(page):
    """Crawl one listing page: fetch it, then save every image of every set."""
    html = get_page(page)
    # page_parese yields one dict per photo set on this listing page.
    for group in page_parese(html):
        print(group)
        # get_inpage yields one dict per individual image in the set.
        for image_item in get_inpage(group):
            save_images(image_item)
            print(image_item)
        print('=' * 50)
# Inclusive range of listing pages to crawl.
starting = 1
ending = 20

from multiprocessing.pool import Pool
if __name__ == '__main__':
    # Fan the page numbers out over a worker pool, one main(page) call each.
    pool = Pool()
    # pool.map accepts any iterable; the original wrapped range() in a
    # redundant list comprehension.
    pool.map(main, range(starting, ending + 1))
    pool.close()
    pool.join()
实战2:微博动态获取
#XXX为微博号
from urllib.parse import urlencode
import requests
# Fixed first half of the API URL; per-page query parameters are appended later.
base_url='https://m.weibo.cn/api/container/getIndex?'
# Headers mimicking the mobile web client; X-Requested-With marks the call
# as AJAX so the endpoint answers with JSON.
headers={
    'Host':'m.weibo.cn',
    'Referer': 'https://m.weibo.cn/u/XXX',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest'
}
def get_page(page):
    """Request one page of the weibo mobile container API and return its JSON.

    Args:
        page: 1-based page number of the user's timeline.

    Returns:
        The decoded JSON dict on HTTP 200; None on any other status or on
        a connection error.
    """
    # type, values and containerid are fixed for this account; only the
    # page parameter varies between calls.
    query = {
        'type': 'uid',
        'values': 'XXX',
        'containerid': '107603XXX',
        'page': page
    }
    # urlencode() turns the dict into a GET query string, producing e.g.
    # type=uid&values=XXX&containerid=107603XXX&page=2
    full_url = base_url + urlencode(query)
    try:
        response = requests.get(full_url, headers=headers)
    except requests.ConnectionError as e:
        print('Error', e.args)
        return None
    if response.status_code == 200:
        return response.json()
    return None
#定义解析方法
from pyquery import PyQuery as pq
def parse_page(json):
    """Yield one flattened dict per weibo post found in an API response.

    Args:
        json: decoded API response dict, or None when the fetch failed.

    Yields:
        dict with 'id', plain-text 'text' and the attitude/comment/repost
        counters of each post.
    """
    if not json:
        return
    # Error responses may lack 'data' or 'cards'; default to an empty list
    # instead of raising AttributeError on None (as the original chained
    # .get() calls did).
    cards = (json.get('data') or {}).get('cards') or []
    for card in cards:
        mblog = card.get('mblog')
        if not mblog:
            # Some cards (ads/recommendations) carry no 'mblog' payload;
            # the original crashed on None.get(...) for these.
            continue
        weibo = {}
        weibo['id'] = mblog.get('id')
        # pq(...).text() strips the HTML markup from the post body.
        weibo['text'] = pq(mblog.get('text')).text()
        weibo['attitudes'] = mblog.get('attitudes_count')
        weibo['comments'] = mblog.get('comments_count')
        weibo['reposts'] = mblog.get('reposts_count')
        yield weibo
# Write the parsed results into a MongoDB database.
from pymongo import MongoClient
client = MongoClient()  # no args: connects to the default local server
db = client['weibo']
collection = db['weibo']  # database and collection share the name 'weibo'
def save_to_mongo(result):
    """Insert one parsed weibo dict into the 'weibo' collection.

    Args:
        result: a weibo record as produced by parse_page().
    """
    # Collection.insert() was deprecated in pymongo 3 and removed in 4;
    # insert_one() is the supported replacement and returns a truthy
    # InsertOneResult on success.
    if collection.insert_one(result):
        print('saved to mongodb')


if __name__ == '__main__':
    # Crawl the first 10 timeline pages, printing and persisting each post.
    for page in range(1, 11):
        payload = get_page(page)
        print(payload)
        for weibo in parse_page(payload):
            print(weibo)
            save_to_mongo(weibo)

   转载规则


《实战案例》 White Spider 采用 知识共享署名 4.0 国际许可协议 进行许可。
  目录