
Python Crawler: Scraping Images from Web Pages




1. Introduction

I've been studying Python for a while now. I had long heard how powerful Python crawlers are, and having just reached that part of the material, I followed 小甲鱼's Python video course and wrote a crawler program that can do simple image downloads from a web page.

2. Code

import urllib.request
import urllib.error
import os
import random
import re

def url_open(url):
    '''
    Open a web page and return its raw response bytes.
    :param url: page URL
    :return: response body as bytes
    '''
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36')

    # Optional: route requests through a randomly chosen proxy
    '''
    proxies = ["111.155.116.237:8123", "101.236.23.202:8866", "122.114.31.177:808"]
    proxy = random.choice(proxies)
    proxy_support = urllib.request.ProxyHandler({"http": proxy})
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)
    '''

    # Pass the Request object (not the bare URL) so the User-Agent header is actually sent
    response = urllib.request.urlopen(req)
    html = response.read()

    return html

def save_img(folder, img_addrs):
    '''
    Save images to disk.
    :param folder: folder to save the images into
    :param img_addrs: list of image addresses
    :return:
    '''
    # Create the folder that will hold the images
    if not os.path.exists(folder):
        os.mkdir(folder)
    os.chdir(folder)
    for each in img_addrs:
        filename = each.split('/')[-1]
        try:
            with open(filename, 'wb') as f:
                # The scraped addresses are protocol-relative, so prepend "http:"
                img = url_open("http:" + each)
                f.write(img)
        except urllib.error.HTTPError as e:
            # print(e.reason)
            pass
    print('Done!')

def find_imgs(url):
    '''
    Collect all image links on a page.
    :param url: page URL
    :return: list of image addresses
    '''
    html = url_open(url).decode("utf-8")
    # Non-greedy match on src="...gif"; the dot before gif is escaped so it matches literally
    img_addrs = re.findall(r'src="(.+?\.gif)"', html)
    return img_addrs

def get_page(url):
    '''
    Find the number of the current (latest) comment page.
    :param url: page URL
    :return: page number as a string
    '''
    html = url_open(url).decode('utf-8')
    # The offset 23 skips past 'current-comment-page">[' so a points at the first digit
    a = html.find("current-comment-page") + 23
    b = html.find("]", a)

    return html[a:b]

def download_mm(url="http://jandan.net/ooxx/", folder="OOXX", pages=1):
    '''
    Main routine: download the images.
    :param folder: folder to save into (default "OOXX")
    :param pages: number of pages to download, counting down from the latest
    :return:
    '''
    page_num = int(get_page(url))

    for i in range(pages):
        page_num -= i
        page_url = url + "page-" + str(page_num) + "#comments"
        img_addrs = find_imgs(page_url)
        save_img(folder, img_addrs)

if __name__ == "__main__":
    download_mm()
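
As a quick usage note, here is a minimal sketch of calling the entry point with explicit arguments; the folder name and page count below are illustrative values, not ones from the original post:

# Hypothetical invocation: download three pages of images into a custom folder.
# "ooxx_imgs" and pages=3 are example values only.
download_mm(url="http://jandan.net/ooxx/", folder="ooxx_imgs", pages=3)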


3. Summary

The site the code visits has since deployed anti-crawler measures, so it no longer returns the images I wanted; just treat this as a set of crawler notes, for learning reference only [facepalm].

Finally: after changing jpg to gif in the pattern, I could still scrape one lone gif image.

That first image was in fact a placeholder served by the anti-crawler mechanism, with no real content at all.
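
For reference, a minimal sketch of the jpg variant of the pattern mentioned above, plus a simple guard one could add inside the save_img loop to skip empty placeholder responses; both are assumptions based on the gif pattern and download loop shown earlier, not part of the original code:

# Assumed jpg variant of the pattern in find_imgs(); only the extension differs.
img_addrs = re.findall(r'src="(.+?\.jpg)"', html)

# Hypothetical guard in the save_img() loop: skip responses with an empty body,
# such as the content-free placeholder described above.
img = url_open("http:" + each)
if img:
    with open(filename, 'wb') as f:
        f.write(img)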
