Three ways to batch-download images with Python
The first is to drive IE with win32com, the extension library Microsoft provides:
win32com gives you a document object much like the one in JavaScript, but it appears to be read-only (I never found proper documentation for it).
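A minimal sketch of that pattern, assuming pywin32 is installed and IE is available (the URL is just a placeholder):

import win32com.client
import time

ie = win32com.client.Dispatch("InternetExplorer.Application")
ie.Visible = True
ie.Navigate("https://example.com")          # placeholder URL
while ie.ReadyState != 4:                   # wait for the page to finish loading
    time.sleep(1)
# ie.Document now behaves much like the document object in JavaScript
print(ie.Document.title)
print(ie.Document.images.length)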
The second is Selenium's webdriver:
Selenium supports Chrome, IE, Firefox and other browsers. Every driver exposes execute_script and find_element_by_xx methods, which make it easy to run JavaScript in the page (including modifying elements) and to read elements out of the HTML. The drawback is that Selenium, at the time this was written, only supported Python 2.6 and 2.7.
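A minimal sketch of those two calls, assuming an older Selenium release where the find_element_by_* helpers still exist and chromedriver is on the PATH (the URL is a placeholder):

from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://example.com")            # placeholder URL
# execute_script runs JavaScript inside the page
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# find_elements_by_tag_name reads elements out of the HTML
for img in driver.find_elements_by_tag_name('img'):
    print(img.get_attribute('src'))
driver.quit()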
The third is to parse the page with Python's built-in HTMLParser:
With HTMLParser you write your own class that inherits from the base class and overrides the methods that handle the parsed elements.
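A minimal sketch of that subclassing pattern; this toy version only collects the src attributes instead of downloading them:

from html.parser import HTMLParser

class ImgCollector(HTMLParser):
    def __init__(self):
        super().__init__()
        self.srcs = []

    def handle_starttag(self, tag, attrs):
        # called once for every opening tag the parser encounters
        if tag == 'img':
            for name, value in attrs:
                if name == 'src':
                    self.srcs.append(value)

parser = ImgCollector()
parser.feed('<html><body><img src="a.jpg"><img src="b.png"></body></html>')
print(parser.srcs)          # ['a.jpg', 'b.png']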
1. win32com
# Scroll the page to the bottom (at most 20000 pixels)
# Simulate the right-arrow key to step through multiple images
import sys
import win32com.client, win32api
import urllib.request
import time
import os

def main():
    # Read the target URL from the command line
    url = sys.argv[1]
    # Drive IE through COM automation
    ie = win32com.client.Dispatch("InternetExplorer.Application")
    ie.Navigate(url)
    ie.Visible = True
    last_url = ''
    dir_name = ''
    while last_url != url:
        print('\nThe URL is:', url, '\n')
        # Wait until the browser and the document have finished loading
        while ie.ReadyState != 4:
            time.sleep(1)
        while ie.Document.readyState != "complete":
            time.sleep(1)
        # Scroll the page down step by step
        win = ie.Document.parentWindow
        lastY = -1
        for i in range(40):
            win.scrollTo(0, 500 * i)
            nowY = win.pageYOffset
            if nowY == lastY:
                break
            lastY = nowY
            time.sleep(0.4)
        print('Document load state:', ie.Document.readyState)
        doc = ie.Document
        # Create the output directory on the first pass
        if dir_name == '':
            root_dir = 'E:\\img'
            dir_name = root_dir + '\\' + doc.title
            dir_name = dir_name.replace('|', '-')
            if not os.path.exists(root_dir):
                os.mkdir(root_dir)
            if not os.path.exists(dir_name):
                os.mkdir(dir_name)
        all_image = doc.images
        print('Found', all_image.length, 'images')
        count = 0
        for img in all_image:
            if img.id == 'b_img':
                count = count + 1
                print(count, img.src)
                time.sleep(1)
                img_file = urllib.request.urlopen(img.src)
                byte = img_file.read()
                print(count, 'download complete!', '-' * 10, 'size:', '{:.3}'.format(byte.__len__() / 1024), 'KB')
                # Skip tiny files (icons, placeholders)
                if byte.__len__() > 7000:
                    file_name = img.src.replace('/', '_')
                    file_name = file_name.replace(':', '_')
                    end = file_name.__len__()
                    if file_name.rfind('!') != -1:
                        end = file_name.rfind('!')
                    if file_name.rfind('?') != -1:
                        end = file_name.rfind('?')
                    file_name = file_name[:end]
                    write_file = open(dir_name + '\\' + file_name, 'wb')
                    write_file.write(byte)
                    write_file.close()
                    print(count, file_name, 'complete!')
        # Next image: 39 is the virtual-key code of the right-arrow key
        last_url = url
        win32api.keybd_event(39, 0)
        time.sleep(1)
        url = ie.Document.url
        print(last_url, url)
    # ie.Quit()

if __name__ == '__main__':
    main()
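To run it, save the script and pass the page URL on the command line; the invocation below is only an example (the script name and URL are placeholders):

python ie_img_download.py "https://www.bing.com/images/search?q=example"

Note that this version saves only images whose id is b_img and advances by sending the right-arrow key until the URL stops changing, so it is tailored to an image-viewer page that responds to that key; on other pages you would drop the id check.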
2. selenium
# -*- coding: cp936 -*-
import sys
import urllib
import time
import os
from selenium import webdriver

def main():
    # Read the target URL from the command line
    url = sys.argv[1]
    # Drive Chrome through the webdriver
    driver = webdriver.Chrome()
    driver.get(url)
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    # Create the output directory
    dir_name = driver.find_element_by_tag_name('title').text
    print dir_name
    root_dir = 'E:\\img'
    dir_name = root_dir + '\\' + dir_name
    dir_name = dir_name.replace('|', '-')
    if not os.path.exists(root_dir):
        os.mkdir(root_dir)
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    images = driver.find_elements_by_tag_name('img')
    count = 0
    for image in images:
        count = count + 1
        image_url = image.get_attribute('src')
        img_file = urllib.urlopen(image_url)
        byte = img_file.read()
        print count, 'download complete!', '-' * 10, 'size:', byte.__len__() / 1024, 'KB'
        # Skip tiny files (icons, placeholders)
        if byte.__len__() > 7000:
            file_name = image_url.replace('/', '_')
            file_name = file_name.replace(':', '_')
            end = file_name.__len__()
            if file_name.rfind('!') != -1:
                end = file_name.rfind('!')
            if file_name.rfind('?') != -1:
                end = file_name.rfind('?')
            file_name = file_name[:end]
            write_file = open(dir_name + '\\' + file_name, 'wb')
            write_file.write(byte)
            write_file.close()
            print count, file_name, 'complete!'
    driver.quit()

if __name__ == '__main__':
    main()
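The script above is Python 2 and uses the old find_element_by_* helpers. As a side note (not part of the original post), current Selenium releases run on Python 3, and from Selenium 4 on those helpers are gone; the rough equivalents, assuming Selenium 4.x, look like this:

from selenium import webdriver
from selenium.webdriver.common.by import By
import urllib.request

driver = webdriver.Chrome()
driver.get("https://example.com")                      # placeholder URL
dir_name = driver.title                                # instead of find_element_by_tag_name('title').text
images = driver.find_elements(By.TAG_NAME, 'img')      # instead of find_elements_by_tag_name('img')
for image in images:
    src = image.get_attribute('src')
    if src:
        data = urllib.request.urlopen(src).read()      # urllib.urlopen in Python 2
driver.quit()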
3. HTMLParser
# import modules used here -- sys is a very standard one
import sys
import urllib.request
# Gather our code in a main() function
from html.parser import HTMLParser

class MyHTMLParser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        if tag == 'img':
            for attr in attrs:
                if attr[0] == 'src':
                    img_file = urllib.request.urlopen(attr[1])
                    byte = img_file.read()
                    # Save the file only if it is larger than 1000 bytes
                    if byte.__len__() > 1000:
                        file_name = attr[1].replace('/', '_')
                        file_name = file_name.replace(':', '_')
                        end = file_name.__len__()
                        if file_name.rfind('!') != -1:
                            end = file_name.rfind('!')
                        if file_name.rfind('?') != -1:
                            end = file_name.rfind('?')
                        file_name = file_name[:end]
                        # print(file_name)
                        write_file = open('E:\\img\\' + file_name, 'wb')
                        write_file.write(byte)
                        write_file.close()

def main():
    # Read the target URL from the command line
    url = sys.argv[1]
    print('\nThe URL is:', url, '\n')
    # Fetch the resource the URL points to
    html_file = urllib.request.urlopen(url)
    byte_content = html_file.read()
    # Save the HTML page itself
    url_file = open('E:\\img\\html\\result.htm', 'wb')
    url_file.write(byte_content)
    url_file.close()
    # Convert the bytes to a string
    s = str(byte_content, encoding="utf-8")
    # print(s)
    parser = MyHTMLParser()
    parser.feed(s)

# Standard boilerplate to call the main() function to begin
# the program.
if __name__ == '__main__':
    main()
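All three scripts build the output file name the same way: '/' and ':' in the image URL are replaced with '_', and everything after the last '!' or '?' is cut off. For example, a hypothetical src of http://example.com/pics/cat.jpg?w=300 would be saved as http___example.com_pics_cat.jpg.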