# encoding=utf-8
from __future__ import print_function

import os
import re
import time

import requests
from bs4 import BeautifulSoup
class Stone():
    """Scrape a Hearthstone-China article page and download its images."""

    # Fetch the image URLs from the article page.
    def get_url(self, url='http://hs.blizzard.cn/article/16/11477'):
        """Return a list of image URLs found on *url*.

        The article's body images all carry the exact inline style used as
        the filter below; other <img> tags on the page are ignored.

        :param url: article page to scrape (defaults to the original article)
        :return: list of ``src`` attribute strings
        """
        response = requests.get(url, timeout=30)
        soup = BeautifulSoup(response.content, "html.parser")
        # Only the article-body images carry this exact inline style.
        imgs = soup.find_all('img', style="border: none; box-shadow: none;")
        return [img["src"] for img in imgs]

    # Download every image URL into the destination folder.
    def download(self, pictures, dest='stone'):
        """Download each URL in *pictures* into *dest* as 1.png, 2.png, ...

        Bug fixes vs. the original: the target directory is created if it
        does not exist (the original crashed unless the user had pre-created
        it), and the path is built with ``os.path.join`` instead of a
        Windows-only ``"\\stone\\"`` string concatenation.

        :param pictures: iterable of image URLs
        :param dest: folder name, relative to the current working directory
        """
        folder = os.path.join(os.getcwd(), dest)
        if not os.path.isdir(folder):
            os.makedirs(folder)
        # Files are numbered from 1 in download order.
        for index, pic_url in enumerate(pictures, start=1):
            target = os.path.join(folder, "%d.png" % index)
            with open(target, 'wb') as f:
                f.write(requests.get(pic_url, timeout=30).content)
# Script entry: scrape the image URLs, then download them, reporting
# start/end timestamps.  print() calls (instead of Python 2 print
# statements) keep this runnable on both Python 2 and 3.
stone = Stone()
pictures = stone.get_url()
print(u"开始时间:" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())))
stone.download(pictures)
print(u"下载完成")
print(u"结束时间:" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())))
# Source: http://www.bubuko.com/infodetail-2440605.html