# (Forum post header: posted 2017-08-01 09:20:37 by the thread starter)
import urllib.request
import os
import re
import random
def url_open(url):
    """Fetch *url* and return the raw response body as bytes.

    Sends a browser-like User-Agent and a Referer header (the target site
    serves anti-hotlink placeholder images to requests without a matching
    Referer) through a randomly chosen HTTP proxy.

    NOTE(review): the proxy list is hard-coded and likely stale — verify
    these proxies are still reachable.
    """
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36')
    req.add_header('Referer', 'https://www.nvshens.com/gallery/')
    proxies = ['61.143.228.162:3128', '180.168.179.193:8080', '119.29.7.113:80', '123.7.38.31:9999']
    proxy = random.choice(proxies)
    proxy_support = urllib.request.ProxyHandler({'http': proxy})
    opener = urllib.request.build_opener(proxy_support)
    # BUG FIX: the original called urlopen(url) with the bare URL string, so
    # the Request object -- and with it the User-Agent/Referer headers -- was
    # never actually sent.  Open the prepared Request through the opener
    # directly instead of installing a global opener.
    with opener.open(req) as response:
        return response.read()
def get_page(url):
    """Return (as a string) the numeric id of the newest gallery linked
    from the front page at *url*.

    Slices out the digits that follow the first 'galleryli_link' anchor:
    25 characters past the marker lands at the start of the id inside the
    href, and the id runs up to the next '/'.
    """
    page_html = url_open(url).decode('utf-8')
    id_start = page_html.find('galleryli_link') + 25
    id_end = page_html.find('/', id_start)
    return page_html[id_start:id_end]
def find_imgs(url):
    """Scrape every .jpg image address from the gallery page at *url*.

    Scans for 'img src=' markers and slices out the address up to and
    including the '.jpg' extension (searching at most 255 characters ahead
    to avoid runaway matches).  Also prints the next-page link found on the
    page, as a debugging aid for pagination.

    Returns:
        list[str]: the image addresses found (may be empty).
    """
    html = url_open(url).decode('utf-8')
    img_addrs = []
    a = html.find('img src=')
    while a != -1:
        b = html.find('.jpg', a, a + 255)
        if b != -1:
            # a+9 skips past 'img src="'; b+4 keeps the '.jpg' suffix.
            img_addrs.append(html[a + 9:b + 4])
        else:
            b = a + 9
        a = html.find('img src=', b)
    # Next-page link (the anchor text is "next page"); printed for debugging.
    p = re.findall(r'<a class="a1" href="(/g/.*?\.html)">下一页', html)
    print(p)
    # BUG FIX: the original left this return commented out, so the function
    # always returned None even though download_mm() passes the result on to
    # save_imgs().
    return img_addrs
def save_imgs(folders, img_addrs):
    """Download each image address in *img_addrs* into the current directory.

    The file name is the last path component of the address.  Downloads go
    through url_open() so the request carries the Referer header the site
    checks for hotlink protection.

    Args:
        folders: unused here (downloads land in the current working
            directory, which download_mm() has already chdir'ed into);
            kept for signature compatibility with existing callers.
        img_addrs: iterable of image URLs scraped by find_imgs().
    """
    # BUG FIX: the original body was entirely commented out, leaving a no-op
    # stub even though download_mm() calls this function.
    # NOTE(review): scraped addresses may be protocol-relative ('//host/...')
    # -- confirm against the live page markup and prepend a scheme if needed.
    for each in img_addrs:
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            f.write(url_open(each))
def download_mm(folder='picture', pages=1):
    """Download the *pages* most recent galleries into *folder*.

    Creates *folder* (tolerating an existing one), chdirs into it, reads
    the newest gallery id from the site front page, and walks backwards
    one gallery id per page.

    Args:
        folder: target directory name (created relative to the CWD).
        pages: how many consecutive galleries to fetch, newest first.
    """
    # BUG FIX: os.mkdir raised FileExistsError on every re-run.
    os.makedirs(folder, exist_ok=True)
    os.chdir(folder)
    url = 'https://www.nvshens.com'
    newest = int(get_page(url))
    for offset in range(pages):
        # BUG FIX: the original did `page_num -= each`, subtracting a growing
        # cumulative offset (0, 1, 3, 6, ...) instead of stepping back one
        # gallery id per iteration.
        page_url = url + '/g/' + str(newest - offset) + '/'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)
# Script entry point: download the newest gallery with default settings.
if __name__=='__main__':
    download_mm()
# (Forum post footer, original note from the poster:)
# 按着小甲鱼的一路敲敲敲，现在想试试换个网站试试，但是不行了。
# 目标网址: https://www.nvshens.com/g/23492/