admin管理员组文章数量:1619278
网络图片的爬取与存储
- 1.获取爬取图片的URL
- 2.代码
- 3.批量下载lol皮肤图片完整代码
1.获取爬取图片的URL
以艾希的源计划联合为例
这个图片的URL:https://game.gtimg.cn/images/lol/act/img/skin/big22008.jpg
2.代码
import requests
import os
def getImage(url):
    """Download the image at *url* into ``<name>/<name>.jpg``.

    Prompts the user for a folder name, creates the folder when it is
    missing, and skips the download when the target file already exists.
    Uses the module-level ``headers`` dict for the HTTP request.
    """
    name = input('请输入文件夹名:')
    path = os.path.join(name, name + '.jpg')
    try:
        # exist_ok makes creation idempotent; no racy exists() pre-check.
        os.makedirs(name, exist_ok=True)
        if not os.path.exists(path):
            r = requests.get(url, headers=headers)
            print(r.status_code)
            r.raise_for_status()  # turn 4xx/5xx into an exception
            with open(path, 'wb') as f:
                f.write(r.content)  # 'with' closes the file; no close() needed
            print('下载成功')
    except (requests.RequestException, OSError):
        # Narrowed from a bare 'except:' so Ctrl-C / SystemExit still work.
        print('下载失败')
if __name__ == "__main__":
    # A browser-like User-Agent; some CDNs reject requests without one.
    # NOTE(review): the original text had 'user - agent' with spaces and a
    # space-mangled UA string — an invalid header name from copy/paste.
    headers = {
        'user-agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/81.0.4044.113 Safari/537.36'),
    }
    # Skin image naming: big<championId><skinIndex>.jpg (22008 = Ashe, skin 8).
    # Restored the '.cn' TLD lost in the page extraction.
    url = "https://game.gtimg.cn/images/lol/act/img/skin/big22008.jpg"
    getImage(url)
3.批量下载lol皮肤图片完整代码
代码更新于2020.04.01
import requests
import os
import json
# Shared HTTP headers for every request in this script: a browser-like
# User-Agent so the Tencent CDN does not reject us.  The original page had
# the key garbled to 'user - agent' (spaces), which is not a valid header.
headers = {
    'user-agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/81.0.4044.113 Safari/537.36'),
}
def get_hero_id():
    """Fetch the champion list and map Chinese title -> English alias.

    Downloads the official ``hero_list.js`` endpoint, builds a dict such
    as ``{'寒冰射手': 'Ashe'}``, prints the available titles as a menu,
    and returns the dict.
    """
    # '.cn' restored — the extracted page dropped the TLD.
    hero_info_url = 'https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js'
    # NOTE(review): original decodes as GBK — verify against the live
    # endpoint's actual charset before changing.
    resp = requests.get(hero_info_url, headers=headers).content.decode('GBK')
    data = json.loads(resp)
    # title (Chinese) -> alias (English) in a single pass, replacing the
    # two parallel lists + zip of the original.
    infodict = {info['title']: info['alias'] for info in data['hero']}
    print('*' * 30)
    print('欢迎使用小鼠标LOL皮肤下载器')
    print('*' * 30)
    print(infodict.keys())
    return infodict
# print(infodict)
# decision = input('输入您的选择:')
# print(infodict[decision])
def get_img_id(infodict):
    """Ask the user for a champion (Chinese title) and fetch its skin list.

    Parameters
    ----------
    infodict : dict
        Mapping of Chinese title -> English alias from :func:`get_hero_id`.

    Returns
    -------
    tuple
        ``(datadict, decision)`` where ``datadict`` maps skin id -> skin
        name and ``decision`` is the title the user typed (later used as
        the download folder name).  Raises ``KeyError`` on an unknown name.
    """
    decision = input('请输入您要下载皮肤的英雄名:')
    hero = infodict[decision]  # English alias, e.g. 'Ashe'
    print(hero)
    # '.com' restored — the extracted page dropped the TLD.
    hero_js_url = 'https://lol.qq.com/biz/hero/{}.js'.format(hero)
    resp = requests.get(hero_js_url, headers=headers).content.decode('GBK')
    # The endpoint returns JSONP-style JS:
    #   if(!LOLherojs)var LOLherojs={champion:{}};LOLherojs.champion.<hero>={...}
    # The original used str.strip(long_string), which removes *characters
    # from a set* (not a prefix) and can eat parts of the JSON payload.
    # Split off everything up to the assignment instead.
    prefix = 'LOLherojs.champion.{}='.format(hero)
    payload = resp.split(prefix, 1)[1].rstrip().rstrip(';')
    data = json.loads(payload)
    # skin id -> skin name ('skin' instead of 'id', which shadows a builtin)
    datadict = {skin['id']: skin['name'] for skin in data['data']['skins']}
    return datadict, decision
#get_img_id()
def save_img(id, name, decision):
    """Download one skin image into ``<decision>/<name>.jpg``.

    Parameters
    ----------
    id : int
        Numeric skin id used in the CDN filename (``big<id>.jpg``).
    name : str
        Skin name, used as the file name.
    decision : str
        Champion's Chinese title, used as the folder name.
    """
    # '.cn' restored — the extracted page dropped the TLD.
    img_url = 'https://game.gtimg.cn/images/lol/act/img/skin/big{}.jpg'.format(id)
    resp = requests.get(img_url, headers=headers)
    try:
        # BUG FIX: the original only wrote the file in the 'else' branch,
        # so on the run that *created* the directory every skin was
        # silently skipped.  Create-if-missing, then always write.
        os.makedirs(decision, exist_ok=True)
        # NOTE(review): skin names containing '/' (e.g. K/DA skins) would
        # break this path — confirm whether sanitizing is desired.
        with open(os.path.join(decision, name + '.jpg'), 'wb') as f:
            f.write(resp.content)  # 'with' closes the file; no close() needed
        print(name + '下载成功')
    except OSError:
        # Narrowed from a bare 'except:' to filesystem errors.
        print('下载失败')
if __name__ == '__main__':
    # Interactive entry point: show the champion menu, let the user pick
    # one, then fetch every skin for that champion.
    hero_map = get_hero_id()
    skins, folder = get_img_id(infodict=hero_map)
    for skin_id, skin_name in skins.items():
        save_img(id=skin_id, name=skin_name, decision=folder)
版权声明:本文标题:Requests库应用实例4:网络图片的爬取与存储(以爬取英雄联盟皮肤图片为例) 内容由热心网友自发贡献,该文观点仅代表作者本人, 转载请联系作者并注明出处:https://m.elefans.com/xitong/1728795576a1174132.html, 本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌抄袭侵权/违法违规的内容,一经查实,本站将立刻删除。
发表评论