|
5鱼币
问题,知道字典中字典的键,怎么取它的值,不知道外层字典键的情况下
- import requests
- import bs4,pickle
- def open_url(url):
- headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'}
- res=requests.get(url,headers=headers)
- soup=bs4.BeautifulSoup(res.text,'lxml')
- return soup
- def getxinxi(url):
- soup=open_url(url)
- #获取招聘信息
- tager1=soup.find_all('li',class_='job_item clearfix')
- for each in tager1:
- xinxi=""
- for each2 in each.div.div.a.strings:
- xinxi+=each2.strip()
- print('%-25s %10s %25s'%(xinxi.strip(),each.div.p.text,each.div.next_sibling.div.a.text.strip()))
- def getcity(dict1,city):#取城市在字典中的值
- dict2=dict1.items()
- for each in dict2:
- for each2 in each:
- if type(each2)==dict():
- getcity(each2)
- else:
- if city in each2:
- return each2
- def main():
- city=input('请输入要查询的城市:')
- list1=open('my_citylist.pkl','rb')
- citylist=pickle.load(list1)
- diqu=getcity(citylist,city)[city].split('|')[0]#获取地区代码
- #地区是字典,字典里面有字典,怎么查询,我这个有问题,有帮助
- url='http://'+diqu+'.58.com'
- soup=open_url(url)
- zhiwei=input('请输入你要查询的职位:')
- tager=soup.find_all('div',class_='col4')
- tager2=tager[1].find_all('a')
- for each in tager2:
- if each.string==zhiwei:
- zhiwei=url+each['href']
- soup=open_url(zhiwei)
- tager=soup.find('i',class_='total_page')
- yeshu=int(tager.string)
- cont=1
- while 1:
- print('一共%d页当前第%d页'%(yeshu,cont))
- url='http://hshi.58.com/pugong/pn'+str(cont)+'/'
- getxinxi(url)
- page=input('按q上一页,按p下一页')
- if page=='p':
- if cont==yeshu:
- print('已经是最后一页,没有下一页了')
- else:
- cont+=1
- elif page=='q':
- if cont==1:
- print('已经是第一页,没有上一页了')
- else:
- cont-=1
- if __name__=="__main__":
- main()
复制代码
程序是OK的,就那个问题没处理掉
|
|