from bs4 import BeautifulSoup
import requests
import re
import random
import os

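# Downloads audiobook chapters from tingchina.com: scrape the book's chapter list,
# pull each chapter's mp3 path out of the page's inline script, fetch the access key
# from the key endpoint, and save the audio files into a directory named after the book.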
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
}

def get_detail_urls(url):
    # Fetch the book's chapter listing page (GBK-encoded) and parse it
    url_list = []
    response = requests.get(url, headers=headers)
    response.encoding = 'gbk'
    soup = BeautifulSoup(response.text, 'lxml')
    # The book title sits in the first .red12 element; use it as the download directory
    name = soup.select('.red12')[0].strong.text
    if not os.path.exists(name):
        os.makedirs(name)
    # Each chapter link lives under div.list; build absolute URLs for the detail pages
    div_list = soup.select('div.list a')
    for item in div_list:
        url_list.append({'name': item.string, 'url': 'https://www.tingchina.com/yousheng/{}'.format(item['href'])})
    return name, url_list

def get_mp3_path(url):
    # The chapter page embeds the mp3 path in its last <script> block as fileUrl= "...";
    response = requests.get(url, headers=headers)
    response.encoding = 'gbk'
    soup = BeautifulSoup(response.text, 'lxml')
    script_text = soup.select('script')[-1].string
    fileUrl_search = re.search('fileUrl= "(.*?)";', script_text, re.S)
    if fileUrl_search:
        return 'https://t3344.tingchina.com' + fileUrl_search.group(1)

def get_key(url):
    # Request the key endpoint with a random query string; the chapter page URL is
    # sent as the referer (the original overwrote the url parameter, leaving it unused)
    key_url = 'https://img.tingchina.com/play/h5_jsonp.asp?{}'.format(str(random.random()))
    headers['referer'] = url
    response = requests.get(key_url, headers=headers)
    matched = re.search('(key=.*?)";', response.text, re.S)
    if matched:
        temp = matched.group(1)
        # The usable token is the last 42 characters of the matched "key=..." string
        return temp[-42:]

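# Main flow: prompt for a book page URL, then for each chapter resolve the mp3
# address and its key, and download the audio into the book's directory.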
if __name__ == "__main__":
    url = input("Enter the URL of the book page from your browser: ")
    book_dir, url_list = get_detail_urls(url)

    for item in url_list:
        audio_url = get_mp3_path(item['url'])
        key = get_key(item['url'])
        audio_url = audio_url + '?key=' + key
        headers['referer'] = item['url']
        r = requests.get(audio_url, headers=headers, stream=True)
        # Write the response in chunks so large files are not held in memory
        with open(os.path.join(book_dir, item['name']), 'ab') as f:
            for chunk in r.iter_content(chunk_size=1024 * 64):
                f.write(chunk)