from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from lxml import etree
import requests
import re
import ast
import threading
import os
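
# Overall flow: fetch the fund ranking list, pull out the 6-digit fund codes,
# build each fund's holdings-page URL, scrape those pages with Selenium in one
# or more threads, then aggregate the saved results into CSV summaries.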


def crawler_front_page():
    # Fetch the raw open-fund ranking payload from the rankhandler endpoint.
    # The Cookie header is session-specific; replace it with your own if the
    # endpoint rejects the request.
    headers = {
        'Referer': 'http://fund.eastmoney.com/data/fundranking.html',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36',
        'Cookie': 'qgqp_b_id=c7106e55b1ba3768660d7e8411ea4759; EMFUND1=null; EMFUND2=null; EMFUND3=null; EMFUND4=null; EMFUND5=null; EMFUND6=null; EMFUND7=null; st_si=59746599501191; st_asi=delete; ASP.NET_SessionId=1gwfj4cqojpf5ktiylgmy43o; EMFUND0=null; EMFUND8=11-17%2019%3A05%3A47@%23%24%u94F6%u6CB3%u6587%u4F53%u5A31%u4E50%u6DF7%u5408@%23%24005585; EMFUND9=11-18 11:03:50@#$%u524D%u6D77%u5F00%u6E90%u65B0%u7ECF%u6D4E%u6DF7%u5408A@%23%24000689; st_pvi=17535190085817; st_sp=2021-09-27%2017%3A30%3A48; st_inirUrl=https%3A%2F%2Fnews.google.com%2F; st_sn=9; st_psi=20211118112357985-112200312936-9119693474'
    }

    response = requests.get('http://fund.eastmoney.com/data/rankhandler.aspx?op=ph&dt=kf&ft=all&rs=&gs=0&sc=6yzf&st=desc&sd=2020-11-18&ed=2021-11-18&qdii=&tabSubtype=,,,,,&pi=1&pn=10000&dx=1&v=0.6791917206798068', headers=headers)

    response.encoding = 'utf-8'
    return response.text
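
# For reference, the rankhandler endpoint returns JS-like text rather than
# JSON, roughly of the form (approximate, shown only for illustration):
#   var rankData = {datas:["000689,前海开源新经济混合A,...", ...], ...};
# parse_front_page() below only needs the leading 6-digit fund codes.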

def parse_front_page(html):
    # Every record in the ranking payload starts with a 6-digit fund code;
    # grab each 6-digit run and deduplicate while preserving order, since the
    # crude pattern can match the same code (or other digit runs) repeatedly.
    return list(dict.fromkeys(re.findall(r"\d{6}", html)))

def get_stock_url(codes):
    # Build the fund-holdings page URL for each fund code.
    return ["http://fundf10.eastmoney.com/ccmx_{}.html".format(code) for code in codes]

def is_element(driver, by, element_name):
    # Return True if the element shows up within 2 seconds, False otherwise.
    try:
        WebDriverWait(driver, 2).until(EC.presence_of_element_located((by, element_name)))
        return True
    except TimeoutException:
        return False

def crawler_stock_page(c, stock_url_list):
    # `c` is a "start,end" pair naming the slice of URLs this thread handles.
    count = c.split(",")
    driver = webdriver.Chrome(service=Service(r'D:\personal\gitpython\chromedriver.exe'))
    os.makedirs('D:/fund', exist_ok=True)  # make sure the output directory exists
    file = "D:/fund/fund_{}.txt".format(count[0])

    for url in stock_url_list[int(count[0]):int(count[1])]:
        stock_result = []
        title = "没有数据"  # sentinel meaning "no data"

        try:
            driver.get(url)

            # The holdings table is rendered by JavaScript; wait for it.
            if is_element(driver, By.CLASS_NAME, "tol"):
                wait = WebDriverWait(driver, 3)
                wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'tol')))

                # Expand the full holdings list if a "more" link is present.
                if is_element(driver, By.XPATH, '//*[@id="cctable"]/div[1]/div/div[3]/font/a'):
                    driver.find_element(By.XPATH, '//*[@id="cctable"]/div[1]/div/div[3]/font/a').click()
                    wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'tol')))

                # Parse the rendered page: stock names from column 3 of the
                # first holdings table, plus the fund title.
                stock_xpath = etree.HTML(driver.page_source)
                stock_result = stock_xpath.xpath("//div[@id='cctable']//div[@class='box'][1]//td[3]//text()")
                title = stock_xpath.xpath('//*[@id="cctable"]/div[1]/div/h4/label[1]/a')[0].text

            with open(file, 'a+', encoding='utf-8') as f:
                f.write("{'name': '" + title + "', 'stock': ['" + '\',\''.join(stock_result) + "']}\n")
        except Exception:
            # Skip pages that fail to load or parse and move on.
            continue

    driver.quit()

def thread_test(*args):
    # One worker thread per "start,end" slice; add more slices (for example
    # "3,6", "6,9") to crawl several ranges in parallel, each with its own
    # browser instance and output file.
    threads = []
    for crawler_count in ["0,3"]:
        t = threading.Thread(target=crawler_stock_page, args=(crawler_count, args[0]))
        threads.append(t)

    for t in threads:
        t.start()
    for t in threads:
        t.join()

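# Each slice writes its own D:/fund/fund_<start>.txt, one dict literal per
# line, e.g. (illustrative): {'name': '前海开源新经济混合A', 'stock': [...]}.
# parse_data() merges those files once all crawl threads have finished.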
def parse_data():
    result = {}
    stock = {}

    files = os.listdir('D:/fund/')

    for file in files:
        with open('D:/fund/' + file, encoding='utf-8') as f:
            for line in f:
                # Each line is a Python dict literal written by crawler_stock_page;
                # literal_eval is safer than eval for text read back from disk.
                data = ast.literal_eval(line.strip())
                key = data['name']
                # Skip funds with no data and duplicates across result files.
                if key == '没有数据' or key in result:
                    continue

                result[key] = data['stock']

                for value in data['stock']:
                    stock[value] = stock.get(value, 0) + 1

    os.makedirs('D:/fund_result', exist_ok=True)

    # stock.csv: how many of the crawled funds hold each stock.
    with open('D:/fund_result/stock.csv', 'a+', encoding='utf-8') as f:
        for key in stock:
            f.write(key + "," + str(stock[key]) + "\n")

    # fund.csv: each fund's holdings, annotated with that popularity count.
    with open('D:/fund_result/fund.csv', 'a+', encoding='utf-8') as f:
        for key in result:
            values = ['{}({})'.format(value, stock[value]) for value in result[key]]
            f.write(key + ',' + ','.join(values) + '\n')


if __name__ == '__main__':
    html = crawler_front_page()
    codes = parse_front_page(html)
    url_list = get_stock_url(codes)
    thread_test(url_list)
    # parse_data()