Commit bb63188

committed: works on newatcoder
1 parent 4c833fb commit bb63188

File tree

1 file changed: +132 −0 lines


core/AtCoder.py

Lines changed: 132 additions & 0 deletions
@@ -0,0 +1,132 @@
from utils import normalized
import re
import urllib.parse  # needed for urlencode() below
import urllib.request
import http.cookiejar
from collections import OrderedDict

from bs4 import BeautifulSoup


class LoginError(Exception):
    pass


class SampleParseError(Exception):
    pass


class AtCoder:

    def __init__(self, username, password):
        # Log in once; the CookieJar keeps the session cookie for all
        # later requests made through self.opener.
        self.cj = http.cookiejar.CookieJar()
        self.opener = urllib.request.build_opener(
            urllib.request.HTTPCookieProcessor(self.cj))
        postdata = {
            'name': username,
            'password': password
        }
        encoded_postdata = urllib.parse.urlencode(postdata).encode('utf-8')
        req = self.opener.open(
            "https://arc001.contest.atcoder.jp/login", encoded_postdata)
        html = req.read().decode('utf-8')
        # On failure the login form is served again; the "forgot your
        # password?" link (パスワードを忘れた方はこちら) only appears there.
        if "パスワードを忘れた方はこちら" in html:
            raise LoginError

    def get_problem_list(self, contestid):
        '''
        Input
            contestid#str : the *** part of http://***.contest.atcoder.jp/
        Output
            #OrderedDict<str:str> : maps problem letter ("A","B","C",..) to its URL
        '''
        req = self.opener.open(
            "http://%s.contest.atcoder.jp/assignments" % contestid)
        soup = BeautifulSoup(req, "html.parser")

        res = OrderedDict()
        # Each task row carries two .linkwrapper anchors (letter and title),
        # so take every other one to list each task exactly once.
        for tag in soup.select('.linkwrapper')[0::2]:
            res[tag.text] = ("http://%s.contest.atcoder.jp" %
                             contestid) + tag.get("href")
        return res
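
    # Illustrative output shape (hypothetical URLs):
    #   get_problem_list("arc001")
    #   -> OrderedDict([('A', 'http://arc001.contest.atcoder.jp/tasks/arc001_1'), ...])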

    def get_all(self, url):
        '''
        Input
            url#str : URL of a problem page
        Output
            #(str,list((str,str))) : pair of (input format, [(sample input 1, output 1), (sample input 2, output 2), ...]) scraped from the given page
        '''
        req = self.opener.open(url)
        soup = BeautifulSoup(req, "html.parser")

        # Remove the English-language spans so only one language remains.
        for e in soup.find_all("span", {"class": "lang-en"}):
            e.extract()

        # Pages following the AtCoder Format wrap each section in a .part
        # tag, which is easier to parse; drop the first .part (the problem
        # statement) before collecting the <pre> blocks.
        soup_tmp = soup.select('.part')
        if soup_tmp != []:
            soup_tmp[0].extract()

        pretags = soup.select('pre')

        # pretags[0] is the input-format block; the rest alternate between
        # sample input and sample output.
        sample_tags = pretags[1:]
        input_tags = sample_tags[0::2]
        output_tags = sample_tags[1::2]
        if len(input_tags) != len(output_tags):
            raise SampleParseError
        res = [(normalized(in_tag.text), normalized(out_tag.text))
               for in_tag, out_tag in zip(input_tags, output_tags)]
        input_format = normalized(pretags[0].text)

        return (input_format, res)
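
    # Illustrative return value for a two-sample problem (made-up data):
    #   ("N\nA_1 ... A_N", [("3\n1 2 3", "6"), ("1\n5", "5")])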

    def get_samples(self, url):
        '''
        Input
            url#str : URL of a problem page
        Output
            #list((str,str)) : list of [(sample input 1, output 1), (sample input 2, output 2), ...]
        Notes
            Wrapper around get_all()
        '''
        return self.get_all(url)[1]
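
    # e.g. get_samples(url) -> [("3\n1 2 3", "6"), ...] (made-up data);
    # call get_all(url) instead when the input-format block is also needed.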

    def get_all_contestids(self):
        # Scrape every *.contest.atcoder.jp link from the top page and
        # return the sorted list of contest ids.
        req = self.opener.open("http://atcoder.jp/")
        soup = BeautifulSoup(req, "html.parser")
        text = str(soup)
        url_re = re.compile(
            r'http://([A-Za-z0-9\'~+\-_]+)\.contest\.atcoder\.jp/')
        res = url_re.findall(text)
        res = sorted(res)
        return res

    def submit_source_code(self, contestid, pid, lang, source):
        url = "https://%s.contest.atcoder.jp/submit" % contestid
        req = self.opener.open(url)
        soup = BeautifulSoup(req, "html.parser")
        # The submit form carries a hidden CSRF token that has to be sent
        # back in the "__session" field.
        session_id = soup.find("input", attrs={"type": "hidden"}).get("value")

        # Resolve the form's field name and value for the chosen task ...
        task_select_area = soup.find(
            'select', attrs={"id": "submit-task-selector"})
        task_field_name = task_select_area.get("name")
        task_number = task_select_area.find(
            "option", text=re.compile('%s -' % pid)).get("value")

        # ... and for the chosen language of that task.
        language_select_area = soup.find(
            'select', attrs={"id": "submit-language-selector-%s" % task_number})
        language_field_name = language_select_area.get("name")
        language_number = language_select_area.find(
            "option", text=re.compile('%s' % lang)).get("value")
        postdata = {
            "__session": session_id,
            task_field_name: task_number,
            language_field_name: language_number,
            "source_code": source
        }
        encoded_postdata = urllib.parse.urlencode(postdata).encode('utf-8')
        req = self.opener.open(url, encoded_postdata)
        req.read().decode('utf-8')  # drain the response
        return True
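
A minimal usage sketch (assumptions: the module is importable as core.AtCoder; "YOUR_NAME", "YOUR_PASS", "arc001", and "Python3" are placeholders, and the lang string must match part of an option label on the submit form):

    from core.AtCoder import AtCoder

    atcoder = AtCoder("YOUR_NAME", "YOUR_PASS")  # raises LoginError on bad credentials

    # Problem letters -> task URLs for one contest.
    problems = atcoder.get_problem_list("arc001")

    # Parsed sample pairs for problem A.
    for sample_in, sample_out in atcoder.get_samples(problems["A"]):
        print(sample_in, "->", sample_out)

    # Submit a source string; pid is the problem letter, lang a label
    # such as "Python3" (placeholder).
    atcoder.submit_source_code("arc001", "A", "Python3", "print('hello')")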
