import argparse
import json
import re
import sys
import time
from html.parser import HTMLParser
import requests
# Matches kill permalinks such as "/kill/12345/" and captures the numeric kill ID.
KILL_HREF = re.compile(r"/kill/(\d+)/?")
# Matches a trailing "<digits>/<digits>/" at the end of the related-kills URL;
# presumably solar-system ID and date — used to build the default output filename.
SYSTEM_AND_DATE = re.compile(r"/(\d+)/(\d+)/$")
def get_hash(kill):
    """Return the zKillboard "zkb" hash for the given kill ID.

    Args:
        kill: kill ID (int or numeric string) to look up.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        ValueError: if the API does not return exactly one kill record.
    """
    response = requests.get(
        "https://zkillboard.com/api/killID/{}/".format(kill),
        timeout=30,  # don't hang forever on a stalled connection
    )
    response.raise_for_status()
    data = response.json()
    # The killID endpoint should return exactly one record; an empty list
    # (unknown ID) or multiple records both indicate something is wrong.
    # (The original only checked len > 1, so an empty response crashed with
    # an opaque IndexError below.)
    if len(data) != 1:
        raise ValueError(
            "expected exactly one kill record for ID {}, got {}".format(kill, len(data))
        )
    return data[0]["zkb"]["hash"]
class RelatedParser(HTMLParser):
    """Extract (team, kill-ID) pairs from a zKillboard related-kills page.

    Each <table id="killlist"> start tag begins a new team (numbered from 1);
    every <a href="/kill/<id>/"> link seen after the first such table is
    recorded against the current team.
    """

    # Kill permalink pattern, e.g. "/kill/12345/". Kept on the class so the
    # parser is self-contained.
    _KILL_HREF = re.compile(r"/kill/(\d+)/?")

    def __init__(self):
        super().__init__()
        self._team = 0        # current team number; 0 means "no table seen yet"
        self._kills = set()   # accumulated (team, kill_id) pairs

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if tag == "table" and attrs.get("id", "") == "killlist":
            # Each kill-list table starts the next team's section.
            self._team += 1
        if tag == "a" and self._team > 0:
            match = self._KILL_HREF.search(attrs.get("href", ""))
            if match:
                # set.add is idempotent, so no membership check is needed;
                # duplicate links collapse automatically.
                self._kills.add((self._team, match.group(1)))

    @property
    def kills(self):
        """Set of (team, kill_id) tuples collected so far."""
        return self._kills
def main():
    """Scrape a zKillboard related-kills page and dump kill records to JSON.

    Fetches the page given on the command line, collects every
    (team, kill ID) pair, resolves each kill's hash via the zKillboard API,
    and writes a JSON list of {"id", "hash", "team"} objects. The output
    filename is -o/--output if given, otherwise derived from the URL's
    trailing "<system>/<date>/" segment, otherwise "scraped.json".
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("url")
    parser.add_argument("-o", "--output")
    args = parser.parse_args()

    response = requests.get(args.url, timeout=30)  # don't hang forever
    response.raise_for_status()

    related = RelatedParser()
    related.feed(response.text)

    output = []
    for team, kill in related.kills:
        # zKillboard rate-limits aggressively; pause between API calls.
        time.sleep(1.05)
        output.append({"id": kill, "hash": get_hash(kill), "team": team})

    if args.output:
        filename = args.output
    else:
        # Derive "<system>_<date>.json" from the URL when possible.
        match = SYSTEM_AND_DATE.search(args.url)
        if match:
            filename = "{}_{}.json".format(*match.groups())
        else:
            # Fixed typo: previous default was "scrapped.json".
            filename = "scraped.json"

    with open(filename, "w") as file:
        json.dump(output, file)


if __name__ == "__main__":
    main()