chinese-holidays-calendar/crawler.py
2022-09-25 17:37:25 +00:00

113 lines
4.1 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

"""从国务院官网抓取放假信息"""
import os
import re
from datetime import datetime, timezone, timedelta
from typing import Iterator, Tuple
import requests
def main():
    """Regenerate unverified holiday data files and refresh the README timestamp.

    For every (year, link, holidays) tuple produced by data(), rewrite
    ./data/<year>.txt unless a human has already verified it, then stamp
    the "Calendar data updated" line in ./README.md with Beijing time.
    """
    comments: list[str] = [
        "// automatically generated by crawler.py",
        "// manually checked by DATA NOT VERIFIED",
    ]
    for year, link, holidays in data():
        print(year, link, holidays, sep='\n')
        path = f'./data/{year}.txt'
        if os.path.isfile(path):
            with open(path, encoding='utf-8') as fh:
                previous = fh.read()
            # A generated file that no longer carries the "NOT VERIFIED"
            # marker has been checked by a human — leave it untouched.
            if comments[0] in previous and comments[1] not in previous:
                continue
        # NOTE: '%-m'/'%-d' (no zero padding) is a glibc strftime extension.
        header = (
            f"{comments[0]} ({beijing_time().strftime('%-m/%-d/%Y')})\n"
            f"{comments[1]}\n// source: {link}\n\n{holidays}"
        )
        with open(path, 'w', encoding='utf-8') as fh:
            fh.write(header)
    # Refresh the "updated at" marker line in the README, if present.
    update_info = "> Calendar data updated "
    with open('./README.md', 'r', encoding='utf-8') as fh:
        content = fh.read().split('\n')
    for idx, line in enumerate(content):
        if line.startswith(update_info):
            content[idx] = update_info + beijing_time().strftime("at %-H:%M on %B %-d, %Y")
    with open('./README.md', 'w', encoding='utf-8') as fh:
        fh.write('\n'.join(content))
def data() -> Iterator[Tuple[str, str, str]]:
    """Crawl the State Council pages and yield (year, link, holidays) tuples.

    For each notice page found by source(), fetch the page, scan it line by
    line for holiday-arrangement sentences, and yield the year, the page URL,
    and the formatted holiday lines joined with newlines.
    """
    for year, link in source():
        print(f"\n\n{year}: {link}")
        results: list[str] = []
        # (connect, read) timeout in seconds; re-guess the encoding because
        # gov.cn pages often serve GBK regardless of the declared charset.
        response = requests.get(link, timeout=(5, 10))
        response.encoding = response.apparent_encoding
        # Matches numbered notice lines of the form "<id>、<name>...放假...。",
        # capturing the arrangement sentence as <detail>.
        line_regex = r"(?P<id>.)、(?P<name>.*)(</.*?>)?(?P<detail>.*放假.*。)"
        for line in response.text.replace('<br/>', '\n').split('\n'):
            if match := re.search(line_regex, line):
                # NOTE(review): the split delimiter below is an invisible /
                # ambiguous Unicode character in the original file (the web
                # viewer flagged it) — presumably the CJK full stop '。'
                # separating the holiday sentence from the make-up-workday
                # sentence. Confirm against the original bytes.
                work, rest, *_ = match.group('detail').split('')
                dates = ';'.join((match.group('name'), parse(work), parse(rest)))
                # Known cases that need manual intervention:
                # 1. "merged with weekend" rests, 2. make-up rest days (补休).
                print(dates)
                results.append(f"{dates:30} // {match.group('detail')}")
        yield year, link, '\n'.join(results)
def parse(text: str) -> str:
    """Parse a holiday arrangement sentence into comma-joined date tokens.

    Returns tokens like "M.D-M.D" for ranges and "M.D" for single days,
    e.g. "10.1-10.7" or "1.1".
    """
    results: list[str] = []
    # The three patterns must be tried in this order: the cross-month range
    # (A) is a superset of the same-month range (B), which in turn contains
    # the single-date pattern.
    # "M月D日至M月D日" — range spanning two month/day pairs.
    range_type_a = r"(?P<m1>\d?\d)月(?P<d1>\d?\d)日至(?P<m2>\d?\d)月(?P<d2>\d?\d)日"
    # "M月D日至D日" — range within one month (end month implied).
    range_type_b = r"(?P<m1>\d?\d)月(?P<d1>\d?\d)日至(?P<d2>\d?\d)日"
    # "M月D日" — a single day.
    single_date = r"(?P<m1>\d?\d)月(?P<d1>\d?\d)日"
    # NOTE(review): the split delimiter below is an invisible / ambiguous
    # Unicode character in the original file (flagged by the web viewer) —
    # presumably a CJK comma ('，' or '、') separating date clauses.
    # Confirm against the original bytes.
    for item in text.split(''):
        if match := re.search(range_type_a, item):
            results.append(f"{match.group('m1')}.{match.group('d1')}-"
                           f"{match.group('m2')}.{match.group('d2')}")
            print(f"\tA: {results[-1]:15} {item}")
        elif match := re.search(range_type_b, item):
            # Same-month range: reuse m1 for the end date's month.
            results.append(f"{match.group('m1')}.{match.group('d1')}-"
                           f"{match.group('m1')}.{match.group('d2')}")
            print(f"\tB: {results[-1]:15} {item}")
        elif match := re.search(single_date, item):
            results.append(f"{match.group('m1')}.{match.group('d1')}")
            print(f"\tS: {results[-1]:15} {item}")
        else:
            # No date found — logged for manual inspection.
            print(f"\tX: {'':15} {item}")
    return ','.join(results)
def source() -> Iterator[Tuple[str, str]]:
    """Yield (year, link) pairs for State Council holiday-arrangement notices.

    Queries the gov.cn paper search for "节假日安排" (URL-encoded in the
    query string) and extracts the year and URL from each matching link.
    """
    search_url = ("http://sousuo.gov.cn/s.htm?t=paper&advance=false&n=&codeYear=&codeCode="
                  "&searchfield=title&sort=&q=%E8%8A%82%E5%81%87%E6%97%A5%E5%AE%89%E6%8E%92")
    # Anchor whose text is "State Council notice on the <year> holiday schedule".
    link_pattern = re.compile(r"href=['\"](?P<link>.*?)['\"].*国务院办公厅关于(?P<year>20\d\d)年.*通知")
    page_text = requests.get(search_url, timeout=(5, 10)).text
    for html_line in page_text.split('\n'):
        found = link_pattern.search(html_line)
        if found:
            yield found.group('year'), found.group('link')
def beijing_time() -> datetime:
    """Return the current time as an aware datetime in UTC+8 (Beijing).

    Uses ``datetime.now(timezone.utc)`` instead of ``datetime.utcnow()``:
    the latter returns a naive datetime and is deprecated since Python 3.12.
    The result is identical — an aware datetime converted to the fixed
    UTC+8 offset (China has no daylight saving time).
    """
    return datetime.now(timezone.utc).astimezone(timezone(timedelta(hours=8)))
# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()