improved maintenance.py

Trilarion
2018-06-07 23:56:39 +02:00
parent 6070bd5360
commit c738e995b7
20 changed files with 472 additions and 398 deletions


@@ -13,6 +13,7 @@ import re
import urllib.request
import http.client
import datetime
import json
TOC = '_toc.md'
@@ -66,29 +67,19 @@ def extract_overview_for_toc(file):
with open(file, mode='r', encoding='utf-8') as f:
text = f.read()
output = [None, None, None]
info = parse_entry(text)
# TODO unify this with the statistics
output = []
# language
regex = re.compile(r"- Language\(s\): (.*)")
matches = regex.findall(text)
if matches:
output[0] = matches[0]
if 'code language' in info:
output.extend(info['code language'])
# license
regex = re.compile(r"- License: (.*)")
matches = regex.findall(text)
if matches:
output[1] = matches[0]
if 'code license' in info:
output.extend(info['code license'])
# state
regex = re.compile(r"- State: (.*)")
matches = regex.findall(text)
if matches:
output[2] = matches[0]
output = [x for x in output if x] # eliminate empty entries
if 'state' in info:
output.extend(info['state'])
output = ", ".join(output)
@@ -143,7 +134,7 @@ def update_readme():
update = "{} entries\n".format(sum(n_entries)) + "".join(update)
# insert new text in the middle
text = start + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "[comment]: # (end of autogenerated content)" + end
text = start + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "\n[comment]: # (end of autogenerated content)" + end
# write to readme
write_text(readme_file, text)
@@ -186,7 +177,7 @@ def update_category_tocs():
update = "".join(update)
# combine with toc header
text = toc_header + '\n' + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "[comment]: # (end of autogenerated content)"
text = toc_header + '\n' + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "\n[comment]: # (end of autogenerated content)"
# write to toc file
with open(toc_file, mode='w', encoding='utf-8') as f:
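Both hunks above make the same one-character fix: a "\n" is inserted before the end marker so that it lands on its own line instead of being glued to the last autogenerated entry. A small illustration with a hypothetical update string:

    update = "- [Some Game](some_game.md) (C++, GPL-3.0, mature)"
    start_marker = "[comment]: # (start of autogenerated content, do not edit)\n"
    end_marker = "[comment]: # (end of autogenerated content)"
    old_text = start_marker + update + end_marker          # end marker shares a line with the last entry
    new_text = start_marker + update + "\n" + end_marker   # end marker sits on its own line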
@@ -323,13 +314,13 @@ def parse_entry(content):
for field in essential_fields:
if field not in info:
print('Essential field "{}" missing in entry {}'.format(field, info['title']))
return {}
return info # so that the rest can run through
# state must contain either beta or mature but not both
v = info['state']
if ('beta' in v) == ('mature' in v):  # error if neither or both are present
print('State must be one of <beta, mature> in entry {}'.format(info['title']))
return {}
return info # so that the rest can run through
# extract inactive
phrase = 'inactive since '
@@ -340,15 +331,10 @@ def parse_entry(content):
return info
def generate_statistics():
def assemble_infos():
"""
Generates the statistics page.
Should be done everytime the entries change.
Parses all entries and assembles interesting infos about them.
"""
statistics_path = os.path.join(games_path, 'statistics.md')
statistics = '[comment]: # (autogenerated content, do not edit)\n# Statistics\n\n'
# get category paths
category_paths = get_category_paths()
@@ -360,6 +346,9 @@ def generate_statistics():
# get paths of all entries in this category
entry_paths = get_entry_paths(category_path)
# get titles (discarding first two ("# ") and last ("\n") characters)
category = read_first_line(os.path.join(category_path, TOC))[2:-1]
for entry_path in entry_paths:
# read entry
content = read_text(entry_path)
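The [2:-1] slice strips the leading "# " and the trailing newline from the first line of the category TOC file; a one-line sanity check, assuming a heading like "# Strategy":

    assert "# Strategy\n"[2:-1] == "Strategy"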
@@ -367,12 +356,31 @@ def generate_statistics():
# parse entry
info = parse_entry(content)
# add category
info['category'] = category
# add file information
info['file'] = os.path.basename(entry_path)[:-3] # [:-3] to cut off the .md
# add to list
infos.append(info)
return infos
def generate_statistics():
"""
Generates the statistics page.
Should be done every time the entries change.
"""
# start the page
statistics_path = os.path.join(games_path, 'statistics.md')
statistics = '[comment]: # (autogenerated content, do not edit)\n# Statistics\n\n'
# assemble infos
infos = assemble_infos()
# total number
number_entries = len(infos)
rel = lambda x: x / number_entries * 100 # conversion to percent
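After assemble_infos, each element of infos holds the parsed fields plus the 'category' and 'file' keys added above, and rel converts absolute counts into percentages of all entries. A short sketch with hypothetical values:

    # one hypothetical element of infos
    info = {'title': 'Some Game', 'state': ['beta'],
            'category': 'Strategy', 'file': 'some_game'}
    number_entries = 400                      # hypothetical total
    rel = lambda x: x / number_entries * 100  # conversion to percent
    print('{:.1f}%'.format(rel(100)))         # prints "25.0%"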
@@ -446,6 +454,35 @@ def generate_statistics():
f.write(statistics)
def export_json():
"""
Parses all entries, collects interesting info and stores it in a json file suitable for displaying
with a dynamic table in a browser.
"""
# assemble info
infos = assemble_infos()
# make database out of it
db = {}
db['header'] = ['Name', 'Download']
entries = []
for info in infos:
entry = [info['title']]
if 'download' in info:
entry.append(info['download'][0])
else:
entry.append(None)
entries.append(entry)
db['data'] = entries
# output
json_path = os.path.join(games_path, os.path.pardir, 'docs', 'data.json')
text = json.dumps(db)
write_text(json_path, text)
if __name__ == "__main__":
# paths
@ -453,13 +490,16 @@ if __name__ == "__main__":
readme_file = os.path.realpath(os.path.join(games_path, os.pardir, 'README.md'))
# recount and write to readme
# update_readme()
#update_readme()
# generate list in toc files
# update_category_tocs()
#update_category_tocs()
# generate report
generate_statistics()
#generate_statistics()
# update database for html table
export_json()
# check for unfilled template lines
# check_template_leftovers()
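For reference, export_json above writes a header/data pair intended for a dynamic table in the browser; the concrete front-end is not part of this commit. A hedged sketch of the JSON produced for two hypothetical entries:

    import json

    db = {'header': ['Name', 'Download'],
          'data': [['Some Game', 'https://example.org/some-game/download'],
                   ['Another Game', None]]}
    print(json.dumps(db))
    # each row becomes a JSON array; a missing download is serialized as null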