fixed keywords and rewrote readme and genre tocs
@@ -16,22 +16,11 @@ import json
import textwrap
from utils.utils import *

TOC = '_toc.md'


def get_category_paths():
    """
    Returns all sub folders of the games path.
    """
    return [os.path.join(games_path, x) for x in os.listdir(games_path) if os.path.isdir(os.path.join(games_path, x))]


def get_entry_paths(category_path):
    """
    Returns all files of a category path, except for '_toc.md'.
    """
    return [os.path.join(category_path, x) for x in os.listdir(category_path) if x != TOC and os.path.isfile(os.path.join(category_path, x))]

essential_fields = ('Home', 'State', 'Keywords', 'Code repository', 'Code language', 'Code license')
valid_fields = ('Home', 'Media', 'State', 'Play', 'Download', 'Platform', 'Keywords', 'Code repository', 'Code language',
                'Code license', 'Code dependencies', 'Assets license', 'Build system', 'Build instructions')
valid_platforms = ('Windows', 'Linux', 'macOS', 'Android', 'Browser')
recommended_keywords = ('action', 'arcade', 'adventure', 'visual novel', 'sports', 'platform', 'puzzle', 'role playing', 'simulation', 'strategy', 'card game', 'board game', 'music', 'educational', 'tool', 'game engine', 'framework', 'library')

def extract_overview_for_toc(file):
    """
@@ -58,7 +47,7 @@ def extract_overview_for_toc(file):
    return output


def update_readme():
def update_readme_and_tocs(infos):
    """
    Recounts entries in sub categories and writes them to the readme.
    Also updates the _toc files in the categories directories.
@@ -70,6 +59,12 @@
    """
    print('update readme file')

    # delete all toc files
    entries = os.listdir(games_path)
    entries = (x for x in entries if x.startswith('_'))
    for entry in entries:
        os.remove(os.path.join(games_path, entry))

    # read readme
    readme_text = read_text(readme_file)

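The generated files all share a leading underscore; that convention is what the clean-up above and the entry listing later in the diff key on. A minimal sketch of the idea, with an assumed `games` directory path:

```python
import os

games_path = 'games'  # assumed location of the entry files

# generated toc files start with '_' and can simply be removed before regeneration
for name in (x for x in os.listdir(games_path) if x.startswith('_')):
    os.remove(os.path.join(games_path, name))

# the actual entries are everything that does not start with '_'
entries = [x for x in os.listdir(games_path) if not x.startswith('_')]
```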
@@ -78,40 +73,58 @@

    # apply regex
    matches = regex.findall(readme_text)
    assert len(matches) == 1
    if len(matches) != 1:
        raise RuntimeError('readme file has invalid structure')
    matches = matches[0]
    start = matches[0]
    end = matches[2]

    # get sub folders
    category_paths = get_category_paths()
    # create all toc and readme entry
    title = 'All'
    file = '_all.md'
    update = ['- **[{}](games/{})** ({})\n'.format(title, file, len(infos))]
    create_toc(title, file, infos)

    # assemble paths
    toc_paths = [os.path.join(path, TOC) for path in category_paths]

    # get titles (discarding first two ("# ") and last ("\n") characters)
    category_titles = [read_first_line(path)[2:-1] for path in toc_paths]

    # get number of files (minus 1 for the already existing TOC file) in each sub folder
    n_entries = [len(os.listdir(path)) - 1 for path in category_paths]

    # combine titles, category names, numbers in one list
    info = zip(category_titles, [os.path.basename(path) for path in category_paths], n_entries)

    # sort according to sub category title (should be unique)
    info = sorted(info, key=lambda x:x[0])

    # assemble output
    update = ['- **[{}](games/{}/{})** ({})\n'.format(entry[0], entry[1], TOC, entry[2]) for entry in info]
    update = "{} entries\n".format(sum(n_entries)) + "".join(update)
    for keyword in recommended_keywords:
        infos_filtered = [x for x in infos if keyword in x['keywords']]
        title = keyword.capitalize()
        file = '_{}.md'.format(keyword)
        update.append('- **[{}](games/{})** ({})\n'.format(title, file, len(infos_filtered)))
        create_toc(title, file, infos_filtered)
    update = ''.join(update)

    # insert new text in the middle
    text = start + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "\n[comment]: # (end of autogenerated content)" + end
    text = start + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "[comment]: # (end of autogenerated content)" + end

    # write to readme
    write_text(readme_file, text)


def create_toc(title, file, entries):
    """

    """
    # file path
    toc_file = os.path.join(games_path, file)

    # header line
    text = '[comment]: # (autogenerated content, do not edit)\n# {}\n\n'.format(title)

    # assemble rows
    rows = []
    for entry in entries:
        rows.append('- **[{}]({})** ({})'.format(entry['title'], entry['file'], ', '.join(entry['code language'] + entry['code license'] + entry['state'])))

    # sort rows (by title)
    rows.sort()

    # add to text
    text += '\n'.join(rows)

    write_text(toc_file, text)



def update_category_tocs():
    """
    Lists all entries in all sub folders and generates the list in the toc file.
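For reference, the regex applied in the hunk above is compiled in an unchanged part of the script. The sketch below only illustrates the idea: split the readme into the text before the start marker, the old autogenerated block, and the text after the end marker, then rebuild the middle. The marker pattern and sample text are assumptions, not the script's actual regex.

```python
import re

# assumed marker pattern; the real regex lives in unchanged code above this hunk
regex = re.compile(
    r"(.*)(\[comment\]: # \(start of autogenerated content, do not edit\).*"
    r"\[comment\]: # \(end of autogenerated content\))(.*)", re.DOTALL)

readme_text = ("intro\n"
               "[comment]: # (start of autogenerated content, do not edit)\n"
               "old list\n"
               "[comment]: # (end of autogenerated content)\n"
               "outro\n")

start, _, end = regex.findall(readme_text)[0]
update = "42 entries\n- **[All](games/_all.md)** (42)\n"
text = start + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "[comment]: # (end of autogenerated content)" + end
```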
@@ -240,6 +253,60 @@ def check_template_leftovers():
            if content.find(check_string) >= 0:
                print('{}: found {}'.format(os.path.basename(entry_path), check_string))

def fix_keywords():
    """
    Fixes the keywords.
    """

    regex = re.compile(r"(.*)(- Keywords:.*)(- Code repository: .*)", re.DOTALL)

    # get all entries
    entries = os.listdir(games_path)

    # iterate over all entries
    for entry in entries:
        entry_path = os.path.join(games_path, entry)

        # read entry
        content = read_text(entry_path)

        # match with regex
        matches = regex.findall(content)
        if len(matches) != 1:
            raise RuntimeError('Could not find keywords in entry "{}"'.format(entry))

        match = matches[0]

        # get keywords out, split, strip, delete duplicates
        keywords = match[1][11:]
        keywords = keywords.split(',')
        keywords = [x.strip() for x in keywords]
        keywords = list(set(keywords))

        # special treatments here
        keywords = [x if x != 'TBS' and x != 'TB' else 'turn based' for x in keywords]
        keywords = [x if x != 'RTS' else 'real time' for x in keywords]
        keywords = [x if x != 'MMO' else 'massive multiplayer online' for x in keywords]
        keywords = [x if x != 'MMO' else 'multiplayer online' for x in keywords]
        keywords = [x if x != 'SP' else 'singleplayer' for x in keywords]
        keywords = [x if x != 'MP' else 'multiplayer' for x in keywords]
        keywords = [x if x != 'engine' else 'game engine' for x in keywords]
        keywords = [x if x != 'rpg' else 'role playing' for x in keywords]
        keywords = [x if x != 'turn based' else 'turn-based' for x in keywords]
        for keyword in ('browser', 'misc', 'tools'):
            if keyword in keywords:
                keywords.remove(keyword)

        # sort
        keywords.sort()

        keywords = '- Keywords: {}\n'.format(', '.join(keywords))

        new_content = match[0] + keywords + match[2]

        if new_content != content:
            # write again
            write_text(entry_path, new_content)

def parse_entry(content):
    """
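A worked example of the keyword clean-up that `fix_keywords` performs above, applied to a made-up keyword line and using only a subset of the replacement rules:

```python
# invented sample line, used only to illustrate the clean-up above
line = '- Keywords: strategy, TBS, RTS,  strategy, engine'
keywords = line[11:].split(',')           # drop the '- Keywords:' prefix, split on ','
keywords = [x.strip() for x in keywords]  # strip whitespace
keywords = list(set(keywords))            # delete duplicates
keywords = [x if x != 'TBS' and x != 'TB' else 'turn based' for x in keywords]
keywords = [x if x != 'RTS' else 'real time' for x in keywords]
keywords = [x if x != 'engine' else 'game engine' for x in keywords]
keywords = [x if x != 'turn based' else 'turn-based' for x in keywords]
keywords.sort()
print('- Keywords: {}'.format(', '.join(keywords)))
# -> - Keywords: game engine, real time, strategy, turn-based
```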
@@ -251,15 +318,15 @@ def parse_entry(content):
    # read title
    regex = re.compile(r"^# (.*)") # start of content, starting with "# " and then everything until the end of line
    matches = regex.findall(content)
    assert len(matches) == 1
    assert matches[0]
    if len(matches) != 1 or not matches[0]:
        raise RuntimeError('Title not found in entry "{}"'.format(content))
    info['title'] = matches[0]

    # read description
    regex = re.compile(r"^.*\n\n_(.*)_\n") # third line from top, everything between underscores
    matches = regex.findall(content)
    assert len(matches) == 1, info['title']
    assert matches[0]
    if len(matches) != 1 or not matches[0]:
        raise RuntimeError('Description not found in entry "{}"'.format(content))
    info['description'] = matches[0]

    # first read all field names
@@ -267,33 +334,31 @@ def parse_entry(content):
    fields = regex.findall(content)

    # check that essential fields are there
    essential_fields = ['Home', 'State', 'Code repository', 'Code language']
    for field in essential_fields:
        if field not in fields:
            print('Error: Essential field "{}" missing in entry "{}"'.format(field, info['title']))
            return info # so that the remaining entries can also be parsed
            raise RuntimeError('Essential field "{}" missing in entry "{}"'.format(field, info['title']))

    # check that all fields are valid fields and are existing in that order
    valid_fields = ('Home', 'Media', 'State', 'Play', 'Download', 'Platform', 'Keywords', 'Code repository', 'Code language', 'Code license', 'Code dependencies', 'Assets license', 'Build system', 'Build instructions')
    index = 0
    for field in fields:
        while index < len(valid_fields) and field != valid_fields[index]:
            index += 1
        if index == len(valid_fields):
            print('Error: Field "{}" in entry "{}" either misspelled or in wrong order'.format(field, info['title']))
            return info # so that the remaining entries can also be parsed
            raise RuntimeError('Field "{}" in entry "{}" either misspelled or in wrong order'.format(field, info['title']))

    # iterate over found fields
    for field in fields:
        regex = re.compile(r"- {}: (.*)".format(field))
        matches = regex.findall(content)
        assert len(matches) == 1 # every field should only be present once
        if len(matches) != 1:
            # every field should only be present once
            raise RuntimeError('Field "{}" in entry "{}" exists multiple times.'.format(field, info['title']))
        v = matches[0]

        # first store as is
        info[field.lower()+'-raw'] = v

        # remove parentheses
        # remove parentheses with content
        v = re.sub(r'\([^)]*\)', '', v)

        # split on ','
@@ -302,7 +367,7 @@ def parse_entry(content):
        # strip
        v = [x.strip() for x in v]

        # remove all being false (empty)
        # remove all being false (empty) that were for example just comments
        v = [x for x in v if x]

        # if entry is of structure <..> remove <>
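The three regexes above drive the whole entry parser: one for the title line, one for the description between underscores on the third line, and one per field. A small demo on an invented entry (the entry text and its values are made up for illustration only):

```python
import re

# invented entry, shaped like the markdown files the parser reads
content = ('# Some Game\n'
           '\n'
           '_A short description._\n'
           '\n'
           '- Home: https://example.org\n'
           '- State: beta\n')

title = re.findall(r"^# (.*)", content)[0]                    # 'Some Game'
description = re.findall(r"^.*\n\n_(.*)_\n", content)[0]      # 'A short description.'
home = re.findall(r"- {}: (.*)".format('Home'), content)[0]   # 'https://example.org'
print(title, description, home, sep=' | ')
```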
@@ -315,15 +380,15 @@ def parse_entry(content):
        # store in info
        info[field.lower()] = v

    # now checks on the content of fields

    # state (essential field) must contain either beta or mature but not both, but at least one
    v = info['state']
    for t in v:
        if t != 'beta' and t != 'mature' and not t.startswith('inactive since '):
            print('Error: Unknown state tag "{}" in entry "{}"'.format(t, info['title']))
            return info # so that the rest can run through
            raise RuntimeError('Unknown state tag "{}" in entry "{}"'.format(t, info['title']))
    if ('beta' in v) == ('mature' in v):
        print('Error: State must be one of <"beta", "mature"> in entry "{}"'.format(info['title']))
        return info # so that the rest can run through
        raise RuntimeError('State must be one of <"beta", "mature"> in entry "{}"'.format(info['title']))

    # extract inactive year
    phrase = 'inactive since '
@@ -332,28 +397,42 @@ def parse_entry(content):
    if inactive_year:
        info['inactive'] = inactive_year[0]

    # urls in home, download, play and code repositories must start with http or https (or git) and should not contain space
    # urls in home, download, play and code repositories must start with http or https (or git) and should not contain spaces
    for field in ['home', 'download', 'play', 'code repository']:
        if field in info:
            for url in info[field]:
                if not (url.startswith('http://') or url.startswith('https://') or url.startswith('git://')):
                    print('URL "{}" in entry "{}" does not start with http'.format(url, info['title']))
                    raise RuntimeError('URL "{}" in entry "{}" does not start with http'.format(url, info['title']))
                if ' ' in url:
                    print('URL "{}" in entry "{}" contains a space'.format(url, info['title']))
                    raise RuntimeError('URL "{}" in entry "{}" contains a space'.format(url, info['title']))

    # github repositories should end on .git
    if 'code repository' in info:
        for repo in info['code repository']:
            if repo.startswith('https://github.com/') and not repo.endswith('.git'):
                print('Github repo {} in entry "{}" should end on .git.'.format(repo, info['title']))
                raise RuntimeError('Github repo {} in entry "{}" should end on .git.'.format(repo, info['title']))

    # check valid platform tags
    valid_platforms = ('Android', 'Windows', 'Linux', 'macOS', 'Browser')
    # check that all platform tags are valid tags and are existing in that order
    if 'platform' in info:
        index = 0
        for platform in info['platform']:
            if platform not in valid_platforms:
                print('Error: invalid platform tag "{}" in entry "{}"'.format(platform, info['title']))
                return info # so that the rest can run through
            while index < len(valid_platforms) and platform != valid_platforms[index]:
                index += 1
            if index == len(valid_platforms):
                raise RuntimeError('Platform tag "{}" in entry "{}" either misspelled or in wrong order'.format(platform, info['title']))

    # there must be at least one keyword
    if 'keywords' not in info:
        raise RuntimeError('Need at least one keyword in entry "{}"'.format(info['title']))

    # check for existence of at least one recommended keyword
    fail = True
    for recommended_keyword in recommended_keywords:
        if recommended_keyword in info['keywords']:
            fail = False
            break
    if fail:
        raise RuntimeError('Entry "{}" contains no recommended keyword'.format(info['title']))

    return info

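The state rule above requires exactly one of 'beta' or 'mature', optionally accompanied by an 'inactive since <year>' tag. Below is only a hedged sketch of that check in isolation, not part of the script itself:

```python
def check_state(v):
    # every tag must be 'beta', 'mature' or 'inactive since <year>'
    for t in v:
        if t != 'beta' and t != 'mature' and not t.startswith('inactive since '):
            raise RuntimeError('Unknown state tag "{}"'.format(t))
    # exactly one of 'beta'/'mature' must be present
    if ('beta' in v) == ('mature' in v):
        raise RuntimeError('State must be one of <"beta", "mature">')

check_state(['mature', 'inactive since 2017'])  # passes silently
try:
    check_state(['beta', 'mature'])
except RuntimeError as error:
    print(error)  # both tags at once are rejected
```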
@@ -362,36 +441,29 @@ def assemble_infos():
    """
    Parses all entries and assembles interesting infos about them.
    """
    # get category paths
    category_paths = get_category_paths()

    # a database of all important infos about the entries
    infos = {}
    infos = []

    # for each category
    for category_path in category_paths:
        # get paths of all entries in this category
        entry_paths = get_entry_paths(category_path)
    # get all entries (ignore everything starting with underscore)
    entries = os.listdir(games_path)
    entries = (x for x in entries if not x.startswith('_'))

        # get titles (discarding first two ("# ") and last ("\n") characters)
        category = read_first_line(os.path.join(category_path, TOC))[2:-1]
    # iterate over all entries
    for entry in entries:
        entry_path = os.path.join(games_path, entry)

        for entry_path in entry_paths:
            # read entry
            content = read_text(entry_path)
        # read entry
        content = read_text(entry_path)

            # parse entry
            info = parse_entry(content)
        # parse entry
        info = parse_entry(content)

            # add category
            info['category'] = category
        # add file information
        info['file'] = entry

            # add file information
            info['file'] = os.path.basename(entry_path)[:-3] # [:-3] to cut off the .md
            info['path'] = os.path.basename(category_path) + '/' + os.path.basename(entry_path)

            # add to list
            infos[entry_path] = info
        # add to list
        infos.append(info)

    return infos

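`assemble_infos` now returns a flat list of dicts, one per entry, and the per-genre tocs in `update_readme_and_tocs` are plain keyword filters over that list. A sketch with two invented entries, shaped like the dicts `parse_entry` produces:

```python
# two invented entries, for illustration only
infos = [
    {'title': 'Some Game', 'file': 'some_game.md', 'keywords': ['strategy', 'turn-based'],
     'code language': ['C++'], 'code license': ['GPL-3.0'], 'state': ['mature']},
    {'title': 'Another Game', 'file': 'another_game.md', 'keywords': ['puzzle'],
     'code language': ['Python'], 'code license': ['MIT'], 'state': ['beta']},
]

keyword = 'strategy'
infos_filtered = [x for x in infos if keyword in x['keywords']]
print(len(infos_filtered))  # 1
```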
@@ -778,23 +850,23 @@ if __name__ == "__main__":
    # assemble info
    infos = assemble_infos()

    # recount and write to readme
    update_readme()

    # generate list in toc files
    update_category_tocs()
    # recount and write to readme and to tocs
    update_readme_and_tocs(infos)

    # generate report
    generate_statistics()
    #generate_statistics()

    # update database for html table
    export_json()
    #export_json()

    # check for unfilled template lines
    check_template_leftovers()
    #check_template_leftovers()

    # fix keywords
    # fix_keywords()

    # check external links (only rarely)
    # check_validity_external_links()

    # collect list of primary code repositories
    update_primary_code_repositories()
    #update_primary_code_repositories()