diff options
| author | Charles Cabergs <me@cacharle.xyz> | 2021-06-18 20:17:07 +0200 |
|---|---|---|
| committer | Charles Cabergs <me@cacharle.xyz> | 2021-06-18 20:17:07 +0200 |
| commit | c0aeed4578bdaea39ddbed1b55896948f56b23f3 (patch) | |
| tree | e3959fe7472c63eab19bab0c81c72e60ae5f6c55 | |
| parent | 815fbb2a697a060411f19f70ecdaa5c775b430cb (diff) | |
| download | project_euler-c0aeed4578bdaea39ddbed1b55896948f56b23f3.tar.gz project_euler-c0aeed4578bdaea39ddbed1b55896948f56b23f3.tar.bz2 project_euler-c0aeed4578bdaea39ddbed1b55896948f56b23f3.zip | |
Refactored generate script
| -rw-r--r-- | config.json | 58 | ||||
| -rwxr-xr-x | generate | 94 | ||||
| -rwxr-xr-x | generate.py | 82 | ||||
| -rw-r--r-- | languages.json | 50 |
4 files changed, 144 insertions, 140 deletions
diff --git a/config.json b/config.json deleted file mode 100644 index 812e3d7..0000000 --- a/config.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "languages": { - "python": { - "extension": "py", - "comment" : { - "top": "###", - "prefix" : "# ", - "bottom": "###" - } - }, - - "haskell": { - "extension": "hs", - "comment" : { - "top": "----", - "prefix" : "-- ", - "bottom": "----" - } - }, - - "c": { - "extension": "c", - "comment" : { - "top": "/*", - "prefix" : "* ", - "bottom": "*/" - } - }, - - "lisp": { - "extension": "lisp", - "comment" : { - "top": ";;;;;;;;", - "prefix" : ";;;; ", - "bottom": ";;;;;;;;" - } - }, - "rust": { - "extension": "rs", - "comment": { - "top": "/*", - "prefix" : "* ", - "bottom": "*/" - } - }, - "julia": { - "extension": "jl", - "comment": { - "top": "###", - "prefix" : "# ", - "bottom": "###" - } - } - }, - "url_format": "http://projecteuler.net/problem={index}", - "line_wrap": 89, - "problem_padding": 2 -} diff --git a/generate b/generate new file mode 100755 index 0000000..fef0aed --- /dev/null +++ b/generate @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 + +import json +import itertools +import textwrap +from pathlib import Path +from argparse import ArgumentParser + +import requests +from bs4 import BeautifulSoup + + +LANGUAGES_FILENAME = 'languages.json' +URL_FORMAT = 'http://projecteuler.net/problem={index}' +LINE_WRAP = 89 +PROBLEM_PADDING = 3 + + +class Problem: + def __init__(self, index: int, language: dict): + self.index = index + self.language = language + + def fetch(self): + url = URL_FORMAT.format(index=self.index) + print(f"Fetching problem {self.index} at {url}") + data = requests.get(url) + soup = BeautifulSoup(data.text, 'html.parser') + data = soup.find('div', {'id': 'content'}) + self.title = data.h2.text + self.sub_title = data.h3.text + self.content = soup.find('div', {'class': 'problem_content'}).text + print(self) + return self + + def write(self): + file_name = 
f'{self.index:03}-{self._slug}.{self.language["extension"]}' + file_path = Path(self.language['name']) / file_name + if file_path.exists(): + raise FileExistsError(f'{file_path} already exists') + file_path.parent.mkdir(exist_ok=True) + with open(file_path, 'w') as file: + file.write(str(self)) + return self + + def __str__(self) -> str: + title = self.title.strip() + sub_title = self.sub_title.strip() + content = self.content.strip() + + content_lines = [] + for line in content.splitlines(): + content_lines.extend(textwrap.wrap(line, width=LINE_WRAP)) + + lines = [title, sub_title, "", *content_lines] + lines = [self.language['comment']['prefix'] + line for line in lines] + lines.insert(0, self.language['comment']['top']) + lines.append(self.language['comment']['bottom']) + lines.extend(itertools.repeat("", PROBLEM_PADDING)) + return '\n'.join(lines) + + @property + def _slug(self) -> str: + title = self.title.lower().replace(' ', '_') + title = [c for c in title if c.isalnum() or c == '_'] + return ''.join(title) + + +def main(): + with open(LANGUAGES_FILENAME, 'r') as file: + languages = json.load(file) + parser = ArgumentParser(description='Project Euler problem file generator') + parser.add_argument( + 'language', + metavar='LANGUAGE', + choices=languages, + help='file programming language', + ) + parser.add_argument( + 'indices', + metavar='INDEX', + nargs='+', + type=int, + help='Project Euler problems index', + ) + args = parser.parse_args() + for index in args.indices: + language = languages[args.language] + language['name'] = args.language + Problem(index, language).fetch().write() + + +if __name__ == '__main__': + main() diff --git a/generate.py b/generate.py deleted file mode 100755 index b6a6f76..0000000 --- a/generate.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python3 - -import requests -import json -from bs4 import BeautifulSoup -from argparse import ArgumentParser - - -CONFIG_FILENAME = 'config.json' - -def parse_args(languages): - parser = 
ArgumentParser(description='Problem file generator') - parser.add_argument('indexes', nargs='+', type=int, - help='problems indexes for which files are generated') - parser.add_argument('-l', '--language', choices=languages, required=True, - help='the file programming language') - return vars(parser.parse_args()) - -def read_config(): - try: - with open(CONFIG_FILENAME, 'r') as file: - config = json.load(file) - except FileNotFoundError: - print(f'Error: file {CONFIG_FILENAME} not found') - exit(1) - return config - - -def write_problem(index, title, sub_title, content, language_config, config): - text = '\n'.join([ - title, - sub_title, - '', - content, - ]) - - text = '\n'.join( - [language_config['comment']['prefix'] + line for line in text.split('\n')] - ) - text = language_config['comment']['top'] + '\n' + text - text += '\n' + language_config['comment']['bottom'] - text += '\n' * config['problem_padding'] - - - slug = ''.join([c for c in title.lower().replace(' ', '_') - if c.isalpha() or c.isdigit() or c == '_']) - - filename = (str(index).zfill(3) + '-' + slug + '.' 
+ language_config['extension']) - try: - with open(filename, 'w') as file: - file.write(text) - print(f'{filename} created') - except: - print(f'Error: could not create file {filename}') - - def fetch_problem(index, config, args): - url = config['url_format'].format(index=index) - - print(f"fetching problem {index} at {url}...") - data = requests.get(url) - soup = BeautifulSoup(data.text, 'html.parser') - - print('parsing data...') - data = soup.find('div', {'id': 'content'}) - problem_title = data.h2.text.strip(' \n\t') - problem_sub_title = data.h3.text.strip(' \n\t') - problem_text = soup.find('div', {'class': 'problem_content'}).text.strip(' \n\t') - - print('\nTitle:', problem_title) - print('Sub title:', problem_sub_title) - print('Text:', problem_text, '\n') - - write_problem(index, problem_title, problem_sub_title, problem_text, - config['languages'][args['language']], config) - - -if __name__ == '__main__': - config = read_config() - args = parse_args(config['languages'].keys()) - for p in args['indexes']: - fetch_problem(p, config, args) diff --git a/languages.json b/languages.json new file mode 100644 index 0000000..8e4224b --- /dev/null +++ b/languages.json @@ -0,0 +1,50 @@ +{ + "python": { + "extension": "py", + "comment" : { + "top": "###", + "prefix" : "# ", + "bottom": "###" + } + }, + "haskell": { + "extension": "hs", + "comment" : { + "top": "----", + "prefix" : "-- ", + "bottom": "----" + } + }, + "c": { + "extension": "c", + "comment" : { + "top": "/*", + "prefix" : "* ", + "bottom": "*/" + } + }, + "lisp": { + "extension": "lisp", + "comment" : { + "top": ";;;;;;;;", + "prefix" : ";;;; ", + "bottom": ";;;;;;;;" + } + }, + "rust": { + "extension": "rs", + "comment": { + "top": "/*", + "prefix" : "* ", + "bottom": "*/" + } + }, + "julia": { + "extension": "jl", + "comment": { + "top": "###", + "prefix" : "# ", + "bottom": "###" + } + } +} |
