refactor #1

Merged
jim merged 20 commits from refactor into main 2026-02-03 01:08:46 -05:00
23 changed files with 452 additions and 1644 deletions
Showing only changes of commit 2dfb9fa7ed - Show all commits

View File

@ -1,20 +1,22 @@
author: Jim Shepich III author: Jim Shepich III
templates_folder: ./templates templates_folder: ./templates
site_defaults: site_defaults:
base_url: http://localhost:8000
web_root: ./dist
templates: templates:
partials: ./templates/partials partials: ./templates/partials
components: ./templates/components components: ./templates/components
pages: ./templates/pages pages: ./templates/pages
sites: sites:
main: main:
base_url: http://localhost:8000
web_root: ./dist
build_cache: ./site build_cache: ./site
assets: assets:
- /assets - /assets
articles: articles:
- ./pages/*.md - ./pages/*.md
resume: resume:
base_url: http://localhost:8000
web_root: ./dist
git_repo: ssh://gitea/jim/resume.git git_repo: ssh://gitea/jim/resume.git
build_cache: ./build/resume build_cache: ./build/resume
assets: assets:

File diff suppressed because it is too large Load Diff

View File

@ -1,50 +0,0 @@
{
"home" : {
"name" : "Home",
"query_value" : "home",
"file" : "home.html",
"index" : 0
},
"404" : {
"name" : "404",
"query_value" : "404",
"file" : "404.html",
"index" : -1
},
"about" : {
"name" : "About",
"query_value" : "about",
"file" : "about.html",
"index" : 1
},
"resume" : {
"name" : "Resume",
"query_value" : "resume",
"link" : "pages/shepich resume.pdf",
"index" : 2
},
"epics" : {
"name" : "Epics & Emprises",
"query_value" : "epics",
"link" : "https://epics.shepich.com",
"index" : -1
},
"lists" : {
"name" : "Lists",
"query_value" : "lists",
"file" : "lists.html",
"index" : -1
},
"don-info" : {
"name" : "Info for Don",
"query_value" : "don-info",
"file" : "don-info.html",
"index" : -1
}
}

View File

@ -1,27 +0,0 @@
[
{
"platform": "Instagram",
"icon": "instagram",
"link": "https://instagram.com/epicshepich"
},
{
"platform": "GitHub",
"icon": "github",
"link": "https://github.com/epicshepich"
},
{
"platform": "Facebook",
"icon": "facebook-square",
"link": "https://www.facebook.com/jim.shepich/"
},
{
"platform": "LinkedIn",
"icon": "linkedin",
"link": "https://www.linkedin.com/in/jshepich/"
},
{
"platform": "Discord",
"icon": "discord",
"link": "https://discordapp.com/users/epicshepich#0131"
}
]

View File

@ -1,342 +0,0 @@
import os
import re
import glob
import shutil
import subprocess
import markdown
import yaml
import pydantic
from typing import Optional
from datetime import datetime, date
from dotmap import DotMap
class GlobalVars(pydantic.BaseModel):
'''Static-valued global variables to be interpolated into any HTML templates.'''
today: date = datetime.today()
def filepath_or_string(s: str) -> str:
'''Loads the contents of a string if it is a filepath, otherwise returns the string.'''
if os.path.isfile(s):
with open(s, 'r') as f:
return f.read()
else:
return s
def extract_placeholders(s: str) -> set:
'''Extracts placeholder variables in the format `{variable}` from
an unformatted template string.'''
# Regex pattern to match placeholders with alphanumerics, dots, and underscores.
placeholder_pattern = r'\{([\w\.]+)\}'
# Find all matches in the string.
matches = re.findall(placeholder_pattern, s)
# Return the set of distinct placeholders.
return set(matches)
def find_cyclical_placeholders(s: str, _parents: tuple = None, _cycles: set = None, **kwargs) -> set[tuple]:
'''Recursively interpolates supplied kwargs into a template string to validate
that there are no cyclical dependencies that would cause infinite recursion.
Returns a list of paths (expressed as tuples of nodes) of cyclical placeholders.
'''
# Track the lineage of each placeholder so we can see if it is its own ancestor.
if _parents is None:
_parents = tuple()
# Keep track of any cycles encountered.
if _cycles is None:
_cycles = set()
# Extract the placeholders from the input.
placeholders = extract_placeholders(s)
# Recursion will naturally end once there are no more nested placeholders.
for p in placeholders:
# Any placeholder that has itself in its ancestry forms a cycle.
if p in _parents:
_cycles.add(_parents + (p,))
# For placeholders that are not their own ancestor, recursively
# interpolate the kwargs into the nested placeholders until we reach
# strings without placeholders.
else:
find_cyclical_placeholders(
('{'+p+'}').format(**kwargs),
_parents = _parents+(p,),
_cycles = _cycles,
**kwargs
)
return _cycles
with open('config.yaml', 'r') as config_file:
config = yaml.safe_load(config_file.read())
class SiteConfig(pydantic.BaseModel):
base_url: Optional[str] = config['site_defaults'].get('base_url')
git_repo: Optional[str] = config['site_defaults'].get('git_repo')
build_cache: Optional[str] = config['site_defaults'].get('build_cache')
assets: Optional[list] = config['site_defaults'].get('assets')
web_root: Optional[str] = config['site_defaults'].get('web_root')
articles: Optional[list] = config['site_defaults'].get('articles')
class ArticleMetadata(pydantic.BaseModel):
title: str
author: Optional[str] = config.get('author')
date: date
lastmod: Optional[date] = None
published: bool
tags: list
thumbnail: Optional[str] = None
def load_markdown(md: str) -> tuple[ArticleMetadata|None, str]:
'''Loads a Markdown file into a (metadata: ArticleMetadata, content: str) pair.'''
# Load the file contents if a filepath is specified, and strip document delimiters ('---').
md = filepath_or_string(md).strip().strip('---').strip()
# If there is no `---` delimiter, then the article has no metadata.
if '---' not in md.strip('---'):
return None, md
# Split the metadata from the contents.
[raw_metadata, raw_article] = md.split('---')
# Use YAML to parse the metadata.
metadata = yaml.safe_load(raw_metadata)
# Convert the contents to a HTML string.
content = markdown.markdown(raw_article)
return ArticleMetadata(**metadata), content
def format_html_template(template: str, **kwargs) -> str:
'''Interpolates variables specified as keyword arguments
into the given HTML template.
# Example
```python
kwargs = {'a': '1', 'b': '2', 'c': '{d}+{e}', 'd': '3', 'e': '{c}'}
s = '{a} + {b} = {c}'
find_cyclical_placeholders(s, **kwargs)
>>> {('c', 'e', 'c')}
```
'''
# Load the template if a filepath is given.
template = filepath_or_string(template)
# Ensure the template does not have cyclical placeholder references.
cycles = find_cyclical_placeholders(template, globalvars = GlobalVars(), **kwargs)
if len(cycles) > 0:
raise ValueError('Template has cyclical dependencies: {cycles}')
# Iteratively interpolate global variables and the kwargs into the template until
# there are no more placeholders. The loop is used to account for nested template references.
formatted_html = template
while len(extract_placeholders(formatted_html)) > 0:
formatted_html = formatted_html.format(globalvars = GlobalVars(), **kwargs)
# Return the formatted HTML.
return formatted_html
run = lambda cmd: subprocess.run(cmd.split(' '), stdout = subprocess.PIPE, stderr = subprocess.PIPE)
def pull_git_repo(repo: str, build_cache: str) -> None:
'''Pulls/clones a repo into the build cache directory.'''
if os.path.exists(f'{build_cache}/.git'):
run(f'git -C {build_cache} pull origin')
else:
run(f'git clone {repo} {build_cache}')
def load_partials() -> dict:
"""Loads partial templates from the templates/partials directory."""
partials = {}
for filename in os.listdir('templates/partials'):
with open(f'templates/partials/{filename}') as partial_file:
partial_template = partial_file.read()
partials[f'partials.{os.path.splitext(filename)[0]}'] = format_html_template(
partial_template,
current_year = datetime.now().year
)
return partials
def import_resume():
# Use a sentinel value for the loop.
max_date = '0000-00-00'
# Loop through the folders in the resume repo to find the most recent one.
for resume_folder in os.listdir('build/resume'):
# Skip folders that are not in YYYY-MM-DD format.
try:
datetime.strptime(resume_folder,'%Y-%m-%d')
except Exception:
continue
# Keep track of the most recent date.
if resume_folder > max_date:
max_date = resume_folder
# Copy the resume into the /dist directory.
run(f'cp build/resume/{max_date}/shepich_resume.pdf dist/shepich_resume.pdf')
def format_blog_tags(tags: list[str], template = 'templates/components/blog_tag.html') -> list[str]:
'''Generates HTML blog tag components from a list of tag names.'''
return [
format_html_template(template, tag_name = t) for t in tags
]
def build_blog_archive(
index: dict[str, tuple[str, str]],
page_template = 'templates/pages/default.html',
li_template = 'templates/components/blog_archive_li.html',
**kwargs
) -> str:
'''Converts an index, formatted as filestem: (metadata, contents) dict,
into an HTML page containing the list of articles, sorted from newest to oldest.
Note: partials must be expanded into the kwargs, as they are needed to generate
the overall page.
'''
# Add each article as a list item to an unordered list.
archive_html_content = '<ul>'
for article, (metadata, contents) in sorted(index.items(), key = lambda item: item[1][0].date)[::-1]:
# Generate HTML for the article (including metadata tags).
archive_html_content += format_html_template(
li_template,
article_filestem = article,
blog_tags = ' '.join(format_blog_tags(metadata.tags)),
metadata = metadata
)
archive_html_content +='</ul>'
# Interpolate the article into the overall page template.
archive_html_page = format_html_template(
page_template,
content = archive_html_content,
**kwargs
)
return archive_html_page
def copy_assets(site: SiteConfig):
'''Copies the list of site assets from the build cache to the web root.'''
# Expand any globbed expressions.
expanded_asset_list = []
for a in site.assets:
expanded_asset_list.extend(
# Assets are defined relative to the build cache; construct the full path.
glob.glob(f'{site.build_cache}/{a.lstrip("/")}')
)
for asset in expanded_asset_list:
# Construct the destination path analogous to the source path
# but in the web root instead of the build cache.
destination = f'{site.web_root}/{a.lstrip("/")}'
# Delete existing files.
shutil.rmtree(destination, ignore_errors=True)
# Copy the asset.
if os.path.isdir(asset):
shutil.copytree(asset, destination)
elif os.path.isfile(asset):
shutil.copyfile(asset, destination)
else:
continue
return None
def build_index(site: SiteConfig) -> dict:
'''Loads the sites articles into an index mapping the filename stem
to a (metadata: dict, content: str) tuple.'''
index = {}
# Expand any globbed expressions.
expanded_article_list = []
for a in site.articles or {}:
expanded_article_list.extend(
# Article paths are defined relative to the build cache; construct the full path.
glob.glob(f'{site.build_cache}/{a.lstrip("/")}')
)
for article in expanded_article_list:
metadata, content = load_markdown(article)
# Skip unpublished articles.
if not metadata.published:
continue
article_filestem = os.path.splitext(os.path.basename(article))[0]
index[article_filestem] = (metadata, content)
return index
def map_templates(dir: str, parent = '') -> DotMap:
'''Recursively maps the templates directory into a nested dict structure.
Leaves map the filestems of .html template files to their contents.
'''
output = {}
# List the files and subdirectories at the top level.
for sub in os.listdir(os.path.join(parent,dir)):
# Construct the full path to the file or subdir from the root of the tree.
full_path = os.path.join(parent,dir,sub)
# Recursively map subdirectories.
if os.path.isdir(full_path):
output[sub] = map_templates(sub, parent = dir)
continue
# Templates must be .html files.
filestem, ext = os.path.splitext(sub)
if ext != '.html':
continue
# Load template file.
with open(full_path, 'r') as file:
html = file.read()
output[filestem] = html
return DotMap(output)
if __name__ == '__main__':
pass

58
jimsite/__init__.py Normal file
View File

@ -0,0 +1,58 @@
import os
import re
import glob
import shutil
import subprocess
import markdown
import yaml
import pydantic
from typing import Optional
from datetime import datetime, date
from dotmap import DotMap
from .common import filepath_or_string, GlobalVars, SiteConfig
from .templating import format_html_template, map_templates
from .assets import pull_git_repo, copy_assets
from .articles import ArticleMetadata, load_markdown, build_articles, build_index
def build_site(site: SiteConfig, templates: DotMap):
    '''Builds one site end-to-end: stages its sources, copies its assets,
    and renders its Markdown articles into HTML pages.'''
    # Make sure both working directories exist before anything touches them.
    for directory in (site.build_cache, site.web_root):
        os.makedirs(directory, exist_ok = True)
    # Sites backed by a git repo get their sources pulled into the build cache.
    if site.git_repo:
        pull_git_repo(site.git_repo, site.build_cache)
    # Static assets go straight from the build cache to the web root.
    copy_assets(site)
    # Index the site's articles, then render each one to an HTML page.
    build_articles(site, build_index(site), templates)
def main(config_path: str = '/home/jim/projects/shepich.com/config.yaml'):
    '''Entry point: loads the YAML config and builds every configured site.

    `config_path` keeps the previous hard-coded absolute path as its default
    for backward compatibility, but can now be overridden (e.g. with
    './config.yaml' for local runs) instead of editing the source.
    '''
    with open(config_path, 'r') as config_file:
        config = yaml.safe_load(config_file.read())
    # Load every HTML template once; they are shared across all sites.
    templates = map_templates(config['templates_folder'])
    for site in config['sites'].values():
        build_site(SiteConfig(**site), templates)
if __name__ == '__main__':
    main()

103
jimsite/articles.py Normal file
View File

@ -0,0 +1,103 @@
import os
import glob
import yaml
import markdown
import pydantic
from typing import Optional
from dotmap import DotMap
from datetime import date
from .common import filepath_or_string, SiteConfig
from .templating import format_html_template
class ArticleMetadata(pydantic.BaseModel):
    '''YAML front-matter metadata parsed from the top of a Markdown article.'''
    # Article title (required).
    title: str
    # Publication date (required).
    date: date
    # Unpublished articles are skipped when building the index.
    published: bool
    # Tag names; rendered into blog-tag HTML components.
    tags: list
    # Optional author; absent front matter leaves it None.
    author: Optional[str] = None
    # Date of last modification, if any.
    lastmod: Optional[date] = None
    # Path/URL of a thumbnail image, if any.
    thumbnail: Optional[str] = None
def load_markdown(md: str) -> tuple[ArticleMetadata|None, str]:
    '''Loads a Markdown file (or literal Markdown string) into a
    (metadata: ArticleMetadata | None, content: str) pair.

    Front matter is expected as a leading YAML block delimited by `---`;
    articles without front matter return (None, content).
    '''
    # Load the file contents if a filepath is specified, and strip document delimiters ('---').
    md = filepath_or_string(md).strip().strip('---').strip()
    # If there is no `---` delimiter, then the article has no metadata.
    if '---' not in md.strip('---'):
        return None, md
    # Split the metadata from the contents. maxsplit=1 keeps any further '---'
    # occurrences (e.g. Markdown horizontal rules) inside the article body;
    # the previous unbounded split raised ValueError on such articles.
    raw_metadata, raw_article = md.split('---', 1)
    # Use YAML to parse the metadata.
    metadata = yaml.safe_load(raw_metadata)
    # Convert the contents to a HTML string.
    content = markdown.markdown(raw_article)
    return ArticleMetadata(**metadata), content
def build_index(site: SiteConfig) -> dict:
    '''Loads the site's articles into an index mapping the filename stem
    to a (metadata: ArticleMetadata, content: str) tuple.

    Articles with no front matter and unpublished articles are skipped.
    '''
    index = {}
    # Expand any globbed expressions.
    expanded_article_list = []
    for a in site.articles or []:
        expanded_article_list.extend(
            # Article paths are defined relative to the build cache; construct the full path.
            glob.glob(f'{site.build_cache}/{a.lstrip("/")}')
        )
    for article in expanded_article_list:
        metadata, content = load_markdown(article)
        # Skip articles without front matter (load_markdown returns None for
        # them — previously this crashed with AttributeError) and skip
        # unpublished articles.
        if metadata is None or not metadata.published:
            continue
        article_filestem = os.path.splitext(os.path.basename(article))[0]
        index[article_filestem] = (metadata, content)
    return index
def format_article_tags(tags: list[str], template = 'templates/components/blog_tag.html') -> list[str]:
    '''Generates HTML article tag components from a list of tag names.'''
    rendered = []
    for tag_name in tags:
        rendered.append(format_html_template(template, tag_name = tag_name))
    return rendered
def build_articles(site: SiteConfig, index: dict[str, tuple[ArticleMetadata, str]], templates: DotMap):
    '''Generates HTML files for all of a given site's Markdown articles
    by interpolating the contents and metadata into the HTML templates.

    Writes one `<web_root>/<filestem>.html` file per indexed article.
    '''
    for filestem, (metadata, content) in index.items():
        # Render the article component (content + tag components + metadata).
        article = format_html_template(
            templates.components.blog_article,
            content = content,
            blog_tags = ' '.join(format_article_tags(metadata.tags)),
            metadata = metadata,
            templates = templates
        )
        # Wrap the rendered article in the default page template.
        page = format_html_template(
            templates.pages.default,
            content = article,
            templates = templates
        )
        # Use double quotes inside the f-string: reusing the outer single
        # quote is a SyntaxError before Python 3.12 (PEP 701).
        output_path = f'{site.web_root.rstrip("/")}/{filestem}.html'
        with open(output_path, 'w') as f:
            f.write(page)

43
jimsite/assets.py Normal file
View File

@ -0,0 +1,43 @@
import os
import glob
import shutil
from .common import run, SiteConfig
def pull_git_repo(repo: str, build_cache: str) -> None:
    '''Pulls/clones a repo into the build cache directory.'''
    # A .git directory means the repo was cloned previously; just update it.
    already_cloned = os.path.exists(f'{build_cache}/.git')
    if already_cloned:
        run(f'git -C {build_cache} pull origin')
        return
    run(f'git clone {repo} {build_cache}')
def copy_assets(site: SiteConfig):
'''Copies the list of site assets from the build cache to the web root.'''
# Expand any globbed expressions.
expanded_asset_list = []
for a in site.assets:
expanded_asset_list.extend(
# Assets are defined relative to the build cache; construct the full path.
glob.glob(f'{site.build_cache}/{a.lstrip("/")}')
)
for asset in expanded_asset_list:
# Construct the destination path analogous to the source path
# but in the web root instead of the build cache.
destination = f'{site.web_root}/{a.lstrip("/")}'
# Delete existing files.
shutil.rmtree(destination, ignore_errors=True)
# Copy the asset.
if os.path.isdir(asset):
shutil.copytree(asset, destination)
elif os.path.isfile(asset):
shutil.copyfile(asset, destination)
else:
continue
return None

68
jimsite/blog.py Normal file
View File

@ -0,0 +1,68 @@
import rfeed
import datetime
from .common import SiteConfig
from .articles import ArticleMetadata, format_article_tags
from .templating import format_html_template
def build_blog_archive(
    index: dict[str, tuple[str, str]],
    page_template = 'templates/pages/default.html',
    li_template = 'templates/components/blog_archive_li.html',
    **kwargs
) -> str:
    '''Converts an index, formatted as a filestem: (metadata, contents) dict,
    into an HTML page containing the list of articles, sorted from newest to oldest.

    Note: partials must be expanded into the kwargs, as they are needed to generate
    the overall page.
    '''
    # Sort ascending by article date, then reverse to get newest first.
    newest_first = sorted(index.items(), key = lambda item: item[1][0].date)[::-1]
    # Render each article as a list item.
    list_items = [
        format_html_template(
            li_template,
            article_filestem = filestem,
            blog_tags = ' '.join(format_article_tags(metadata.tags)),
            metadata = metadata
        )
        for filestem, (metadata, _contents) in newest_first
    ]
    archive_html_content = '<ul>' + ''.join(list_items) + '</ul>'
    # Interpolate the assembled list into the overall page template.
    return format_html_template(
        page_template,
        content = archive_html_content,
        **kwargs
    )
# TODO: Finish — SiteConfig declares no title/description fields and
# ArticleMetadata declares no description field; they are read with getattr
# fallbacks below until the models grow those fields.
def build_rss_feed(site: SiteConfig, index: dict[str, tuple[ArticleMetadata, str]]):
    '''Builds and returns an rfeed.Feed for a site's indexed articles.'''
    # Hoisted so the f-strings below don't nest single quotes (a SyntaxError
    # before Python 3.12).
    base = site.base_url.rstrip('/')
    feed = rfeed.Feed(
        title = getattr(site, 'title', None),
        link = f'{base}/rss.xml',
        description = getattr(site, 'description', None),
        language = "en-US",
        # `import datetime` binds the module, so the previous bare
        # `datetime.now()` raised AttributeError.
        lastBuildDate = datetime.datetime.now(),
        items = [
            rfeed.Item(
                title = metadata.title,
                link = f'{base}/{filestem}.md',
                description = getattr(metadata, 'description', None),
                author = metadata.author,
                guid = rfeed.Guid(filestem),
                pubDate = datetime.datetime(metadata.date.year, metadata.date.month, metadata.date.day)
            )
            for filestem, (metadata, _) in index.items()
        ]
    )
    # Previously the feed was built and silently discarded; return it so a
    # caller can serialize it with feed.rss().
    return feed

33
jimsite/common.py Normal file
View File

@ -0,0 +1,33 @@
import os
import subprocess
import pydantic
from typing import Optional
from datetime import date, datetime
def run(cmd: str) -> subprocess.CompletedProcess:
    '''Runs a command string as a subprocess, capturing stdout and stderr.

    A named def replaces the previous `run = lambda ...` (PEP 8 discourages
    assigning lambdas to names). Tokenization uses shlex.split so quoted
    arguments containing spaces survive; the old naive `cmd.split(' ')`
    broke them.
    '''
    import shlex  # local import keeps the module's import block unchanged
    return subprocess.run(
        shlex.split(cmd),
        stdout = subprocess.PIPE,
        stderr = subprocess.PIPE
    )
def filepath_or_string(s: str) -> str:
    '''Returns the contents of the file at `s` if `s` names an existing file;
    otherwise returns `s` unchanged.'''
    # Non-path strings pass straight through.
    if not os.path.isfile(s):
        return s
    with open(s, 'r') as file:
        return file.read()
class GlobalVars(pydantic.BaseModel):
    '''Static-valued global variables to be interpolated into any HTML templates.'''
    # default_factory defers evaluation to instantiation time. The previous
    # `today: date = datetime.today()` default was computed once at import
    # (stale for long-running processes) and was a datetime — carrying a time
    # component — rather than the annotated plain date.
    today: date = pydantic.Field(default_factory = date.today)
class SiteConfig(pydantic.BaseModel):
    '''Per-site build configuration (one entry under `sites:` in config.yaml).'''
    # Public URL prefix of the deployed site.
    base_url: str
    # Directory the generated HTML and assets are written to.
    web_root: str
    # Working directory where site sources are staged before building.
    build_cache: str
    # Optional git remote; when set, it is pulled/cloned into build_cache.
    git_repo: Optional[str] = None
    # Asset paths relative to build_cache (glob patterns allowed), copied to web_root.
    assets: Optional[list] = None
    # Markdown article paths relative to build_cache (glob patterns allowed).
    articles: Optional[list] = None

123
jimsite/templating.py Normal file
View File

@ -0,0 +1,123 @@
import os
import re
from dotmap import DotMap
from .common import filepath_or_string, GlobalVars
def extract_placeholders(s: str) -> set:
    '''Extracts placeholder variables in the format `{variable}` from
    an unformatted template string, returning the set of distinct names.'''
    # Placeholder names may contain alphanumerics, underscores, and dots.
    pattern = re.compile(r'\{([\w\.]+)\}')
    # findall yields one captured name per match; the set deduplicates.
    return set(pattern.findall(s))
def find_cyclical_placeholders(s: str, _parents: tuple | None = None, _cycles: set | None = None, **kwargs) -> set[tuple]:
    '''Recursively interpolates supplied kwargs into a template string to validate
    that there are no cyclical dependencies that would cause infinite recursion.
    Returns a set of paths (expressed as tuples of nodes) of cyclical placeholders.

    `_parents` and `_cycles` are internal recursion accumulators; callers
    should not supply them.
    # Example
    ```python
    kwargs = {'a': '1', 'b': '2', 'c': '{d}+{e}', 'd': '3', 'e': '{c}'}
    s = '{a} + {b} = {c}'
    find_cyclical_placeholders(s, **kwargs)
    >>> {('c', 'e', 'c')}
    ```
    '''
    # Track the lineage of each placeholder so we can see if it is its own ancestor.
    if _parents is None:
        _parents = tuple()
    # Keep track of any cycles encountered.
    if _cycles is None:
        _cycles = set()
    # Extract the placeholders from the input.
    placeholders = extract_placeholders(s)
    # Recursion will naturally end once there are no more nested placeholders.
    for p in placeholders:
        # Any placeholder that has itself in its ancestry forms a cycle.
        if p in _parents:
            _cycles.add(_parents + (p,))
        # For placeholders that are not their own ancestor, recursively
        # interpolate the kwargs into the nested placeholders until we reach
        # strings without placeholders.
        else:
            find_cyclical_placeholders(
                # '{p}'.format(**kwargs) resolves placeholder p to its value,
                # whose own placeholders are then examined one level deeper.
                ('{'+p+'}').format(**kwargs),
                _parents = _parents+(p,),
                _cycles = _cycles,
                **kwargs
            )
    return _cycles
def format_html_template(template: str, **kwargs) -> str:
    '''Interpolates variables specified as keyword arguments
    into the given HTML template.

    `template` may be a filepath (whose contents are loaded) or a literal
    template string. Raises ValueError if the placeholders reference each
    other cyclically.
    '''
    # Load the template if a filepath is given.
    template = filepath_or_string(template)
    # Ensure the template does not have cyclical placeholder references.
    cycles = find_cyclical_placeholders(template, globalvars = GlobalVars(), **kwargs)
    if len(cycles) > 0:
        # The f-prefix was missing here, so the error message printed the
        # literal text '{cycles}' instead of the offending paths.
        raise ValueError(f'Template has cyclical dependencies: {cycles}')
    # Iteratively interpolate global variables and the kwargs into the template until
    # there are no more placeholders. The loop is used to account for nested template references.
    formatted_html = template
    while len(extract_placeholders(formatted_html)) > 0:
        formatted_html = formatted_html.format(globalvars = GlobalVars(), **kwargs)
    # Return the formatted HTML.
    return formatted_html
def map_templates(dir: str, parent = '') -> DotMap:
    '''Recursively maps the templates directory into a nested dict structure.
    Leaves map the filestems of .html template files to their contents.
    '''
    output = {}
    # Root of this level, relative to wherever the walk started.
    current = os.path.join(parent, dir)
    # List the files and subdirectories at the top level.
    for sub in os.listdir(current):
        # Construct the full path to the file or subdir from the root of the tree.
        full_path = os.path.join(current, sub)
        # Recursively map subdirectories. The parent must be the *full* path
        # walked so far — previously only `dir` was passed, so directories
        # nested two or more levels deep were listed at the wrong path.
        if os.path.isdir(full_path):
            output[sub] = map_templates(sub, parent = current)
            continue
        # Templates must be .html files.
        filestem, ext = os.path.splitext(sub)
        if ext != '.html':
            continue
        # Load template file.
        with open(full_path, 'r') as file:
            html = file.read()
        output[filestem] = html
    return DotMap(output)

View File

@ -2,9 +2,9 @@
<meta charset="UTF-8"> <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<head> <head>
{partials.default_css} {templates.partials.default_css}
{partials.header} {templates.partials.header}
{partials.nav} {templates.partials.nav}
</head> </head>
<body> <body>
<main> <main>
@ -18,5 +18,5 @@
{content} {content}
</article> </article>
</main> </main>
{partials.footer} {templates.partials.footer}
</body> </body>

View File

@ -2,13 +2,13 @@
<meta charset="UTF-8"> <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<head> <head>
{partials.default_css} {templates.partials.default_css}
{partials.header} {templates.partials.header}
{partials.nav} {templates.partials.nav}
</head> </head>
<body> <body>
<main> <main>
{content} {content}
</main> </main>
{partials.footer} {templates.partials.footer}
</body> </body>

View File

@ -2,13 +2,13 @@
<meta charset="UTF-8"> <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<head> <head>
{partials.default_css} {templates.partials.default_css}
{partials.header} {templates.partials.header}
{partials.nav} {templates.partials.nav}
</head> </head>
<body> <body>
<main> <main>
{content} {content}
</main> </main>
{partials.footer} {templates.partials.footer}
</body> </body>

View File

@ -27,7 +27,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 3,
"id": "8f435a12", "id": "8f435a12",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -55,60 +55,15 @@
] ]
} }
], ],
"source": [ "source": []
"def build_rss_feed(site: SiteConfig, index: dict[str, tuple[ArticleMetadata, str]]):\n",
" feed = rfeed.Feed(\n",
" title = site.title,\n",
" link = f'{site.base_url.rstrip('/')}/rss.xml',\n",
" description = site.description,\n",
" language = \"en-US\",\n",
" lastBuildDate = datetime.now(),\n",
" items = [\n",
" rfeed.Item(\n",
" title = metadata.title,\n",
" link = f'{site.base_url.rstrip('/')}/{filestem}.md', \n",
" description = metadata.description,\n",
" author = metadata.author,\n",
" guid = rfeed.Guid(filestem),\n",
" pubDate = datetime(metadata.date.year, metadata.date.month, metadata.date.day)\n",
" )\n",
" for filestem, (metadata, _) in index.items()\n",
" ]\n",
" )\n",
"\n",
" # print(rss_feed.rss())"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 15, "execution_count": null,
"id": "70408b85", "id": "70408b85",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": []
"def build_articles(site: SiteConfig, index: dict[str, tuple[ArticleMetadata, str]]):\n",
" '''Generates HTML files for all of a given site's Markdown articles\n",
" by interpolating the contents and metadata into the HTML templates.'''\n",
"\n",
" for filestem, (metadata, content) in index.items():\n",
" article = format_html_template(\n",
" 'templates/components/blog_article.html',\n",
" content = content,\n",
" blog_tags = ' '.join(format_blog_tags(metadata.tags)),\n",
" metadata = metadata\n",
" )\n",
"\n",
" page = format_html_template(\n",
" 'templates/pages/default.html',\n",
" content = article,\n",
" partials = templates.partials\n",
" )\n",
"\n",
" with open(f'{site.web_root.rstrip('/')}/{filestem}.html', 'w') as f:\n",
" f.write(page)\n",
"\n",
" "
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
@ -139,7 +94,7 @@
" article_html = format_html_template(\n", " article_html = format_html_template(\n",
" 'templates/components/blog_article.html',\n", " 'templates/components/blog_article.html',\n",
" content = content,\n", " content = content,\n",
" blog_tags = ' '.join(format_blog_tags(metadata.tags)),\n", " blog_tags = ' '.join(format_article_tags(metadata.tags)),\n",
" metadata = metadata\n", " metadata = metadata\n",
" )\n", " )\n",
" html = format_html_template('templates/pages/default.html', content = article_html, **PARTIALS)\n", " html = format_html_template('templates/pages/default.html', content = article_html, **PARTIALS)\n",
@ -161,37 +116,16 @@
"id": "e3171afd", "id": "e3171afd",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": []
"def build_site(site: SiteConfig):\n",
"\n",
" # Initialize the build cache and web root, in case they do not exist.\n",
" os.makedirs(site.build_cache, exist_ok = True)\n",
" os.makedirs(site.web_root, exist_ok = True)\n",
"\n",
" # If the site is built from a git repo, pull that repo into the build cache.\n",
" if site.git_repo:\n",
" pull_git_repo(site.git_repo, site.build_cache)\n",
"\n",
" # Copy the sites assets into the web root.\n",
" copy_assets(site)\n",
"\n",
" # Load the site's articles into an index.\n",
" index = build_index(site)\n",
"\n",
" # Generate HTML pages for the articles.\n",
" build_articles(site, index)\n",
"\n",
" "
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 16, "execution_count": 10,
"id": "a28b95a6", "id": "a28b95a6",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"build_site(sites['dogma_jimfinium'])" "build_site(sites['resume'])"
] ]
} }
], ],