# sphinx_gemini_builder/sphinx_gemini_builder/ablog_compatibility.py
# (213 lines, 6.7 KiB, Python)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ablog compatibility
"""
import os
import logging
from feedgen.feed import FeedGenerator
from sphinx.locale import _
from sphinx.util.osutil import relative_uri
from docutils.utils import new_document
from docutils.io import StringOutput
from docutils import nodes
import ablog
from ablog.blog import Blog, os_path_join, revise_pending_xrefs
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
# Python 2/3 compatibility alias kept from the original ablog code;
# on Python 3 text is always ``str``.
text_type = str
def to_gemini(builder, post, pagename, fulltext=False):
    """Render *post* to gemtext using the builder's writer.

    When *fulltext* is true the whole post doctree is rendered; otherwise
    only the excerpt nodes are used.  Pending cross-references are revised
    and resolved against *pagename* before writing.
    """
    doctree = new_document("")
    if fulltext:
        copied = post.doctree.deepcopy()
        # A full document contributes its children; a bare node is appended.
        if isinstance(copied, nodes.document):
            doctree.extend(copied.children)
        else:
            doctree.append(copied)
    else:
        doctree.extend(node.deepcopy() for node in post.excerpt)
    revise_pending_xrefs(doctree, pagename)
    builder.env.resolve_references(doctree, pagename, builder)
    destination = StringOutput(encoding="utf-8")
    # State the gemini writer expects on the builder before rendering.
    builder.secnumbers = {}
    builder.imgpath = relative_uri(builder.get_target_uri(pagename), "_images")
    builder.dlpath = relative_uri(builder.get_target_uri(pagename), "_downloads")
    builder.current_docname = pagename
    builder.writer.write(doctree, destination)
    builder.writer.assemble_parts()
    return builder.writer.parts["whole"]
def generate_archive_pages(builder):
    """Yield ``(docname, context, template)`` triples for archive pages.

    Covers post redirects, catalog and collection pages for authors,
    locations, languages, categories, archives and tags, plus the
    all-posts and drafts listings (mirrors ablog's archive generation).
    """
    if not ablog.builder_support(builder.app):
        return
    blog = Blog(builder.app)
    # Redirect stubs for posts that declare old locations.
    for post in blog.posts:
        for redirect in post.redirect:
            yield (redirect, {"redirect": post.docname, "post": post}, "redirect.gmi")
    found_docs = builder.env.found_docs
    atom_feed = bool(blog.blog_baseurl)
    feed_archives = blog.blog_feed_archives
    blog_path = blog.blog_path
    catalogs = [
        (_("Authors"), _("Posts by"), blog.author),
        (_("Locations"), _("Posts from"), blog.location),
        (_("Languages"), _("Posts in"), blog.language),
        (_("Categories"), _("Posts in"), blog.category),
        (_("All posts"), _("Posted in"), blog.archive),
        (_("Tags"), _("Posts tagged"), blog.tags),
    ]
    for title, header, catalog in catalogs:
        if not catalog:
            continue
        context = {
            "parents": [],
            "title": title,
            "header": header,
            "catalog": catalog,
            "summary": True,
        }
        # Only auto-generate pages the user has not authored themselves.
        if catalog.docname not in found_docs:
            yield (catalog.docname, context, "catalog.gmi")
        for collection in catalog:
            if not collection:
                continue
            context = {
                "parents": [],
                "title": f"{header} {collection}",
                "header": header,
                "collection": collection,
                "summary": True,
                "feed_path": collection.path if feed_archives else blog_path,
                "archive_feed": atom_feed and feed_archives,
            }
            context["feed_title"] = context["title"]
            if collection.docname not in found_docs:
                yield (collection.docname, context, "collection.gmi")
    context = {
        "parents": [],
        "title": _("All Posts"),
        "header": _("All"),
        "collection": blog.posts,
        "summary": True,
        "atom_feed": atom_feed,
        "feed_path": blog.blog_path,
    }
    yield (blog.posts.docname, context, "collection.gmi")
    yield (
        blog.drafts.docname,
        {"parents": [], "title": _("Drafts"), "collection": blog.drafts, "summary": True},
        "collection.gmi",
    )
def generate_atom_feeds(builder):
    """Write Atom feeds for the blog and, optionally, each archive collection.

    The main feed goes to ``<outdir>/<blog_path>/atom.xml``; when
    ``blog_feed_archives`` is enabled, one feed per author/location/
    language/category/archive/tag collection is written too.  Does nothing
    unless ``blog_baseurl`` is configured.
    """
    blog = Blog(builder.app)
    url = blog.blog_baseurl
    if not url:
        return
    feed_path = os.path.join(builder.outdir, blog.blog_path, "atom.xml")
    # Each entry: (posts, pagename, output path, feed title, feed URL).
    feeds = [
        (
            blog.posts,
            blog.blog_path,
            feed_path,
            blog.blog_title,
            os_path_join(url, blog.blog_path, "atom.xml"),
        )
    ]
    if blog.blog_feed_archives:
        for header, catalog in [
            (_("Posts by"), blog.author),
            (_("Posts from"), blog.location),
            (_("Posts in"), blog.language),
            (_("Posts in"), blog.category),
            (_("Posted in"), blog.archive),
            (_("Posts tagged"), blog.tags),
        ]:
            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(builder.outdir, coll.path)
                # exist_ok avoids a crash when the folder already exists
                # (e.g. on rebuilds) and the TOCTOU race of isdir+makedirs.
                os.makedirs(folder, exist_ok=True)
                feeds.append(
                    (
                        coll,
                        coll.path,
                        os.path.join(folder, "atom.xml"),
                        blog.blog_title + " - " + header + " " + text_type(coll),
                        os_path_join(url, coll.path, "atom.xml"),
                    )
                )
    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext
    for feed_posts, pagename, feed_path, feed_title, feed_url in feeds:
        feed = FeedGenerator()
        feed.id(blog.blog_baseurl)
        feed.title(feed_title)
        feed.link(href=url)
        feed.subtitle(blog.blog_feed_subtitle)
        feed.link(href=feed_url)
        feed.language(builder.config.language)
        feed.generator("ABlog", ablog.__version__, "https://ablog.readthedocs.org")
        for i, post in enumerate(feed_posts):
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(url, builder.get_target_uri(post.docname))
            # With blog_feed_titles set, entries carry titles only.
            if blog.blog_feed_titles:
                content = None
            else:
                content = to_gemini(builder, post, pagename, fulltext=feed_fulltext)
            feed_entry = feed.add_entry()
            feed_entry.id(post_url)
            feed_entry.title(post.title)
            feed_entry.link(href=post_url)
            # BUG FIX: the original built a dict comprehension with the
            # constant key "name", so only the LAST author survived.
            # feedgen accepts a list of author dicts — pass one per author.
            feed_entry.author([{"name": author.name} for author in post.author])
            feed_entry.pubDate(post.date.astimezone())
            feed_entry.updated(post.update.astimezone())
            feed_entry.content(content=content, type="text/gemini")
        parent_dir = os.path.dirname(feed_path)
        os.makedirs(parent_dir, exist_ok=True)
        # atom_str() returns bytes; write them directly instead of
        # decoding and re-encoding through a text-mode handle.
        with open(feed_path, "wb") as out:
            out.write(feed.atom_str(pretty=True))