Add: ABlog support - atom.xml

parent e80603940d
commit dc975e0bd1

6 changed files with 246 additions and 6 deletions

CHANGES (1 change)

@@ -5,6 +5,7 @@ Changes
 1.1.0 (*2021-02-??*)
 ====================
 
+- Add ABlog support
 - Fix table generation with literal blocks
 - Fix image copy when using glob
 
README.rst (20 changes)

@@ -1,7 +1,18 @@
 sphinx_gemini_builder
 #####################
 
-Build gemini blog from Sphinx.
+Build a `Gemini <https://gemini.circumlunar.space/>`_ blog from
+`Sphinx <https://www.sphinx-doc.org>`_ with
+`ABlog <https://ablog.readthedocs.io/>`_ compatibility.
+
+Gemini is a simple protocol between Gopher and the web. Sphinx is
+a documentation tool. This project builds a Gemini capsule from
+Sphinx documentation. It supports ABlog extensions and manages
+Atom feeds.
 
 Installation and use
 --------------------
 
 Install with `python setup.py install` and do `make gemini` in
 your project.
@@ -9,9 +20,8 @@ your project.
 
 You can add a `gemini_footer` in config, formatted under the
 Gemini specification. You need to set `gemini_baseurl` for
-good url in links.
+good URLs in Atom feeds.
 
-TODO
-----
-
-- ablog support with RSS generation
+This project contains some parts of code from Sphinx and from
+ABlog.
+
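The two options mentioned in the README are set in the project's conf.py. A minimal sketch, assuming only what the README and this diff state; the URL and footer text are hypothetical examples:

    # conf.py -- hypothetical example values
    extensions = ["ablog"]

    # Base URL prepended to document links and Atom feed URLs.
    gemini_baseurl = "gemini://example.org/"

    # Footer appended to every generated page, written as gemtext.
    gemini_footer = "=> gemini://example.org/ Home"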

setup.py (1 change)

@@ -44,6 +44,7 @@ setup(
         "sphinx_gemini_builder": [
             "locale/*/LC_MESSAGES/*.po",
             "locale/*/LC_MESSAGES/*.mo",
+            "templates/*.gmi",
         ]
     },
     entry_points={

@@ -44,6 +44,7 @@ class GeminiBuilder(TextBuilder):
 
     def __init__(self, app) -> None:
         super().__init__(app)
+        self.add_footer = True
         self.baseurl = self.config.gemini_baseurl
         self.images = []
 
@@ -101,9 +102,23 @@ class GeminiBuilder(TextBuilder):
     def get_target_uri(self, docname: str, typ: str = None) -> str:
         return self.baseurl + quote(docname) + self.out_suffix
 
+    def proxy_generate_atom_feeds(self):
+        if 'ablog' in self.config.extensions:
+            # Imported lazily so ABlog stays an optional dependency.
+            from .ablog_compatibility import generate_atom_feeds
+            # Feed entries are rendered with the regular writer; drop the
+            # page footer so it does not end up in every feed body.
+            self.add_footer = False
+            generate_atom_feeds(self)
+            self.add_footer = True
+
+    def proxy_generate_archive_pages(self):
+        if 'ablog' in self.config.extensions:
+            from .ablog_compatibility import generate_archive_pages
+            generate_archive_pages(self)
+
     def finish(self) -> None:
         self.finish_tasks.add_task(self.copy_image_files)
         self.finish_tasks.add_task(self.copy_download_files)
+        self.finish_tasks.add_task(self.proxy_generate_archive_pages)
+        self.finish_tasks.add_task(self.proxy_generate_atom_feeds)
         super().finish()
 
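For reference, get_target_uri above joins gemini_baseurl, the percent-quoted docname, and the builder's output suffix. A small sketch of the resulting URLs, assuming a .gmi suffix and hypothetical values:

    # Sketch of the URL composition done by get_target_uri; the base URL,
    # docname, and .gmi suffix are assumed example values.
    from urllib.parse import quote

    baseurl, out_suffix = "gemini://example.org/", ".gmi"
    docname = "posts/hello world"
    print(baseurl + quote(docname) + out_suffix)
    # gemini://example.org/posts/hello%20world.gmi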

sphinx_gemini_builder/ablog_compatibility.py (new file, 212 lines)

@@ -0,0 +1,212 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+ablog compatibility
+"""
+
+import os
+import logging
+
+from feedgen.feed import FeedGenerator
+from sphinx.locale import _
+from sphinx.util.osutil import relative_uri
+from docutils.utils import new_document
+from docutils.io import StringOutput
+from docutils import nodes
+
+import ablog
+
+from ablog.blog import Blog, os_path_join, revise_pending_xrefs
+
+logger = logging.getLogger(__name__)
+text_type = str
+
+
+def to_gemini(builder, post, pagename, fulltext=False):
+    """
+    Convert post to gemini format
+    """
+    doctree = new_document("")
+    if fulltext:
+        deepcopy = post.doctree.deepcopy()
+        if isinstance(deepcopy, nodes.document):
+            doctree.extend(deepcopy.children)
+        else:
+            doctree.append(deepcopy)
+    else:
+        for node in post.excerpt:
+            doctree.append(node.deepcopy())
+    revise_pending_xrefs(doctree, pagename)
+    builder.env.resolve_references(doctree, pagename, builder)
+
+    destination = StringOutput(encoding="utf-8")
+
+    # Render the resolved doctree with the builder's gemini writer.
+    builder.secnumbers = {}
+    builder.imgpath = relative_uri(builder.get_target_uri(pagename), "_images")
+    builder.dlpath = relative_uri(builder.get_target_uri(pagename), "_downloads")
+    builder.current_docname = pagename
+    builder.writer.write(doctree, destination)
+    builder.writer.assemble_parts()
+    gemini = builder.writer.parts["whole"]
+    return gemini
+
+
+def generate_archive_pages(builder):
+    """
+    Generate archive pages for all posts, categories, tags, authors, and
+    drafts (from ablog). Yields (pagename, context, templatename) tuples.
+    """
+    if not ablog.builder_support(builder.app):
+        return
+
+    blog = Blog(builder.app)
+    for post in blog.posts:
+        for redirect in post.redirect:
+            yield (redirect, {"redirect": post.docname, "post": post}, "redirect.gmi")
+
+    found_docs = builder.env.found_docs
+    atom_feed = bool(blog.blog_baseurl)
+    feed_archives = blog.blog_feed_archives
+    blog_path = blog.blog_path
+    for title, header, catalog in [
+        (_("Authors"), _("Posts by"), blog.author),
+        (_("Locations"), _("Posts from"), blog.location),
+        (_("Languages"), _("Posts in"), blog.language),
+        (_("Categories"), _("Posts in"), blog.category),
+        (_("All posts"), _("Posted in"), blog.archive),
+        (_("Tags"), _("Posts tagged"), blog.tags),
+    ]:
+
+        if not catalog:
+            continue
+
+        context = {"parents": [], "title": title, "header": header, "catalog": catalog, "summary": True}
+        if catalog.docname not in found_docs:
+            yield (catalog.docname, context, "catalog.gmi")
+
+        for collection in catalog:
+
+            if not collection:
+                continue
+            context = {
+                "parents": [],
+                "title": f"{header} {collection}",
+                "header": header,
+                "collection": collection,
+                "summary": True,
+                "feed_path": collection.path if feed_archives else blog_path,
+                "archive_feed": atom_feed and feed_archives,
+            }
+            context["feed_title"] = context["title"]
+            if collection.docname not in found_docs:
+                yield (collection.docname, context, "collection.gmi")
+
+    context = {
+        "parents": [],
+        "title": _("All Posts"),
+        "header": _("All"),
+        "collection": blog.posts,
+        "summary": True,
+        "atom_feed": atom_feed,
+        "feed_path": blog.blog_path,
+    }
+    docname = blog.posts.docname
+    yield (docname, context, "collection.gmi")
+
+    context = {"parents": [], "title": _("Drafts"), "collection": blog.drafts, "summary": True}
+    yield (blog.drafts.docname, context, "collection.gmi")
+
+
+def generate_atom_feeds(builder):
+    """
+    Generate Atom feeds for the blog and for each archive collection
+    (from ablog).
+    """
+    blog = Blog(builder.app)
+
+    url = blog.blog_baseurl
+    if not url:
+        return
+
+    feed_path = os.path.join(builder.outdir, blog.blog_path, "atom.xml")
+
+    feeds = [
+        (
+            blog.posts,
+            blog.blog_path,
+            feed_path,
+            blog.blog_title,
+            os_path_join(url, blog.blog_path, "atom.xml"),
+        )
+    ]
+
+    if blog.blog_feed_archives:
+        for header, catalog in [
+            (_("Posts by"), blog.author),
+            (_("Posts from"), blog.location),
+            (_("Posts in"), blog.language),
+            (_("Posts in"), blog.category),
+            (_("Posted in"), blog.archive),
+            (_("Posts tagged"), blog.tags),
+        ]:
+
+            for coll in catalog:
+                # skip collections containing only drafts
+                if not len(coll):
+                    continue
+                folder = os.path.join(builder.outdir, coll.path)
+                if not os.path.isdir(folder):
+                    os.makedirs(folder)
+
+                feeds.append(
+                    (
+                        coll,
+                        coll.path,
+                        os.path.join(folder, "atom.xml"),
+                        blog.blog_title + " - " + header + " " + text_type(coll),
+                        os_path_join(url, coll.path, "atom.xml"),
+                    )
+                )
+
+    # Config options
+    feed_length = blog.blog_feed_length
+    feed_fulltext = blog.blog_feed_fulltext
+
+    for feed_posts, pagename, feed_path, feed_title, feed_url in feeds:
+
+        feed = FeedGenerator()
+        feed.id(blog.blog_baseurl)
+        feed.title(feed_title)
+        feed.link(href=url)
+        feed.subtitle(blog.blog_feed_subtitle)
+        feed.link(href=feed_url)
+        feed.language(builder.config.language)
+        feed.generator("ABlog", ablog.__version__, "https://ablog.readthedocs.org")
+
+        for i, post in enumerate(feed_posts):
+            if feed_length and i == feed_length:
+                break
+            post_url = os_path_join(url, builder.get_target_uri(post.docname))
+
+            if blog.blog_feed_titles:
+                content = None
+            else:
+                content = to_gemini(builder, post, pagename, fulltext=feed_fulltext)
+
+            feed_entry = feed.add_entry()
+            feed_entry.id(post_url)
+            feed_entry.title(post.title)
+            feed_entry.link(href=post_url)
+            # One author dict per post author (a dict comprehension here
+            # would keep only the last author).
+            feed_entry.author([{"name": author.name} for author in post.author])
+            feed_entry.pubDate(post.date.astimezone())
+            feed_entry.updated(post.update.astimezone())
+            feed_entry.content(content=content, type="text/gemini")
+
+        parent_dir = os.path.dirname(feed_path)
+        if not os.path.isdir(parent_dir):
+            os.makedirs(parent_dir)
+
+        with open(feed_path, "w", encoding="utf-8") as out:
+            feed_str = feed.atom_str(pretty=True)
+            out.write(feed_str.decode())
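The feed assembly above uses the feedgen package. A standalone sketch of the same calls without the blog plumbing, with hypothetical values; the explicit updated() calls stand in for the post dates used in the diff:

    # Minimal feedgen usage mirroring generate_atom_feeds; values are examples.
    from datetime import datetime, timezone
    from feedgen.feed import FeedGenerator

    feed = FeedGenerator()
    feed.id("gemini://example.org/blog/")
    feed.title("Example capsule")
    feed.link(href="gemini://example.org/blog/atom.xml")
    feed.updated(datetime.now(timezone.utc))

    entry = feed.add_entry()
    entry.id("gemini://example.org/blog/first-post.gmi")
    entry.title("First post")
    entry.link(href="gemini://example.org/blog/first-post.gmi")
    entry.content(content="# First post", type="text/gemini")
    entry.updated(datetime.now(timezone.utc))

    print(feed.atom_str(pretty=True).decode())  # atom_str() returns bytes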

@@ -96,6 +96,7 @@ class GeminiTranslator(SphinxTranslator):
         pass
 
     def depart_document(self, node: Element) -> None:
-        self.add_text(self.config.gemini_footer)
+        if self.builder.add_footer:
+            self.add_text(self.config.gemini_footer)
 
     def visit_section(self, node: Element) -> None: