diff --git a/README.md b/README.md
index 47a9a85..11c3588 100644
--- a/README.md
+++ b/README.md
@@ -18,3 +18,14 @@ It will allow you to do stuff like:
 - find the largest pdfs in the collection: `csvcut -c title_sort,formats,size books.csv | csvgrep -c formats -m pdf | csvsort -c size -r | head`
 - `csvjson books.csv | jq | whatever`
 - You can also perform actual SQL queries on it, and convert the data between csv and sqlite database:
+
+## RSS feed
+
+An RSS feed has been kindly provided by [the Rsszard of Syndication](https://tilde.town/~lucidiot)
+and is available at https://AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA.
+
+Generating the feed requires Python 3.7 or later, as well as
+the [xmltodict](https://pypi.org/project/xmltodict) package:
+`pip3 install xmltodict`.
+
+To generate the feed, run `./genfeed.py < books.csv > lefeed.xml`.
diff --git a/genfeed.py b/genfeed.py
new file mode 100755
index 0000000..2842186
--- /dev/null
+++ b/genfeed.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+from datetime import datetime, timezone
+from typing import Mapping, MutableMapping
+import csv
+import sys
+import xmltodict
+
+RSS_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S %z'
+ISO_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S%z'
+
+
+def parse_book(book: MutableMapping[str, str]) -> Mapping:
+    item = {
+        "title": book["title_sort"],
+        "pubDate": datetime.strptime(book.pop("timestamp"), ISO_DATE_FORMAT)
+            .strftime(RSS_DATE_FORMAT),
+        "guid": {
+            "@isPermaLink": "false",
+            "#text": book.pop("uuid"),
+        },
+        "description": book.pop("comments"),
+        # The CSV starts with a UTF-8 byte order mark (\ufeff), which ends up
+        # glued to the first column name and breaks the author column
+        "author": book.get("author_sort") or book["\ufeffauthor_sort"],
+    }
+
+    # Prepend the remaining metadata to the item description, as an HTML <dl>
+    item["description"] = "<dl>{}</dl>{}".format(
+        "".join(
+            "<dt>{}</dt><dd>{}</dd>".format(
+                key.replace('_sort', '').replace('_', ' ').replace('\ufeff', '').capitalize(),
+                value,
+            )
+            for key, value in book.items()
+            # Ignore empty columns
+            if value
+        ),
+        item['description']
+    )
+
+    if book.get("tags"):
+        item["category"] = [
+            {
+                "@domain": "https://git.tilde.town/dozens/books",
+                "#text": tag
+            }
+            for tag in book["tags"].split(", ")
+        ]
+
+    return item
+
+
+def main():
+    sys.stdout.write(xmltodict.unparse({
+        "rss": {
+            "@version": "2.0",
+            "@xmlns:atom": "http://www.w3.org/2005/Atom",
+            "@xmlns:sy": "http://purl.org/rss/1.0/modules/syndication/",
+            "channel": {
+                "title": "dozens books",
+                "description": "the cool calibre library of dozens",
+                "link": "https://git.tilde.town/dozens/books",
+                "atom:link": {
+                    "@rel": "self",
+                    "@type": "application/rss+xml",
+                    # TODO: set the correct public URL of your feed!
+                    "@href": "http://fuck",
+                },
+                "language": "en-US",
+                "pubDate": datetime.now(timezone.utc)
+                    .strftime(RSS_DATE_FORMAT),
+                "docs": "https://www.rssboard.org/rss-specification",
+                "webMaster": "dozens@tilde.town (~dozens)",
+                "generator": "Python " + ".".join(map(str, sys.version_info[:3])),
+                # Update on the first of every month, at midnight UTC
+                "sy:updatePeriod": "monthly",
+                "sy:updateFrequency": "1",
+                "sy:updateBase": "1971-01-01T00:00+00:00",
+                # One month, roughly, for clients that do not support mod_syndication
+                "ttl": 60 * 24 * 30,
+                "item": list(map(parse_book, csv.DictReader(sys.stdin))),
+            }
+        }
+    }, pretty=True, short_empty_elements=True))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..265b62a
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+xmltodict>=0.12