Blogofile initial commit

commit 974b2a58b04269338d17e8d83379a8144686bdea 0 parents
EnigmaCurry authored
3  .gitignore
@@ -0,0 +1,3 @@
+*~
+*.pyc
+*.ropeproject/*
5 ERRATA
@@ -0,0 +1,5 @@
+The current BeautifulSoup release doesn't work.
+BeautifulSoup==3.0.7a does.
+
+Wordpress converter outputs pages in addition to posts
+
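The BeautifulSoup note above can be handled by pinning the known-good release in setup.py's install_requires. A sketch only (the setup.py in this commit lists BeautifulSoup unpinned):

    # Sketch of pinning the BeautifulSoup release noted in ERRATA;
    # not part of this commit's setup.py.
    from setuptools import setup

    setup(name='Blogofile',
          version='0.1',
          packages=['blogofile'],
          install_requires=['mako',
                            'BeautifulSoup==3.0.7a',  # current releases break (see ERRATA)
                            'pytz',
                            'pyyaml',
                            'textile'])
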
43 LICENSE.txt
@@ -0,0 +1,43 @@
+################################################################################
+### Blogofile is written by Ryan McGuire (EnigmaCurry.com)
+################################################################################
+
+I believe in free software. For me, this also means that I do not believe in
+copyrights, trademarks, patents, or any other government-enforced monopoly
+privilege. Accordingly, this software claims none of these privileges.
+
+You probably downloaded this software from my website. By doing so, you did not
+first enter into any sort of agreement or contract with me. There were no rules
+established before you downloaded it -- I simply offered the file on my website
+and you simply downloaded it. I am not your master -- it is immoral for me to use
+any (aggressive) force against you, including the force of government.
+
+This software is now completely yours to do with as you please.
+
+However, I am a human being. I enjoy praise, attribution, and other rewards for
+my work. If you find this software useful, I appreciate comments to that effect.
+If you find this software _very_ useful, I appreciate gifts. If you make useful
+modifications to this software, I appreciate patches. If you incorporate this
+software into a product of your own, I like to know about it, and I like being
+mentioned in your product's documentation or website. But again, I am not your
+master -- it is wrong for me to force you to do anything. I can only ask.
+
+-- Ryan McGuire, aka "EnigmaCurry"
+
+################################################################################
+### To say the same thing in legalese:
+################################################################################
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do the same.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
63 blogofile/__init__.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+This is Blogofile -- http://www.Blogofile.com
+
+Definition: Blogophile --
+ A person who is fond of or obsessed with blogs or blogging.
+
+Definition: Blogofile --
+ A static file blog engine/compiler, inspired by Jekyll.
+
+Blogofile transforms a set of templates into an entire blog consisting of static
+HTML files. All categories, tags, and RSS/Atom feeds are automatically maintained
+by Blogofile. The blog can be hosted on any HTTP web server. Since the blog is
+just HTML, CSS, and JavaScript, no CGI environment or database is required. With
+the addition of a third-party comment and trackback provider (like Disqus or
+IntenseDebate) a modern and interactive blog can be hosted very inexpensively.
+
+Please take a moment to read LICENSE.txt. It's short.
+"""
+
+__author__ = "Ryan McGuire (ryan@enigmacurry.com)"
+__date__ = "Tue Feb 3 12:52:52 2009"
+__version__ = "0.1"
+
+import ConfigParser
+import os
+import sys
+
+import post
+from writer import Writer
+
+def parse_config(config_file_path):
+ config = ConfigParser.ConfigParser()
+ config.read(config_file_path)
+ return config
+
+def main():
+ from optparse import OptionParser
+ parser = OptionParser(version="Blogofile "+__version__+" -- http://www.blogofile.com")
+ parser.add_option("-c","--config-file",dest="config_file",
+ help="The config file to load (default './_config.cfg')",
+ metavar="FILE", default="./_config.cfg")
+ parser.add_option("-b","--build",dest="do_build",
+ help="Build the blog again from source",
+ default=False, action="store_true")
+ (options, args) = parser.parse_args()
+
+ #load config
+ config = ConfigParser.ConfigParser()
+ config.read(options.config_file)
+ config_dir = os.path.split(os.path.abspath(options.config_file))[0]
+ os.chdir(config_dir)
+
+ if not options.do_build:
+ parser.print_help()
+ sys.exit(1)
+
+ posts = post.parse_posts("_posts", timezone=config.get("blogofile","timezone"))
+ writer = Writer(output_dir=os.path.join(config_dir,"_site"), config=config)
+ writer.write_blog(posts)
+
+if __name__ == '__main__':
+ main()
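main() above expects a ./_config.cfg readable by ConfigParser, with a [blogofile] section providing at least the timezone option used here and the pretty_html option read later by writer.py. A minimal sketch of generating such a config (the option values are assumptions, not part of this commit):

    # Sketch of the _config.cfg that main() reads; the timezone value is an
    # assumption, and a real blog's templates may need further options.
    import ConfigParser

    config = ConfigParser.ConfigParser()
    config.add_section("blogofile")
    config.set("blogofile", "timezone", "US/Eastern")  # any pytz timezone name
    config.set("blogofile", "pretty_html", "True")     # used by writer.Writer

    with open("_config.cfg", "w") as f:
        config.write(f)
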
149 blogofile/post.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+post.py parses post sources from the ./_posts directory.
+"""
+
+__author__ = "Ryan McGuire (ryan@enigmacurry.com)"
+__date__ = "Mon Feb 2 21:21:04 2009"
+
+import os
+import datetime
+import re
+import operator
+import urlparse
+
+import pytz
+import yaml
+import textile
+
+date_format = "%Y/%m/%d %H:%M:%S"
+
+class Post:
+ """
+ Class to describe a blog post and associated metadata
+
+ A simple post:
+
+ >>> src = '''
+ ... ---
+ ... title: First Post
+ ... date: 2008/10/20 12:00:00
+ ... categories: Cool Stuff , Emacs, Python, other stuff
+ ... permalink: /2008/10/20/first-post
+ ... ---
+ ...
+ ... This is a test.
+ ... '''
+ >>> p = Post(src, "UTC")
+ >>> p.title
+ u'First Post'
+ >>> p.date
+ datetime.datetime(2008, 10, 20, 12, 0, tzinfo=<UTC>)
+ >>> p.categories == set([u'Cool Stuff',u'Emacs',u'Python',u'other stuff'])
+ True
+ >>> p.permalink
+ u'/2008/10/20/first-post'
+ """
+ def __init__(self, source, timezone):
+ self.source = source
+ self.yaml = yaml
+ self.title = u"Untitled - " + datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
+ self.date = datetime.datetime.now(pytz.timezone(timezone))
+ self.__timezone = timezone
+ self.updated = self.date
+ self.categories = set([u'Uncategorized'])
+ self.tags = set()
+ self.permalink = None
+ self.content = u""
+ self.draft = False
+ self.format = "textile"
+ self.author = ""
+ self.guid = None #Default guid is permalink
+ self.__parse()
+
+ def __repr__(self):
+ return "<Post title='%s' date='%s'>" % \
+ (self.title, self.date.strftime("%Y/%m/%d %H:%M:%S"))
+
+ def __parse(self):
+ """Parse the yaml and fill fields"""
+ yaml_sep = re.compile("^---$", re.MULTILINE)
+ content_parts = yaml_sep.split(self.source, maxsplit=2)
+ if len(content_parts) < 2:
+ #No yaml to extract
+ post_src = self.source
+ else:
+ #Extract the yaml at the top
+ self.__parse_yaml(content_parts[1])
+ post_src = content_parts[2]
+ #Convert post to HTML
+ if self.format == "textile":
+ self.content = textile.textile(post_src).decode("utf-8")
+ else:
+ #Assume it's raw html to begin with
+ self.content = post_src.decode("utf-8")
+
+ def __parse_yaml(self, yaml_src):
+ y = yaml.load(yaml_src)
+ try:
+ self.title = y['title']
+ except KeyError:
+ pass
+ try:
+ self.permalink = y['permalink']
+ except KeyError:
+ pass
+ try:
+ self.date = pytz.timezone(self.__timezone).localize(
+ datetime.datetime.strptime(y['date'],date_format))
+ except KeyError:
+ pass
+ try:
+ self.updated = pytz.timezone(self.__timezone).localize(
+ datetime.datetime.strptime(y['updated'],date_format))
+ except KeyError:
+ pass
+ try:
+ self.categories = set([x.strip() for x in y['categories'].split(",")])
+ except KeyError:
+ pass
+ try:
+ self.tags = set([x.strip() for x in y['tags'].split(",")])
+ except KeyError:
+ pass
+ try:
+ self.guid = y['guid']
+ except KeyError:
+ pass
+ try:
+ self.format = y['format']
+ except KeyError:
+ pass
+
+ def permapath(self):
+ """Get just the path portion of a permalink"""
+ return urlparse.urlparse(self.permalink)[2]
+
+def parse_posts(directory, timezone):
+ """Retrieve all the posts from the directory specified.
+
+ Returns a list of the posts sorted in reverse by date."""
+ posts = []
+ textile_files = [f for f in os.listdir(directory) if f.endswith(".textile")]
+ for texi in textile_files:
+ src = open(os.path.join(directory,texi)).read()
+ p = Post(src, timezone)
+ #Exclude some posts
+ if not (p.draft or p.permalink is None):
+ posts.append(p)
+ posts.sort(key=operator.attrgetter('date'), reverse=True)
+ return posts
+
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod(verbose=True)
+
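A usage sketch of parse_posts (the directory layout and timezone value are assumptions): given a ./_posts directory of .textile files whose YAML header looks like the one in the Post doctest, the posts come back sorted newest-first:

    # Hypothetical driver for post.parse_posts; assumes ./_posts exists and the
    # timezone matches the one configured in _config.cfg.
    import post

    posts = post.parse_posts("_posts", timezone="US/Eastern")
    for p in posts:
        print p.title, p.date.strftime("%Y/%m/%d"), p.permapath()
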
308 blogofile/writer.py
@@ -0,0 +1,308 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+writer.py writes out the static blog to ./_site based on templates found in the
+current working directory.
+"""
+
+__author__ = "Ryan McGuire (ryan@enigmacurry.com)"
+__date__ = "Tue Feb 3 12:50:17 2009"
+
+import os
+import shutil
+import urlparse
+import re
+import operator
+
+from mako.template import Template
+from mako.lookup import TemplateLookup
+import BeautifulSoup
+
+class Writer:
+ def __init__(self, output_dir, config):
+ #Base templates are templates (usually in ./_templates) that are only
+ #referenced by other templates.
+ self.base_template_dir = os.path.join(".","_templates")
+ self.output_dir = output_dir
+ self.template_lookup = TemplateLookup(
+ directories=[".", self.base_template_dir],
+ input_encoding='utf-8', output_encoding='utf-8',
+ encoding_errors='replace')
+ self.config=config
+
+ #Behavioural settings:
+ self.do_prettify = config.getboolean("blogofile","pretty_html")
+ #Kodos, you rule (http://kodos.sourceforge.net/):
+ self.files_exclude_regex = re.compile("(^_.*)|(^\..*)|(^#.*)|(^.*~$)",re.MULTILINE)
+
+ def write_blog(self, posts):
+ self.archive_links = self.__get_archive_links(posts)
+ self.all_categories = self.__get_all_categories(posts)
+ self.category_link_names = self.__compute_category_link_names(self.all_categories)
+ self.__setup_output_dir()
+ self.__write_files(posts)
+ self.__write_blog_chron(posts)
+ self.__write_blog_permanent(posts)
+ self.__write_monthly_archives(posts)
+ self.__write_blog_categories(posts)
+ self.__write_feed(posts, "/feed", "rss.mako")
+ self.__write_feed(posts, "/feed/atom", "atom.mako")
+
+ def __get_archive_links(self, posts):
+ """Return a list of monthly archive links and nice name:
+ """
+ d = {} #(link, name) -> number that month
+ for post in posts:
+ link = post.date.strftime("/%Y/%m/1")
+ name = post.date.strftime("%B %Y")
+ try:
+ d[(link, name)] += 1
+ except KeyError:
+ d[(link, name)] = 1
+ l = [key+(value,) for key, value in d.items()]
+ l = sorted(l, key=operator.itemgetter(0), reverse=True)
+ return l
+
+ def __get_all_categories(self, posts):
+ """Return a list of all the categories of all posts"""
+ d = {} #category -> number of posts
+ for post in posts:
+ for category in post.categories:
+ try:
+ d[category] += 1
+ except KeyError:
+ d[category] = 1
+ l = sorted(d.items(), key=operator.itemgetter(0))
+ return l
+
+ def __write_feed(self, posts, root, template):
+ root = root.lstrip("/")
+ feed_template = self.template_lookup.get_template(template)
+ feed_template.output_encoding = "utf-8"
+ xml = feed_template.render(posts=posts,config=self.config)
+ try:
+ os.makedirs(os.path.join(self.output_dir,root))
+ except OSError:
+ pass
+ f = open(os.path.join(self.output_dir,root,"index.xml"),"w")
+ f.write(xml)
+ f.close()
+
+ def __compute_category_link_names(self, categories):
+ """Transform category names into URL friendly names
+
+ example: "Cool Stuff" -> "cool-stuff" """
+ d = {} #name->nice_name
+ for category, n in categories:
+ nice_name = category.lower().replace(" ","-")
+ d[category] = nice_name
+ return d
+
+ def __setup_output_dir(self):
+ # Clear out the old staging directory. I *would* just shutil.rmtree the
+ # whole thing and recreate it, but I want the output_dir to retain its
+ # inode on the filesystem for compatibility with some HTTP servers. So
+ # this just deletes the *contents* of output_dir
+ try:
+ os.makedirs(self.output_dir)
+ except OSError:
+ pass
+ for f in os.listdir(self.output_dir):
+ f = os.path.join(self.output_dir,f)
+ try:
+ os.remove(f)
+ except OSError:
+ pass
+ try:
+ shutil.rmtree(f)
+ except OSError:
+ pass
+
+ def __write_files(self, posts):
+ """Write all files for the blog to _site
+
+ Convert all templates to straight HTML
+ Copy other non-template files directly"""
+ #find mako templates in template_dir
+ for root, dirs, files in os.walk("."):
+ if root.startswith("./"):
+ root = root[2:]
+ if root.startswith("_"):
+ #Ignore all _dirs (_site, _posts, _templates etc)
+ continue
+ try:
+ os.makedirs(os.path.join(self.output_dir, root))
+ except OSError:
+ pass
+ for t_fn in files:
+ if self.files_exclude_regex.match(t_fn):
+ #Ignore this file.
+ continue
+ elif t_fn.endswith(".mako"):
+ #Process this template file
+ t_name = t_fn[:-5]
+ t_file = open(os.path.join(root, t_fn))
+ template = Template(t_file.read(), output_encoding="utf-8",
+ lookup=self.template_lookup)
+ t_file.close()
+ path = os.path.join(self.output_dir,root,t_name)
+ html_file = open(path,"w")
+ html = template.render(posts=posts,
+ config=self.config,
+ archive_links=self.archive_links,
+ all_categories=self.all_categories,
+ category_link_names=self.category_link_names)
+ #Prettify the html
+ if self.do_prettify:
+ soup = BeautifulSoup.BeautifulSoup(html)
+ html = soup.prettify()
+ #Write to disk and close the file
+ html_file.write(html)
+ html_file.close()
+ else:
+ #Copy this non-template file
+ f_path = os.path.join(root, t_fn)
+ shutil.copyfile(f_path,os.path.join(self.output_dir,f_path))
+
+ def __write_blog_chron(self, posts, num_per_page=5, root="/page"):
+ """Write all the blog posts in reverse chronological order
+
+ Writes the first num_per_page posts to /root/1
+ Writes the second num_per_page posts to /root/2 etc
+ """
+ root = root.lstrip("/")
+ chron_template = self.template_lookup.get_template("chronological.mako")
+ chron_template.output_encoding = "utf-8"
+ page_num = 1
+ post_num = 0
+ html = []
+ while len(posts) > post_num:
+ #Write the pages, num_per_page posts per page:
+ page_posts = posts[post_num:post_num+num_per_page]
+ post_num += num_per_page
+ if page_num > 1:
+ prev_link = "../" + str(page_num - 1)
+ else:
+ prev_link = None
+ if len(posts) > post_num:
+ next_link = "../" + str(page_num + 1)
+ else:
+ next_link = None
+ page_dir = os.path.join(self.output_dir,root,str(page_num))
+ os.makedirs(page_dir)
+ fn = os.path.join(page_dir,"index.html")
+ f = open(fn,"w")
+ html = chron_template.render(posts=page_posts,
+ next_link=next_link,
+ prev_link=prev_link,
+ config=self.config,
+ archive_links=self.archive_links,
+ all_categories=self.all_categories,
+ category_link_names=self.category_link_names)
+ #Prettify html
+ if self.do_prettify:
+ soup = BeautifulSoup.BeautifulSoup(html)
+ html = soup.prettify()
+ f.write(html)
+ f.close()
+ page_num += 1
+
+
+ def __write_monthly_archives(self, posts):
+ m = {} # "/%Y/%m" -> [post, post, ... ]
+ for post in posts:
+ link = post.date.strftime("/%Y/%m")
+ try:
+ m[link].append(post)
+ except KeyError:
+ m[link] = [post]
+ for link, posts in m.items():
+ self.__write_blog_chron(posts,root=link)
+
+ def __write_blog_permanent(self, posts):
+ """Write blog posts to their permalink locations"""
+ perma_template = self.template_lookup.get_template("permapage.mako")
+ perma_template.output_encoding = "utf-8"
+ for post in posts:
+ if post.permalink:
+ path = os.path.join(self.output_dir,
+ urlparse.urlparse(post.permalink)[2].lstrip("/"))
+ else:
+ #Permalinks MUST be specified. No permalink, no page.
+ continue
+ try:
+ os.makedirs(path)
+ except OSError:
+ pass
+ html = perma_template.render(post=post,
+ config=self.config,
+ archive_links=self.archive_links,
+ all_categories=self.all_categories,
+ category_link_names=self.category_link_names)
+ #Prettify html
+ if self.do_prettify:
+ soup = BeautifulSoup.BeautifulSoup(html)
+ html = soup.prettify()
+ f = open(os.path.join(path,"index.html"), "w")
+ f.write(html)
+ f.close()
+
+ def __write_blog_categories(self, posts, root="/category", posts_per_page=5):
+ """Write all the blog posts in categories"""
+ #TODO: Paginate this.
+ root = root.lstrip("/")
+ chron_template = self.template_lookup.get_template("chronological.mako")
+ chron_template.output_encoding = "utf-8"
+ #Find all the categories:
+ categories = set()
+ for post in posts:
+ categories.update(post.categories)
+ for category in categories:
+ category_posts = [post for post in posts if category in post.categories]
+ category_link_name = self.category_link_names[category]
+ #Write category RSS feed
+ self.__write_feed(category_posts,os.path.join(
+ root,category_link_name,"feed"),"rss.mako")
+ self.__write_feed(category_posts,os.path.join(
+ root,category_link_name,"feed","atom"),"atom.mako")
+ page_num = 1
+ while True:
+ path = os.path.join(self.output_dir,root,category_link_name,str(page_num),"index.html")
+ try:
+ os.makedirs(os.path.split(path)[0])
+ except OSError:
+ pass
+ f = open(path, "w")
+ page_posts = category_posts[:posts_per_page]
+ category_posts = category_posts[posts_per_page:]
+ #Forward and back links
+ if page_num > 1:
+ prev_link = "/%s/%s/%s" % (root, category_link_name, str(page_num - 1))
+ else:
+ prev_link = None
+ if len(category_posts) > 0:
+ next_link = "/%s/%s/%s" % (root, category_link_name, str(page_num + 1))
+ else:
+ next_link = None
+ html = chron_template.render(posts=page_posts,
+ prev_link=prev_link,
+ next_link=next_link,
+ config=self.config,
+ archive_links=self.archive_links,
+ all_categories=self.all_categories,
+ category_link_names=self.category_link_names)
+ #Prettify html
+ if self.do_prettify:
+ soup = BeautifulSoup.BeautifulSoup(html)
+ html = soup.prettify()
+ f.write(html)
+ f.close()
+ #Copy category/1 to category/index.html
+ if page_num == 1:
+ shutil.copyfile(path,os.path.join(
+ self.output_dir,root,category_link_name,"index.html"))
+ #Prepare next iteration
+ page_num += 1
+ if len(category_posts) == 0:
+ break
+
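Writer is driven from blogofile/__init__.py; a minimal standalone sketch mirroring main() (the paths are assumptions):

    # Sketch mirroring blogofile.main(): load the config, parse the posts,
    # and write the whole site to ./_site.
    import os
    import ConfigParser

    import post
    from writer import Writer

    config = ConfigParser.ConfigParser()
    config.read("_config.cfg")
    posts = post.parse_posts("_posts", timezone=config.get("blogofile", "timezone"))
    writer = Writer(output_dir=os.path.join(os.getcwd(), "_site"), config=config)
    writer.write_blog(posts)
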
43 converters/wordpress2blogofile.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""Export a Wordpress blog to Blogofile textile format"""
+
+__author__ = "Ryan McGuire (ryan@enigmacurry.com)"
+__date__ = "Sun Feb 15 16:55:56 2009"
+
+import os
+import sys
+import yaml
+import wordpress_schema
+
+post_format = "html" # textile, ReST etc
+
+if __name__ == '__main__':
+ #Output textile files in ./_posts
+ if os.path.isdir("_posts"):
+ print "There's already a _posts directory here, I'm not going to overwrite it."
+ sys.exit(1)
+ else:
+ os.mkdir("_posts")
+
+ for post in wordpress_schema.get_published_posts():
+ yaml_data = {
+ "title": post.post_title,
+ "date": post.post_date.strftime("%Y/%m/%d %H:%M:%S"),
+ "permalink": post.permalink(),
+ "categories": ", ".join(post.categories()),
+ "tags": ", ".join(post.tags()),
+ "format": post_format,
+ "guid": post.guid
+ }
+ fn = "%s. %s.textile" % (str(post.id).zfill(4), post.post_name.strip())
+ print "writing %s" % fn
+ f = open(os.path.join("_posts",fn),"w")
+ f.write("---\n")
+ f.write(yaml.dump(yaml_data, default_flow_style=False))
+ f.write("---\n")
+ f.write(post.post_content.replace("\r\n","\n"))
+ f.close()
115 converters/wordpress_schema.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""Wordpress schema (2.7)"""
+
+__author__ = "Ryan McGuire (ryan@enigmacurry.com)"
+__date__ = "Sun Feb 15 14:41:26 2009"
+
+import sqlalchemy as sa
+import sqlalchemy.orm as orm
+from sqlalchemy.ext.declarative import declarative_base
+
+#########################
+## Config
+#########################
+table_prefix = "ec"
+db_conn = "mysql://ryan:ryan@localhost/wordpressenigmacurry"
+# End Config
+#########################
+
+
+engine = sa.create_engine(db_conn)
+Session = orm.scoped_session(
+ orm.sessionmaker(autocommit=False,
+ autoflush=False,
+ bind=engine))
+Base = declarative_base(bind=engine)
+
+session = Session()
+
+class Post(Base):
+ __tablename__ = table_prefix + "wp_posts"
+ __table_args__ = {'autoload': True}
+ id = sa.Column("ID", sa.Integer, primary_key=True)
+ author_id = sa.Column("post_author", sa.ForeignKey(table_prefix+'wp_users.ID'))
+ author = orm.relation("User", primaryjoin="Post.author_id == User.id")
+ term_relationship = orm.relation("TermRelationship",
+ primaryjoin="Post.id == TermRelationship.id")
+ def categories(self):
+ return [r.taxonomy.term.name for r in self.term_relationship if r.taxonomy.taxonomy == "category"]
+ def tags(self):
+ return [r.taxonomy.term.name for r in self.term_relationship if r.taxonomy.taxonomy == "post_tag"]
+
+ def __repr__(self):
+ return "<Post '%s' id=%s status='%s'>" % (self.post_title,self.id,self.post_status)
+
+ def permalink(self):
+ site_url = get_blog_url()
+ structure = get_blog_permalink_structure()
+ structure = structure.replace("%year%", str(self.post_date.year))
+ structure = structure.replace("%monthnum%", str(self.post_date.month).zfill(2))
+ structure = structure.replace("%day%", str(self.post_date.day).zfill(2))
+ structure = structure.replace("%hour%", str(self.post_date.hour).zfill(2))
+ structure = structure.replace("%minute%", str(self.post_date.minute).zfill(2))
+ structure = structure.replace("%second%", str(self.post_date.second).zfill(2))
+ structure = structure.replace("%postname%", self.post_name)
+ structure = structure.replace("%post_id%", str(self.id))
+ try:
+ structure = structure.replace("%category%", self.categories()[0])
+ except IndexError:
+ pass
+ try:
+ structure = structure.replace("%tag%", self.tags()[0])
+ except IndexError:
+ pass
+ structure = structure.replace("%author%", self.author.user_nicename)
+ return site_url.rstrip("/") + "/" + structure.lstrip("/")
+
+class User(Base):
+ __tablename__ = table_prefix + "wp_users"
+ __table_args__ = {'autoload': True}
+ id = sa.Column("ID", sa.Integer, primary_key=True)
+ def __repr__(self):
+ return "<User '%s'>" % self.user_nicename
+
+class Term(Base):
+ __tablename__ = table_prefix + "wp_terms"
+ __table_args__ = {'autoload': True}
+ id = sa.Column("term_id", sa.Integer, primary_key=True)
+
+ def __repr__(self):
+ return "<Term '%s'>" % self.name
+
+class TermTaxonomy(Base):
+ __tablename__ = table_prefix + "wp_term_taxonomy"
+ __table_args__ = {'autoload': True}
+ id = sa.Column('term_taxonomy_id', primary_key=True)
+ term_id = sa.Column("term_id", sa.ForeignKey(table_prefix+"wp_terms.term_id"))
+ term = orm.relation("Term", primaryjoin="Term.id == TermTaxonomy.term_id")
+
+class TermRelationship(Base):
+ __tablename__ = table_prefix + "wp_term_relationships"
+ __table_args__ = {'autoload': True}
+ id = sa.Column('object_id', sa.ForeignKey(table_prefix+"wp_posts.ID"),
+ primary_key=True)
+ taxonomy_id = sa.Column("term_taxonomy_id", sa.ForeignKey(
+ table_prefix+"wp_term_taxonomy.term_id"), primary_key=True)
+ taxonomy = orm.relation("TermTaxonomy", primaryjoin=
+ "TermTaxonomy.id == TermRelationship.taxonomy_id")
+
+class WordpressOptions(Base):
+ __tablename__ = table_prefix + "wp_options"
+ __table_args__ = {'autoload': True}
+
+def get_published_posts(blog_id=0):
+ return session.query(Post).filter(Post.post_status == "publish").all()
+
+def get_blog_url(blog_id=0):
+ return session.query(WordpressOptions).filter(WordpressOptions.blog_id==blog_id).\
+ filter(WordpressOptions.option_name=="siteurl").first().option_value
+
+def get_blog_permalink_structure(blog_id=0):
+ return session.query(WordpressOptions).filter(WordpressOptions.blog_id==blog_id).\
+ filter(WordpressOptions.option_name=="permalink_structure").first().option_value
+
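wordpress2blogofile.py above is the main consumer of this schema; a smaller interactive sketch (assuming db_conn and table_prefix have been edited to point at a real WordPress 2.7 database):

    # Hypothetical check of the schema module against a live database.
    import wordpress_schema

    for p in wordpress_schema.get_published_posts():
        print p.id, p.post_title
        print "  permalink:", p.permalink()
        print "  categories:", ", ".join(p.categories())
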
21 setup.py
@@ -0,0 +1,21 @@
+from setuptools import setup
+
+setup(name='Blogofile',
+ version='0.1',
+ description='A blog engine/compiler, inspired by Jekyll.',
+ author='Ryan McGuire',
+ author_email='ryan@enigmacurry.com',
+ url='http://www.blogofile.com',
+ license='Public Domain',
+ packages=['blogofile'],
+ include_package_data = True,
+ install_requires =['mako',
+ 'BeautifulSoup',
+ 'pytz',
+ 'pyyaml',
+ 'textile'],
+ entry_points="""
+ [console_scripts]
+ blogofile = blogofile:main
+ """,
+ )