Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

initial

  • Loading branch information...
commit 45e2188065c1a998863f6804e83b496fb1b4d993 0 parents
@joearms authored
Showing with 15,986 additions and 0 deletions.
  1. +76 −0 src/Makefile
  2. +177 −0 src/best.notes
  3. +35 −0 src/book.book
  4. 0  src/chap2html.xsl
  5. +457 −0 src/chap2pdf.xsl
  6. +154 −0 src/design.chap1
  7. +33 −0 src/edit.html
  8. +44 −0 src/elib1_best.erl
  9. +43 −0 src/elib1_blob_store.erl
  10. +356 −0 src/elib1_chunks.erl
  11. +111 −0 src/elib1_content_edit.erl
  12. +110 −0 src/elib1_diff.erl
  13. +60 −0 src/elib1_doc.erl
  14. +452 −0 src/elib1_docmaker.erl
  15. +31 −0 src/elib1_ensure_copyrighted.erl
  16. +191 −0 src/elib1_expand.erl
  17. +129 −0 src/elib1_fast_read.erl
  18. +83 −0 src/elib1_fast_write.erl
  19. +138 −0 src/elib1_file_finder.erl
  20. +134 −0 src/elib1_find.erl
  21. +128 −0 src/elib1_find_reent.erl
  22. +251 −0 src/elib1_gamma.erl
  23. +30 −0 src/elib1_geom.erl
  24. +143 −0 src/elib1_guid_store.erl
  25. +409 −0 src/elib1_html_tokenise.erl
  26. +258 −0 src/elib1_http_driver.erl
  27. +643 −0 src/elib1_indexer.erl
  28. +117 −0 src/elib1_indexer_plugin_erl.erl
  29. +42 −0 src/elib1_indexer_results.erl
  30. +2,538 −0 src/elib1_misc.erl
  31. +370 −0 src/elib1_ml9.erl
  32. +103 −0 src/elib1_ml9_2_html.erl
  33. +38 −0 src/elib1_ml9_parse_header.yrl
  34. +585 −0 src/elib1_mysql.erl
  35. +549 −0 src/elib1_new_webkit.erl
  36. +78 −0 src/elib1_org2latex.erl
  37. +397 −0 src/elib1_parse_dtd.yrl
  38. +378 −0 src/elib1_porter.erl
  39. +429 −0 src/elib1_rfc4627.erl
  40. +185 −0 src/elib1_screen.erl
  41. +26 −0 src/elib1_search.erl
  42. +207 −0 src/elib1_seq_web_server1.erl
  43. +103 −0 src/elib1_sha1.erl
  44. +203 −0 src/elib1_similar.erl
  45. +67 −0 src/elib1_simple_kv_db.erl
  46. +137 −0 src/elib1_spy.erl
  47. +137 −0 src/elib1_spy.erl~
  48. +432 −0 src/elib1_store.erl
  49. +641 −0 src/elib1_tagger.erl
  50. +130 −0 src/elib1_telnet.erl
  51. +89 −0 src/elib1_txt2xml.erl
  52. +576 −0 src/elib1_webkit.erl
  53. +25 −0 src/elib1_webquery.erl
  54. +1,045 −0 src/elib1_xml.erl
  55. +460 −0 src/ezxml.chap1
  56. +119 −0 src/gen_component.erl
  57. +22 −0 src/log
  58. +572 −0 src/log.log
  59. +59 −0 src/lorem.chap
  60. +5 −0 src/mkdoc
  61. +12 −0 src/myapp.app.src
  62. +16 −0 src/myapp_app.erl
  63. +28 −0 src/myapp_sup.erl
  64. +2 −0  src/notes
  65. +12 −0 src/password.erl
  66. +246 −0 src/readme.html
  67. +33 −0 src/slides.ehtml
  68. +3 −0  src/todo
  69. +50 −0 src/tryxform.erl
  70. +44 −0 src/wiki.chap1
76 src/Makefile
@@ -0,0 +1,76 @@
+# Makefile for the elib1 src/ directory: compiles .erl modules into
+# ../ebin, runs yecc on .yrl grammars via ../tmp, and builds HTML/PDF
+# documentation from .chap/.log/.book sources.
+.SUFFIXES: .erl .beam .yrl
+
+MODS := $(wildcard *.erl)
+YRL := $(wildcard *.yrl)
+CHAPS := $(wildcard *.chap)
+BOOKS := $(wildcard *.book)
+LOGS := $(wildcard *.log)
+
+# NOTE(review): CWD is defined but never referenced below.
+CWD := $(shell pwd)
+
+../ebin/%.beam: %.erl
+ ## erlc +warn_missing_spec -o ../ebin -W $<
+ ## grep --silent --invert-match "_test"
+ erlc -o ../ebin -W $<
+
+../ebin/%.beam: ../tmp/%.erl
+ erlc -o ../ebin -W $<
+
+../doc/%.html: %.chap
+ @erl -noshell -pa ../ebin -s elib1_docmaker batch $< -s init stop
+
+../doc/%.html: %.log
+ @erl -noshell -pa ../ebin -s elib1_chunks batch $< -s init stop
+
+../doc/%.html: %.erl
+ ./mkdoc $<
+
+../tmp/%.erl: %.yrl
+ erlc -o ../tmp -W $<
+
+../../pdf/%.pdf: %.chap
+ erl -s elib1_doc batch $<
+ fop -xml ../tmp/$*.xml -xsl chap2pdf.xsl -pdf ../../pdf/$*.pdf
+
+# ../../html/%.html: %.chap
+# erl -s elib1_doc batch $<
+# fop -xml ../tmp/$*.xml -xsl chap2html.xsl -txt ../../html/$*.html
+
+all: yecc beam html #chapHTML
+
+test:
+ dialyzer -Wno_return --src -c "."
+
+utest: beam
+ erl -noshell -eval "eunit:test(elib1_misc, [verbose])" -s init stop
+
+edoc:
+ erl -noshell -eval "edoc:application(lib, \".\", [{dir,\"../doc\"}])" \
+ -s init stop
+
+html: ${MODS:%.erl=../doc/%.html}
+
+beam: ${MODS:%.erl=../ebin/%.beam}
+
+yecc: ${YRL:%.yrl=../tmp/%.erl} ${YRL:%.yrl=../ebin/%.beam}
+
+chapPDF: ${CHAPS:%.chap=../../pdf/%.pdf}
+
+# chapHTML: ${CHAPS:%.chap=../../html/%.html}
+
+books: ${BOOKS:%.book=../doc/%.html}
+
+logs: ${LOGS:%.log=../doc/%.html}
+
+# NOTE(review): plain 'rm' fails when no matching files exist; 'rm -f'
+# would make clean/veryclean idempotent.
+clean:
+ rm ../ebin/*.beam
+ rm -rf *.aux *.beam
+ rm -rf *.log *.tmp erl_crash.dump
+
+# NOTE(review): this removes ../bin, but beams are built into ../ebin —
+# possibly a typo; confirm which directory was intended.
+veryclean:
+ rm ../bin/* ../doc/* ../tmp/*
+
+
+
+
+
177 src/best.notes
@@ -0,0 +1,177 @@
+Best practice for writing documenting and testing code
+
+I'd like to try and define "best practice" for writing documenting and
+testing Erlang code. I want to use:
+
+ - only the tools supplied in the OTP release
+
+So I use:
+
+ - eunit for unit testing
+ - the dialyzer for checking my code
+ - edoc for documenting things
+ - type specifications for specifying types
+
+These tools do not completely "play together" in a satisfactory manner,
+so I'd like to define what I think is "best practice" and hope that by doing
+so the tools will converge.
+
+Let's suppose I want to define the good 'ol factorial. Here's a module
+called elib1_best.erl. I've written it in such a way that it can be
+processed by erlc,eunit,edoc and the dialyzer - read the footnotes
+in brackets for an explanation.
+
+ -module(elib1_best). %% [1]
+
+ %% elib1_best: Best practice template for library modules [2]
+ %% Time-stamp: <2009-12-02 09:43:12 ejoearm> [3]
+
+ %%----------------------------------------------------------------------
+ %% Copyright (c) 2009 Joe Armstrong <erlang@gmail.com> [4]
+ %% Copyright (c) 2009 Whoomph Software AB
+ %%
+ %% Permission is hereby granted, free of charge, to any person
+ %% obtaining a copy of this software and associated documentation
+ %% files (the "Software"), to deal in the Software without
+ %% restriction, including without limitation the rights to use, copy,
+ %% modify, merge, publish, distribute, sublicense, and/or sell copies
+ %% of the Software, and to permit persons to whom the Software is
+ %% furnished to do so, subject to the following conditions:
+ %%
+ %% The above copyright notice and this permission notice shall be
+ %% included in all copies or substantial portions of the Software.
+ %%
+ %% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ %% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ %% MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ %% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ %% BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ %% ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ %% CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ %% SOFTWARE.
+ %%-----------------------------------------------------------------------
+
+ -include_lib("eunit/include/eunit.hrl"). %% [5]
+
+ -export([fac/1]). %% [6]
+
+
+ %% @doc fac(N) computes factorial(N) using a fast
+ %% iterative algorithm. [7]
+
+    -spec fac(integer()) -> integer().  %% [8]
+
+ fac(N) when is_integer(N), N >= 0 -> fac1(N, 1).
+
+ fac1(0, K) -> K;
+ fac1(N, K) -> fac1(N-1, K*N).
+
+ fac_test() -> %% [9]
+ 6 = fac(3),
+ 24 = fac(4).
+
+ %% Notes:
+ %% [1] - module on line 1
+ %% [2] - module comment
+ %% [3] - Time stamp auto generated by emacs.
+ %% Must be near start of file
+ %% [4] - Copyright (I always forget this, but adding a
+ %% copyright reduces the pain later
+ %% [5] - Needed for eunit
+ %% [6] - use export and NOT compile(export_all)
+ %% [7] - @doc comes first
+ %% [8] - -spec comes immediately *before* the function
+ %% [9] - test cases come immediately after the function
+
+ %% end of module
+...
+
+Now let's see what happens:
+
+ 1) Compiling
+
+ erlc +warn_missing_spec -o ../ebin -W elib1_best.erl
+
+ ./elib1_best.erl:0: Warning: missing specification for function test/0
+ ./elib1_best.erl:44: Warning: missing specification for function fac_test/0
+
+Best practice is to support type specifications for all exported
+functions. But eunit magically adds a function test/0 and I really
+don't want to have to add manual exports and type specs for
+fac_test/0.
+
+ [A fix is needed to erlc here, OR eunit can add type specs,
+ I think the latter is better - erlc should not need to know about eunit]
+
+ 2) Dialyzing
+
+dialyzer --src elib1_best.erl
+ Checking whether the PLT /home/ejoearm/.dialyzer_plt is up-to-date... yes
+ Proceeding with analysis...
+Unknown functions:
+ eunit:test/1
+ done in 0m0.32s
+done (passed successfully)
+
+ This is ok - I could add eunit to my plt if I wanted ...
+ the dialyzer warns for missing functions so I don't need to run
+ xref
+
+ 3) Documentation
+
+ I'll run edoc on everything in the current directory putting the
+results in ../doc
+
+ > erl -noshell -eval "edoc:application(lib, \".\", [{dir,\"../doc\"}])" \
+ -s init stop
+
+ This works fine and ../doc/elib1_best.html has the documentation
+ but now edoc has not found my nice -spec declaration and thinks that
+ fac has type: fac(N) -> any()
+
+ Why: because edoc and erlc don't use the same type parser.
+
+ Current best practice is to use -spec (in code) and
+ not @spec (in edoc comments)
+
+ [A fix is needed here to edoc, to understand -spec's]
+
+4) Testing
+
+ 1> eunit:test(elib1_best, [verbose]).
+ ======================== EUnit ========================
+ elib1_best: fac_test (module 'elib1_best')...ok
+ =======================================================
+ Test passed.
+ ok
+
+ Great ...
+
+Now for questions.
+
+ 1) Does this represent best practice? Is this the best way to
+ write code? - can anybody improve on this?
+
+ [And yes I know about quickcheck, but I'm only concerned
+ with SW in the OTP release]
+
+ 2) If I write like this can I assume that one day edoc
+ and eunit and erlc will converge so that I get correctly displayed
+ types in edoc and no warnings in erlc etc?
+
+ 3) Does anything else have to be fixed?
+
+ 4) Improvements..
+
+ I can think of one. I have some code to convert .erl to
+ .html with correctly colored code and hyperlinks etc.
+ So I can "surf" the code. It would be nice to have hooks
+ into edoc so I can call this code
+
+That's all for now ...
+
+/Joe
+
+
+
+
35 src/book.book
@@ -0,0 +1,35 @@
+= An Erlang Library
+
+This book describes an Erlang library called ''elib1''.
+
+== Todo (formatting program)
+
+<ul>
++ Add a diagram language (inline svg)
++ ~~Make the Book version~~
++ ~~Make compact lists, so I can leave out the blank line in paragraphs like
+this~~
++ ~~add processing of ''footnotes''~~
++ Make commands for /bin
++ check that all list entries do not end with a dot.
++ Check that all list entries start with a big letter.
++ punctuation check.
++ ~~add "typographic quotes."~~
++ Nice to make paragraphs "clickable-editable" in the browser.
++ Add some nice paragraph hover effect that colors paras as we move over them. Makes text easier to read.
+</ul>
+
+== To do (programs)
+
+<ul>
++ mysql full text searching and metadata
++ go through all old library adding good stuff (example topological sort)
++ javascript slide show from wiki markup
++ move over to GIT
++ xref
+</ul>
+
+<include chapter="design" />
+<include chapter="ezxml" />
+<include chapter="lorem" />
+
0  src/chap2html.xsl
No changes.
457 src/chap2pdf.xsl
@@ -0,0 +1,457 @@
+<?xml version="1.0" encoding="utf-8"?>
+<xsl:stylesheet
+ xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ xmlns:fo="http://www.w3.org/1999/XSL/Format"
+ version="1.0">
+ <xsl:output method="xml"/>
+
+ <xsl:template match="/">
+ <fo:root xmlns:fo="http://www.w3.org/1999/XSL/Format">
+ <fo:layout-master-set>
+ <fo:simple-page-master
+ master-name="simple"
+ page-height ="29.7cm"
+ page-width ="21cm"
+ margin-left ="2.5cm"
+ margin-right ="2.5cm">
+
+ <fo:region-body
+ region-name="Content"
+ margin-top="1.0cm" margin-bottom="1in"
+ />
+
+ <fo:region-before extent=".8cm"/>
+ <fo:region-after extent=".5in" background-color="silver"/>
+ </fo:simple-page-master>
+ </fo:layout-master-set>
+ <fo:page-sequence
+ master-reference="simple">
+
+ <fo:static-content flow-name="xsl-region-before">
+ <fo:block>
+ <fo:inline keep-together.within-line="always">
+ <fo:leader leader-pattern="rule"
+ leader-length="2.95cm"/>
+ <fo:inline vertical-align="sub">
+ Chapter NNNNN
+ </fo:inline>
+ <fo:leader leader-pattern="rule"
+ leader-length="2.95cm" /></fo:inline>
+ </fo:block>
+ </fo:static-content>
+
+ <fo:flow flow-name="Content">
+ <xsl:apply-templates/>
+ </fo:flow>
+ </fo:page-sequence>
+ </fo:root>
+ </xsl:template>
+
+ <xsl:template match="chap">
+ <xsl:apply-templates/>
+ </xsl:template>
+
+ <xsl:template match="h2">
+ <fo:block font-size="14pt"
+ font-weight="bold"
+ space-before="1em"
+ space-after="0.5em"
+ space-after.conditionality = 'retain'
+ >
+ <xsl:apply-templates/>
+ </fo:block>
+ </xsl:template>
+
+ <!-- this is my h1 -->
+
+ <xsl:template match="title">
+ <fo:block
+ margin-top="4mm"
+ margin-bottom="4mm"
+ padding-before="0.4em"
+ padding-after="0.4em"
+ text-align="center"
+ background-color="black"
+ color="white"
+ font-family="Times"
+ font-size="30pt"
+ span = "all"
+ font-weight="bold"
+ space-after="1cm">
+ <xsl:apply-templates/>
+ </fo:block>
+ </xsl:template>
+
+ <xsl:template match="strike">
+ <fo:inline text-decoration="line-through">
+ <xsl:apply-templates select="*|text()"/>
+ </fo:inline>
+ </xsl:template>
+
+ <xsl:template match="note">
+ <fo:block border-style="solid"
+ margin="0.2cm"
+ border-width="1px"
+ padding="4mm"
+ text-align="justify"
+ font-family="Helvetica">
+ <xsl:apply-templates/>
+ </fo:block>
+ </xsl:template>
+
+
+ <xsl:template match="warn">
+ <fo:block margin="0px" padding="8px" background-color="#aabbcc">
+ <xsl:apply-templates/>
+ </fo:block>
+ </xsl:template>
+
+ <xsl:template match="box">
+ <fo:block border-style="solid"
+ margin="0.2cm"
+ border-width="5px"
+ border-color="red"
+ padding="4mm"
+ text-align="justify"
+ font-family="Helvetica">
+ <xsl:apply-templates/>
+ </fo:block>
+ </xsl:template>
+
+ <xsl:template match="p">
+ <fo:block text-align="justify" font-family="Times">
+ <xsl:apply-templates/>
+ </fo:block>
+ </xsl:template>
+
+
+ <!-- ============================================
+ We handle an ordered list with two complications:
+ If the list appears inside another list (either
+ an <ol> or <ul>), we don't put any vertical space
+ after it. The other issue is that we indent the
+ list according to how deeply nested the list is.
+ =============================================== -->
+
+ <xsl:template match="ol">
+ <fo:list-block provisional-distance-between-starts="1cm"
+ provisional-label-separation="0.5cm">
+ <xsl:attribute name="space-after">
+ <xsl:choose>
+ <xsl:when test="ancestor::ul or ancestor::ol">
+ <xsl:text>0pt</xsl:text>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:text>12pt</xsl:text>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:attribute>
+ <xsl:attribute name="start-indent">
+ <xsl:variable name="ancestors">
+ <xsl:choose>
+ <xsl:when test="count(ancestor::ol) or count(ancestor::ul)">
+ <xsl:value-of select="1 +
+ (count(ancestor::ol) +
+ count(ancestor::ul)) *
+ 1.25"/>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:text>1</xsl:text>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:variable>
+ <xsl:value-of select="concat($ancestors, 'cm')"/>
+ </xsl:attribute>
+ <xsl:apply-templates select="*"/>
+ </fo:list-block>
+ </xsl:template>
+
+ <!-- ============================================
+ When we handle items in an ordered list, we need
+ to check if the list has a start attribute. If
+ it does, we change the starting number accordingly.
+ Once we've figured out where to start counting,
+ we check the type attribute to see what format
+ the numbers should use.
+ =============================================== -->
+
+ <xsl:template match="ol/li">
+ <fo:list-item>
+ <fo:list-item-label end-indent="label-end()">
+ <fo:block>
+ <xsl:variable name="value-attr">
+ <xsl:choose>
+ <xsl:when test="../@start">
+ <xsl:number value="position() + ../@start - 1"/>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:number value="position()"/>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:variable>
+ <xsl:choose>
+ <xsl:when test="../@type='i'">
+ <xsl:number value="$value-attr" format="i. "/>
+ </xsl:when>
+ <xsl:when test="../@type='I'">
+ <xsl:number value="$value-attr" format="I. "/>
+ </xsl:when>
+ <xsl:when test="../@type='a'">
+ <xsl:number value="$value-attr" format="a. "/>
+ </xsl:when>
+ <xsl:when test="../@type='A'">
+ <xsl:number value="$value-attr" format="A. "/>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:number value="$value-attr" format="1. "/>
+ </xsl:otherwise>
+ </xsl:choose>
+ </fo:block>
+ </fo:list-item-label>
+ <fo:list-item-body start-indent="body-start()">
+ <fo:block>
+ <xsl:apply-templates select="*|text()"/>
+ </fo:block>
+ </fo:list-item-body>
+ </fo:list-item>
+ </xsl:template>
+
+ <!-- ============================================
+ The unordered list is pretty straightforward;
+ the only complication is calculating the space-
+ after and start-indent properties. If this
+ list is inside another list, we don't put any
+ space after this one, and we calculate the
+ indentation based on the nesting level of this
+ list.
+ =============================================== -->
+
+ <xsl:template match="ul">
+ <fo:list-block provisional-distance-between-starts="1cm"
+ provisional-label-separation="0.5cm">
+ <xsl:attribute name="space-after">
+ <xsl:choose>
+ <xsl:when test="ancestor::ul or ancestor::ol">
+ <xsl:text>0pt</xsl:text>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:text>12pt</xsl:text>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:attribute>
+ <xsl:attribute name="start-indent">
+ <xsl:variable name="ancestors">
+ <xsl:choose>
+ <xsl:when test="count(ancestor::ol) or count(ancestor::ul)">
+ <xsl:value-of select="1 +
+ (count(ancestor::ol) +
+ count(ancestor::ul)) *
+ 1.25"/>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:text>1</xsl:text>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:variable>
+ <xsl:value-of select="concat($ancestors, 'cm')"/>
+ </xsl:attribute>
+ <xsl:apply-templates select="*"/>
+ </fo:list-block>
+ </xsl:template>
+
+
+ <!-- ============================================
+ Preformatted text is rendered in a monospaced
+ font. We also have to set the wrap-option
+ and white-space-collapse properties.
+ =============================================== -->
+
+ <xsl:template match="pre">
+ <fo:block font-family="monospace"
+ linefeed-treatment="preserve"
+ white-space-collapse="false"
+ white-space-treatment="preserve"
+ >
+ <xsl:apply-templates select="*|text()"/>
+ </fo:block>
+ </xsl:template>
+
+ <!-- ============================================
+ We don't do anything with the <dl> element, we
+ just handle the elements it contains. Notice
+ that we're ignoring any text that appears
+ in the <dl> itself; I'm not sure if that's
+ the right call.
+ =============================================== -->
+
+ <xsl:template match="dl">
+ <xsl:apply-templates select="*"/>
+ </xsl:template>
+
+ <!-- ============================================
+ A definition term is rendered in bold. We
+ specify keep-with-next here, although it doesn't
+ always work with FOP.
+ =============================================== -->
+
+ <xsl:template match="dt">
+ <fo:block font-weight="bold" space-after="2pt"
+ keep-with-next="always">
+ <xsl:apply-templates select="*|text()"/>
+ </fo:block>
+ </xsl:template>
+
+ <!-- ============================================
+ We handle each <dd> element as an indented block
+ beneath the defined term. If the following
+ element is another <dd>, that means it's another
+ definition for the same term. In that case,
+ we don't put as much vertical space after the
+ block.
+ =============================================== -->
+
+ <xsl:template match="dd">
+ <fo:block start-indent="1cm">
+ <xsl:attribute name="space-after">
+ <xsl:choose>
+ <xsl:when test="name(following::*[1]) = 'dd'">
+ <xsl:text>3pt</xsl:text>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:text>12pt</xsl:text>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:attribute>
+ <xsl:apply-templates select="*|text()"/>
+ </fo:block>
+ </xsl:template>
+
+ <!-- ============================================
+ List items inside unordered lists are easy; we
+ just have to use the correct Unicode character
+ for the bullet.
+ =============================================== -->
+
+ <xsl:template match="ul/li">
+ <fo:list-item>
+ <fo:list-item-label end-indent="label-end()">
+ <fo:block>&#x2022;</fo:block>
+ </fo:list-item-label>
+ <fo:list-item-body start-indent="body-start()">
+ <fo:block>
+ <xsl:apply-templates select="*|text()"/>
+ </fo:block>
+ </fo:list-item-body>
+ </fo:list-item>
+ </xsl:template>
+
+
+ <!-- ============================================
+ Teletype text is rendered in a monospaced font.
+ =============================================== -->
+
+ <xsl:template match="c">
+ <fo:inline font-family="monospace">
+ <xsl:apply-templates select="*|text()"/>
+ </fo:inline>
+ </xsl:template>
+
+
+ <!-- ============================================
+ For bold elements, we just change the font-weight.
+ =============================================== -->
+
+ <xsl:template match="b">
+ <fo:inline font-weight="bold">
+ <xsl:apply-templates select="*|text()"/>
+ </fo:inline>
+ </xsl:template>
+
+
+ <!-- ============================================
+ Italics. You can't get much simpler than that.
+ =============================================== -->
+
+ <xsl:template match="i">
+ <fo:inline font-style="italic">
+ <xsl:apply-templates select="*|text()"/>
+ </fo:inline>
+ </xsl:template>
+
+ <!-- "by" inline: render in bold on an orange background.
+      Fixed: font-style has no "bold" value (normal|italic|oblique...);
+      bold is expressed with font-weight. -->
+ <xsl:template match="by">
+ <fo:inline
+ padding='.3mm'
+ font-weight="bold"
+ background-color="orange">
+ <xsl:apply-templates select="*|text()"/>
+ </fo:inline>
+ </xsl:template>
+
+
+ <!-- ============================================
+ For underlined text, we use the text-decoration
+ property.
+ =============================================== -->
+
+ <xsl:template match="u">
+ <fo:inline text-decoration="underline">
+ <xsl:apply-templates select="*|text()"/>
+ </fo:inline>
+ </xsl:template>
+
+
+
+ <!-- ============================================
+ For the <img> element, we use the src attribute
+ as it comes from HTML. We also check for any
+ width and height attributes. If those attributes
+ are there, we try to use them; height="300px" is
+ used as-is, while height="300" is converted to
+ the value "300px".
+ =============================================== -->
+
+ <xsl:template match="img">
+ <fo:block text-align="center" space-after="12pt">
+ <fo:external-graphic src="{@src}">
+ <xsl:if test="@width">
+ <xsl:attribute name="content-width">
+ <xsl:choose>
+ <xsl:when test="contains(@width, 'px')">
+ <xsl:value-of select="@width"/>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:value-of select="concat(@width, 'px')"/>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:attribute>
+ </xsl:if>
+ <xsl:if test="@height">
+ <xsl:attribute name="content-height">
+ <xsl:choose>
+ <xsl:when test="contains(@height, 'px')">
+ <xsl:value-of select="@height"/>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:value-of select="concat(@height, 'px')"/>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:attribute>
+ </xsl:if>
+ </fo:external-graphic>
+ </fo:block>
+ </xsl:template>
+
+ <xsl:template match="em">
+ <fo:inline font-style="italic">
+ <xsl:apply-templates/>
+ </fo:inline>
+ </xsl:template>
+
+ <xsl:template match="*">
+ <fo:block background-color="red">
+ <xsl:apply-templates/>
+ </fo:block>
+ </xsl:template>
+
+</xsl:stylesheet>
+
154 src/design.chap1
@@ -0,0 +1,154 @@
+= Projects
+
+Before you can do ''anything at all'' you need to impose a little order
+on your masterwork. Any project that is destined to be ''the next great hack''
+must have ''some structure.'' The temptation to ''just start hacking'' is
+enormous, but there is really a bit of infrastructure that is needed before
+you start hacking. Having said this, I must admit it is difficult
+to follow the rules, you really want to start hacking ''immediately''
+but it's a good idea to have some basic infrastructure in-place before you start.
+The larger the project becomes, the more important this is.
+
+Here's the order I've decided upon:
+
+<ol>
++ I'll use and must define a fixed and defined directory structure.
++ I need a documentation tool to produce ~~nice~~ beautiful documentation
++ I need a search tool and meta-data in my code
++ I need Xref - types etc to check my code
+</ol>
+
+Why do I want these things in-place before I start hacking?
+
+<dl>
+[directory structures] Should I put everything in one directory
+or define a directory structure and force my files into this
+structure? This is a difficult question. If I had a very small
+project involving only a few files I would obviously stick everything
+into a single directory---anything else would be overkill. For large
+projects this doesn't work, so we need a directory structure. It is
+best to think about and design this directory structure ''before''
+writing the code.
+
+This is especially important in a multi-person
+project. The folks in the project need to know in which directories
+they should put their code.
+
+[documentation] We need a way to write documentation. ~~Preferably this
+will result in ''beautiful'' documentation.~~ This ''must'' produce
+''beautiful'' documentation (so that people will want to read it)---the
+documentation should be a "pleasure" to read. The documentation system
+needs to be in-place ''before'' we write the project code, so we need
+a pre-phase to set this up (or use an existing documentation system).
+The documentation system should be tied to the code base so that
+relations between the code and the documentation can be automated.
+
+[finding stuff] As projects grow the difficulty in finding stuff
+increases. When you have ten thousand files you begin to wish that
+every document had been tagged with meta data and indexed in a search
+engine. Now it is too late. Retrofitting meta data to large projects
+never works (or if it does it requires massive effort). So right from
+the beginning we should be thinking about the meta data we will need in
+our files and how we will store and find this data.
+
+[finding errors at compile time] There are many tools that can help us
+find errors at compile time. Cross reference tools, type inference
+tools, unit test frameworks. These tools are being continually
+improved, but how they are used and how they are customised to just
+''your'' project needs thinking about. It's better to set this stuff
+up ''before'' you get started. When you are in the middle of your
+project you certainly won't have time for this.
+</dl>
+
+== Layout
+
+Let's have some <<code>> here, and ''italics''.
+We can ~~strike~~ some text.
+
+We have the following sub-directories:
+
+<dl>
+[src] Source code. This is restricted to the following file types:
+ <<.erl>>, <<.chap>> <<.book>>, <<.hrl>>. The content of this file
+ are not actually intended to be read ''directly'', by this we
+ mean we will always read content that is automatically generated
+ from this directory. Files generated in the compilation process, or,
+ for example while generating documentation are not in this
+ directory.
+
+[ebin] <<.beam>> files. These are put in one directory so that we can
+ point search paths to this directory. We might add additional
+ executables to this directory. At any time the entire contents of
+ this directory can be deleted without problems.
+
+[doc] Generated documentation. This will contain (for example)
+ <<.pdf>> and <<.html>> files. These files should not be hand-edited.
+ The contents of this directory can be deleted.
+
+[save] The contents of this directory must not be deleted. This might
+ for example have a database that records all edits to the system
+ etc.
+
+[tmp] Temporary files. There may be a large number of files here don't
+ worry, the entire contents can safely be deleted.
+
+[bin] Programs, scripts that can be run. These aren't in ebin these
+ are the top-level commands that the user can execute.
+</dl>
+
+== Programs in bin
+
+<dl>
+[bin/publish X] Takes a file <<X.lit>> file in the current directory
+ and produces two beautiful files <<X.html>> and <<X.pdf>> in the
+ <<../doc>> directory. Temporary files are put in <<../tmp>>. This
+ command should leave no crud in the current directory. If there are
+ syntax errors then they will be written somewhere.
+</dl>
+
+
+== Design
+
+The documentation system uses two commands <<bin/mkchap>> and <<bin/mkbook>>.
+
+<dl>
+[mkchap X.chap] is run in the <<src>> directory. If there are no
+errors in <<X.chap>> then the following files
+are created <<../tmp/X.inc>>, <<../tmp/X.tex>>, <<../doc/X.html>>,
+and <<../doc/X.pdf>>. If anything goes wrong <warn>more here</warn>.
+
+[mkbook X.book] is run in the <<src>> directory. If there are no
+errors in <<X.book>> then the following files
+are created <<../tmp/X.inc>>, <<../tmp/X.tex>>, <<../doc/X.html>>,
+and <<../doc/X.pdf>>. If anything goes wrong <warn>more here</warn>.
+</dl>
+
+The workhorse of the system is <<mkchap>>; this converts ''wiki text''
+into PDF and HTML. In writing <<mkchap>> I have adopted a
+''minimalistic approach'', namely:
+
+<ul>
++ Reuse as much code as possible.
++ Write as little as possible
++ Only implement what I need for the documents I have written
++ Make the output look nice
+</ul>
+
+<include file="elib1_docmaker.erl" tag="tag1" />
+
+=== Parse tree of DL
+
+
+<pre>
+[aaa]p1
+[bbb]p1
+p2
+...
+</pre>
+
+is:
+
+<pre>
+{dl, [{tag,"aaa",[{p,p1},{p,p2}]},
+ {tag, "bbb",[{p,p1}]}]}
+</pre>
33 src/edit.html
@@ -0,0 +1,33 @@
+<title>edit</title>
+<link rel="stylesheet" href="./me.css" type="text/css" media="screen"/>
+
+<div class="box1">
+<a id="commentForm"></a>
+
+<form method="post" action="http://meAuto-Detach" accept-charset="utf-8"
+ id="comments_form" onsubmit="if (this.bakecookie.checked) rememberMe(this)">
+ <input name="parent" type="hidden" value="3009"/>
+ <input name="title" type="hidden" value="Auto Detach"/>
+
+ <table>
+ <tr>
+ <td><label for="name">Title:</label></td>
+ <td><input id="name" name="name" size="40" type="text" value=""/></td>
+ </tr>
+ <tr>
+ <td><label for="keywords">Keywords:</label></td>
+ <td><input id="keywords" name="keywords" size="40" type="text" value=""/></td>
+ </tr>
+ <tr>
+ <td colspan="2">
+ <textarea cols="72" id="comment" name="comment" rows="15"></textarea>
+ </td>
+ </tr>
+ <tr>
+ <td colspan="2">
+ <input name="preview" type="submit" value="Preview"/>
+ </td>
+ </tr>
+ </table>
+</form>
+</div>
44 src/elib1_best.erl
@@ -0,0 +1,44 @@
+%% Copyright (c) 2006-2009 Joe Armstrong
+%% See MIT-LICENSE for licensing information.
+
+%% ^ [4]
+
+-module(elib1_best). %% [1]
+
+%% elib1_best: Best practice template for library modules [2]
+%% Time-stamp: <2009-12-02 12:28:04 ejoearm> [3]
+
+-include_lib("eunit/include/eunit.hrl"). %% [5]
+
+-export([fac/1]). %% [6]
+
+
+%% @doc fac(N) computes factorial(N) using a fast iterative algorithm. [7]
+
+-spec fac(non_neg_integer()) -> non_neg_integer(). %% [8]
+
+fac(N) when is_integer(N), N >= 0 -> fac1(N, 1).
+
+%% fac1(N, Acc) - tail-recursive helper; accumulates the product in Acc.
+fac1(0, K) -> K;
+fac1(N, K) -> fac1(N-1, K*N).
+
+fac_test() -> %% [9]
+ 6 = fac(3),
+ 24 = fac(4).
+
+%% Notes:
+%% [1] - module on line 1
+%% [2] - module comment
+%% [3] - Time stamp auto generated by emacs. Must be near start of file
+%% [4] - Copyright (I always forget this, but adding a copyright reduces
+%% the pain later)
+%% [5] - Needed for eunit
+%% [6] - use export and NOT compile(export_all)
+%% [7] - @doc comes first
+%% [8] - -spec comes immediately *before* the function
+%% [9] - test cases come immediately after the function
+
+%% end of module
+
+
+
43 src/elib1_blob_store.erl
@@ -0,0 +1,43 @@
+%% Copyright (c) 2006-2009 Joe Armstrong
+%% See MIT-LICENSE for licensing information.
+
+-module(elib1_blob_store).
+
+%% The blob store is a two-level, dets-backed store
+%%
+%% M:open(File)
+%% M:store(Key, Blob)
+%% M:fetch(Key) -> {ok, Blob} | error
+%% M:keys() -> [Key]
+
+-export([open/1, close/0, fetch/1, store/2, keys/0]).
+
%% @doc Open (or create) the dets table that backs the blob store.
%% Returns true on success; on failure prints the file name and the
%% dets error reason, then exits with eDetsOpen.
%% BUG FIX: the error branch discarded _Reason and printed only
%% "cannot open dets table", which made failures undebuggable.
open(File) ->
    case dets:open_file(?MODULE, [{file, File}]) of
        {ok, ?MODULE} ->
            true;
        {error, Reason} ->
            io:format("cannot open dets table ~p: ~p~n", [File, Reason]),
            exit(eDetsOpen)
    end.
+
+close() -> dets:close(?MODULE).
+
%% @doc Insert Blob under Key, replacing any previous value.
%% The guard accepts binaries only; other terms fail with function_clause.
store(Key, Blob) when is_binary(Blob) ->
    Record = {Key, Blob},
    ok = dets:insert(?MODULE, [Record]).
+
%% @doc Look Key up in the store.
%% Returns {ok, Blob} when present, or the atom error when absent
%% (note: it does not raise, despite the module header's claim).
fetch(Key) ->
    case dets:lookup(?MODULE, Key) of
        [{_, Blob}] -> {ok, Blob};
        [] -> error
    end.
+
%% @doc Return every key in the store (order unspecified — dets
%% iteration order is not defined).
keys() ->
    Collect = fun({Key, _Blob}, Acc) -> [Key | Acc] end,
    dets:foldl(Collect, [], ?MODULE).
+
+
+
+
356 src/elib1_chunks.erl
@@ -0,0 +1,356 @@
+%% Copyright (c) 2006-2009 Joe Armstrong
+%% See MIT-LICENSE for licensing information.
+
+-module(elib1_chunks).
+-compile(export_all).
+-import(lists, [filter/2, flatten/1, reverse/1]).
+-import(elib1_misc, [dump/2]).
+
+%% convert .log file to .html
+%% by converting each chunk in the log file
+
%% @doc Command-line entry point: X is expected to be a single-element
%% list [FileAtom]; the named .log file is converted to HTML via
%% convert/1. Every error is caught and reported so a batch run
%% never crashes the emulator.
batch(X) ->
    try
        [A] = X,
        convert(atom_to_list(A))
    catch
        Class:Reason ->
            %% typo fixed in the report text ("ocurred" -> "occurred")
            io:format("Some error occurred ~p:~p ~p~n",
                      [Class, Reason, erlang:get_stacktrace()])
    end.
+
+file2chunks(File) ->
+ Str = elib1_misc:file2string(File),
+ str2chunks(Str).
+
+convert(File) ->
+ Root = filename:rootname(File),
+ Chunks = file2chunks(File),
+ %% dump("logtmp", Chunks),
+ H1 = [ chunk2html(C) || C <- Chunks],
+ OutFile = "../doc/" ++ Root ++ ".html",
+ elib1_misc:expand_file_template("me.template",
+ [{"content", H1}],
+ OutFile),
+ io:format("created ~s~n",[OutFile]).
+
+chunk2html({chunk,Headers,Content}) ->
+ div_box([headers2html(Headers), content2html(Content)]).
+
+content2html(Str) ->
+ wikiA2html(parse_wiki_str(Str)).
+
+headers2html(L) ->
+ [h2([atom_to_list(Key),": ",Val]) || {Key,Val} <- L].
+
+h2(X) ->
+ ["<h2>",X,"</h2>\n"].
+
+div_box(X) ->
+ ["<div class='box'>\n", X, "\n</div>"].
+
+
+test1() ->
+ str2chunks("@chunk\n@tag: abc\n\nabc\n\n+123\n+234\n\ndef").
+
+
+str2chunks(Str) ->
+ Str1 = elib1_misc:dos2unix(Str),
+ parse_chunks(Str1).
+
+parse_chunks("@chunk" ++ _ = T) ->
+ parse_chunks(T, 1, []);
+parse_chunks(Str) ->
+ io:format("**** string does not begin with @chunk~n"
+ " starts:~s ...~n"
+ " skipping some data~n",[string:sub_string(Str,1,10)]),
+ {_, Ln1, Str1} = collect_chunk(Str, 1, []),
+ parse_chunks(Str1, Ln1, []).
+
+parse_chunks([], _, L) ->
+ reverse(L);
+parse_chunks("@chunk" ++ Str, Ln, L) ->
+ {C, Ln1, Str1} = collect_chunk(Str, Ln, []),
+ {Header, Body} = parse_chunk_headers(C, Ln, []),
+ parse_chunks(Str1, Ln1, [{chunk,Header,Body}|L]).
+
+parse_chunk_headers("\n\n" ++ T, _Ln, L) ->
+ {reverse(L), T};
+parse_chunk_headers("\n@" ++ T, Ln, L) ->
+ %% io:format("isolate tag:~p~n",[T]),
+ {Tag, T1} = isolate_meta_tag(T, Ln, []),
+ %% io:format("Tag=~p T1=~s~n",[Tag, string:sub_string(T1,1,10)]),
+ {Body, Ln1, T2} = collect_tag_body(T1, Ln+1, []),
+ parse_chunk_headers(T2, Ln1, [{Tag,trim(Body)}|L]).
+
+trim(X) ->
+ elib1_misc:remove_leading_and_trailing_whitespace(X).
+
+-define(IN(A,X,B), A =< X, X =< B).
+
+isolate_meta_tag([H|T], Ln, L) when ?IN($a,H,$z) ; ?IN($A,H,$Z) ->
+ isolate_meta_tag(T, Ln, [H|L]);
+isolate_meta_tag([$:|T], _, L) ->
+ {list_to_atom(reverse(L)), T};
+isolate_meta_tag(Str, Ln, L) ->
+ exit({ebadTag,Ln,string:sub_string(Str,1,10),reverse(L)}).
+
+collect_tag_body("\n@" ++ _ = T, Ln, L) -> {reverse(L), Ln, T};
+collect_tag_body("\n\s" ++ T, Ln, L) ->
+ collect_tag_body(T, Ln+1, L);
+collect_tag_body("\n\n" ++ _ = T, Ln, L) ->
+ {reverse(L), Ln, T};
+collect_tag_body("\n" ++ _T, Ln, L) ->
+ exit({ebadTag, Ln, reverse(L)});
+collect_tag_body([H|T], Ln, L) ->
+ collect_tag_body(T, Ln, [H|L]);
+collect_tag_body([], Ln, L) ->
+ {reverse(L), Ln, []}.
+
+collect_chunk("\n@chunk" ++ _ = T, Ln, L) ->
+ {reverse(L), Ln+1, tl(T)};
+collect_chunk([$\n|T], Ln, L) ->
+ collect_chunk(T, Ln+1, [$\n|L]);
+collect_chunk([H|T], Ln, L) ->
+ collect_chunk(T, Ln, [H|L]);
+collect_chunk([], Ln, L) ->
+ {reverse(L), Ln, []}.
+
+parse_wiki_str(Str) ->
+ parse_str0(Str, 0).
+
+parse_str0(Str, Ln) ->
+ Pass1 = parse(Str, Ln, []),
+ %% dump("t1", Pass1),
+ Pass2 = flatten([pass2(I) || I <- Pass1]),
+ [pass3(I) || I <- Pass2].
+
+pass2({str,_Ln,L}) -> split_into_paras(L);
+pass2({dl,_Ln,L}) -> parse_dl(L);
+pass2({pre,_Ln,L}) -> {pre,L};
+pass2(X) -> exit({pass2, X}).
+
+pass3({para,[]}) -> drop;
+pass3({para,"+" ++ _ =T}) -> parse_list1(ol, "\n\\+", [$\n|T]);
+pass3({para,"*" ++ _ =T}) -> parse_list1(ul, "\n\\*", [$\n|T]);
+pass3({para,"=" ++ S}) -> parse_header(S, 1);
+pass3({para,S}) -> parse_para(S);
+pass3(X) -> X.
+
+parse_list1(Tag, Re, Str) ->
+ L = re:split(Str, Re, [{return,list}]),
+ L1 = remove_blank_lines(L),
+ L2 = [parse_para(I) || I <- L1],
+ {Tag, L2}.
+
+parse_dl(S) ->
+ %% look for lines that start \n
+ %% io:format("parse_dl =~p~n",[S]),
+ Lines = split_into_regions(S),
+ %% io:format("Lines =~p~n",[Lines]),
+ Lines1 = [parse_dl_item(I) || I <- Lines],
+ %% io:format("Lines1 =~p~n",[Lines1]),
+ {dl, Lines1}.
+
+parse_dl_item(Str) ->
+ {Tag, Rest} = extract_tag(Str, []),
+ {tag,Tag,paras,parse_paras(split_into_paras(Rest))}.
+
+extract_tag([$\n|_],_) -> exit(nlNotAllowedInTag);
+extract_tag([], _) -> exit(eofInTag);
+extract_tag([$\\,$]|T], L) -> extract_tag(T, [$]|L]);
+extract_tag([$]|T], L) -> {reverse(L), T};
+extract_tag([H|T], L) -> extract_tag(T, [H|L]).
+
+split_into_regions(S) ->
+ L = re:split(S, "[\r]?\n[\s\t]*\\[",[{return,list}]),
+ remove_blank_lines(L).
+
+
+%% split_into_para(Str) -> [Str].
+
+split_into_paras(S) ->
+ L = re:split(S, "[\r]?\n[\s\t]*[\r]?\n",[{return,list},trim]),
+ L1 = remove_blank_lines(L),
+ [{para,elib1_misc:remove_leading_whitespace(I)} || I <- L1].
+
+remove_blank_lines(L) ->
+ filter(fun(I) -> not elib1_misc:is_blank_line(I) end, L).
+
+
+%% collect top-level objects (only <dl> and <pre> in column one
+
+parse([], _, L) -> reverse(L);
+parse("\n<dl>" ++ T, Ln, L) -> parse1(dl, T, "</dl>", Ln+1, L);
+parse("\n<pre>" ++ T, Ln, L) ->
+ %% Pre **must** have a WS /n
+ T1 = skip_to_pre_start(T, Ln),
+ io:format("Here:Ln=~p T1=~p~n",[Ln,T1]),
+ parse1(pre, T1, "\n</pre>", Ln+1, L);
+parse(Str, Ln, L) ->
+ {Body, Ln1, T1} = collect_str(Str, Ln, []),
+ parse(T1, Ln1, [{str,Ln,Body}|L]).
+
+skip_to_pre_start([$\n|T], _) -> T;
+skip_to_pre_start([$\s|T], Ln) -> skip_to_pre_start(T, Ln);
+skip_to_pre_start([$\t|T], Ln) -> skip_to_pre_start(T, Ln);
+skip_to_pre_start([$\r|T], Ln) -> skip_to_pre_start(T, Ln);
+skip_to_pre_start(_Str, Ln) ->
+ exit({pre,line,Ln,nonBlankOnSameLine}).
+
+collect_str("\n<pre>" ++ _ = T, Ln, L) -> {reverse(L), Ln+1, T};
+collect_str("\n<dl>" ++ _ = T, Ln, L) -> {reverse(L), Ln+1, T};
+collect_str([$\n|T], Ln, L) -> collect_str(T, Ln+1, [$\n|L]);
+collect_str([H|T], Ln, L) -> collect_str(T, Ln, [H|L]);
+collect_str([], Ln, L) -> {reverse(L), Ln, []}.
+
+parse1(Tag, Str, Stop, Ln, L) ->
+ {Body, Ln1, Str1} = collect_thing(Str, Stop, Ln, []),
+ parse(Str1, Ln1, [{Tag,Ln,Body}|L]).
+
+collect_thing([H|T] = Str, Stop, Ln, L) ->
+ case elib1_misc:is_prefix(Stop, Str) of
+ {yes, Rest} ->
+ {reverse(L), Ln + count_nls(Stop), Rest};
+ no ->
+ collect_thing(T, Stop, bump(H, Ln), [H|L])
+ end;
+collect_thing([], Stop, Ln, _) ->
+ exit({eof,line,Ln,expecting,Stop}).
+
+count_nls([$\n|T]) -> 1 + count_nls(T);
+count_nls([_|T]) -> count_nls(T);
+count_nls([]) -> 0.
+
+bump($\n, N) -> N+1;
+bump(_, N) -> N.
+
+parse_header([$=|T], N) -> parse_header(T, N+1);
+parse_header(T, N) -> {header, N, T}.
+
+parse_paras(L) ->
+ [parse_para(I) || {para,I} <- L].
+
%% @doc Parse the inline wiki markup of one paragraph.
%% Returns {p, Inlines}; each inline is 'br', a tagged tuple such as
%% {b,Str} / {i,Str} / {link,Str}, or {str,Str} for plain text.
parse_para(Str) ->
    {p, parse_para(Str, [])}.

parse_para([], L) -> reverse(L);
parse_para("<br/>" ++ T, L) -> parse_para(T, [br|L]);
parse_para("<u>" ++ T, L) -> parse_para1(u, T, "</u>", L);
%% BUG FIX: <tt> used to be tagged 'u' (rendered as <underline>),
%% leaving inline2html's {tt,_} clause unreachable; tag it 'tt' so
%% <tt>...</tt> renders as teletype text as intended.
parse_para("<tt>" ++ T, L) -> parse_para1(tt, T, "</tt>", L);
parse_para("<<" ++ T, L) -> parse_para1(code, T, ">>", L);
parse_para("<code>" ++ T, L) -> parse_para1(code, T, "</code>", L);
parse_para("<footnote>" ++ T, L) -> parse_para1(footnote, T, "</footnote>", L);
parse_para("''" ++ T, L) -> parse_para1(i, T, "''", L);
parse_para("**" ++ T, L) -> parse_para1(b, T, "**", L);
parse_para("[[" ++ T, L) -> parse_para1(link, T, "]]", L);
parse_para("<s>" ++ T, L) -> parse_para2(strike, T, "</s>", L);
parse_para("~~" ++ T, L) -> parse_para2(strike, T, "~~", L);
parse_para("<warn>" ++ T, L) -> parse_para2(warn, T, "</warn>", L);
parse_para("\"" ++ T, L) -> parse_para2(quoted, T, "\"", L);
parse_para(T, L) ->
    {B, T1} = collect_str(T, []),
    parse_para(T1, [{str,B}|L]).
+
+collect_str("[[" ++ _ = T, L) -> {reverse(L), T};
+collect_str("''" ++ _ = T, L) -> {reverse(L), T};
+collect_str("**" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<tt>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<s>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<br/>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("~~" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<u>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<code>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<warn>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<footnote>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<<" ++ _ = T, L) -> {reverse(L), T};
+collect_str("\"" ++ _ = T, L) -> {reverse(L), T};
+collect_str([H|T], L) -> collect_str(T, [H|L]);
+collect_str([], L) -> {reverse(L), []}.
+
+parse_para1(Tag, Str, Stop, L) ->
+ {B, Str1} = collect(Str, Stop, []),
+ parse_para(Str1, [{Tag,B}|L]).
+
+parse_para2(Tag, Str, Stop, L) ->
+ {B, Str1} = collect(Str, Stop, []),
+ parse_para(Str1, [{Tag,parse_para(B)}|L]).
+
+collect([], _, L) ->
+ {reverse(L), []};
+collect([$\\,H|T], Stop, L) ->
+ collect(T, Stop, [H|L]);
+collect(Str, Stop, L) ->
+ case elib1_misc:is_prefix(Stop, Str) of
+ {yes, Rest} ->
+ {reverse(L), Rest};
+ no ->
+ collect(tl(Str), Stop, [hd(Str)|L])
+ end.
+
+
+wikiA2html({header,N,S}) ->
+ M = integer_to_list(N+1),
+ ["<h",M,">",quote(S),"</h",M,">\n"];
+wikiA2html({p, L}) ->
+ ["<p>",inlines2html(L),"</p>\n"];
+wikiA2html({pre, L}) ->
+ ["<pre>\n", quote(L),"\n</pre>\n"];
+wikiA2html({dl, L}) ->
+ ["<dl>\n", dl_body_to_html(L),"\n</dl>\n"];
+wikiA2html({ol, L}) ->
+ ["<ol>",[["<li>", inlines2html(I),"</li>\n"] || {p, I} <-L],"</ol>\n"];
+wikiA2html({ul, L}) ->
+ ["<ul>",[["<li>",inlines2html(I),"</li>\n"]|| {p, I} <- L],"</ul>\n"];
+wikiA2html(L) when is_list(L) ->
+ [wikiA2html(I) || I <- L];
+wikiA2html(X) ->
+ io:format("wikiA2html???:~p~n",[X]),
+ pre(X).
+
+list_paras([{p,P}|T]) ->
+ %% the first para has no <p> .. </p> wrapper the rest do
+ [inlines2html(P) | [wikiA2html(I) || I <- T]].
+
+dl_body_to_html(L) ->
+ [["<dt><b>",Tag,"</b></dt>"|dl_body1(Ps)] || {tag,Tag,paras,Ps} <- L].
+
+dl_body1([{p,H}|T]) ->
+ ["<dd>",inlines2html(H),"\n" , [wikiA2html(I) || I <- T],"</dd>\n"].
+
+li(X) ->
+ ["<li>",quote(X),"</li>\n"].
+
+pre(X) ->
+ L = lists:flatten(io_lib:format("**~p~n", [X])),
+ ["<pre>\n",quote(L),"\n</pre>\n"].
+
+inlines2html(L) -> [inline2html(I)||I<-L].
+
%% @doc Render one parsed inline element as an HTML iolist.
%% BUG FIX: the line-break clause emitted "</br>", which is not a
%% valid HTML element; the correct void-element form is "<br/>".
inline2html(br) -> "<br/>\n";
inline2html({link,L}) -> ["<a href='",L,"'>",L,"</a>"];
inline2html({strike,{p,L}}) -> ["<strike>",inlines2html(L),"</strike>"];
inline2html({warn,{p,L}}) -> ["<font color='red'>",inlines2html(L),"</font>"];
inline2html({quoted,{p,L}}) -> ["&ldquo;",inlines2html(L),"&rdquo;"];
inline2html({footnote,L}) -> ["<span class='footnote'>",quote(L),"</span>"];
inline2html({b, I}) -> ["<b>",quote(I),"</b>"];
inline2html({i, I}) -> ["<i>",quote(I),"</i>"];
inline2html({bi, I}) -> ["<b><i>",quote(I),"</i></b>"];
inline2html({u, I}) -> ["<underline>",quote(I),"</underline>"];
inline2html({code, I}) -> ["<tt>",quote(I),"</tt>"];
inline2html({tt, I}) -> ["<tt>",quote(I),"</tt>"];
inline2html({str, I}) -> quote(I);
inline2html(X) -> pre({unexpected,inline,X}).
+
%% @doc HTML-escape a string and typeset a few ASCII conventions:
%% '&' and '<' become entities, a straight apostrophe becomes &rsquo;,
%% "---" an em-dash and "--" an en-dash.
%% NOTE: the "---" clause must come before "--" so the longer run wins.
quote("&" ++ Rest) -> "&amp;" ++ quote(Rest);
quote("<" ++ Rest) -> "&lt;" ++ quote(Rest);
quote("'" ++ Rest) -> "&rsquo;" ++ quote(Rest);
quote("---" ++ Rest) -> "&mdash;" ++ quote(Rest);
quote("--" ++ Rest) -> "&ndash;" ++ quote(Rest);
quote([C | Rest]) -> [C | quote(Rest)];
quote([]) -> [].
+
+
111 src/elib1_content_edit.erl
@@ -0,0 +1,111 @@
+%% Copyright (c) 2006-2009 Joe Armstrong
+%% See MIT-LICENSE for licensing information.
+
+-module(elib1_content_edit).
+
+-compile(export_all).
+-import(elib1_seq_web_server1, [pre/1]).
+-import(lists, [reverse/1, reverse/2]).
+
+edit([{"file", F}], Root, _) ->
+ %% F is the name relative to doc
+ Full = Root ++ "/doc/" ++ F ++ ".ehtml",
+ io:format("elib1_CEDIT Edit File=~p Root=~p Full=~p~n",[F, Root,Full]),
+ case file:read_file(Full) of
+ {ok, Bin} ->
+ Str = binary_to_list(Bin),
+ Str1 = strip_headers_and_trailers(Str),
+ Bin1 = list_to_binary(Str1),
+ %% io:format("Str=~s~nStr1=~s~n",[Str, Str1]),
+ Template = Root ++ "/doc/edit.html",
+ Args = [{"content", Bin1},{"file",F}],
+ R = elib1_misc:template_file_and_args_to_io_list(Template,
+ Args),
+ {ok, html, [R]};
+ {error, _} ->
+ {ok, html, pre({cannot,read,Full})}
+ end.
+
+list_dir(A, Root, Db) ->
+ io:format("list_dir:~p~n",[{A,Root,Db}]),
+ Files = filelib:wildcard(Root ++ "/doc/*.ehtml"),
+ L = [["<li><a href='/cgi?mod=elib1_content_edit&func=show_ehtml&file=",
+ I,"'>",I,"</a></li>"] || I <- Files],
+ L1 = ["<ul>",L,"</ul>"],
+ {ok, html, L1}.
+
+show_ehtml([{"file",F}], _, _) ->
+ Src = filename:rootname(F) ++ ".ehtml",
+ Dest = filename:rootname(F) ++ ".html",
+ case elib1_misc:out_of_date(Src, Dest) of
+ true ->
+ elib1_doc:file(filename:rootname(Src));
+ false ->
+ void
+ end,
+ show_html(Dest).
+
+show_html(F) ->
+ {ok, B} = file:read_file(F),
+ {ok, html, [B]}.
+
+
+save([{"file", File}, {"value", Str}], Root) ->
+ post_process(Root, File, Str),
+ {ok, html, pre("done")}.
+
+handle(Args, Root) ->
+ io:format("** ERROR cedit:~p~n",[{args,Args,root,Root}]),
+ {ok, html, pre({cedit,error,Args,Root})}.
+
+%%----------------------------------------------------------------------
+%% post_process
+
+post_process(Root, File, Str) ->
+ %% File comes back without the .ehtml extension
+ File1 = Root ++ "/doc/" ++ File ++ ".ehtml",
+ io:format("Post_process:: ~s ~s~n",[File,File1]),
+ Str1 = tidy(Str, [], []),
+ Str2 = ["<html>\n",Str1,"</html>\n"],
+ make_backup_copy(File1),
+ file:write_file(File1, Str2),
+ elib1_doc:file(filename:rootname(File1)).
+
+make_backup_copy(Src) ->
+ %% Src is /a/b/c.xxx
+ %% Dest /a/b/backup/c_<time>.xxx
+ Dir = filename:dirname(Src),
+ File = filename:basename(filename:rootname(Src)),
+ Ext = filename:extension(Src),
+ Dest = Dir ++ "/backup/" ++ File ++ "_" ++ elib1_misc:time_stamp() ++ Ext,
+ io:format("Rename :: ~s as ~s~n",[Src,Dest]),
+ file:rename(Src, Dest).
+
%% @doc Drop everything before the first "<html>" and everything from
%% "</html>" onwards. Note the "<html>" token itself is kept in the
%% result: the scan stops *at* it and accumulation starts from there.
strip_headers_and_trailers("<html>" ++ _ = Str) ->
    strip_headers_and_trailers(Str, []);
strip_headers_and_trailers([_C | Rest]) ->
    strip_headers_and_trailers(Rest).

strip_headers_and_trailers("</html>" ++ _, Acc) ->
    lists:reverse(Acc);
strip_headers_and_trailers([C | Rest], Acc) ->
    strip_headers_and_trailers(Rest, [C | Acc]).
+
+flatten(L) ->
+ binary_to_list(list_to_binary(L)).
+
+%% remove strange stuff added by content editable mode
%% @doc Remove the styling spans that "contenteditable" editors insert,
%% rewriting them as plain <b>, <strike> and <i> tags. Stack holds the
%% closing tag to emit when the matching </span> is reached.
tidy("<span style=\"font-weight: bold;\">" ++ Rest, Stack, Acc) ->
    tidy(Rest, ["</b>" | Stack], lists:reverse("<b>", Acc));
tidy("<span style=\"text-decoration: line-through;\">" ++ Rest, Stack, Acc) ->
    tidy(Rest, ["</strike>" | Stack], lists:reverse("<strike>", Acc));
tidy("<span style=\"font-style: italic;\">" ++ Rest, Stack, Acc) ->
    tidy(Rest, ["</i>" | Stack], lists:reverse("<i>", Acc));
tidy("</span>" ++ Rest, [Close | Stack], Acc) ->
    tidy(Rest, Stack, lists:reverse(Close, Acc));
tidy([C | Rest], Stack, Acc) ->
    tidy(Rest, Stack, [C | Acc]);
tidy([], _Stack, Acc) ->
    %% any span still open at end-of-input is dropped without a closer
    lists:reverse(Acc).
+
+
110 src/elib1_diff.erl
@@ -0,0 +1,110 @@
+%% Copyright (c) 2006-2009 Joe Armstrong
+%% See MIT-LICENSE for licensing information.
+
+-module(elib1_diff).
+
+%% File : diff.erl
+%% Author : Joe Armstrong (joe@bluetail.com)
+%% Purpose : Diff of two files (like Diff and patch).
+
+%% diff(A, B) -> Patch
+%% patch(B, Patch) -> A
+%% patchL(B, [Patch]) -> A'
+
+%% -compile(export_all).
+
+-export([diff/2, diff_files/2, patch/2, patchL/2]).
+-export([test/0]).
+
+-import(lists, [foldl/3, reverse/1]).
+
+test() ->
+ diff_files("diff.erl.orig", "diff.erl"),
+ diff_files("diff.erl", "diff.erl.orig").
+
+diff_files(A, B) ->
+ {ok, B1} = file:read_file(A),
+ {ok, B2} = file:read_file(B),
+ diff(binary_to_list(B1), binary_to_list(B2)).
+
+%% diff(A, B) -> P
+%% compute a patch P that takes B into A
+%% ie P such that
+%% patch(B, P) -> A
+%%
+%% The idea is that we only keep the latest version of a
+%% file "New". Given Old and New we compute P (the patch)
+%% then we can throw away Old. Old can be reconstructed by
+%% applying P to New
+
+-spec diff(A::string(), B::string()) -> [{integer(),integer()} | string()].
+
+diff(Old, New) ->
+ diff(str2lines(Old), str2lines(New), []).
+
+%% comment in for testing
+%%% io:format("Patch size=~p~n",[size(Patch)]),
+%%% case patch(New, _Patch) of
+%%% Old -> Patch;
+%%% _ -> exit(oops)
+%%% end.
+
+patchL(New, Patches) ->
+ foldl(fun(Patch, N) -> patch(N, Patch) end, New, Patches).
+
+patch(New, Patch) ->
+ sneaky_flatten(patch1(binary_to_term(Patch), str2lines(New))).
+
+%% patch1(Patch, A) -> B
+%% apply a sequence of patches to A to form B
+%% A is the original file, B is the new file
+%% A patch is either a new line (Str)
+%% or Lines {L1,L2} from A
+
+patch1([{L1,L2}|T], New) -> [get_lines(L1, L2, New)|patch1(T, New)];
+patch1([H|T], New) -> [H|patch1(T, New)];
+patch1([], _) -> [].
+
+get_lines(_, L2, [{L2,S}|_]) -> S;
+get_lines(L1, L2, [{L1,S}|T]) -> [S|get_lines(L1+1, L2, T)];
+get_lines(L1, L2, [_|T]) -> get_lines(L1, L2, T).
+
+sneaky_flatten(L) ->
+ binary_to_list(list_to_binary(L)).
+
+diff([], _, Patch) ->
+ term_to_binary(reverse(Patch));
+diff(Old = [{_,Str}|T], New, Patch) ->
+ case match(Old, New) of
+ {yes, Ln, Ln, Old1} ->
+ case Str of
+ "\n" ->
+ diff(Old1, New, [Str|Patch]);
+ _ ->
+ diff(Old1, New, [{Ln,Ln}|Patch])
+ end;
+ {yes, L1, L2, Old1} ->
+ diff(Old1, New, [{L1,L2}|Patch]);
+ no ->
+ diff(T, New, [Str|Patch])
+ end.
+
+match([{_,Str}|T], [{L1,Str}|T1]) -> extend_match(T, T1, L1, L1);
+match(X, [_|T]) -> match(X, T);
+match(_, []) -> no.
+
+extend_match([{_,S}|T1], [{L2,S}|T2], L1, _) -> extend_match(T1, T2, L1, L2);
+extend_match(X, _, L1, L2) -> {yes, L1, L2, X}.
+
%% @doc Split a string into [{LineNo, Line}] where Line keeps its
%% trailing newline; a final line without a newline is still included.
str2lines(Str) -> str2lines(Str, 1, [], []).

str2lines([$\n | Rest], LineNo, Chars, Acc) ->
    Line = {LineNo, lists:reverse([$\n | Chars])},
    str2lines(Rest, LineNo + 1, [], [Line | Acc]);
str2lines([C | Rest], LineNo, Chars, Acc) ->
    str2lines(Rest, LineNo, [C | Chars], Acc);
str2lines([], _LineNo, [], Acc) ->
    lists:reverse(Acc);
str2lines([], LineNo, Chars, Acc) ->
    lists:reverse([{LineNo, lists:reverse(Chars)} | Acc]).
+
60 src/elib1_doc.erl
@@ -0,0 +1,60 @@
+%% Copyright (c) 2006-2009 Joe Armstrong
+%% See MIT-LICENSE for licensing information.
+
+-module(elib1_doc).
+
+%% -compile(export_all).
+-export([batch/1, file/1, file/2, setup/1]).
+
+%% takes a file like this <html> ... </html>
+%% strips the header and replaces it with a valid xhtml header
+%% expands <e>...</e>
+
+-import(lists, [map/2, reverse/1, reverse/2]).
+
+batch([X]) ->
+ File = filename:rootname(atom_to_list(X)),
+ file(File).
+
+%% file converts F.ehtml to F.html in the same directory
+
+file(F) ->
+ io:format("elib1_doc::~s~n",[F]),
+ file(F ++ ".ehtml", F ++ ".html").
+
+file(InFile, OutFile) ->
+ case file:read_file(InFile) of
+ {ok, Bin} ->
+ Str1 = binary_to_list(Bin),
+ Str2 = remove_top_level_markup(Str1),
+ Str3 = elib1_expand:expand_string(Str2),
+ Str4 = add_xhtml_markup(InFile, Str3),
+ file:write_file(OutFile, Str4);
+ _ ->
+ cannot_read_file
+ end.
+
+remove_top_level_markup("<html>" ++ T) -> remove_top_level_markup(T, []).
+
+remove_top_level_markup("</html>" ++ _, L) -> reverse(L);
+remove_top_level_markup([H|T], L) -> remove_top_level_markup(T, [H|L]).
+
+add_xhtml_markup(File, L) ->
+ Root = filename:rootname(filename:basename(File)),
+ [<<"<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Strict//EN'
+ 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'>
+ <html xmlns='http://www.w3.org/1999/xhtml'>\n">>,
+ setup(Root),
+ L,
+ <<"</body></html>\n">>].
+
+setup(File) ->
+ ["<head>
+ <title>", File, "</title>
+ <link href='../include/elib1.css' type='text/css' rel='stylesheet'/>
+ </head>
+ <body>
+ <a href='/cgi?mod=elib1_content_edit&func=edit&file=",
+ File,"'>edit</a>
+
+ "].
452 src/elib1_docmaker.erl
@@ -0,0 +1,452 @@
+%% Copyright (c) 2006-2009 Joe Armstrong
+%% See MIT-LICENSE for licensing information.
+
+-module(elib1_docmaker).
+%% -compile(export_all).
+-import(lists, [filter/2,flatten/1, reverse/1, reverse/2]).
+-import(elib1_misc, [string2latex/1]).
+
+-export([test/0, batch/1, convert/1,
+ color_erlang_code/1,
+ str2wikiA/1,
+ wikiA2html/1, wikiA2latex/1]).
+
+test() ->
+ convert("./design.chap"),
+ convert("./lorum.chap"),
+ convert("./book.book").
+
%% @doc Command-line entry point: X is expected to be a single-element
%% list [FileAtom]; the named .chap/.book file is passed to convert/1.
%% Errors are caught and reported so a batch run never crashes.
batch(X) ->
    io:format("Batch:~p~n", [X]),
    try
        [A] = X,
        convert(atom_to_list(A))
    catch
        Class:Reason ->
            %% typo fixed in the report text ("ocurred" -> "occurred")
            io:format("Some error occurred ~p:~p~n", [Class, Reason])
    end.
+
+convert(File) ->
+ case filename:extension(File) of
+ ".chap" -> convert(File, chapter);
+ ".book" -> convert(File, book)
+ end.
+
+%% START:tag1
+convert(File, Type) ->
+ Root = filename:rootname(File),
+ Str = elib1_misc:file2string(File),
+ Pass3 = str2wikiA(Str),
+ Debug = "../tmp/" ++ Root,
+ elib1_misc:dump(Debug, Pass3),
+ Html = wikiA2html(Pass3),
+ HtmlFile = "../doc/" ++ Root ++ ".html",
+ file:write_file(HtmlFile, [Html]),
+ io:format("created ~s~n",[HtmlFile]),
+ Latex = wikiA2latex(Pass3),
+ LatexFile = "../tmp/" ++ Root ++ "_inc.tex",
+ %% elib1_misc:dump("temp", Latex),
+ %% elib1_misc:check_io_list(Latex),
+ Ret1 = file:write_file(LatexFile, [Latex]),
+ io:format("created ~s Ret1=~p~n",[LatexFile,Ret1]),
+ TexFile = "../tmp/" ++ Root ++ ".tex",
+ io:format("created ~s~n",[TexFile]),
+ make_tex(TexFile, Root ++ "_inc", Type),
+ make_pdf(Root).
+
+str2wikiA(Str) -> {wikiA, parse_str0(Str)}.
+
+parse_str0(Str) ->
+ Pass1 = parse(Str, 1, []),
+ Pass2 = flatten([pass2(I) || I <- Pass1]),
+ [pass3(I) || I <- Pass2].
+
%% @doc Run pdflatex twice over ../tmp/File.tex (two passes so that
%% cross-references and the table of contents resolve), then move the
%% resulting PDF into ../doc.
%% BUG FIX: the first pdflatex invocation had no file argument
%% ("pdflatex; pdflatex File"), so the first pass read stdin and did
%% nothing useful; both passes now receive File.
make_pdf(File) ->
    Cmd = "cd ../tmp; pdflatex " ++ File ++ "; pdflatex " ++ File,
    os:cmd(Cmd),
    file:rename("../tmp/" ++ File ++ ".pdf",
                "../doc/" ++ File ++ ".pdf").
+
+%% END:tag1
+%% Pass2 = [pass2(I) || I <- Pass1],
+%% elib1_misc:dump("pass2", Pass2).
+
+pass2({str,_Ln,L}) -> split_into_paras(L);
+pass2({dl,_Ln,L}) -> parse_dl(L);
+pass2({ol,_Ln,L}) -> {ol, parse_list(L)};
+pass2({ul,_Ln,L}) -> {ul, parse_list(L)};
+pass2({note,_Ln,Str}) -> {note,parse_str0(Str)};
+pass2({include,Ln,L}) -> parse_include(L, Ln);
+pass2({pre,_Ln,L}) -> {pre,L};
+pass2(X) -> exit({pass2, X}).
+
+pass3({para,[]}) -> drop;
+pass3({para,"=" ++ S}) -> parse_header(S, 1);
+pass3({para,S}) -> parse_para(S);
+pass3(X) -> X.
+
+parse_header([$=|T], N) -> parse_header(T, N+1);
+parse_header(T, N) -> {header, N, T}.
+
+parse_dl(S) ->
+ %% look for lines that start \n
+ %% io:format("parse_dl =~p~n",[S]),
+ Lines = split_into_regions(S),
+ %% io:format("Lines =~p~n",[Lines]),
+ Lines1 = [parse_dl_item(I) || I <- Lines],
+ %% io:format("Lines1 =~p~n",[Lines1]),
+ {dl, Lines1}.
+
+parse_dl_item(Str) ->
+ {Tag, Rest} = extract_tag(Str, []),
+ {tag,Tag,paras,parse_paras(split_into_paras(Rest))}.
+
+extract_tag([$\n|_],_) -> exit(nlNotAllowedInTag);
+extract_tag([], _) -> exit(eofInTag);
+extract_tag([$\\,$]|T], L) -> extract_tag(T, [$]|L]);
+extract_tag([$]|T], L) -> {reverse(L), T};
+extract_tag([H|T], L) -> extract_tag(T, [H|L]).
+
+split_into_regions(S) ->
+ L = re:split(S, "[\r]?\n[\s\t]*\\[",[{return,list}]),
+ remove_blank_lines(L).
+
+
+%% we only have the following top level tags
+%% <ul> ... </ul> <dl> .. </dl> <pre>...</pre> <include .... />
+%% NOTE: If you add a new top level tag remember to fix collect_str/3 as
+%% well
+
+parse([], _, L) -> reverse(L);
+parse("<note>" ++ T, Ln, L) -> parse1(note, T, "</note>", Ln, L);
+parse("<ol>" ++ T, Ln, L) -> parse1(ol, T, "</ol>", Ln, L);
+parse("<ul>" ++ T, Ln, L) -> parse1(ul, T, "</ul>", Ln, L);
+parse("<dl>" ++ T, Ln, L) -> parse1(dl, T, "</dl>", Ln, L);
+parse("<pre>" ++ T, Ln, L) -> parse1(pre, T, "</pre>", Ln, L);
+parse("<include" ++ T, Ln, L) -> parse1(include, T, "/>", Ln, L);
+parse(Str, Ln, L) ->
+ {Body, Ln1, T1} = collect_str(Str, Ln, []),
+ parse(T1, Ln1, [{str,Ln,Body}|L]).
+
+collect_str("<include" ++ _ = T, Ln, L) -> {reverse(L), Ln, T};
+collect_str("<pre>" ++ _ = T, Ln, L) -> {reverse(L), Ln, T};
+collect_str("<dl>" ++ _ = T, Ln, L) -> {reverse(L), Ln, T};
+collect_str("<ol>" ++ _ = T, Ln, L) -> {reverse(L), Ln, T};
+collect_str("<ul>" ++ _ = T, Ln, L) -> {reverse(L), Ln, T};
+collect_str("<note>" ++ _ = T, Ln, L) -> {reverse(L), Ln, T};
+collect_str([$\n|T], Ln, L) -> collect_str(T, Ln+1, [$\n|L]);
+collect_str([H|T], Ln, L) -> collect_str(T, Ln, [H|L]);
+collect_str([], Ln, L) -> {reverse(L), Ln, []}.
+
+parse1(Tag, Str, Stop, Ln, L) ->
+ {Body, Ln1, Str1} = collect_thing(Str, Stop, Ln, []),
+ parse(Str1, Ln1, [{Tag,Ln,Body}|L]).
+
+collect_thing([$\n|T], Stop, Ln, L) ->
+ collect_thing(T, Stop, Ln+1, [$\n|L]);
+collect_thing([H|T] = Str, Stop, Ln, L) ->
+ case elib1_misc:is_prefix(Stop, Str) of
+ {yes, Rest} -> {reverse(L), Ln, Rest};
+ no -> collect_thing(T, Stop, Ln, [H|L])
+ end;
+collect_thing([], Stop, Ln, _) ->
+ exit({eof,line,Ln,expecting,Stop}).
+
+%% split_into_para(Str) -> [Str].
+
+split_into_paras(S) ->
+ L = re:split(S, "[\r]?\n[\s\t]*[\r]?\n",[{return,list},trim]),
+ L1 = remove_blank_lines(L),
+ [{para,elib1_misc:remove_leading_whitespace(I)} || I <- L1].
+
+remove_blank_lines(L) ->
+ filter(fun(I) -> not elib1_misc:is_blank_line(I) end, L).
+
+%% list elements start with a +
+split_into_list_elements(S) ->
+ L = re:split(S, "[\r]?\n[\s\t]*\\+",[{return,list},trim]),
+ remove_blank_lines(L).
+
+parse_list(S) ->
+ Elements = split_into_list_elements(S),
+ [{li,parse_paras(split_into_paras(I))} || I <- Elements].
+
+parse_paras(L) ->
+ [ parse_para(I) || {para,I} <- L].
+
+parse_include(S, _Ln) ->
+ try
+ begin
+ {ok, Toks, _} = erl_scan:string(S),
+ case lists:sort(parse_include1(Toks)) of
+ [{file,File},{tag,Tag}] ->
+ Str = elib1_misc:get_erl_section(File, Tag),
+ Type = filename:extension(File),
+ {include, File, Type, Str};
+ [{chapter,Chap}] ->
+ {includeChapter, Chap}
+ end
+ end
+ catch
+ XX:YY ->
+ exit({ebadInclude, S, XX, YY})
+ end.
+
+parse_include1([{atom,_,A},{'=',_},{string,_,S}|T]) ->
+ [{A,S}|parse_include1(T)];
+parse_include1([]) ->
+ [].
+
+parse_para(Str) ->
+ {p, parse_para(Str, [])}.
+
+parse_para1(Tag, Str, Stop, L) ->
+ {B, Str1} = collect(Str, Stop, []),
+ parse_para(Str1, [{Tag,B}|L]).
+
+parse_para2(Tag, Str, Stop, L) ->
+ {B, Str1} = collect(Str, Stop, []),
+ parse_para(Str1, [{Tag,parse_para(B)}|L]).
+
%% @doc Inline-markup scanner for one paragraph; accumulates inlines
%% ('br', tagged tuples, or {str,S}) and returns them in order.
parse_para([], L) -> reverse(L);
parse_para("<br/>" ++ T, L) -> parse_para(T, [br|L]);
parse_para("<u>" ++ T, L) -> parse_para1(u, T, "</u>", L);
%% BUG FIX: <tt> used to be tagged 'u' (rendered as <underline> in the
%% HTML backend); tag it 'tt' so inline2html's {tt,_} clause renders
%% <tt>...</tt> as teletype text as intended.
parse_para("<tt>" ++ T, L) -> parse_para1(tt, T, "</tt>", L);
parse_para("<<" ++ T, L) -> parse_para1(code, T, ">>", L);
parse_para("<code>" ++ T, L) -> parse_para1(code, T, "</code>", L);
parse_para("<footnote>" ++ T, L) -> parse_para1(footnote, T, "</footnote>", L);
parse_para("''" ++ T, L) -> parse_para1(i, T, "''", L);
parse_para("[[" ++ T, L) -> parse_para1(link, T, "]]", L);
parse_para("<s>" ++ T, L) -> parse_para2(strike, T, "</s>", L);
parse_para("~~" ++ T, L) -> parse_para2(strike, T, "~~", L);
parse_para("<warn>" ++ T, L) -> parse_para2(warn, T, "</warn>", L);
parse_para("\"" ++ T, L) -> parse_para2(quoted, T, "\"", L);
parse_para(T, L) ->
    {B, T1} = collect_str(T, []),
    parse_para(T1, [{str,B}|L]).
+
+collect_str("[[" ++ _ = T, L) -> {reverse(L), T};
+collect_str("''" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<tt>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<s>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<br/>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("~~" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<u>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<code>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<warn>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<footnote>" ++ _ = T, L) -> {reverse(L), T};
+collect_str("<<" ++ _ = T, L) -> {reverse(L), T};
+collect_str("\"" ++ _ = T, L) -> {reverse(L), T};
+collect_str([H|T], L) -> collect_str(T, [H|L]);
+collect_str([], L) -> {reverse(L), []}.
+
+%% mk to lib
+
+collect([], _, L) ->
+ {reverse(L), []};
+collect([$\\,H|T], Stop, L) ->
+ collect(T, Stop, [H|L]);
+collect(Str, Stop, L) ->
+ case elib1_misc:extract_prefix(Stop, Str) of
+ {yes, Rest} ->
+ {reverse(L), Rest};
+ no ->
+ collect(tl(Str), Stop, [hd(Str)|L])
+ end.
+
+%%----------------------------------------------------------------------
+
+html_header() ->
+ ["<style>"
+ "body {margin-left:1in; margin-right:1in; text-align:justify}"
+ "div.note { margin-left:1cm; margin-right:1cm; "
+ " padding:10px; background-color:#aaaaaa}"
+ "</style>\n"].
+
+
+wikiA2html({wikiA, L}) ->
+ [html_header(),[wikiA2html(I) || I <- L]];
+wikiA2html({include,File,_Tag,Str}) ->
+ ["<b>",File,"</b>\n<ul><pre><b>",quote(Str),"</b></pre></ul>\n"];
+wikiA2html({header,N,S}) ->
+ M = integer_to_list(N),
+ ["<h",M,">",quote(S),"</h",M,">\n"];
+wikiA2html({note,L}) ->
+ ["<div class='note'>\n", wikiA2html(L),"</div>\n"];
+wikiA2html({p, L}) ->
+ ["<p>",inlines2html(L),"</p>\n"];
+wikiA2html({pre, L}) ->
+ ["<ul><pre><b>\n", quote(L),"\n</b></pre></ul>\n"];
+wikiA2html({dl, L}) ->
+ ["<dl>\n", dl_body_to_html(L),"\n</dl>\n"];
+wikiA2html({nowiki, L}) ->
+ L;
+wikiA2html({ol, L}) ->
+ ["<ol>",[["<li>",list_paras(I),"</li>\n"] ||{li, I} <-L],"</ol>\n"];
+wikiA2html({ul, L}) ->
+ ["<ul>",[["<li>",list_paras(I),"</li>\n"]|| {li, I} <- L],"</ul>\n"];
+wikiA2html({bullet, L}) ->
+ ["<ul>\n",[li(I)||I<-L],"\n</ul>\n"];
+wikiA2html(drop) -> [];
+wikiA2html({includeChapter,X}) ->
+ ["<li><a href='",X,".html'>",X,"</a>"];
+wikiA2html(L) when is_list(L) ->
+ [wikiA2html(I) || I <- L];
+wikiA2html(X) ->
+ io:format("wikiA2html???:~p~n",[X]),
+ pre(X).
+
+list_paras([{p,P}|T]) ->
+ %% the first para has no <p> .. </p> wrapper the rest do
+ [inlines2html(P) | [wikiA2html(I) || I <- T]].
+
+dl_body_to_html(L) ->
+ [["<dt><b>",Tag,"</b></dt>"|dl_body1(Ps)] || {tag,Tag,paras,Ps} <- L].
+
+dl_body1([{p,H}|T]) ->
+ ["<dd>",inlines2html(H),"\n" , [wikiA2html(I) || I <- T],"</dd>\n"].
+
+li(X) ->
+ ["<li>",quote(X),"</li>\n"].
+
+pre(X) ->
+ L = lists:flatten(io_lib:format("**~p~n", [X])),
+ ["<pre>\n",quote(L),"\n</pre>\n"].
+
+inlines2html(L) -> [inline2html(I)||I<-L].
+
%% @doc Render one parsed inline element as an HTML iolist.
%% BUG FIX: the line-break clause emitted "</br>", which is not a
%% valid HTML element; the correct void-element form is "<br/>".
inline2html(br) -> "<br/>\n";
inline2html({link,L}) -> ["<a href='",L,"'>",L,"</a>"];
inline2html({strike,{p,L}}) -> ["<strike>",inlines2html(L),"</strike>"];
inline2html({warn,{p,L}}) -> ["<font color='red'>",inlines2html(L),"</font>"];
inline2html({quoted,{p,L}}) -> ["&ldquo;",inlines2html(L),"&rdquo;"];
inline2html({footnote,L}) -> ["<span class='footnote'>",quote(L),"</span>"];
inline2html({b, I}) -> ["<b>",quote(I),"</b>"];
inline2html({i, I}) -> ["<i>",quote(I),"</i>"];
inline2html({bi, I}) -> ["<b><i>",quote(I),"</i></b>"];
inline2html({u, I}) -> ["<underline>",quote(I),"</underline>"];
inline2html({code, I}) -> ["<tt>",quote(I),"</tt>"];
inline2html({tt, I}) -> ["<tt>",quote(I),"</tt>"];
inline2html({str, I}) -> quote(I);
inline2html(X) -> pre({unexpected,inline,X}).
+
+quote("<" ++ T) -> "&lt;" ++ quote(T);
+quote("&" ++ T) -> "&amp;" ++ quote(T);
+quote("'" ++ T) -> "&rsquo;" ++ quote(T);
+quote("---" ++ T) -> "&mdash;" ++ quote(T);
+quote("--" ++ T) -> "&ndash;" ++ quote(T);
+quote([H|T]) -> [H|quote(T)];
+quote([]) -> [].
+
+%%----------------------------------------------------------------------
+
+begin_end(Tag, Content) ->
+ ["\\begin{",Tag,"}\n", Content, "\\end{",Tag,"}\n\n"].
+
+wikiA2latex({wikiA, L}) -> wikiA2latex(L);
+wikiA2latex(L) when is_list(L) -> [wikiA2latex(I) || I <- L];
+wikiA2latex({p, L}) -> [wikiA2latex(L),"\n\n"];
+wikiA2latex({str,S}) -> string2latex(S);
+wikiA2latex(drop) -> [];
+wikiA2latex({li,X}) -> ["\\item ", wikiA2latex(X)];
+wikiA2latex({i,I}) -> ["{\\sl ", string2latex(I),"}"];
+wikiA2latex({footnote,I}) -> ["\\footnote{ ", string2latex(I),"}"];
+wikiA2latex({header,N,I}) -> ["\\eee",$a+N-1,"{", string2latex(I),"}"];
+wikiA2latex({code,I}) -> ["\\texttt{", string2latex(I),"}"];
+wikiA2latex({pre,L}) -> begin_end("verbatim", L);
+wikiA2latex({quoted,{p,L}}) -> ["``", [wikiA2latex(I) || I <- L], "''"];
+wikiA2latex({dl,L}) -> [wikiA_tag_to_latex(I) || I <- L];
+wikiA2latex({ul, L}) -> begin_end("itemize",
+ [wikiA2latex(I)||I <-L]);
+wikiA2latex({ol,L}) -> begin_end("enumerate", [wikiA2latex(I) || I <- L]);
+wikiA2latex({strike,{p,L}}) -> ["\\sout{", [wikiA2latex(I) || I <- L], "}"];
+wikiA2latex({warn,{p,L}}) -> ["\\textcolor{red}{",[wikiA2latex(I) || I <- L],"}"];
+wikiA2latex({includeChapter, X}) -> ["\\input{",X,"_inc}\n"];
+wikiA2latex({note, X}) -> begin_end("note", wikiA2latex(X));
+wikiA2latex({include,File,".erl",Str}) ->
+ ["\\verb+",File,"+\n",
+ "\\begin{verbatim}\n",
+ Str,
+ "\\end{verbatim}\n"];
+wikiA2latex(X) ->
+ io:format("wikiA2latex:~p~n",[X]),
+ ["\\begin{verbatim}\n",
+ string2latex(flatten(io_lib:format("~p~n",[X]))),
+ "\\end{verbatim}\n"].
+
+wikiA_tag_to_latex({tag,Tag,paras,[P1|Paras]}) ->
+ ["\\hangindent=3pc \\hangafter=1\n",
+ "\\verb+",Tag,"+\\\\\n",
+ wikiA2latex(P1),
+ [["\\leftskip=3pc\n",wikiA2latex(I)] || I <- Paras],