Initial commit w/ single-file styling

commit bf64e810e5b468936ccf8e3dc7ddd9f9955b7bd8 (0 parents), committed by @nevir on Nov 26, 2011
@@ -0,0 +1,3 @@
+.DS_Store
+.sass-cache
+node_modules
@@ -0,0 +1,6 @@
+{
+ "_": ["lib"],
+ "index": "README.md",
+ "strip": ["lib"],
+ "very-verbose": true
+}
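Judging from the CLI option handling further down, this configuration would presumably be equivalent to running `lidoc lib --index README.md --strip lib --very-verbose` directly; lidoc falls back to `.lidoc.json` only when it is invoked with no positional arguments.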
@@ -0,0 +1,16 @@
+Copyright (c) 2011 Ian MacLeod <ian@nevir.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+associated documentation files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or
+substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
+OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,7 @@
+#!/usr/bin/env node
+
+require('lidoc').CLI(process.argv.slice(2), function(error) {
+ if (error) {
+ process.exit(1)
+ }
+})
@@ -0,0 +1,6 @@
+require('coffee-script')
+
+module.exports = require('autorequire')('./lib', 'Classical', {
+ specialCaseModuleNames: {cli: 'CLI', languages: 'LANGUAGES', underscore: '_'},
+ extraGlobalModules: ['colors', 'fs-tools', 'jade', 'optimist', 'showdown', 'underscore']
+})
@@ -0,0 +1,73 @@
+class BaseStyle
+ constructor: (project) ->
+ @project = project
+ @log = project.log
+
+ renderFile: (data, sourcePath, targetPath, callback) ->
+ @log.trace 'BaseStyle#renderFile(..., %s, %s, ...)', sourcePath, targetPath
+
+ language = LANGUAGES.CoffeeScript
+ segments = Utils.splitSource data, language
+ @log.debug 'Split %s into %d segments', sourcePath, segments.length
+
+ Utils.highlightCode segments, language, (error) =>
+ if error
+ @log.debug error.pygmentsOutput if error.pygmentsOutput
+ @log.error 'Failed to highlight %s: %s', sourcePath, error.message
+ return callback error
+
+ Utils.markdownComments segments, @project, (error) =>
+ if error
+ @log.error 'Failed to markdown %s: %s', sourcePath, error.message
+ return callback error
+
+ @renderDocFile segments, sourcePath, targetPath, callback
+
+ renderDocFile: (segments, sourcePath, targetPath, callback) ->
+ @log.trace 'BaseStyle#renderDocFile(..., %s, %s, ...)', sourcePath, targetPath
+
+ throw new Error "@templateFunc must be defined by subclasses!" unless @templateFunc
+
+ docPath = path.resolve @project.outPath, "#{targetPath}.html"
+
+ fsTools.mkdir path.dirname(docPath), 0755, (error) =>
+ if error
+ @log.error 'Unable to create directory %s: %s', path.dirname(docPath), error.message
+ return callback error
+
+ for segment in segments
+ segment.markdownedComments = Utils.trimBlankLines segment.markdownedComments
+ segment.highlightedCode = Utils.trimBlankLines segment.highlightedCode
+
+ templateContext =
+ project: @project
+ segments: segments
+ sourcePath: sourcePath
+ targetPath: targetPath
+
+ # How many levels deep are we?
+ pathChunks = path.dirname(targetPath).split(/[\/\\]/)
+ if pathChunks.length == 1 && pathChunks[0] == '.'
+ templateContext.relativeRoot = ''
+ else
+ templateContext.relativeRoot = "#{pathChunks.map(-> '..').join '/'}/"
+
+ try
+ data = @templateFunc templateContext
+
+ catch error
+ @log.error 'Rendering documentation template for %s failed: %s', docPath, error.message
+ return callback error
+
+ fs.writeFile docPath, data, 'utf-8', (error) =>
+ if error
+ @log.error 'Failed to write documentation file %s: %s', docPath, error.message
+ return callback error
+
+ @log.pass docPath
+ callback()
+
+ renderCompleted: (callback) ->
+ @log.trace 'BaseStyle#renderCompleted(...)'
+
+ callback()
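`renderDocFile` throws unless a subclass has assigned `@templateFunc`, so a concrete style is expected to hand the base class a compiled template function. As a minimal sketch only (the class name and template path below are hypothetical, not part of this commit), a single-file style could do something like:

# Hypothetical sketch: a concrete style supplying @templateFunc to BaseStyle.
# `fs`, `path`, and `jade` are assumed to be in scope, just as they are for BaseStyle above.
class SingleFileStyle extends BaseStyle
  constructor: (project) ->
    super project
    templatePath = path.resolve __dirname, 'single-file.jade'  # assumed template location
    # jade.compile returns a function(context) -> html, which is the shape renderDocFile
    # expects of @templateFunc.
    @templateFunc = jade.compile fs.readFileSync(templatePath, 'utf-8'), filename: templatePath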
@@ -0,0 +1,118 @@
+# Readable command line output is just as important as readable documentation! It is the first
+# interaction that a developer will have with a tool like this, so we want to leave a good
+# impression with nicely formatted and readable output.
+
+# It's the caller's responsibility to give us a nice list of ARGV options. It's only a minor
+# annoyance in our binary, but a real win for the crazed developer who wants to be lazy and emulate
+# command line behavior.
+CLI = (inputArgs, callback) ->
+ # As such, we don't want our output bumping up against the shell execution lines and blurring
+ # together; use that whitespace with great gusto!
+ console.log ''
+
+ actualCallback = callback
+ callback = (args...) ->
+ # In keeping with our console beautification project, make sure that our output isn't getting
+ # too comfortable with the user's next shell line!
+ console.log ''
+
+ actualCallback args...
+
+ # [Optimist](https://github.com/substack/node-optimist) is a fantastic options parsing tool, but
+ # it does have a few rough edges for our uses:
+ #
+ # * We don't want to goof up the global optimist instance; it costs us this line of code, and
+ # allows the enterprising scripter to call into our CLI handling, if they're crazy enough to.
+ opts = optimist inputArgs
+
+ opts
+ .usage("""
+ Usage: lidoc [options] dirs/ sou/rce.files
+
+ You can also specify arguments via a configuration file in the current directory named
+ .lidoc.json. It should contain a mapping between (full) option names and their values. Search
+ paths (the positional arguments) should be set via the key "_". For example:
+
+      { "_": ["lib", "vendor"], "out": "documentation", "strip": [] }
+
+ Run lidoc without arguments to use the configuration file.
+ """)
+
+ # * Booleans don't jive very well with the `options` call, and they need to be declared prior to
+ # referencing `opts.argv`, or you risk associating positional options with a boolean flag.
+ .boolean(['help', 'h', '?', 'verbose', 'very-verbose'])
+
+ .options('help',
+ describe: "You're looking at it."
+ alias: ['h', '?']
+ )
+
+ .options('out',
+ describe: "The directory to place generated documentation, relative to the project root."
+ alias: 'o'
+ default: 'doc'
+ )
+
+ .options('index',
+ describe: "The file to use as the index of the generated documentation."
+ alias: 'i'
+ )
+
+ .options('root',
+ describe: "The root directory of the project."
+ alias: 'r'
+ default: process.cwd()
+ )
+
+ .options('strip',
+ describe: "A path prefix to strip when generating documentation paths (or --no-strip)."
+ alias: 's'
+    # * We want the default value of `--strip` to mirror the directories given to us by the user.
+    #   This ensures that the common case of `lidoc lib/` will map `lib/some/file.coffee` to
+ # `doc/some/file.html`, and not a redundant and ugly path such as `doc/lib/some/file.html`.
+ default: (p for p in opts.argv._ when fs.statSync(p).isDirectory())
+ )
+
+ .options('verbose',
+ describe: "Output the inner workings of lidoc to help diagnose issues."
+ )
+
+ .options('very-verbose',
+ describe: "Hey, you asked for it."
+ )
+
+ argv = opts.argv
+  # * There also doesn't appear to be a way to enforce that a particular option is an array, so we
+  #   do this coercion ourselves.
+ argv.strip = [argv.strip] unless util.isArray argv.strip
+
+ return console.log opts.help() if argv.help
+
+  # We merge the .lidoc.json configuration into argv in order to pick up default values, such as
+  # the root mapping to `process.cwd()`. Please don't treat this as a contract; it has the
+  # potential to change behavior in the future.
+ if argv._.length == 0
+ try
+ config = require path.resolve process.cwd(), '.lidoc.json'
+ argv[k] = v for k,v of config
+
+ catch err
+ console.log opts.help()
+      console.log ''
+ Logger.error "Failed to load .lidoc.json: %s", err.message
+
+ return callback err
+
+ project = new Project argv.root, argv.out
+
+ # Set up our logging configuration if the user cares about verbosity
+ project.log.minLevel = Logger::LEVELS.DEBUG if argv.verbose
+ project.log.minLevel = Logger::LEVELS.TRACE if argv['very-verbose']
+ project.log.trace "argv: %j", argv
+
+ # Set up the project
+ project.index = argv.index
+ project.add p for p in argv._
+ project.stripPrefix p for p in argv.strip when p # --no-strip will result in argv.strip == [false]
+
+ project.generate callback
@@ -0,0 +1,5 @@
+LANGUAGES =
+ CoffeeScript:
+ nameMatchers: ['.coffee']
+ pygmentsLexer: 'coffee-script'
+ singleLineComment: ['#']
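Each entry pairs filename matchers with a Pygments lexer name and the language's single-line comment markers. As a hedged sketch of the shape only (not part of this commit), an additional language would presumably be registered like this:

# Hypothetical additional entry, mirroring the CoffeeScript definition above.
LANGUAGES.JavaScript =
  nameMatchers: ['.js']
  pygmentsLexer: 'javascript'
  singleLineComment: ['//']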
@@ -0,0 +1,64 @@
+# We have pretty simple needs for a logger, and so far have been unable to find a reasonable
+# off-the-shelf solution that fits them without being too overbearing:
+class Logger
+ # * We want the standard levels of output, plus a few more.
+ LEVELS:
+ TRACE: 0
+ DEBUG: 1
+ INFO: 2
+ PASS: 2
+ WARN: 3
+ ERROR: 4
+
+  # * Full-on level labels are **extremely** heavy for what is primarily a command-line tool. The
+ # common case - `INFO` - does not even expose a label. We only want it to call out uncommon
+ # events with some slight symbolism.
+ LEVEL_PREFIXES:
+ TRACE: ''
+ DEBUG: ''
+ INFO: ' '
+ PASS: ''
+ WARN: '» '
+ ERROR: '! '
+
+ # * Colors make the world better.
+ LEVEL_COLORS:
+ TRACE: 'grey'
+ DEBUG: 'grey'
+ INFO: 'black'
+ PASS: 'green'
+ WARN: 'yellow'
+ ERROR: 'red'
+
+ # * Don't forget the semantics of our output.
+ LEVEL_STREAMS:
+ TRACE: console.log
+ DEBUG: console.log
+ INFO: console.log
+ PASS: console.log
+ WARN: console.error
+ ERROR: console.error
+
+ constructor: (minLevel = @LEVELS.INFO) ->
+ @minLevel = minLevel
+
+ for name of @LEVELS
+ do (name) =>
+ @[name.toLowerCase()] = (args...) ->
+ @emit name, args...
+
+ emit: (levelName, args...) ->
+ if @LEVELS[levelName] >= @minLevel
+ output = util.format args...
+ # * We like nicely indented output
+ output = output.split(/\r?\n/).join('\n ')
+
+ @LEVEL_STREAMS[levelName] colors[@LEVEL_COLORS[levelName]] "#{@LEVEL_PREFIXES[levelName]}#{output}"
+
+ output
+
+# * Sometimes we just want one-off logging
+globalLogger = new Logger Logger::LEVELS.TRACE
+
+for level of globalLogger.LEVELS
+  do (level) -> Logger[level.toLowerCase()] = (args...) -> globalLogger[level.toLowerCase()] args...
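As a rough usage sketch (illustrative only, not part of this commit), the dynamically defined instance helpers and the class-level one-off helpers behave like this:

# Instance logging honors minLevel; arguments are run through util.format.
log = new Logger Logger::LEVELS.DEBUG
log.info 'Mapped %d files', 3        # plain output on stdout
log.warn 'Skipping %s', '.DS_Store'  # '» ' prefix, yellow, on stderr
log.trace 'Hidden: TRACE is below the DEBUG threshold'

# One-off class-level logging routes through the shared globalLogger.
Logger.error 'Something went wrong: %s', 'details here'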
@@ -0,0 +1,86 @@
+# A core concept of `lidoc` is that your code is grouped into a project, and that there is a certain
+# amount of context that it lends to your documentation.
+#
+# A project:
+class Project
+  constructor: (root, outPath, minLogLevel=Logger::LEVELS.INFO) ->
+ @log = new Logger minLogLevel
+
+ # * Has a single root directory that contains (most of) it.
+ @root = path.resolve root
+    # * Generally wants documentation generated somewhere within its tree. We default the output path
+ # to be relative to the project root, unless you pass an absolute path.
+ @outPath = path.resolve @root, outPath
+ # * Contains a set of files to generate documentation from, source code or otherwise.
+ @files = []
+ # * Should strip specific prefixes of a file's path when generating relative paths for
+ # documentation. For example, this could be used to ensure that `lib/some/source.file` maps
+ # to `doc/some/source.file` and not `doc/lib/some/source.file`.
+ @stripPrefixes = ["#{@root}/"]
+
+ # Add source files that should have documentation generated for them to the project.
+ add: (fileOrDir) ->
+ @log.trace "Project#add(%s)", fileOrDir
+
+ absPath = path.resolve @root, fileOrDir
+ stats = fs.statSync absPath
+
+ # You can add individual files, and we special case that.
+ if stats.isFile()
+ @files.push absPath
+
+ # Or directories to be recursively walked to find all files under them.
+ else if stats.isDirectory()
+ @add path.join fileOrDir, p for p in fs.readdirSync fileOrDir when path.basename(p)[0] != '.'
+
+ # Adds a path prefix that should be stripped from source file paths in order to generate relative
+ # paths for documentation.
+ stripPrefix: (pathPrefix) ->
+    @log.trace 'Project#stripPrefix(%s)', pathPrefix
+
+ # Prefix paths are either relative to the project root, or absolute
+ @stripPrefixes.push "#{path.resolve @root, pathPrefix}/"
+
+  # This is both a performance (over-)optimization and a debugging aid. Instead of spamming the
+ # system with file I/O and overhead all at once, we only process a certain number of source files
+ # concurrently. This is similar to what [graceful-fs](https://github.com/isaacs/node-graceful-fs)
+ # accomplishes.
+ BATCH_SIZE: 10
+
+ # Where the magic happens.
+ generate: (callback) ->
+ # We want to support multiple documentation styles, but we don't expect to have a stable API for
+ # that just yet.
+ style = new styles.default.Style @
+
+ fileMap = Utils.mapFiles @files, @stripPrefixes
+ toProcess = (k for k of fileMap)
+ inFlight = 0
+
+ processFile = =>
+ currentFile = toProcess.pop()
+ inFlight += 1
+ @log.trace "Processing %s (%d in flight)", currentFile, inFlight
+
+ fs.readFile currentFile, 'utf-8', (error, data) =>
+ if error
+ @log.error "Failed to process %s: %s", currentFile, error.message
+ return callback error
+
+ style.renderFile data, currentFile, fileMap[currentFile], (error) =>
+ return callback error if error
+
+ inFlight -= 1
+ if toProcess.length > 0
+ processFile()
+ else
+ if inFlight == 0
+ style.renderCompleted (error) =>
+ return callback error if error
+
+ @log.info ''
+ @log.pass 'Documentation generated'
+ callback()
+
+ while toProcess.length > 0 and inFlight < @BATCH_SIZE
+ processFile()
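The CLI above is a thin wrapper around this class; as a hedged illustration (the paths here are hypothetical), driving it programmatically would presumably look like:

# Mirrors what CLI() does once optimist has finished parsing.
project = new Project process.cwd(), 'doc'
project.index = 'README.md'
project.add 'lib'          # individual files are pushed; directories are walked recursively
project.stripPrefix 'lib'  # so lib/some/file.coffee maps to doc/some/file.html
project.generate (error) ->
  throw error if error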