From 961026e2930d8eaab7785843ad123be6d55b5859 Mon Sep 17 00:00:00 2001 From: Rebecca Murphey Date: Thu, 26 Nov 2015 19:42:36 -0600 Subject: [PATCH] it works --- .gitignore | 1 + deploy.sh | 7 + harp.json | 4 +- package.json | 11 +- public/_partials/layout.jade | 183 +- public/_partials/page-nav.jade | 4 +- public/_partials/post-nav.jade | 5 +- public/_partials/search.jade | 5 +- public/_partials/tag.jade | 4 +- public/about.md | 16 +- public/blog/_data.json | 8 +- public/blog/_layout.jade | 8 +- public/blog/building-for-http2.md | 199 ++ public/index.jade | 9 - server.js | 137 +- www/404.html | 116 + www/about.html | 248 ++ www/archives/2006/index.html | 116 + www/archives/2007/index.html | 116 + www/archives/2008/index.html | 116 + www/archives/2009/index.html | 116 + www/archives/2010/index.html | 116 + www/archives/2011/index.html | 116 + www/archives/2012/index.html | 116 + www/archives/2013/index.html | 116 + www/archives/2014/index.html | 116 + www/archives/2015/index.html | 116 + www/archives/index.html | 116 + www/archives/layout.html | 231 ++ ...s-attribute-vs-id-selectors-in-jquery.html | 140 + ...some-thoughts-on-working-for-yourself.html | 182 ++ ...y-want-a-jack-of-all-trades-developer.html | 138 + www/blog/a-dojo-boilerplate.html | 127 + www/blog/a-new-chapter.html | 132 + ...pen-source-jquery-training-curriculum.html | 145 + ...her-cautionary-remote-javascript-tale.html | 129 + .../avoid-bare-class-selectors-in-jquery.html | 142 + www/blog/back-on-the-air.html | 118 + www/blog/building-for-http2.html | 249 ++ www/blog/bye-bye-yui-bloat.html | 123 + www/blog/cache-regexes-in-javascript.html | 118 + www/blog/centering-images.html | 118 + .../choosing-presentation-color-scheme.html | 132 + ...2-structuring-javascript-applications.html | 228 ++ ...s-tables-maybe-the-design-is-to-blame.html | 147 + ...cle-through-list-elements-with-jquery.html | 262 ++ www/blog/datejs.html | 118 + www/blog/dear-conference-organizer.html | 128 + www/blog/deferreds-coming-to-jquery.html | 248 ++ ...termine-the-order-of-two-dom-elements.html | 178 ++ ...ty-blanket-and-lived-to-tell-the-tale.html | 561 ++++ www/blog/enterprisedojo-com-uses-jquery.html | 126 + www/blog/five-questions.html | 223 ++ www/blog/fix-for-slow-loading-google-ads.html | 126 + .../fix-uneven-line-lengths-in-headlines.html | 174 ++ www/blog/flying-lessons.html | 169 ++ ...25-more-this-gift-card-comes-in-green.html | 132 + ...nctionality-focused-code-organization.html | 170 ++ www/blog/getting-better-at-javascript.html | 211 ++ ...evelopers-on-the-subversion-bandwagon.html | 236 ++ www/blog/how-i-learned-css.html | 157 + ...r-dreamweaver-s-design-view-no-really.html | 122 + www/blog/i-need-to-say-a-few-things.html | 132 + ...n-case-you-thought-ie6-was-going-away.html | 120 + ...earch-of-javascript-developers-a-gist.html | 562 ++++ .../inaugural-north-carolina-jquery-camp.html | 134 + ...ery-into-any-page-using-a-bookmarklet.html | 126 + ...introducing-yayquery-a-jquery-podcast.html | 137 + ...jquery-breakout-women-and-conferences.html | 225 ++ .../jquery-build-table-from-json-data.html | 180 ++ .../jquery-ie7-operation-aborted-error.html | 123 + ...ry-style-guide-from-benjamin-sterling.html | 120 + www/blog/jquery-validation-and-tinymce.html | 133 + ...st-one-element-in-a-group-is-required.html | 143 + www/blog/js-conditionals.html | 197 ++ .../karma-webpack-tape-code-coverage.html | 273 ++ www/blog/lessons-from-a-rewrite.html | 427 +++ ...ed-from-taking-on-a-project-in-crisis.html | 146 + 
...ojo-when-a-simple-xhr-caching-example.html | 199 ++ www/blog/modern-javascript.html | 133 + www/blog/more-on-jquery-selectors.html | 125 + ...-case-for-standards-based-web-layouts.html | 157 + ...off-a-design-to-a-front-end-developer.html | 200 ++ www/blog/object-literals.html | 209 ++ www/blog/objects-as-arguments.html | 333 +++ www/blog/office-hours.html | 123 + www/blog/on-conferencing.html | 172 ++ ...ning-respect-as-a-front-end-developer.html | 135 + www/blog/on-jquery-large-applications.html | 192 ++ www/blog/on-rolling-your-own.html | 182 ++ ...peaking-at-the-2009-jquery-conference.html | 134 + www/blog/patterns-for-dry-er-javascript.html | 280 ++ www/blog/pausing-office-hours.html | 123 + www/blog/planning-a-wordpress-cms-site.html | 174 ++ www/blog/recent-talks.html | 140 + www/blog/remixing-trac-with-jquery.html | 198 ++ ...ipt-with-document-write-is-killing-me.html | 130 + ...alivating-over-server-side-javascript.html | 123 + www/blog/seeking-a-web-designer.html | 126 + www/blog/selectors-in-jquery.html | 118 + .../skipping-photoshop-for-web-design.html | 124 + ...hich-i-d-be-hard-pressed-to-do-my-job.html | 144 + ...n-for-google-map-contents-not-showing.html | 120 + ...urns-bad-results-in-internet-explorer.html | 151 + www/blog/standards-for-html-emails.html | 118 + .../suspicious-stumbleupon-bounce-rates.html | 125 + www/blog/tech-podcast-recording-tips.html | 155 + ...-jquery-fundamentals-and-a-confession.html | 142 + www/blog/this-is-the-cigarette.html | 123 + www/blog/this-is-the-cup-of-coffee.html | 125 + www/blog/times-open-science-fair.html | 124 + www/blog/ttl-podcast.html | 127 + www/blog/txjs-js-bbq-you.html | 184 ++ www/blog/unit-tests.html | 432 +++ ...-browser-method-to-add-icons-to-links.html | 224 ++ www/blog/update-page-using-json-data.html | 173 ++ ...icks-with-google-analytics-and-jquery.html | 152 + .../using-ems-for-font-sizing-in-css.html | 131 + .../using-objects-to-organize-your-code.html | 586 ++++ ...building-a-non-trivial-js-application.html | 124 + www/blog/writing-conference-proposals.html | 136 + www/feed.xml | 2623 +++++++++++++++++ www/index.html | 118 + www/js/highlight.min.js | 1 + www/search.html | 119 + www/tag/business/index.html | 116 + www/tag/code/index.html | 116 + www/tag/personal/index.html | 116 + www/tag/web/index.html | 116 + 129 files changed, 21940 insertions(+), 170 deletions(-) create mode 100644 .gitignore create mode 100755 deploy.sh create mode 100644 public/blog/building-for-http2.md create mode 100644 www/404.html create mode 100644 www/about.html create mode 100644 www/archives/2006/index.html create mode 100644 www/archives/2007/index.html create mode 100644 www/archives/2008/index.html create mode 100644 www/archives/2009/index.html create mode 100644 www/archives/2010/index.html create mode 100644 www/archives/2011/index.html create mode 100644 www/archives/2012/index.html create mode 100644 www/archives/2013/index.html create mode 100644 www/archives/2014/index.html create mode 100644 www/archives/2015/index.html create mode 100644 www/archives/index.html create mode 100644 www/archives/layout.html create mode 100644 www/blog/13-seconds-attribute-vs-id-selectors-in-jquery.html create mode 100644 www/blog/2-years-in-some-thoughts-on-working-for-yourself.html create mode 100644 www/blog/5-reasons-you-don-t-really-want-a-jack-of-all-trades-developer.html create mode 100644 www/blog/a-dojo-boilerplate.html create mode 100644 www/blog/a-new-chapter.html create mode 100644 
www/blog/announcing-jquery-fundamentals-an-open-source-jquery-training-curriculum.html create mode 100644 www/blog/another-cautionary-remote-javascript-tale.html create mode 100644 www/blog/avoid-bare-class-selectors-in-jquery.html create mode 100644 www/blog/back-on-the-air.html create mode 100644 www/blog/building-for-http2.html create mode 100644 www/blog/bye-bye-yui-bloat.html create mode 100644 www/blog/cache-regexes-in-javascript.html create mode 100644 www/blog/centering-images.html create mode 100644 www/blog/choosing-presentation-color-scheme.html create mode 100644 www/blog/code-org-take-2-structuring-javascript-applications.html create mode 100644 www/blog/css-vs-tables-maybe-the-design-is-to-blame.html create mode 100644 www/blog/cycle-through-list-elements-with-jquery.html create mode 100644 www/blog/datejs.html create mode 100644 www/blog/dear-conference-organizer.html create mode 100644 www/blog/deferreds-coming-to-jquery.html create mode 100644 www/blog/determine-the-order-of-two-dom-elements.html create mode 100644 www/blog/dojo-confessions-or-how-i-gave-up-my-jquery-security-blanket-and-lived-to-tell-the-tale.html create mode 100644 www/blog/enterprisedojo-com-uses-jquery.html create mode 100644 www/blog/five-questions.html create mode 100644 www/blog/fix-for-slow-loading-google-ads.html create mode 100644 www/blog/fix-uneven-line-lengths-in-headlines.html create mode 100644 www/blog/flying-lessons.html create mode 100644 www/blog/for-only-25-more-this-gift-card-comes-in-green.html create mode 100644 www/blog/functionality-focused-code-organization.html create mode 100644 www/blog/getting-better-at-javascript.html create mode 100644 www/blog/getting-non-developers-on-the-subversion-bandwagon.html create mode 100644 www/blog/how-i-learned-css.html create mode 100644 www/blog/i-have-discovered-a-use-for-dreamweaver-s-design-view-no-really.html create mode 100644 www/blog/i-need-to-say-a-few-things.html create mode 100644 www/blog/in-case-you-thought-ie6-was-going-away.html create mode 100644 www/blog/in-search-of-javascript-developers-a-gist.html create mode 100644 www/blog/inaugural-north-carolina-jquery-camp.html create mode 100644 www/blog/insert-jquery-into-any-page-using-a-bookmarklet.html create mode 100644 www/blog/introducing-yayquery-a-jquery-podcast.html create mode 100644 www/blog/jquery-breakout-women-and-conferences.html create mode 100644 www/blog/jquery-build-table-from-json-data.html create mode 100644 www/blog/jquery-ie7-operation-aborted-error.html create mode 100644 www/blog/jquery-style-guide-from-benjamin-sterling.html create mode 100644 www/blog/jquery-validation-and-tinymce.html create mode 100644 www/blog/jquery-validation-indicate-that-at-least-one-element-in-a-group-is-required.html create mode 100644 www/blog/js-conditionals.html create mode 100644 www/blog/karma-webpack-tape-code-coverage.html create mode 100644 www/blog/lessons-from-a-rewrite.html create mode 100644 www/blog/lessons-learned-from-taking-on-a-project-in-crisis.html create mode 100644 www/blog/making-sense-of-dojo-when-a-simple-xhr-caching-example.html create mode 100644 www/blog/modern-javascript.html create mode 100644 www/blog/more-on-jquery-selectors.html create mode 100644 www/blog/my-case-for-standards-based-web-layouts.html create mode 100644 www/blog/notes-on-handing-off-a-design-to-a-front-end-developer.html create mode 100644 www/blog/object-literals.html create mode 100644 www/blog/objects-as-arguments.html create mode 100644 www/blog/office-hours.html create mode 100644 
www/blog/on-conferencing.html create mode 100644 www/blog/on-gaining-respect-as-a-front-end-developer.html create mode 100644 www/blog/on-jquery-large-applications.html create mode 100644 www/blog/on-rolling-your-own.html create mode 100644 www/blog/on-speaking-at-the-2009-jquery-conference.html create mode 100644 www/blog/patterns-for-dry-er-javascript.html create mode 100644 www/blog/pausing-office-hours.html create mode 100644 www/blog/planning-a-wordpress-cms-site.html create mode 100644 www/blog/recent-talks.html create mode 100644 www/blog/remixing-trac-with-jquery.html create mode 100644 www/blog/remote-javascript-with-document-write-is-killing-me.html create mode 100644 www/blog/salivating-over-server-side-javascript.html create mode 100644 www/blog/seeking-a-web-designer.html create mode 100644 www/blog/selectors-in-jquery.html create mode 100644 www/blog/skipping-photoshop-for-web-design.html create mode 100644 www/blog/software-without-which-i-d-be-hard-pressed-to-do-my-job.html create mode 100644 www/blog/solution-for-google-map-contents-not-showing.html create mode 100644 www/blog/solved-ajax-returns-bad-results-in-internet-explorer.html create mode 100644 www/blog/standards-for-html-emails.html create mode 100644 www/blog/suspicious-stumbleupon-bounce-rates.html create mode 100644 www/blog/tech-podcast-recording-tips.html create mode 100644 www/blog/the-future-of-jquery-fundamentals-and-a-confession.html create mode 100644 www/blog/this-is-the-cigarette.html create mode 100644 www/blog/this-is-the-cup-of-coffee.html create mode 100644 www/blog/times-open-science-fair.html create mode 100644 www/blog/ttl-podcast.html create mode 100644 www/blog/txjs-js-bbq-you.html create mode 100644 www/blog/unit-tests.html create mode 100644 www/blog/unobtrusive-cross-browser-method-to-add-icons-to-links.html create mode 100644 www/blog/update-page-using-json-data.html create mode 100644 www/blog/update-tracking-outbound-clicks-with-google-analytics-and-jquery.html create mode 100644 www/blog/using-ems-for-font-sizing-in-css.html create mode 100644 www/blog/using-objects-to-organize-your-code.html create mode 100644 www/blog/when-you-re-building-a-non-trivial-js-application.html create mode 100644 www/blog/writing-conference-proposals.html create mode 100644 www/feed.xml create mode 100644 www/index.html create mode 100644 www/js/highlight.min.js create mode 100644 www/search.html create mode 100644 www/tag/business/index.html create mode 100644 www/tag/code/index.html create mode 100644 www/tag/personal/index.html create mode 100644 www/tag/web/index.html diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c2658d7 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +node_modules/ diff --git a/deploy.sh b/deploy.sh new file mode 100755 index 0000000..0056428 --- /dev/null +++ b/deploy.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +node server compile +git add www +git commit -m 'deploy to heroku' +git push heroku master + diff --git a/harp.json b/harp.json index b342f27..4efa34f 100644 --- a/harp.json +++ b/harp.json @@ -1,8 +1,8 @@ { "globals": { - "site_title": "Adventures in JavaScript Development", + "site_title": "Adventures in JavaScript", "site_description": "", "site_url": "http://rmurphey.com", "analytics": "UA-143877-10" } -} \ No newline at end of file +} diff --git a/package.json b/package.json index ef362f1..a6295a0 100644 --- a/package.json +++ b/package.json @@ -1,15 +1,15 @@ { - "name": "remysharp.com", + "name": "rmurphey.com", "version": "2.33.10", - "description": "Remy 
Sharp's b:log", + "description": "Rebecca Murphey's blog", "main": "server.js", "engines": { - "node": ">=0.10.3 <0.12" + "node": ">=0.12 <1" }, "scripts": { "test": "node_modules/.bin/harp compile", "build": "node server.js compile", - "start": "node_modules/.bin/harp start" + "start": "node server" }, "bin": { "blog": "./bin/cli.js" @@ -22,6 +22,7 @@ "glob": "~4.0.5", "harp": "^0.17.0", "moment": "~2.8.2", + "node-sass": "^3.4.2", "promise": "^6.0.1", "router-stupid": "~0.7.0", "semver": "~3.0.1", @@ -29,4 +30,4 @@ "st": "~0.5.1", "then-fs": "^1.0.8" } -} \ No newline at end of file +} diff --git a/public/_partials/layout.jade b/public/_partials/layout.jade index c737c3f..8d3b87b 100644 --- a/public/_partials/layout.jade +++ b/public/_partials/layout.jade @@ -15,12 +15,73 @@ html#rmurphey-com block meta link(rel="shortcut icon" href="/favicon.ico") - link(rel="stylesheet" type="text/css" href="/css/screen.css?#{version}") - link(rel="stylesheet" type="text/css" href="/css/zenburn.css?#{version}") + link(href='https://fonts.googleapis.com/css?family=Open+Sans:400,700,400italic,300,800|Source+Code+Pro:400,700,300', rel='stylesheet', type='text/css') + link(rel="stylesheet", href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/styles/default.min.css") - link(rel="alternate" type="application/rss+xml" title="RSS 2.0" href="http://feeds.feedburner.com/remysharp") - link(rel="alternate" type="text/xml" title="RSS .92" href="http://feeds.feedburner.com/remysharp") - link(rel="alternate" type="application/atom+xml" title="Atom 0.3" href="http://feeds.feedburner.com/remysharp") + style. + @media (max-width: 800px) { + body { + margin: 5%; + } + } + + @media (min-width: 800px) { + body { + margin: 5% 20%; + font-size: 120%; + } + } + + body { + font-family: "Open Sans"; + font-weight: 300; + } + + p, li { + line-height: 1.8em; + } + + em { + font-style: italic; + } + + strong { + font-weight: 700; + } + + a:hover { + color: #fb6b70; + } + + a:visited { + color: #9a3337; + } + + a { + color: #d9272e; + text-decoration: none; + } + + pre, code { + font-family: "Source Code Pro", monospace; + font-weight: 400; + } + + pre strong, code strong { + font-weight: 700; + } + + h1.banner { + font-size: 220%; + } + + h1.banner span { + font-weight: 300; + } + + .search input[type=submit] { + margin-left: 1em; + } script. (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ @@ -32,6 +93,10 @@ html#rmurphey-com ga('send', 'pageview'); body(id="#{current.path.join('-')}-page") + h1.banner.title rmurphey  + span adventures in javascript + + != partial('_partials/page-nav') block content @@ -47,70 +112,66 @@ html#rmurphey-com .flex-item.links h2 Links ul + li: a(href="/about.html") About li: a(target="_blank" href="/feed.xml") RSS feed - li: a(href="/search") Search li: a(target="_blank" href="https://github.com/rmurphey") On GitHub li: a(target="_blank" href="https://twitter.com/rmurphey") On Twitter .flex-item.license h2 License p.vcard Copyright © #{ moment().format('YYYY') } Rebecca Murphey. :markdown - All code and content for this blog is available as [open source on GitHub](https://github.com/rmurphey). + All code and content for this blog is available [on GitHub](https://github.com/rmurphey). - script(src="//code.jquery.com/jquery-1.11.1.min.js") - script. - window.jQuery || document.write(' +``` + +The manifest contains information about the other resources that the scout will request, and can even be used by the server to determine what to push alongside the HTML. 
A manifest could provide these instructions:

```js
module.exports = {
  baseUrl : 'https://mysite.com/static/',
  resources : {
    vendor : {
      version : 'vendor-d41d8cd98f.js',
      pushWith : [ 'scout' ]
    },
    application : {
      version : 'application-a32e3ec23d.js',
      pushWith : [ 'scout' ]
    },
    secondary : {
      version : 'secondary-e43b8ad12f.js',
      pushWith : [ ]
    }
  }
};
```

Processing this manifest would require intelligence on the part of the CDN; it may be necessary to replace s3 storage with an actual server that is capable of making these decisions, fronted by a CDN that can intelligently relay responses that include server push.

## The elephants in the room

There are two notable challenges to the rapid transition to an HTTP/2 world: the continued existence of legacy browsers, especially on mobile; and the requirement that HTTP/2 connections be conducted over TLS. Thankfully, the latter provides a reasonable opportunity to address the former. Let's, then, talk about the TLS requirement first.

HTTP/2 is a new protocol, and as such, it is greatly confusing to a large segment of the existing internet: proxies, antivirus software, and the like. During the development of HTTP/2 and [SPDY](https://en.wikipedia.org/wiki/SPDY) before it, engineers observed that traffic that was transported on an insecure connection would frequently fail. The reason? The proxies, the antivirus software, and all the rest had certain expectations of HTTP traffic; HTTP/2 violated those expectations, and so HTTP/2 traffic was considered unsafe. The software that thwarted insecure HTTP/2 traffic didn't have the ability to inspect secure traffic, and so HTTP/2 traffic over a secure connection passed through just fine. Thus was born the requirement — which is a browser implementation detail, and not part of the HTTP/2 spec — that HTTP/2 web communication be conducted using TLS.

The [Let's Encrypt](https://letsencrypt.org/) project aims to eliminate the high cost of obtaining the certificate that enables secure HTTP communication; there will still be technical hurdles to using that certificate, but those should be surmountable for anyone who cares enough to engineer a performant HTTP/2 deployment.

In order for a browser and a server to communicate using HTTP/2, the browser and the server must first agree that they *can*. The TLS handshake that enables secure communication turns out to be the ideal time to negotiate the communication protocol, as well: no additional round trip is required for the negotiation.

When a server is handling a request, it knows whether the browser understands HTTP/2; we can use this information to shape our payload. We can send a legacy browser an HTML file that includes an inlined scout file, and that inlined scout file can include the manifest. The manifest can provide information about how to support legacy browsers:

```js
module.exports = {
  baseUrl : 'https://mysite.com/static/',
  resources : {
    // ...
  },
  legacyResources : {
    legacyMain : {
      initialLoad : true,
      version : 'legacy-main-c312efa43e.js'
    },
    legacySecondary : {
      version : 'legacy-secondary-a22cf1e2af.js'
    }
  }
};
```

## For Consideration: HTTP/2-friendly deployments with HTTP/1.1 support

Putting the pieces together, we arrive at a deployment process that does the following:

- Generates files that contain one or more modules, grouped by likelihood of changing, functionality, or another strategy.
  The file grouping strategy must persist across builds; new groupings would need a new, unique name that had not been used by earlier builds.
- Generates **legacy files**, where those files contain modules that are grouped according to their likelihood to change, and according to whether they are required for initial load.
- Names all files with a content hash.
- Generates a manifest for the build, where the manifest includes:
  - a `baseUrl` property whose value is a string that should be used as the base for generating a full URL to a resource, using the pattern `<baseUrl><version>`
  - a `resources` property whose value is an object that, for each file, provides:
    - the most recent *changed* version
    - a list of individual files which, when any of the files is requested, should trigger a push of the bundle
  - a `legacyResources` property whose value is an object that, for each legacy bundle, provides:
    - the most recent *changed* version
    - an optional `initialLoad` property whose value is `true` if the resource should be loaded immediately by the scout
- Generates an HTTP/2 scout file\* that provides the ability to load resources, and that loads a manifest.
- Generates an HTTP/1 scout file\* that provides the ability to load resources, and that *includes* the manifest.
- Uploads the static resources.
- Updates a delivery mechanism (such as a server or a CDN) based on the data in the new manifest.

The versioning and caching of the resources would be as follows:

- **manifest** Unversioned. Short cache time, e.g. 10 minutes, to allow for the rapid uptake of new resources for HTTP/2 browsers.
- **scout** Unversioned. Medium cache time, e.g. one day, assuming the contents of this file are considered relatively stable.
- **legacy-scout** Unversioned. Short cache time, e.g. 10 minutes, to allow for the rapid uptake of new resources for legacy browsers.
- **application and vendor files** Versioned. Long cache time, e.g. one year, given that new versions will be picked up when a new manifest is loaded.

\* In applications that a) control the initial HTML payload, and b) only use the scout to load other resources, it may not make sense to have a separate scout; it might be sufficient to just load those resources via `<script>` tags.
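To make the scout-and-manifest flow described above concrete, here is a rough sketch of what an HTTP/2 scout might look like. This is not the actual implementation: the manifest URL, the assumption that the build also emits a JSON rendering of the manifest, and the `scout.load()` API are all illustrative.

```js
// Illustrative HTTP/2 scout: fetch the short-cached manifest, then expose a
// loader that maps logical names ('vendor', 'application', ...) to the
// long-cached, content-hashed files the manifest points at.
(function () {
  var MANIFEST_URL = '/static/manifest.json'; // hypothetical JSON rendering of the manifest
  var manifest = null;
  var pending = [];

  function inject(url) {
    var script = document.createElement('script');
    script.src = url;
    script.async = true;
    document.head.appendChild(script);
  }

  // Hypothetical public API: application code calls scout.load('application')
  // and never needs to know anything about versioned file names.
  window.scout = {
    load: function (name) {
      if (manifest) {
        inject(manifest.baseUrl + manifest.resources[name].version);
      } else {
        pending.push(name);
      }
    }
  };

  var xhr = new XMLHttpRequest();
  xhr.open('GET', MANIFEST_URL);
  xhr.onload = function () {
    manifest = JSON.parse(xhr.responseText);
    pending.forEach(function (name) { window.scout.load(name); });
  };
  xhr.send();
}());
```

Picking up a new build is then just a matter of the short-cached manifest expiring; the hashed application and vendor files themselves can be cached for a year.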
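On the delivery side, one way a server (or a smart CDN origin) could act on the `pushWith` data is to emit `Link: rel=preload` headers when the scout is requested; many HTTP/2-aware front ends will translate those into server pushes. The following Express-style middleware is an illustrative sketch, not part of the deployment described above, and the `/static/scout.js` path and `./manifest` module are assumptions.

```js
// Illustrative Express-style middleware: when the scout is requested,
// advertise the bundles marked pushWith: ['scout'] via Link headers,
// which an HTTP/2-capable front end can turn into server pushes.
var manifest = require('./manifest'); // the build-generated manifest module

module.exports = function scoutPushHeaders(req, res, next) {
  if (req.path === '/static/scout.js') {
    var links = Object.keys(manifest.resources)
      .filter(function (name) {
        return manifest.resources[name].pushWith.indexOf('scout') !== -1;
      })
      .map(function (name) {
        return '<' + manifest.baseUrl + manifest.resources[name].version +
          '>; rel=preload; as=script';
      });

    if (links.length) {
      res.set('Link', links);
    }
  }

  next();
};
```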
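And for the HTTP/1.1 path, the inlined legacy scout might look something like this sketch, reusing the `legacyResources` example above; the manifest is assumed to be baked in at build time rather than fetched.

```js
// Illustrative legacy (HTTP/1.1) scout, inlined into the HTML payload for
// browsers that can't speak HTTP/2. The manifest is embedded at build time,
// and only resources flagged initialLoad are requested immediately.
(function () {
  var manifest = {
    baseUrl : 'https://mysite.com/static/',
    legacyResources : {
      legacyMain : { initialLoad : true, version : 'legacy-main-c312efa43e.js' },
      legacySecondary : { version : 'legacy-secondary-a22cf1e2af.js' }
    }
  };

  function inject(url) {
    var script = document.createElement('script');
    script.src = url;
    document.head.appendChild(script);
  }

  Object.keys(manifest.legacyResources).forEach(function (name) {
    var resource = manifest.legacyResources[name];
    if (resource.initialLoad) {
      inject(manifest.baseUrl + resource.version);
    }
  });
}());
```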

rmurphey adventures in javascript

Four oh four...

Yeah, so this page isn't here yet. Maybe refresh and it'll magically appear.

\ No newline at end of file diff --git a/www/about.html b/www/about.html new file mode 100644 index 0000000..230a772 --- /dev/null +++ b/www/about.html @@ -0,0 +1,248 @@ +About

rmurphey adventures in javascript

About

Austin, Texas

+

I am a senior staff software engineer at Bazaarvoice, where I lead a +team that shepherds third-party JavaScript application development for +products with a reach of hundreds of millions of visitors across billions of +pageviews every month.

+ +

I have played a key role in the design and development of large client-side +web applications, and I'm known for my expertise in best practices for +organizing, testing, refactoring, and maintaining JavaScript application code. +I am also recognized for mentoring promising developers.

+

I developed the JS Assessment +project, an open-source tool used by individuals, companies, and +code schools to evaluate a developer's JavaScript skills. I was instrumental +in getting promises introduced to jQuery 1.5, and have contributed to several +open-source projects. I authored jQuery Fundamentals; +contributed to the jQuery Cookbook (O'Reilly); and served as a technical +reviewer for Node for Front-End Developers (Garann Means, O'Reilly) and +Effective JavaScript (David Herman, Effective Software Development Series).

+

Prior to my work at Bazaarvoice, I worked at a consulting, training, +and development firm focused on open web technologies; before that, I worked +for three years as an independent consultant. I am skilled and experienced +in evaluating front-end developer skills and front-end technology decisions.

+

I have spoken at dozens of conferences focused on front-end development, +including Front-End Ops Conf 2014, multiple jQuery Conferences, JSConf US +2013, JSConf US 2011, JSConf EU 2010, FrontTrends 2012, Fronteers 2012, and +many others. I live in Austin, Texas.

+

Career

Bazaarvoice, Austin TX

Senior Staff Software Engineer — June 2015 - present

+

Staff Software Engineer — September 2013 - June 2015

+

I lead a team responsible for promoting and ensuring best practices for +consumer-facing client-side application development, and coordinate key +front-end development activities and tools across product teams. Previously, I +led and grew a team of engineers of varying experience levels as they delivered new +functionality and supported existing functionality on a mission-critical +product, all while the user base grew from 10 million to more than 100 million +pageviews each month. I increased the product quality by creating and +documenting processes for code review and deployment; by bringing visibility +to quality shortcomings in our process; and by working with the +team's dev manager to institute policies around creating unit tests for new +and existing code. I led my team's planning, monitoring, and mitigation +activities for the surge of traffic associated with "Black Friday" and the +holiday shopping season.

+

Senior Software Engineer — February 2013 - August 2013

+

I transitioned rapidly from member to technical lead of a team responsible +for the consumer-facing display of a mission-critical product.

+

Bocoup, Boston, MA

Senior JavaScript Developer — April 2012 - December 2012

+

I joined Bocoup to do JavaScript consulting, but found myself engaging in the +training side of the business instead. I developed new curriculum for teaching +JavaScript beginners, overhauled the jQuery Fundamentals learning site to +provide an interactive learning experience, and created presentations and +training material around the subject of writing testable JavaScript. I also +consulted on Roost, a training-focused conference featuring Bocoup developers, +and developed a JavaScript coaching product aimed at clients who need ongoing +JavaScript support.

+

Toura Mobile, New York, NY

Lead JavaScript Developer — November 2010 - April 2012

+

I initially worked with Toura in a consulting role to help clean up a JavaScript +mess; later, I joined full time as the lead developer on a team of varying skill +levels. I guided the design and implementation of a client-side +framework for developing content-rich, offline-capable mobile applications +inside a PhoneGap wrapper. I also gained familiarity with Ruby on Rails, the +framework we used for the content management system that created the mobile +applications, and wrote extensive command line tooling using Ruby.

+

Independent Consultant

July 2008 - July 2011

+

I turned the sudden ending of my DailyStrength work into a successful +consulting business. For three years, I worked with clients ranging from +startups to large companies, helping them improve their JavaScript practices +and organize their client-side code. In the process, I established myself as a +thought leader on the subject of client-side application development.

+

DailyStrength, Santa Cruz, CA

January 2008 - July 2008

+

For five short months, I worked for this small startup, reworking the +client-side code to depend on one DOM library instead of four, and standardizing the +approach to implementing various aspects of the user interface. Though I was +working remotely with an otherwise-colocated team, I proved my ability to become an +integral part of a small team from across the country. I lost my job, along +with the rest of the developers, when the money ran out.

+

Webslingerz, Carrboro, NC

August 2006 - January 2008

+

I joined this local interactive agency to focus on improving my web development +skills. The existing team was firmly attached to the 2001 way of doing things; +I campaigned effectively to get fellow developers to embrace web standards, +modern techniques, and the best practices of the day.

+

Before That

September 1996 - August 2006

+

I began my career at a small newspaper in upstate New York, working on the +night copy desk with a team that was responsible for laying out the newspaper, +editing the stories to fit the available space, and writing the headlines. I +worked there for five years; I left in the summer of 2001. I spent the next 18 +months bartending -- and making about as much money as I had made at the paper +-- before getting on my bicycle and riding it from Maine to North Carolina, +camping along the way. In North Carolina, I worked as a waitress for several +months before landing a job at an advertising agency. There, I did page layout +for various print materials, and grew to be heavily involved in the agency's +fledgling efforts with web technologies.

+

Speaking & Writing

I have spoken at dozens of conferences and events since 2009, with a focus on +client-side application development, JavaScript best practices, and encouraging +other developers to share what they know.

+

JSConf

+

jQuery Conference

+

Other Conferences of Note

+

Publications & Contributions

+

Education

I studied journalism at Lehigh University +in Bethlehem, PA, from 1993-1995. I created the college newspaper's first +online presence, authoring HTML in pico or somesuch, previewing it in the +text-only Lynx browser, and deploying the files to the server at the +appropriate time using the at command. I also worked with a friend to create one of the +first online resources for LGBT college students; we were very excited when, +in 1995, its URL was featured in a print magazine.

+

Tools

I do my work on a 2015 Retina MacBook Pro with a 27" Cinema Display. I use +Atom and vim as my editors, the Chrome Dev Channel as my primary +development browser, and git for version control of all the things. I write +just about everything in Markdown, including this document.

\ No newline at end of file diff --git a/www/archives/2006/index.html b/www/archives/2006/index.html new file mode 100644 index 0000000..6317a8d --- /dev/null +++ b/www/archives/2006/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2006 archive

\ No newline at end of file diff --git a/www/archives/2007/index.html b/www/archives/2007/index.html new file mode 100644 index 0000000..b840501 --- /dev/null +++ b/www/archives/2007/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2007 archive

December

\ No newline at end of file diff --git a/www/archives/2008/index.html b/www/archives/2008/index.html new file mode 100644 index 0000000..d32d45e --- /dev/null +++ b/www/archives/2008/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2008 archive

December

November

September

August

July

June

May

April

March

February

January

\ No newline at end of file diff --git a/www/archives/2009/index.html b/www/archives/2009/index.html new file mode 100644 index 0000000..d46e342 --- /dev/null +++ b/www/archives/2009/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2009 archive

November

October

September

April

February

January

\ No newline at end of file diff --git a/www/archives/2010/index.html b/www/archives/2010/index.html new file mode 100644 index 0000000..940eea4 --- /dev/null +++ b/www/archives/2010/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2010 archive

December

November

October

September

August

July

June

April

March

\ No newline at end of file diff --git a/www/archives/2011/index.html b/www/archives/2011/index.html new file mode 100644 index 0000000..7b1afd7 --- /dev/null +++ b/www/archives/2011/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2011 archive

July

May

April

March

\ No newline at end of file diff --git a/www/archives/2012/index.html b/www/archives/2012/index.html new file mode 100644 index 0000000..1ec7dc3 --- /dev/null +++ b/www/archives/2012/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2012 archive

December

November

\ No newline at end of file diff --git a/www/archives/2013/index.html b/www/archives/2013/index.html new file mode 100644 index 0000000..729e97a --- /dev/null +++ b/www/archives/2013/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2013 archive

\ No newline at end of file diff --git a/www/archives/2014/index.html b/www/archives/2014/index.html new file mode 100644 index 0000000..e93f5f9 --- /dev/null +++ b/www/archives/2014/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2014 archive

November

July

\ No newline at end of file diff --git a/www/archives/2015/index.html b/www/archives/2015/index.html new file mode 100644 index 0000000..0443b54 --- /dev/null +++ b/www/archives/2015/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

2015 archive

November

October

August

July

May

January

\ No newline at end of file diff --git a/www/archives/index.html b/www/archives/index.html new file mode 100644 index 0000000..a1ca396 --- /dev/null +++ b/www/archives/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

rmurphey adventures in javascript

All years archive

2015

November

October

August

July

May

January

2014

November

July

2012

December

November

2011

July

May

April

March

2010

December

November

October

September

August

July

June

April

March

2009

November

October

September

April

February

January

2008

December

November

September

August

July

June

May

April

March

February

January

2007

December

\ No newline at end of file diff --git a/www/archives/layout.html b/www/archives/layout.html new file mode 100644 index 0000000..2eb7acb --- /dev/null +++ b/www/archives/layout.html @@ -0,0 +1,231 @@ +Adventures in JavaScript

rmurphey adventures in javascript

Adventures in JavaScript

rmurphey adventures in javascript

\ No newline at end of file diff --git a/www/blog/13-seconds-attribute-vs-id-selectors-in-jquery.html b/www/blog/13-seconds-attribute-vs-id-selectors-in-jquery.html new file mode 100644 index 0000000..9a48722 --- /dev/null +++ b/www/blog/13-seconds-attribute-vs-id-selectors-in-jquery.html @@ -0,0 +1,140 @@ +13 seconds: Attribute vs. ID selectors in jQuery

rmurphey adventures in javascript

13 seconds: Attribute vs. ID selectors in jQuery

Before:

+

{% codeblock lang:javascript %} +// select all elements with an id or name attribute of fieldName; +// some are inputs (name attribute), some are td's (id attribute) +var $field = $('#' + fieldName + ', [name=' + fieldName + ']'); +{% endcodeblock %}

+

After:

+

{% codeblock lang:javascript %} +// give inputs both a name and an id attribute, +// and then just select fields and td's via id +var $field = $('#' + fieldName); +{% endcodeblock %}

+

Savings: 13 seconds over the course of selecting > 100 elements. Thank god for Firebug's profiler, which pointed me to the problem in a few seconds. Attribute selectors in jQuery are handy, OK, but they don't seem to work for large-scale selections.
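If you want to verify this kind of difference on your own page, a rough timing harness along these lines can be run from the browser console. It is purely illustrative: it assumes jQuery is already loaded, and the selectors and iteration count are placeholders for whatever your markup actually uses.

{% codeblock lang:javascript %}
// Crude timing harness for comparing selector strategies; swap in your own
// selectors and iteration count.
function timeSelector(label, selector, iterations) {
  var start = new Date().getTime();
  for (var i = 0; i < iterations; i++) {
    $(selector);
  }
  console.log(label + ': ' + (new Date().getTime() - start) + 'ms');
}

timeSelector('attribute + id selector', '#fieldName, [name=fieldName]', 100);
timeSelector('id selector only', '#fieldName', 100);
{% endcodeblock %}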

+

A side note: vim made the HTML change easy, once I worked through its quirky regexp syntax which requires you to escape the +:

+

{% codeblock %} +s/name="\([^"]\+\)" /name="\1" id="\1" /g +{% endcodeblock %}

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/2-years-in-some-thoughts-on-working-for-yourself.html b/www/blog/2-years-in-some-thoughts-on-working-for-yourself.html new file mode 100644 index 0000000..74dac07 --- /dev/null +++ b/www/blog/2-years-in-some-thoughts-on-working-for-yourself.html @@ -0,0 +1,182 @@ +2 Years In, Some Thoughts on Working for Yourself

rmurphey adventures in javascript

2 Years In, Some Thoughts on Working for Yourself

Two years and three weeks ago, I lost my job. My partner and I had just had an expensive deck built; I’d just bought a gratuitously fancy grill to go on it. My token severance check was in the mail — I’d spend most of it to be able to keep my work computer. And so, six-pack of Yuengling and a pack of Parliament Lights in hand — I hadn’t smoked in years — I sat on said deck near said grill, contemplating a world I’d last contemplated when I, age 6 at the time, came home in the middle of the day to find my just-laid-off dad sitting on the porch.

+ +

It was not a good day.

+ +

Two years on, things look a lot better than they did. My “salary,” as best as I can figure it, is comfortable. Figuring it is hard: I travel to conferences whenever I want, and work pays for them. I buy new equipment and software when I want, and work pays for it. I don’t work a certain number of hours in a day or a week or a month, so I’ve been able to get my pilot’s license, spend two weeks in Hawaii without drawing on some meticulously calculated vacation balance, and take on projects like TXJS without having to get anyone’s blessing — hard to put a value on that. On the flip side, I’ve had more sleepless nights than I’d dare count, wondering where the money was going to come from next month, wondering whether it was time to walk away from an abusive client with no other work on the horizon, wondering whether my relationship could tolerate all the ups and downs, wondering how it was that little ol' me, the Independent Consultant, had any business telling people what to do.

+ +

People ask me, now and then, what advice I have for them about being independent, about working for myself. More than a few of them have suddenly found themselves sitting on that proverbial deck, waiting for that severance check (or not). I’ve written many an email, but I thought I’d take a few minutes to try to assemble all of them into a coherent post.

+ +

The Touchy-Feely

+ +

Network

+ +

Once I decided I was going to stop looking for a job and work for myself, I wrote an email to absolutely everyone I could think of who might help me find work. It felt weird, and it’s also exactly how I landed a job that paid the bills fairly solidly for the next nine months. When you decide to take the plunge, it’s imperative that you let people know. Let them know you’re looking for work, and what kind of work you’re willing to do. Get out and participate in your local tech community, giving talks and connecting with people who can connect you with people. Start writing blog posts, answering questions on forums, helping people in IRC channels. Become known as a helpful and knowledgable person. You do not get to be an introvert. Getting your name out there, when you start and constantly after that, is the No. 1 most valuable thing you can do — at least as important as being good at what you do.

+ +

Dive in and Do

+ +

I cost a fair bit of money. In return for paying that, clients expect that I can come up to speed on their project quickly and start solving their problems for them — they don’t want to pay for a long ramp-up period before they can even accurately assess whether my time was a good purchase. Part of doing this is having solid fundamental skills — an understanding of basic application development patterns, version control systems, development environments, etc. When I don’t have those fundamental skills for a particular project and I expect that may get in the way of a quick ramp-up, I make that very clear to my clients.

+ +

More generally, it’s critical that you demonstrate value as soon as you can, and make sure your clients are well aware of the progress you’re making — if they don’t ask you, tell them anyway. If you get stuck on a particular problem, part of your job is to recognize that you’re stuck, and honestly assess whether this is your own shortcoming or not. If it is, think long and hard about whether you should bill the client for your learning time; if it’s not, at least let the client know about the roadblock and your plan for overcoming it — you never know, the client might re-frame the problem in a way that’s easier to solve.

+ +

Don’t Specialize … Yet

+ +

When you first start, it pays to be open-minded about the kind of work you’ll take on. It helps get those bills paid, but also, it helps you zero in on exactly how you’re good at providing value to clients. The value you provide may be entirely different than the value you were providing at your job, or even than how you thought you provided value in general. Over time, you can start to market yourself using true stories of how you helped real live clients, and start to develop your niche.

+ +

Think Global (and favor clients who do, too)

+ +

I have had clients in California, Florida, New York, and places in between, but I’ve never needed to travel to any of them. I find that the work I do is incredibly time and place independent, as long as the client has good systems in place. This means version control, a development environment I can ssh to, liberal use of IM, a sane deployment process (read: not FTP), and some sort of project management tool and/or ticketing system. Projects that have lacked these systems have been more challenging, and these days I tend not to accept them.

+ +

Don’t Accept Abuse

+ +

There are terrible clients out there, but you do not have to work for them. If a client is impossible despite your best efforts to improve the situation, quit in a professional manner but without remorse. The client is not always right. You will find more work from someone better. Every. Single. Time.

+ +

Ignore the Economy

+ +

One question that’s come up a lot is how I think the economy has affected my ability to find work. Lehman Brothers would declare bankruptcy less than two months after I lost my job; the Dow would lose 3,000 points in the following weeks. My general theory on the economy question is this: rarely is full-time employment of a web worker an efficient distribution of labor, unless you are working for a very, very large company. The volume of work can fluctuate tremendously. I think of all the hours at previous jobs when there was literally nothing to do, yet the companies kept me around for the moment when there was. This was dumb. I’d like to think that as companies are looking for ways to cut costs, they’ll realize that was dumb, and bring people on as needed. The flip side of that is that those displaced workers are now competing for the consulting work. In the end, I think the economy may be a bit of a wash if you’re good at what you do.

+ +

Get Support

+ +

This feels like the single most discouraging thing I have to tell people: I am not sure I could have done this without the knowledge that the bills would still get paid if I failed. Our double-income-no-kids salary is embarrassing, but we were very intentional when we bought our house that we wouldn’t take on more than we could afford with one salary, no matter what the lenders told us we could have. My partner’s employer allowed me to enroll in their insurance coverage almost as if I were her spouse (I had to wait a few months for open enrollment, costly months I wouldn’t have had to wait if we were married). The comfort of this knowledge has been amazing, in those first few months when it became clear that I just might not need a job and in those dark months when it’s been unclear where the next check would come from. I am sure working for yourself is possible without these stars aligning, but it would require a braver soul than me.

+ +

Practical Matters

+ +

Have Clear Payment Terms (and realistic expectations)

+ +

I learned early on that I can’t expect anything quicker than Net 30, and that’s a long time — as much as 60 days after I did the work, assuming I bill monthly. One client failed to pay due to some issues with their accounting department, and it got to the point where I had to let them know that I couldn’t continue working with them until I got paid. A deadline was looming; a check arrived FedEx the next day. Be clear about your payment terms. You’ll get Net 15 if you’re lucky; Net 30 is standard. Incentivize them with late penalties if you need, and don’t hesitate to contact a client once that deadline passes.

+ +

Decide What You’re Worth

+ +

Honestly evaluate what your fee system should be, then stick to it. People who want to pay you less will cause other headaches that will make you wish you’d charged them more without fail. If you enter into any retainer situations, make sure the terms are crystal clear to both sides. Generally retainers work where the client purchases a minimum number of hours per month (potentially in exchange for a bulk discount). Communicate with them if they are not using their minimum hours, but try hard not to end up in a situation where you’ve cleared your plate for a client who then doesn’t need you as much as they thought they would. On the flip side, if the client has promised you 30 hours a week because they overestimated a task, but you’re so good the work is only taking you 15, make that very clear to the client and move toward arriving at a new arrangement quickly. They’ll appreciate your honesty, and you can free up your time for other work.

+ +

Decide How Much You Want to Work

+ +

Be clear about your availability if it’s not 24/7. Let clients know what the best way is to contact you. For me, for example, I hate being interrupted by phone calls, and greatly prefer IM over email for quick exchanges. I’ve learned to tell my clients this up front. For your own sanity, contemplate whether there’s a minimum number of hours you’re willing to work on a project. Too many times I’ve spent more time discussing a project than actually doing it. Make sure you account for that discussion time, and for the cost of getting you to sit at your computer rather than playing outside.

+ +

Get an Accountant

+ +

If you find yourself making any money at all, get an accountant. Taxes are complicated for self-employment. April 15 will either suck or it will suck a whole lot. An accountant will help you figure out estimated payments and advise you on the best way to keep as much of your money as you can. For me, that meant forming an LLC, which meant a lot more paperwork throughout the year, but a lot more money in my pocket at the end of it.

+ +

So, Go!

+ +

This working for yourself thing is hard. It’s so very important to be good at what you do, and yet being good at what you do has so little to do with being able to pull off working for yourself. On one level, I wish more people would do it — I believe it achieves a far more efficient distribution of skills and labor while allowing for some serious specialization. On the other hand, the instability of it ranges from mildly uncomfortable to downright terrifying, and so reality dictates that most people will wander back to the full-time world soon enough, and the independent thing will be but a blip on the resume. I, though, feel lucky I’ve made it this long.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/5-reasons-you-don-t-really-want-a-jack-of-all-trades-developer.html b/www/blog/5-reasons-you-don-t-really-want-a-jack-of-all-trades-developer.html new file mode 100644 index 0000000..1a1a21a --- /dev/null +++ b/www/blog/5-reasons-you-don-t-really-want-a-jack-of-all-trades-developer.html @@ -0,0 +1,138 @@ +5 reasons you don't really want a jack-of-all-trades developer

rmurphey adventures in javascript

5 reasons you don't really want a jack-of-all-trades developer

I've spent the last couple of weeks trolling Craigslist and have been shocked at the number of ads I've found that seem to be looking for an entire engineering team rolled up into a single person. Descriptions like this aren't at all uncommon:

+
Candidates must have 5 years experience defining and developing data driven web sites and have solid experience with ASP.NET, HTML, XML, JavaScript, CSS, Flash, SQL, and optimizing graphics for web use. The candidate must also have project management skills and be able to balance multiple, dynamic, and sometimes conflicting priorities. This position is an integral part of executing our web strategy and must have excellent interpersonal and communication skills.
+

Really. Now I don't know about you, but if I were building a house, I wouldn't want an architect doing the work of a carpenter, or the foundation guy doing the work of an electrician. But ads like the one above are suggesting that a single person can actually do all of these things, and the simple fact is that these are fundamentally different skills. The foundation guy may build a solid base, but put him in charge of wiring the house and the whole thing could, well, burn down. When it comes to staffing a web project or product, the principle isn't all that different -- nor is the consequence.

I've thought a lot about this these last couple of weeks, and I don't think this post is sour grapes about the fact that I don't have the top-to-bottom, front-to-back web development skills that this ad and others seem to be asking for. I'm proud and confident of the abilities I've assembled when it comes to front-end development, and I have a rock-solid understanding of what makes websites tick. The thing is, the more you know, the more you find out you don't know. A year ago I'd have told you I could write PHP/MySQL applications, and do the front-end too; now that I've seen what it means to be truly skilled at the back-end side of things, I realize the most accurate thing I can say is that I understand PHP applications and how they relate to my front-end development efforts. To say that I can write them myself is to diminish the good work that truly skilled PHP/MySQL developers are doing, just as I get a little bent when a back-end developer thinks they can do my job.

So to all of those companies who are writing ads seeking one magical person to fill all of their needs, I offer a few caveats before you post your next Craigslist ad:

+
1. If you're seeking a single person with all of these skills, make sure you have the technical expertise to determine whether a person's skills match their resume. Outsource a tech interview if you need to. Any developer can tell horror stories about inept predecessors, but when a front-end developer like myself can read PHP and think it's appalling, that tells me someone didn't do a very good job of vetting and got stuck with a programmer who couldn't deliver on his stated skills.
2. A single source for all of these skills is a single point of failure on multiple fronts. Think long and hard about what it will mean to your project if the person you hire falls short in some aspect(s), and about the mistakes that will have to be cleaned up when you get around to hiring specialized people. I have spent countless days cleaning up after back-end developers who didn't understand the nuances and power of CSS, or the difference between a div, a paragraph, a list item, and a span. Really.
3. Writing efficient SQL is different from efficiently producing web-optimized graphics. Administering a server is different from troubleshooting cross-browser issues. Trust me. All are integral to the performance and growth of your site, and so you're right to want them all -- just not from the same person. Expecting quality results in every area from the same person goes back to the foundation guy doing the wiring. You're playing with fire.
4. Asking for a laundry list of skills may end up deterring the candidates who will be best able to fill your actual need. Be precise in your ad: about the position's title and description, about the level of skill you're expecting in the various areas, about what's nice to have and what's imperative. If you're looking to fill more than one position, write more than one ad; if you don't know exactly what you want, try harder to figure it out before you click the publish button.
5. If you really do think you want one person to do the task of an entire engineering team, prepare yourself to get someone who is OK at a bunch of things and not particularly good at any of them. Again: the more you know, the more you find out you don't know. I regularly team with a talented back-end developer who knows better than to try to do my job, and I know better than to try to do his. Anyone who represents themselves as being a master of front-to-back web development may very well have no idea just how much they don't know, and could end up imperiling your product or project -- front to back -- as a result.
+

If your budget really is limited to a single position, you might want to consider whether you'd be better off working with several contractors with specific and proven skills, rather than a single person who claims to encompass everything you're after. Your management overhead will increase in the short term, yes, but your headaches down the road will decrease exponentially. In the process, you'll gain access to people who can help you evaluate potential full-timers, and probably gain some insight into the actual list of skills a full-timer needs to provide.

If you're one of the people who's written these ads, all is not lost. Invest in a technical consultant -- probably one you can't afford to hire full-time -- to help you really understand your needs and the skills required to solve them. Often they can assist you with writing and posting the ad, and interviews too. For example, I'll meet with a client, write and post a detailed ad, identify candidates, and interview contenders; if I don't have the technical skills required to evaluate a candidate, chances are I personally know someone who can. Doing that homework up front, and understanding and describing what your needs really are, is vastly more likely to give you the perfect fit you're after than if you just cast a wide net and see what you catch.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/a-dojo-boilerplate.html b/www/blog/a-dojo-boilerplate.html new file mode 100644 index 0000000..770240a --- /dev/null +++ b/www/blog/a-dojo-boilerplate.html @@ -0,0 +1,127 @@ +A Dojo Boilerplate

rmurphey adventures in javascript

A Dojo Boilerplate

When I first started playing with the Dojo Toolkit, it was easy enough to use the CDN-hosted dojo.js and get started, but before long I wanted to make use of one of the features that drew me to Dojo in the first place: the build system that parses your code’s dependencies as expressed by dojo.require() statements and creates production-ready files.
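To give a purely illustrative sense of what that build system consumes (the application module names below are made up), the pre-AMD-style dependency declarations look like this; the build follows the dojo.require() calls to assemble a single production-ready file.

{% codeblock lang:javascript %}
// Hypothetical module in the legacy dojo.provide/dojo.require style that
// Dojo 1.6-era builds understand; the build tool reads these declarations
// and inlines the required modules into one optimized layer file.
dojo.provide('myApp.main');

dojo.require('dijit.form.Button');
dojo.require('myApp.views.Home');

myApp.main = {
  init: function () {
    // ... application startup code that uses the required modules ...
  }
};
{% endcodeblock %}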

+

Coming from a world where this was entirely a DIY affair, the patterns I should follow for taking advantage of Dojo’s system were, shall we say, less than clear. There was a lot of frustration, a lot of swearing, and a lot of pleas for help in #dojo on Freenode.

+

These days, I’m talking about Dojo a lot, and I’ve gotten pretty comfortable with how to set up a project — I even wrote a post about scaffolding a Dojo app once I felt like I had the basics down — but for a long time I’ve wanted to release a ready-made starter project, rather than making people follow seven lengthy steps.

+

With the help of Colin Snover, I’m pleased to release the Dojo Boilerplate, a simple starter project if you’d like to get your feet wet with Dojo and the power of its dependency management and build system. It comes with a bare-bones do-nothing app, a shell script for downloading the Dojo SDK and getting it in the right place, and a shell script and profile file for actually creating a built version. For the brave, it also includes a work-in-progress router for single-page apps — one of the few features that I feel Dojo itself is missing. Everything you should need to know is documented in the README.

+

I’ve also created a small demo app that uses the boilerplate and shows some of the basic concepts of MVC development using Dojo, including separating your code into models, views, controllers, and third-party services. It includes an example of templated widgets, which are one of the biggest selling points of Dojo for me, as well as an uber-basic example of object stores, new in Dojo 1.6.
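If you haven't run into object stores yet, the flavor is roughly this (a from-memory sketch against the 1.6-era API, not code lifted from the demo app):

dojo.require("dojo.store.Memory");

var store = new dojo.store.Memory({
  data: [
    { id: 1, name: "Alpha" },
    { id: 2, name: "Beta" }
  ]
});

// query() returns the matching items; get() fetches a single item by id
var results = store.query({ name: "Beta" });
console.log(results.length, store.get(1).name);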

+

The goal of the boilerplate and the demo app is to eliminate some of that pain and WTF that I went through — while Dojo is ridiculously powerful, the barrier to entry can seem daunting. Over and over again, though, I am grateful that I took the time to overcome it.

+

Finally: as always, pull requests and issues are welcome. Enjoy.

+

Update: Colin is now the maintainer of the boilerplate; I've updated the links above accordingly.

\ No newline at end of file diff --git a/www/blog/a-new-chapter.html b/www/blog/a-new-chapter.html new file mode 100644 index 0000000..531d4df --- /dev/null +++ b/www/blog/a-new-chapter.html @@ -0,0 +1,132 @@ +A new chapter

rmurphey adventures in javascript

A new chapter

It was three years ago this summer that I got the call, bought the Yuengling, smoked the cigarettes, and began life as an independent consultant. It’s been (almost) three years of ups and downs, and, eventually, among the most rewarding experiences of my life. Day by day, I wrote my own job description, found my own clients, set my own schedule, and set my own agenda.

+ +

Starting tomorrow, it’s time for a new chapter in my working life: I’ll be joining Toura Mobile full-time as their lead JavaScript developer, continuing my work with them on creating a PhoneGap- and Dojo-based platform for the rapid creation of content-rich mobile applications.

+ +

I’ve been working with Toura for about six months now, starting shortly after I met Matt Rogish, their director of development, at a JavaScript event in New York. They brought me on as a consultant to review their existing application, and the eventual decision was to rewrite it from the ground up, using the lessons learned and knowledge gained from the first version to inform the second. It was a risky decision, but it’s paid off: earlier this year, Toura started shipping apps built with the rewritten system, and the care we took to create modular, loosely coupled components from the get-go has paid off immensely, meeting current needs while making it easier to develop new features. With the rewrite behind us, these days we’re using the solid foundation we built to allow users of the platform to create ever more customized experiences in their applications.

+ +

If you know me at all, you know that I’ve been pretty die-hard about being an independent consultant, so you might think this was a difficult decision. Oddly, it wasn’t — I’ve enjoyed these last several months immensely, the team I work with is fantastic, and I’ve never felt more proud of work I’ve done. Whenever I found myself wondering whether Toura might eventually tire of paying my consulting rates, I’d get downright mopey. Over the course of three years, I’ve worked hard for all of my clients, but this is the first time I’ve felt so invested in a project’s success or failure, like there was a real and direct correlation between my efforts and the outcome. It’s a heady feeling, and I hope and expect it to continue for a while.

+ +

By the way, I’ll be talking about the rewrite at both TXJS and GothamJS in the next few weeks.

+ +

Also: we’re hiring :)

\ No newline at end of file diff --git a/www/blog/announcing-jquery-fundamentals-an-open-source-jquery-training-curriculum.html b/www/blog/announcing-jquery-fundamentals-an-open-source-jquery-training-curriculum.html new file mode 100644 index 0000000..138b3fe --- /dev/null +++ b/www/blog/announcing-jquery-fundamentals-an-open-source-jquery-training-curriculum.html @@ -0,0 +1,145 @@ +Announcing jQuery Fundamentals: An Open-Source jQuery Training Curriculum

rmurphey adventures in javascript

Announcing jQuery Fundamentals: An Open-Source jQuery Training Curriculum

I've been leading jQuery trainings for more than a year now, from tiny gatherings that I organized myself at the local coworking space, to intensive two-day sessions at local web companies, to whirlwind one-day classes at governmental agencies. Over the course of those trainings, I've developed what I'd like to think is a decent curriculum -- training material that's the size of a small book, exercises that demonstrate core concepts, and solutions to those exercises that students can peek at later or when they get stuck.

+

I decided recently that it was time for all of this material to see the light of day, so I spent the last several days converting it all to DocBook files that allow for easy publication to HTML and PDF (and other formats, if I'm later so inclined). I also fleshed out some topics that I'd given short shrift, and started planning sections covering advanced topics such as plugin authoring, code organization, best practices, and more. There's more to come in the next few days, but I think what I've done so far is worth sharing. I hope you'll agree.

+ + +


+

+

+
+

My goals in releasing this are several. First and foremost, I want to see people writing better jQuery. The free resources for learning jQuery are scattered across the internets, and my personal experience of learning the library was haphazard — it was a long time before I learned some things I wish I'd known from the get-go. In addition, I want people who are writing jQuery to understand JavaScript. To that end, the book begins with a survey of JavaScript itself before jumping into jQuery. Finally, I want to enlist the bright minds of the jQuery community to help develop a robust, authoritative, in-depth jQuery curriculum, and in exchange it only seemed fair to make it available to everyone.

+

I should mention that the goal of this material is to serve as a companion to a human instructor. That said, individuals may find it useful for self-study, especially if they're diligent about doing the exercises at the end of each chapter.

+

If you're inclined to help -- by adding a chapter, a section, a paragraph, an exercise, or even just a correction -- fork the repo and send me a pull request. I look forward to seeing how this project might evolve with the community's help.

+

Note: If you comment on this post pointing out an issue with the material, I will do my best to tend to the issue, but I probably won't publish your comment, as this post isn't the right place for reporting issues in the code. You can report issues at the repository, but if it's important to you, please fork the repository, make the change, and send me a pull request.

\ No newline at end of file diff --git a/www/blog/another-cautionary-remote-javascript-tale.html b/www/blog/another-cautionary-remote-javascript-tale.html new file mode 100644 index 0000000..2475765 --- /dev/null +++ b/www/blog/another-cautionary-remote-javascript-tale.html @@ -0,0 +1,129 @@ +Another cautionary remote Javascript tale

rmurphey adventures in javascript

Another cautionary remote Javascript tale

It sounds like TechCrunch got burnt yesterday by some remote Javascript appearing too high in their pages' HTML. Visitors' browsers were waiting and waiting for the remote script to load, and refusing to render the rest of the page until it did.

+

All we knew is that our sites all simultaneously went down three times yesterday. After the first time we identified the likely problem as Seesmic and contacted the company. They assured us there was no way the plugin could take the site down. When it happened a second time we disabled the Seesmic plugin and the sites went back up. We identified the problem - the plugin was loading an external Javascript file, and when Seesmic’s servers were down, we just sat and waited for it for up to two minutes before timing out.
The remedy to this, of course, is to load remote scripts at the foot of the page, after all of the actual content has loaded. However, plenty of remote Javascript "widgets" aim at the least common denominator without offering a viable option for skilled users who don't actually want their site to break. The widget publishers simply require sites to put a remote script call exactly where the site wants the widget to appear. If the widget server chokes, the site is out of luck.

+

I wrote about a related problem with ad-related Javascript, and my attempts to get around its desire to appear wherever the ad appears, making page rendering grind to a halt if the ad network was slow. We ended up getting around that problem with a bit of utter absurdity: using an iframe that called some server-side code that would generate an HTML page that contained little more than a script tag.

+

Our solution, as it were, meant an extra HTTP request to our server for every ad on the page, just to avoid a problem that wouldn't exist in the first place if the authors of these widgets and ads would offer some real options to their savvier users. A simple method like

+

{% codeblock lang:javascript %}
widget.appendToElement('foo');
{% endcodeblock %}

+

seems like it would do the trick -- something that could be called at the end of a page and fail fairly silently by just leaving the destination element empty. I imagine that the use of such a method is hardly out of the reach of the good folks at TechCrunch, or plenty of other sites. Until an approach like this becomes a bit more common, expect many more rants like the one we saw from TechCrunch.
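To be clear about what I'm imagining, here's a rough sketch of how a widget author might implement such a method; the names and URL are hypothetical, not any vendor's actual API:

{% codeblock lang:javascript %}
var widget = {
  appendToElement: function(id) {
    var target = document.getElementById(id);
    if (!target) { return; } // nothing to do; fail silently

    var script = document.createElement('script');
    script.src = 'http://widgets.example.com/widget.js'; // hypothetical widget host
    target.appendChild(script);
    // if the widget server chokes, the destination element simply stays
    // empty -- the rest of the page has already rendered
  }
};
{% endcodeblock %}

Because the script element is created and appended after the page's content, a slow or dead widget server can't block rendering the way a script tag dropped into the middle of the markup can.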

\ No newline at end of file diff --git a/www/blog/avoid-bare-class-selectors-in-jquery.html b/www/blog/avoid-bare-class-selectors-in-jquery.html new file mode 100644 index 0000000..708f163 --- /dev/null +++ b/www/blog/avoid-bare-class-selectors-in-jquery.html @@ -0,0 +1,142 @@ +Avoid bare class selectors in jQuery

rmurphey adventures in javascript

Avoid bare class selectors in jQuery

It just happened again: I was looking at someone else's jQuery and I came across something like this:

+

{% codeblock lang:javascript %}
$('.button').click(function() { /* do something */ });
{% endcodeblock %}

+

This is a classic case of "just because you can, doesn't mean you should." This little bit of jQuery will, indeed, find every element on a page with a class of "button", but that's exactly the problem: it has to look at every element on the page to figure out which ones match the selector. It seems short and sweet, like so much of jQuery is, but on a page with a lot of elements, this selector can actually take a non-trivial amount of time to run.

+

There are a few ways to avoid this:

+
    +
  • If the element you're after has an ID attribute, use it. That's the single-fastest way to find an element. However, don't gratuitously add ID attributes to elements; the other methods below are perfectly good.
  • +
  • Specify the type of element you're after. For example, $('input.button'). This will tell jQuery that it's only looking for inputs, so it can disregard anything on the page that isn't an input. If you're looking for multiple element types, tell jQuery that: $('input.button, a.button') +
  • +
  • Give jQuery some information about where to look for the element. For example: $('#myForm .button') +
  • +
  • Use an element you've already found to tell jQuery where to find the element:

{% codeblock lang:javascript %}
var $ul = $('#myUnorderedList');
var $li = $ul.find('.selected');
{% endcodeblock %}
  • +

\ No newline at end of file diff --git a/www/blog/back-on-the-air.html b/www/blog/back-on-the-air.html new file mode 100644 index 0000000..a642590 --- /dev/null +++ b/www/blog/back-on-the-air.html @@ -0,0 +1,118 @@ +Back on the air

rmurphey adventures in javascript

Back on the air

I've been juggling 1.5 jobs for the last few weeks, but starting Monday it's back down to just one -- and it's a new one: I'll be doing front-end development for DailyStrength.org, which will hopefully give me plenty of front-end goodness to write about again. For all two of you paying attention to this site, sorry for the blackout.

\ No newline at end of file diff --git a/www/blog/building-for-http2.html b/www/blog/building-for-http2.html new file mode 100644 index 0000000..74b8753 --- /dev/null +++ b/www/blog/building-for-http2.html @@ -0,0 +1,249 @@ +Building for HTTP/2

rmurphey adventures in javascript

Building for HTTP/2

Earlier this year, I got the chance to speak with Google's Ilya Grigorik about HTTP/2 for the 1.10 episode of the TTL Podcast. It was a great primer for me on how HTTP/2 works and what it means for how we build the web, but it wasn't until more recently that I started to think about what it means for how we build the web — that is, how we generate and deploy the HTML, CSS, and JS that power web applications.

+

If you're not familiar with HTTP/2, the basics are simultaneously simple and mind-boggling. Whereas its predecessors allowed each connection to a server to serve only one request at a time, HTTP/2 allows a connection to serve multiple requests simultaneously. A connection can also be used for a server to push a resource to a client — a protocol-level replacement for the technique we currently call “inlining.”

+

This is everything-you-thought-you-knew-is-wrong kind of stuff. In an HTTP/2 world, there are few benefits to concatenating a bunch of JS files together, and in many cases the practice will be actively harmful. Domain sharding becomes an anti-pattern. Throwing a bunch of <script> tags in your HTML is suddenly not a laughably terrible idea. Inlining of resources is a thing of the past. Browser caching — and cache busting — can occur on a per-module basis.

+

What does this mean for how we build and deploy applications? Let's start by looking at the state of the art in client-side application deployment prior to HTTP/2.

+

Deploying JavaScript Applications (2013)

In March of 2013, Alex Sexton wrote Deploying JavaScript Applications, and it's what I consider to be the canonical post on the topic for sites and apps that include more than about 50K of client-side code.

+

In his post, Alex describes a deployment that uses a "scout" approach: a small bit of code, included directly in the HTML or loaded via <script> tag.

+

The scout file exists to balance the desire for application resources to be highly cacheable vs. the need for changes to those resources to take effect quickly.

+

To meet that goal, the scout needs a short cache time when it's a file; if the scout is in the HTML, then the HTML itself needs a short cache time. The scout contains information about the location of the file(s) that provide the current version of the application, and the code necessary to load those files.

+

Files loaded by the scout can have extremely long cache times because the scout loads resources from versioned URLs: when a resource is updated, it is hosted at a new URL, and the scout is updated to load the resource from that new URL.

+

Why a scout approach rather than just loading the versioned files using <script> tags directly from the HTML? The scout technique lets you deploy changes to your JavaScript application without requiring a re-deploy of the server-side application. (In an ideal world this might not seem valuable, but in the real world, it often is.) When the scout is served separately from the HTML, it also allows for a different caching strategy for the HTML.

+

In this system, it's typical that the scout would load one or two JavaScript files that were generated by combining the modules needed for the initial state of the application. More code might be loaded later to support additional application behavior; again, that code would typically comprise a set of modules shipped in a single file.
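As a rough illustration (the URLs and structure here are my own invention, not Alex's actual code), a scout might amount to little more than this:

// scout.js -- served with a short cache time
(function () {
  // versioned URLs are baked in at build time and change with every deploy
  var resources = [
    'https://mysite.com/static/vendor-v41.js',
    'https://mysite.com/static/application-v41.js'
  ];

  function load(url) {
    var script = document.createElement('script');
    script.async = true;
    script.src = url;
    document.getElementsByTagName('head')[0].appendChild(script);
  }

  for (var i = 0; i < resources.length; i++) {
    load(resources[i]);
  }
})();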

+

There are a few shortcomings inherent to this approach, which are difficult to overcome without upsetting the balance between cacheability and changeability:

+
    +
  • Shipping the application as a large file with a long cache time works great for repeat visitors, but not so well for first-timers who have to wait for the large file to load.
  • +
  • All users have to download the whole large file again whenever something changes — even something small.
  • +
  • Even when nothing changes, a short cache time means repeat visitors may end up re-downloading the scout frequently.
  • +
+

Adding HTTP/2 to the mix — that is, flipping the switch that gets your server to start speaking HTTP/2 to browsers that understand it — has only a nominal positive impact on the performance of an app crafted for maximum performance on HTTP/1. Indeed, the applications most likely to see big improvements without big changes are applications whose deployments were poorly designed in the first place.

+

To see performance gains in a well-engineered deployment, we'll have to re-engineer the deployment itself.

+

Splitting it up

One of the most obvious opportunities is presented by HTTP/2's ability to handle multiple requests over the same connection. Rather than shipping a single large application file over the wire, what if we tell the scout to load the individual modules that make up the application? We would no longer have to invalidate the cache for the whole application every time we make a change.

+

A few reasons come to mind why this might be a bad idea.

+

The first is the concern that compression might suffer if modules are shipped individually. As it turns out, though, combining multiple modules into a single file results in only slightly better compression than if the modules are compressed individually. For example, compressing a file containing minified versions of jQuery, Underscore, and Backbone results in a 42,186-byte file; compressing each minified file individually results in a combined size of 42,975 bytes. The difference is 789 bytes -- barely meaningful.
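If you want to check this against your own bundle, a quick Node sketch along these lines will do it (the file names are illustrative, and the exact numbers will vary with the versions and minifier you use):

// compare gzip of a single concatenated bundle vs. individually gzipped files
var fs = require('fs');
var zlib = require('zlib');

var files = ['jquery.min.js', 'underscore.min.js', 'backbone.min.js'];
var buffers = files.map(function (f) { return fs.readFileSync(f); });

var combined = zlib.gzipSync(Buffer.concat(buffers)).length;
var separate = buffers.reduce(function (sum, buf) {
  return sum + zlib.gzipSync(buf).length;
}, 0);

console.log('combined:', combined, 'bytes; separate:', separate, 'bytes');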

+

The second concern may be more legitimate: our server or CDN may be unhappy about serving one request per module, and it may be unduly complex to ship a single module per file, especially since any given request might fail for whatever reason. For the sake of discussion, we'll assume that it's reasonable to do some grouping of modules into individual files.

+

How to group those modules is up for debate. One strategy could be to group files according to their likelihood of changing, recognizing that library and framework modules don't change often, while application modules do. Another strategy would be to group files associated with a unit of useful functionality, though this leaves us needing a way to deliver code that's shared across units of functionality.

+

At Bazaarvoice, we solve this concern via a lightweight require/define system that ships in the scout file, allowing us to share vendor files such as jQuery and Backbone across applications. An application can express a dependency on a vendor file using NAMESPACE.require(), and vendor files declare themselves using NAMESPACE.define(). Once a vendor file has been defined, other modules on the page have access to it immediately via NAMESPACE.require().
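The mechanism can be tiny. This is not Bazaarvoice's actual implementation, just a sketch of the shape such a require/define pair might take, with NAMESPACE standing in for the product's global:

var NAMESPACE = NAMESPACE || {};

(function (ns) {
  var modules = {};

  // a vendor file declares itself once it has loaded
  ns.define = function (name, value) {
    modules[name] = value;
  };

  // application code asks for a module that has already been defined
  ns.require = function (name) {
    if (!(name in modules)) {
      throw new Error('Module not defined: ' + name);
    }
    return modules[name];
  };
})(NAMESPACE);

// at the end of the vendor jQuery file:
//   NAMESPACE.define('jquery', jQuery.noConflict(true));
// and in an application module:
//   var $ = NAMESPACE.require('jquery');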

+

Versioning

For HTTP/1.1-friendly builds, we always increment the version of the built application file, and embed a URL pointing to that new version in the scout file. We do this because it is essentially guaranteed that the contents of the application file have changed whenever we do a new build -- otherwise there would be no reason for the build.

+

For HTTP/2-friendly builds, we’re generating many smaller files; we only want to increment their version when something has changed.

+

For example, imagine a build that generates vendor-v1.js and application-v1.js; it also generates a scout that loads these two files. We then make a change to an application file, and we do another build, creating vendor-v2.js and application-v2.js. However, no vendor files have changed; our scout should now load application-v2.js but still load vendor-v1.js. If our scout points to vendor-v2.js, we lose the benefit of being able to cache smaller pieces of our code.

+

This can be solved by using hashes of the file contents rather than version numbers: vendor-d41d8cd98f.js. If a file has not changed, its hash will remain the same. (Notably, inconsequential changes will change the hash -- for example, a new copyright comment that is inserted post-minification.) Plenty of build strategies already use content hashes for versioning; however, many still use integers, dates, or commit hashes, which change even when the contents of a file have not.
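Generating those names is cheap at build time; a sketch in Node (the paths and the choice of MD5 are just for illustration) might look like this:

// name a built file after a hash of its contents (run at build time)
var crypto = require('crypto');
var fs = require('fs');

function hashedName(filePath, prefix) {
  var contents = fs.readFileSync(filePath);
  var hash = crypto.createHash('md5').update(contents).digest('hex').slice(0, 10);
  return prefix + '-' + hash + '.js';
}

// e.g. hashedName('build/vendor.min.js', 'vendor') might return
// 'vendor-3f1b2c4d5e.js'; unchanged contents always yield the same name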

+

Given files whose names include a hash, our scout can include a manifest that prescribes the file to load for a given resource. The manifest would be generated by the build after all of the resources were generated.

+
module.exports = {
+  baseUrl : 'https://mysite.com/static/',
+  resources : {
+    vendor : 'vendor-d41d8cd98f.js',
+    application : 'application-a32e3ec23d.js'
+  }
+};
+
+

Push: Because you downloaded scout.js, you might also like ...

Another exciting opportunity in an HTTP/2 world is the ability to push a cascade of resources.

+

The first push opportunity is the scout itself: for sites and applications that currently ship the scout inlined in the initial HTML payload, server push affords an opportunity to send the scout as a separate resource when the initial HTML is requested.

+

There’s an interesting dilemma here: If the browser already has the resource cached, and the cache is still valid, it doesn’t need the server to push the resource. Currently, though, there’s no way for the browser to communicate its cache contents to the server. A browser can decline a push, but the server may have already started to send it. We’ve basically introduced a new tradeoff: server push can get the resource to the browser quickly, but we waste bandwidth if the browser doesn’t need it.

+

As discussed at the link above, a smart server could use session information to determine when to push -- for example, if the page is reloaded within a resource’s cache time, there is no need to re-push that resource to the same session -- but this makes push state-dependent, a frightening prospect if we hope to use CDNs to ensure efficient asset delivery.

+

Assuming we've generated a manifest as described above, we have the option of going a step further: we can separate the manifest and the scout, allowing the scout to have a far longer cache time than in a pre-HTTP/2 world. This is possible because the thing that typically changes about the scout is the version of the resources it loads, and it makes the most sense on a site where there are different payloads for different pages or users. For applications that previously included the scout in HTML, we can push the scout and the manifest, and have the scout request the manifest; for applications that loaded the scout as its own JS file, we can push the manifest when the scout file is loaded and, again, have the scout request the manifest.

+

This approach also makes a further case for a standardized scout: application-specific configuration can be shipped in the manifest, and a standardized scout can be shared across applications. This scout could be a file loaded via a script tag, where the script tag itself provides information about the application manifest to use:

+
<script src="/static/shared/js/scout.js"
+  data-manifest="/static/apps/myapp/manifest.js"></script>
+
+

The manifest contains information about the other resources that the scout will request, and can even be used by the server to determine what to push alongside the HTML.
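Before looking at the manifest's contents, here's a sketch of how such a shared scout might discover and request its manifest; it glosses over how the manifest is evaluated once it arrives, and document.currentScript could replace the last-script trick in newer browsers:

// shared scout.js -- find our own script tag and read data-manifest
(function () {
  var scripts = document.getElementsByTagName('script');
  var me = scripts[scripts.length - 1]; // or document.currentScript where supported
  var manifestUrl = me.getAttribute('data-manifest');

  var manifestScript = document.createElement('script');
  manifestScript.async = true;
  manifestScript.src = manifestUrl;
  document.getElementsByTagName('head')[0].appendChild(manifestScript);
})();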

+

A manifest could provide these instructions:

+
module.exports = {
+  baseUrl : 'https://mysite.com/static/',
+  resources : {
+    vendor : {
+      version : 'vendor-d41d8cd98f.js',
+      pushWith : [ 'scout' ]
+    },
+    application : {
+      version : 'application-a32e3ec23d.js',
+      pushWith : [ 'scout' ]
+    },
+    secondary : {
+      version : 'secondary-e43b8ad12f.js',
+      pushWith : [ ]
+    }
+  }
+};
+
+

Processing this manifest would require intelligence on the part of the CDN; it may be necessary to replace S3 storage with an actual server that is capable of making these decisions, fronted by a CDN that can intelligently relay responses that include server push.

+

The elephants in the room

There are two notable challenges to the rapid transition to an HTTP/2 world: the continued existence of legacy browsers, especially on mobile; and the requirement that HTTP/2 connections be conducted over TLS. Thankfully, the latter provides a reasonable opportunity to address the former. Let's, then, talk about the TLS requirement first.

+

HTTP/2 is a new protocol, and as such, it is greatly confusing to a large segment of the existing internet: proxies, antivirus software, and the like. During the development of HTTP/2 and SPDY before it, engineers observed that traffic that was transported on an insecure connection would frequently fail. The reason? The proxies, the antivirus software, and all the rest had certain expectations of HTTP traffic; HTTP/2 violated those expectations, and so HTTP/2 traffic was considered unsafe. The software that thwarted insecure HTTP/2 traffic didn't have the ability to inspect secure traffic, and so HTTP/2 traffic over a secure connection passed through just fine. Thus was born the requirement — which is a browser implementation detail, and not part of the HTTP/2 spec — that HTTP/2 web communication be conducted using TLS.

+

The Let's Encrypt project aims to eliminate the high cost of obtaining the certificate that enables secure HTTP communication; there will still be technical hurdles to using that certificate, but those should be surmountable for anyone who cares enough to engineer a performant HTTP/2 deployment.

+

In order for a browser and a server to communicate using HTTP/2, the browser and the server must first agree that they can. The TLS handshake that enables secure communication turns out to be the ideal time to negotiate the communication protocol, as well: no additional round trip is required for the negotiation.

+

When a server is handling a request, it knows whether the browser understands HTTP/2; we can use this information to shape our payload. We can send a legacy browser an HTML file that includes an inlined scout file, and that inlined scout file can include the manifest. The manifest can provide information about how to support legacy browsers:

+
module.exports = {
+  baseUrl : 'https://mysite.com/static/',
+  resources : {
+    // ...
+  },
+  legacyResources : {
+    legacyMain : {
+      initialLoad : true,
+      version : 'legacy-main-c312efa43e.js'
+    },
+    legacySecondary : {
+      version : 'legacy-secondary-a22cf1e2af.js'
+    }
+  }
+};
+
+
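One way a server might act on that protocol knowledge, sketched with a Node-style request object (the helper signature, HTML, and legacyScoutSource parameter are stand-ins for illustration, not a real implementation):

// assumes a Node-style request whose httpVersionMajor reflects the
// protocol negotiated during the TLS handshake (2 for HTTP/2 clients);
// legacyScoutSource is the built HTTP/1 scout with the manifest embedded
function renderPage(req, res, legacyScoutSource) {
  res.setHeader('Content-Type', 'text/html');

  if (req.httpVersionMajor >= 2) {
    // HTTP/2: reference the shared, long-cached scout; the server or CDN
    // can push the scout and manifest alongside this response
    res.end('<!doctype html><script src="/static/shared/js/scout.js" ' +
            'data-manifest="/static/apps/myapp/manifest.js"></script>');
  } else {
    // HTTP/1.1: inline the legacy scout so the initialLoad resources can
    // be requested immediately, without waiting on extra round trips
    res.end('<!doctype html><script>' + legacyScoutSource + '</script>');
  }
}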

For Consideration: HTTP/2-friendly deployments with HTTP/1.1 support

Putting the pieces together, we arrive at a deployment process that does the following:

+
    +
  • Generates files that contain one or more modules, grouped by likelihood of changing, functionality, or another strategy. The file grouping strategy must persist across builds; new groupings would need a new, unique name that had not been used by earlier builds.
  • +
  • Generates legacy files, where those files contain modules that are grouped according to their likelihood to change, and according to whether they are required for initial load.
  • +
  • Names all files with a content hash.
  • +
  • Generates a manifest for the build, where the manifest includes:
      +
    • a baseUrl property whose value is a string that should be used as the base for generating a full URL to a resource, using the pattern <baseUrl>/<resource.version>
    • +
    • a resources property whose value is an object that, for each file, provides:
        +
      • the most recent changed version
      • +
      • a list of individual files which, when any of the files is requested, should trigger a push of the bundle
      • +
      +
    • +
    • a legacyResources property whose value is an object that, for each legacy bundle, provides:
        +
      • the most recent changed version
      • +
      • an optional initialLoad property whose value is true if the resource should be loaded immediately by the scout
      • +
      +
    • +
    +
  • +
  • Generates an HTTP/2 scout file* that provides the ability to load resources, and that loads a manifest.
  • +
  • Generates an HTTP/1 scout file* that provides the ability to load resources, and that includes the manifest.
  • +
  • Uploads the static resources.
  • +
  • Updates a delivery mechanism (such as a server or a CDN) based on the data in the new manifest.
  • +
+

The versioning and caching of the resources would be as follows:

+
    +
  • manifest Unversioned. Short cache time, e.g. 10 minutes, to allow for the rapid uptake of new resources for HTTP/2 browsers.
  • +
  • scout Unversioned. Medium cache time, e.g. one day, assuming the contents of this file are considered relatively stable.
  • +
  • legacy-scout Unversioned. Short cache time, e.g. 10 minutes, to allow for the rapid uptake of new resources for legacy browsers.
  • +
  • application and vendor files Versioned. Long cache time, e.g. one year, given that new versions will be picked up when a new manifest is loaded.
  • +
+

* In applications that a) control the initial HTML payload, and b) only use the scout to load other resources, it may not make sense to have a separate scout; it might be sufficient to just load those resources via <script> and <link> tags in the HTML itself. This approach isn't viable for applications that do not control the initial HTML payload, such as third-party applications.

+

Reality check

In several places so far, I’ve talked about the need for a server to make decisions about which resources it delivers, and when and how it delivers them. As I alluded to earlier, this could be profoundly challenging for CDNs, which traditionally simply receive a request and return a single resource in response. It also suggests the need for close collaboration between client and server development teams, and an increased knowledge of server-side technology for client-side developers.

+

CDN support of HTTP/2 in general is rather disappointing, with some major vendors providing nothing more than vague timelines for non-specific support.

+

As of this writing, I'm unaware of CDNs that support any notion of server push, but I'd be happy to find I am ill-informed. Ideally, CDNs need to provide applications with the ability to express how static assets relate to each other -- a task complicated by the fact that those relationships may be situational, such as in the case where an application doesn't want to push an asset that was just pushed to the same client 10 seconds before. One-size-fits-all push could be accomplished by setting a header on a file, indicating that other files should be pushed alongside it, but that doesn't allow for expressing more nuanced rules.

+

Even for applications that just want to split their payload into smaller files to take advantage of HTTP/2, and that don't intend to use server push, there is still a gap when it comes to providing a positive experience for HTTP/1.1 clients. CDNs need to surface the ability to change a response not just based on the URL that is requested, but the protocol of the request. Without this ability, we'll be stuck having to choose which protocol to support.

+

There is also work to be done on tooling, especially if we want to support HTTP/2 without significantly degrading the experience for legacy browsers. Ideally, our build tooling would figure out the optimal combination of files for us, with a knowledge of how the application was bundled previously so as not to squander past caching.

+

The developer story for HTTP/2 also leaves a lot to be desired as of this writing. Front-end developers are among the most likely in an organization to advocate for this new technology, but my experiences over a few weeks of learning about HTTP/2 suggest that the effort required to set up even a local environment will stretch the comfort zone for many. With a working local environment in hand, the tools to understand the differences between HTTP/2 and HTTP/1 behavior are limited and often confusing. Chrome presents information in its network tab that seems to conflict with the wall of text in its net-internals tool, especially when it comes to server push. Charles Proxy doesn't yet speak HTTP/2. Firefox shows pushed resources as an entry in the network tab, but they appear as though they were never received. nghttp2 provides great insight into how an HTTP/2 server is behaving, but it doesn't speak HTTP/1.1, so you can't use it to do comparisons. Measuring performance using a tool like WebPagetest requires a real certificate, which you may not have handy if you're just trying to experiment.

+

Alex wrote his 2013 post to document the product of years of experience in creating performant HTTP/1.1 deployments. HTTP/2 means we need to rethink everything we know about shipping applications to the web, and while the building blocks are there, there's still much to figure out about how we'll use them; the "right" answers are, in many cases, still TBD while we wait for vendors to act.

+

Further Reading

I've been bookmarking useful HTTP/2 resources as I come across them.

+

Thanks

Thanks to the many folks who have talked to me about the ideas in this post, but especially to Lon Ingram, Jake Archibald, and Andy Davies.

\ No newline at end of file diff --git a/www/blog/bye-bye-yui-bloat.html b/www/blog/bye-bye-yui-bloat.html new file mode 100644 index 0000000..eb5309e --- /dev/null +++ b/www/blog/bye-bye-yui-bloat.html @@ -0,0 +1,123 @@ +Bye bye YUI bloat

rmurphey adventures in javascript

Bye bye YUI bloat

I've been doing some serious work this week cutting down on YUI bloat -- our site was including the entire YUI library at the top of every page, at a cost of more than 200k. Worse, because the library was included at the top of every page, the rest of the page had to wait for it to load before it would render.

+

First, I switched all of our AJAX to use jQuery, the library we've standardized on going forward -- the fact that we had a wrapper function for all of our AJAX calls made this pleasantly easy. Then, I went through the code and figured out which YUI components were being used where. Once that was done, I was able to roll up a few core YUI files into a file to be included in the global header, and call other components only as needed. Total savings: 170k. Holy crap.

+

I'd still like to see us do away with YUI altogether -- Ajaxian wrote about Ojay the other day, and all I could think was "OMG, you want me to use another library to make this library easier to use? I'll stick with jQuery, thanks" -- but considering how much javascript we'd have to rewrite, I'm at least feeling much better about 38k than 208k.

+

By the way, Marcus sent me a link to a great article about why this stuff matters: Use "SCORN" to test the front end of a website for performance by Scott Barber. It's made me get serious about cutting down on our number of HTTP requests, and so far, it seems to be making a difference.

\ No newline at end of file diff --git a/www/blog/cache-regexes-in-javascript.html b/www/blog/cache-regexes-in-javascript.html new file mode 100644 index 0000000..5cb901f --- /dev/null +++ b/www/blog/cache-regexes-in-javascript.html @@ -0,0 +1,118 @@ +Cache regexes in javascript

rmurphey adventures in javascript

Cache regexes in javascript

This post about precompiling and caching regular expressions came across my Google Reader just as I was wrangling with a slow regex issue. Not sure my issue will benefit from this, but neat regardless.

\ No newline at end of file diff --git a/www/blog/centering-images.html b/www/blog/centering-images.html new file mode 100644 index 0000000..386dd3b --- /dev/null +++ b/www/blog/centering-images.html @@ -0,0 +1,118 @@ +Centering images

rmurphey adventures in javascript

\ No newline at end of file diff --git a/www/blog/choosing-presentation-color-scheme.html b/www/blog/choosing-presentation-color-scheme.html new file mode 100644 index 0000000..63c2df7 --- /dev/null +++ b/www/blog/choosing-presentation-color-scheme.html @@ -0,0 +1,132 @@ +On Choosing a Syntax Highlighting Scheme for Your Next Presentation

rmurphey adventures in javascript

On Choosing a Syntax Highlighting Scheme for Your Next Presentation

This is a projector screen:

+

+

You will notice that it is white, or some reasonable approximation thereof. It is probably made of a reflective material that sparkles a bit when light shines on it. Still: white.

+

Do you know what color this screen is when you use a projector to display this image onto it?

+

+

It is still white. Crazy, I know! The thing is, projectors cannot project black; they can only not project any light on a region that you intend to be black.

+

Chances are you are reading this on an LCD screen of some sort, where the rules are completely different: they usually start out essentially black, not white, and pixels are brightened as required. The pixels that start out dark can generally stay pretty dark.

+

On a projection screen, on the other hand, the appearance of black is nothing more than an optical illusion, made possible by the projector projecting brightness everywhere else.

+

What does this mean? Lots of things, but in particular, it means that you should never, ever, ever use a color scheme with a dark background -- no matter how high-contrast and good it looks on your monitor -- if you will be presenting using a projector that is projecting onto a white screen. At least, assuming that you intend for your audience to be able to actually read the code.

+

Presentation Color Schemes That I Have Loved

    +
  • Ben Alman's TextMate Theme: Ben has tailored this to be incredible for presenting about JS code.
  • +
  • Tomorrow Theme: The light-background flavor is decent, but could probably stand to be higher-contrast, at least for some languages.
  • +

\ No newline at end of file diff --git a/www/blog/code-org-take-2-structuring-javascript-applications.html b/www/blog/code-org-take-2-structuring-javascript-applications.html new file mode 100644 index 0000000..1767664 --- /dev/null +++ b/www/blog/code-org-take-2-structuring-javascript-applications.html @@ -0,0 +1,228 @@ +Code Org, Take 2: Structuring JavaScript Applications

rmurphey adventures in javascript

Code Org, Take 2: Structuring JavaScript Applications

It’s hard to believe it was not even a year ago that I spoke at the jQuery Conference in Boston about code organization. At the time, I’d been thinking for a few months about how to improve my own code and give it more structure, and how to give advice to others about how to do the same.

+ +

My talk was about code organization, but really I was talking about how you might organize a single piece of functionality; I didn’t even begin to answer the larger question of how to structure a bona fide JavaScript application. Really, that question is almost perpendicular to the one I was tackling at the time; it’s a question of strategy, not tactics.

+ +

A year later, I’d like to share my thoughts on how I’m answering it these days.

+ +

First Things First: What’s a JavaScript Application?

+ +

GMail is a JavaScript application; this Posterous blog, while it makes use of JavaScript a bit, is probably not, but the admin interface most certainly is. The line can be frustratingly fuzzy, but at the same time it’s pretty important to realize when you’ve crossed it.

+ +

To me, the defining characteristic of a JavaScript application is that the heavy lifting of manipulating and displaying data falls to the browser, with most communication with the server happening via XHR.

+ +

If you find yourself in application land, welcome. Now what?

+ +

My Building Blocks

+ +

My approach to organizing an application is really just an MVC variant, so I don’t want to sound as though I’ve discovered something novel or new. However, there are a couple of things to note: For one, the term “controller” has a couple of different meanings to me, as explained below; for two, there are two distinct flavors of “views,” though I’m not sure exactly how important the distinction is.

+ +

I also want to be super-clear that I don’t tout this as The One and Only Way; it is just a way that has worked for me, a way that I evolve and adapt with every project I work on, and a way that I’ve run by a few people and they haven’t laughed at me. My point in dissecting it isn’t that you’ll try to follow it word-for-word; rather, I hope it might get you thinking about JavaScript applications beyond the DOM.

+ +

Models

+ +

There’s nothing particularly unique to models in a JavaScript application. They are responsible for fetching and storing application data and maintaining its integrity in the browser. They fetch data, store data, and provide an API for other application components to get access to that data. There may be more than one destination for the data: memory, the server, or some type of local storage. But if it has to do with managing data, it’s the model’s job.

+ +

Models stay out of the way when it comes to displaying data or responding (at least directly) to user interaction. Those tasks are left up to other pieces of the application, as we’ll see below.

+ +

Example

+ +

A simple search application would likely have a search results model, responsible for receiving the current search term, fetching the data for the term, and broadcasting it to the rest of the application. It might also allow for manipulating individual search results, such as indicating that a particular result was a favorite or a dud, though that task might also fall to an individual search result model depending on the needs.

+ +

Widgets and Data Views

+ +

Views comprise HTML (generally in the form of client-side templates) and CSS for a component, and are generally accompanied by a view controller (the JavaScript related to interacting with the view; see below for an explanation). The HTML for a view consists of a single parent node with an arbitrary internal structure; the parent node will optionally have one or more classes on it that can be used to target CSS.

+ +

There are two flavors of views, in my mind: widgets, which are responsible for supporting user interaction with the application but don’t render any application data; and data views that are responsible for displaying and allowing interaction with application data.

+ +

Data views are instantiated with the initial data required to populate them; then, their view controllers listen for messages from other pieces of the application to tell them when new or updated data needs to be rendered.

+ +

Examples

+ +

A basic search input box would be considered a widget — when it is created, it doesn’t need any application data in order to render properly. The widget is strictly responsible for allowing the basic interaction of typing a search term and hitting enter; that is, it’s not responsible for actually performing the search.

+ +

A search results list is an example of a data view; it renders application data and, potentially, allows for interaction with it. Again, though, it’s not responsible for performing the search; it just renders data and then allows for interaction with it.

+ +

View Controllers

+ +

View controllers manage interaction with a data view or widget — interaction by the user, and interaction with the rest of the application. They are responsible for binding and handling events, for broadcasting user interactions with the widget to the rest of the application, and for listening to other pieces of the application to tell them they have new data to render.

+ +

View controllers never handle server communication directly; their role is solely to provide a user interface to the application. When something interesting happens to a view or widget, the view controller announces it. When new data is available for a view, the view controller should know how to handle it. But, again, the view controller itself should focus on providing a user interface, not on interfacing with the server.

+ +

Examples

+ +

The view controller for a search box might listen for the user to focus on the search box, hiding placeholder text for the input. Then, it might listen for the user to hit Enter inside the search box; when that happens, it would broadcast to the rest of the application that the user had submitted a search, along with the term that was searched.

+ +

The view controller for a search results list might listen for another piece of the application to announce that new search results are available to be displayed. If the results were for the currently displayed search term, it could add them to the list; if they were for a new term, the results list could empty itself and display the new results.

+ +

Application-Level Controllers

+ +

Application-level controllers are the glue of an application. Loosely, there may be one per “page” of the application, or one per feature. For example, an application that includes a search feature and a checkout feature might have a controller for each feature, even though the checkout feature might spread across multiple pages.

+ +

These controllers are responsible for getting the models and views/widgets for a feature in place and talking to each other. So, a controller might first make sure the required models are in place, then tell them to fetch the appropriate data; once the data is available, the controller would instantiate the views for displaying the data. Finally, the controller would broker future interactions between the views and the models.

+ +

Examples

+ +

On a search results page, a user might click a Favorite button on a search result. The search results list’s view controller would handle the click, broadcasting a message about the user’s action to the rest of the application. The controller would observe this message and pass it, along with any other relevant information, to the search results model, which would in turn pass the information to the server, or store it locally.

+ +

Notes on Enablers

+ +

I’ve glossed over a few implementation details that are somewhat tangential to the organization question, but I want to touch on them briefly:

+ +

Pubsub and Friends

+ +

I didn’t want to get too specific about how all of this “announcing” and “broadcasting” and “listening” happens, because there are lots of ways to accomplish it. One could use pubsub, custom events, or any number of other solutions. I don’t think the actual implementation is important, though personally I lean heavily on pubsub — what is important is the notion of broadcasting and listening for announcements that something has happened, allowing other components of the application to react appropriately.
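For the sake of concreteness, here's a bare-bones pubsub sketch and the sort of traffic that flows over it; the topic names are made up, and a real project would likely reach for an existing implementation:

var pubsub = (function () {
  var topics = {};
  return {
    subscribe: function (topic, fn) {
      (topics[topic] = topics[topic] || []).push(fn);
    },
    publish: function (topic, data) {
      var fns = topics[topic] || [];
      for (var i = 0; i < fns.length; i++) {
        fns[i](data);
      }
    }
  };
})();

// an application-level controller (or a model) listens ...
pubsub.subscribe('search/requested', function (data) {
  // e.g. hand data.term to the search results model to fetch
});

// ... and a view controller announces what the user just did
pubsub.publish('search/requested', { term: 'dojo' });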

+ +

Templating

+ +

If you aren’t using a toolkit with templating built in (or, heck, even if you are), I’ve kind of fallen in love with mustache.js lately. It’s a great client-side templating companion, making it dead-simple to turn data into markup without ending up with templates that look more like JavaScript than HTML.
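A quick taste, assuming mustache.js is already loaded (the template and data are made up; newer versions of the library expose Mustache.render, while older releases called it Mustache.to_html):

var template = '<li class="result">{{title}} ({{price}})</li>';
var data = { title: 'A search result', price: '$5' };

var html = Mustache.render(template, data);
// html is '<li class="result">A search result ($5)</li>'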

+ +

Figuring out how to maintain templates can be tricky — do you store them in your page’s markup? Do you maintain them as separate files requested via XHR and then cached? Or do you put them in your JavaScript? Dojo’s dojo.cache() method provides a handy way to keep your templates in separate files and load them via XHR, while interning them into your JavaScript for you if you use Dojo’s build tool. I like this.

+ +

Attaching Events to Views

+ +

Another shameless Dojo plug: dijit._Templated provides some serious hotness when it comes to attaching events to views. Read up on dojoAttachPoint and dojoAttachEvent; together with dijit._Widget’s connect and subscribe methods, which provide automatic cleanup for you, there’s some real power here, which has me writing hardly any selector-based code these days.

+ +

File Structure

+ +

I hesitate to make any particular recommendations here, because the needs of an application can vary widely. However, I tend to have a directory each for models, views (for view controllers and templates), and controllers (for application-level controllers). Those directories — especially the views directory — may contain subdirectories, for instance if there’s more than one view for a certain type of data.

+ +

Why Go To All This Trouble, Again?

+ +

So this is the part where you might say “OMG, srsly, what happened to ‘get some elements, do something with them?!?’” Let me be clear, that approach may be entirely appropriate for your particular needs; I’m not here to convince you otherwise.

+ +

But: if your application is complex enough to warrant considering an approach like this, I’ve found that in the long run it actually simplifies my code by cleanly separating concerns and providing a decent roadmap for building new features. I can build and test a solid model for some Thinger, and then use that model throughout my code; I can build and test a user interface component for editing a Thinger long before the data exists to support it. I can map “pages” of my application to application-level controllers, providing a high-level view of what’s happening where.

+ +

Best of all, paths to code reuse become clear and entanglements become fewer when I keep this division of responsibilities in mind as I code. A search results data view, for example, can be made to accept search results from any model that provides them in the proper format; a search can be initiated and the results displayed without depending on a user entering text into a search input widget.

+ +

Dividing the responsibilities into well-defined sections leads to components that are truly pluggable, often in ways you may not have even imagined when you wrote them. In an application that evolves over time, it’s hard to overstate the benefits of this.

+ +

In Conclusion & A Plea

+ +

If you find yourself working on a JavaScript application, I can’t recommend enough that you consider, at length, what underlying structure makes the most sense; it’s almost inevitably more complex than you can manage via the DOM alone. Again, my answer isn’t the right answer, it’s just an answer, but I hope it helps you start thinking about what the right answer might be for your project.

+ +

If you’re interested in this stuff, I’d encourage you to check out JavaScriptMVC, if only to see how they approach these problems; Cujo.js is another framework, built on top of Dojo, that aims to enter this space, but you’ll have to wait until mid-September to see it.

+ +

Finally: If you have your own thoughts to share about how to approach these large application questions, I’m pleading with you to write your own blog post(s) about them. If you have other reference material on the topic, I beg you to share it. As more and more people transition from simple JavaScript enhancements to non-trivial applications, the need for education is huge.

\ No newline at end of file diff --git a/www/blog/css-vs-tables-maybe-the-design-is-to-blame.html b/www/blog/css-vs-tables-maybe-the-design-is-to-blame.html new file mode 100644 index 0000000..95de4c7 --- /dev/null +++ b/www/blog/css-vs-tables-maybe-the-design-is-to-blame.html @@ -0,0 +1,147 @@ +CSS vs. Tables: Maybe the design is to blame?

rmurphey adventures in javascript

CSS vs. Tables: Maybe the design is to blame?

There's been some backlash lately against CSS, and some of it seems so well reasoned that even I find myself wondering if tables are really so bad after all. From giveupandusetables.com, which says the maximum time to spend before abandoning CSS is 47 minutes, to the well-illustrated blog post by Ron Garret, the general argument is that CSS isn't up to the task of faithfully reproducing elaborate designs cross-browser in an acceptable amount of developer time.

+

In his post about Garret's article, Dion Almaer at Ajaxian opines:

+
CSS purist[s] may poo poo him and say "he is just dumb and doesn't REALLY know CSS." The problem though is that most developers run into exactly the pain that he describes. We’ve all been there. It drives you nuts and when frustrated what do you do? You fluster about and change CSS like a mad man until it kinda looks right. And, you never learn what the real problem was, and thus destined to make the same mistake again.
+ +

It seems that while developers are thinking about sacrificing web standards for the perceived simplicity of tables, the viability of the design rarely enters the debate, and that's a shame. In my experience, some of the most difficult designs to produce using CSS were fundamentally flawed from the get-go, created by designers who failed to grasp that the web is not like print.

+

The web is not like print. In print, designers have near-total control over the output, because the number of new "pages" -- items of content -- is limited by the cost of printing. If a print designer wants text vertically centered in a fixed-height column, or two columns that are exactly the same height, or rounded corners with drop shadows on top of gradients, there's no reason they can't have that. The cost of printing is sufficiently high, and print graphics programs are sufficiently sophisticated, that making those design decisions has no impact on the marginal cost of production.

+

On the web, the marginal cost of creating a new page of content can be approximately zero, but to achieve that we must build pages that adapt to unpredictable content and unpredictable users. If we don't, we won't realize the economies of scale that the web has to offer. The tradeoff for that infinitesimally small marginal cost is that the rules have to be different, because the cost of implementing those print-centric design decisions is inordinately high. Instead of sophisticated graphics programs, the web has mere humans to turn PSDs into working pages; instead of content created by experts and pored over by editors, the web has volumes of user-generated content, and the ability to change it on a whim.

+

On the web, equal-height columns will cease to be equal height when the content changes; vertically centered content will outgrow its fixed-height bounds; and rounded corners with drop shadows on gradients can't possibly be worth the cost of producing them. These are not problems with CSS that should be solved with tables. They are, fundamentally, problems with the design.

+

When I talk about this to other developers (and any designers who are willing to give me the time of day after I'm done pointing out how costly their design will be to produce), I make the analogy that it's just as absurd to impose these print-centric design conventions on the web as it would be to use holograms for every picture in a magazine. Sure, you can, but that doesn't mean you should.

+

So what's a web developer to do? When designs reach the desk of the CSS developer, more often than not they've been through so many rounds of review, revision, and approval -- by people far-removed from the realities of the web -- that the developer has little choice but to toil away at reproducing them faithfully.

+

The best defense may be a good offense, which is to say, the burden is on you, dear developer, to educate the misguided designers. Here are some tactics I've used:

+
    +
  • Impose yourself early in the process, insisting on wireframes and information architecture documents (even if they're just sketches and an outline). Identify potential problems early on, but don't become a naysayer -- make sure you offer ideas, not just criticism.
  • +
  • Push back -- gently but firmly -- on design decisions that have the potential to cause problems down the road. Ask lots of "what if" questions and insist on answers.
  • +
  • Be honest about how long it will take you to accomplish a design -- with yourself and with your boss or client -- and identify opportunities to make cost-saving changes to the specifics of the design without changing its spirit.
  • +
  • Have examples at the ready of similar problems solved in more web-centric ways. The Yahoo! Design Patterns Library can be an excellent resource for this, but look also to other sites in your industry or genre.
  • +
+ +

The burden's also on you to get better at CSS. I am lucky in that, when I first started playing around with web production, I was a little intimidated by tables. A background in print production steeped in templates and stylesheets made tables seem awkward and strange to me; CSS, temperamental as it was, at least bore some resemblance to the cascading style sheets of print production programs like Quark and InDesign.

+

These days, it's rarer and rarer (but not unheard of) that I find myself beating my head against the wall over a CSS problem. I've learned HTML and CSS patterns that I reuse often, and I've learned to spot -- and speak up about -- design-induced ratholes. If you're finding yourself sucked in by the latest round of CSS vs. tables debate, take heart, stand firm, and reconsider the source of your frustration.

+

Useful things:

+

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/cycle-through-list-elements-with-jquery.html b/www/blog/cycle-through-list-elements-with-jquery.html new file mode 100644 index 0000000..b371fda --- /dev/null +++ b/www/blog/cycle-through-list-elements-with-jquery.html @@ -0,0 +1,262 @@ +Cycle through list elements with jQuery

rmurphey adventures in javascript

Cycle through list elements with jQuery

I've written several versions of this functionality, and each time I revisit it, it gets simpler and I feel silly for how complicated I made it before. The thing to remember when you're working with jQuery is to always leverage the DOM. Christian Montoya has a good writeup about how he came to the same realization, and it's worth a read.

+

In the meantime, here's my latest iteration of cycling through items in an unordered list. In the example below, I've just put straight-up text in the list items, but they could contain entire divs of content if you wanted.

+

First, let's assume we have this HTML:

+

{% codeblock lang:html %}
<ul id="myList">
  <li>Item 1</li>
  <li>Item 2</li>
  <li>Item 3</li>
</ul>
{% endcodeblock %}

We'll start by verifying that there's more than one item before we do all the rest of the work. (Of course, if your list length doesn't vary, this if isn't necessary.)

{% codeblock lang:javascript %}
var $list = $('#myList');

// only bother if the list has more than one item
if ($('li', $list).length > 1) {
  // ...
}
{% endcodeblock %}

Following the principles of progressive enhancement, we don't want to include navigation elements in the HTML if they will only work with Javascript; rather, we want to add them with Javascript:

{% codeblock lang:javascript %}
var $list = $('#myList');

if ($('li', $list).length > 1) {
  // put the nav before the list
  $list.before('<div id="nav"><img /><img /></div>');

  // style the nav as needed
  $('#nav').css({textAlign: 'right'});
  $('#nav img').css({cursor: 'pointer'});
  $('#nav img:first').attr('src','backButton.gif');
  $('#nav img:last').attr('src','forwardButton.gif');
}
{% endcodeblock %}

Then comes the fun part: adding the behaviors to the nav buttons. This is where it pays to keep in mind the power of the DOM; in past iterations of this, I've seriously overthought the logic.

{% codeblock lang:javascript %}
var $list = $('#myList');

if ($('li', $list).length > 1) {
  // put the nav before the list
  $list.before('<div id="nav"><img /><img /></div>');

  // style the nav as needed
  $('#nav').css({textAlign: 'right'});
  $('#nav img').css({cursor: 'pointer'});

  // add the back button behavior
  $('#nav img:first').
    attr('src','backButton.gif').
    click(function() {
      $('li:visible', $list).
        hide().
        prev('li').
        show();
    });

  // add the forward button behavior
  $('#nav img:last').
    attr('src','forwardButton.gif').
    click(function() {
      $('li:visible', $list).
        hide().
        next('li').
        show();
    });
}
{% endcodeblock %}

But what happens if there's not a next li when you click on the forward button, or a previous li when you click on the back button?

{% codeblock lang:javascript %}
var $list = $('#myList');

if ($('li', $list).length > 1) {
  // put the nav before the list
  $list.before('<div id="nav"><img /><img /></div>');

  // style the nav as needed
  $('#nav').css({textAlign: 'right'});
  $('#nav img').css({cursor: 'pointer'});

  // add the back button behavior
  $('#nav img:first').
    attr('src','backButton.gif').
    click(function() {
      $('li:visible', $list).
        hide().
        prev('li').
        show();

      // if there wasn't a previous li,
      // show the last li in the list
      if ($('li:visible', $list).length == 0) {
        $('li:last', $list).show();
      }
    });

  // add the forward button behavior
  $('#nav img:last').
    attr('src','forwardButton.gif').
    click(function() {
      $('li:visible', $list).
        hide().
        next('li').
        show();

      // if there wasn't a next li,
      // show the first li in the list
      if ($('li:visible', $list).length == 0) {
        $('li:first', $list).show();
      }
    });
}
{% endcodeblock %}

One last thing: we need to hide all the list items and then show the first one:

{% codeblock lang:javascript %}
var $list = $('#myList');

if ($('li', $list).length > 1) {

  // hide all the list items and show the first one
  $('li', $list).hide().eq(0).show();

  // put the nav before the list
  $list.before('<div id="nav"><img /><img /></div>');

  // ... style the nav and add the button behaviors as above ...
}
{% endcodeblock %}

For extra fun (the second idea is sketched after this list):
    +
  • +Randomly reorder the list items before you add the rest of the code
  • +
  • Instead of show() and hide(), try fadeIn() and fadeOut() +
  • +
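Here's a minimal sketch of that second variation, swapping fadeOut()/fadeIn() into the forward-button handler from above (the wrap-around behavior is an assumption on my part):

{% codeblock lang:javascript %}
// forward button, with fades instead of show()/hide()
$('#nav img:last').click(function() {
  var $current = $('li:visible', $list);

  $current.fadeOut('fast', function() {
    var $next = $current.next('li');

    // assumption: wrap around to the first item at the end of the list
    if (!$next.length) { $next = $('li:first', $list); }

    $next.fadeIn('fast');
  });
});
{% endcodeblock %}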

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/datejs.html b/www/blog/datejs.html new file mode 100644 index 0000000..2e7d7a1 --- /dev/null +++ b/www/blog/datejs.html @@ -0,0 +1,118 @@ +DateJS

rmurphey adventures in javascript

DateJS

Forgot about coming across DateJS in one of my feeds the other day. Haven't used it yet, but expect I will next time I have to do much of anything with user date input. I'll be especially happy if it does end up getting integrated with the jQuery UI datepicker ....
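For what it's worth, here's a rough sketch of the kind of parsing that makes it appealing for user date input -- based on Datejs's documented examples, not on my own use of it yet:

{% codeblock lang:javascript %}
// Datejs extends the native Date parsing to handle natural language
// (calls shown here are from the Datejs docs; untested by me)
Date.parse('today');          // the current date
Date.parse('next friday');    // the upcoming Friday
Date.parse('July 4, 2008');   // ordinary date strings still work
{% endcodeblock %}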

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/dear-conference-organizer.html b/www/blog/dear-conference-organizer.html new file mode 100644 index 0000000..5c9e0f8 --- /dev/null +++ b/www/blog/dear-conference-organizer.html @@ -0,0 +1,128 @@ +Dear conference organizer

rmurphey adventures in javascript

Dear conference organizer

+

Thank you for the invitation to speak at your conference, and your offer to pay my hotel and airfare in exchange. I notice that you are selling tickets for upwards of $1,500, in addition to hotel and travel costs. For that price, attendees get a three-day event, meals, a USB drive, a shirt, and a binder.

+

We need to talk, and not about the binder.

+

TXJS was a one-day conference with approximately a $30k budget -- some tickets cost a whopping $29, and the most expensive tickets were $109 -- and yet it managed to offer its out-of-town speakers exactly what you're offering me: hotel and travel. It had two nights of open-bar parties; a shuttle bus to get people to and from the amazing, eclectic venue; catered breakfast and lunch; an open bar at the event itself; and a speaker and VIP brunch the following day. It also had some of the best JavaScript minds in the business on stage and in the audience, drawing people from around the country and beyond.

+

I hope you can appreciate the disconnect here. It's not so much that I want to get paid -- I have gladly and eagerly spoken at lower-cost conferences without even getting my hotel compensated -- as the fact that I know now that conferences just don't have to cost that much. When they do, it's hard to get on board with lending my name for free when the conference stands to have revenue of hundreds of thousands of dollars and I know I only needed revenue in the tens of thousands to put on an amazing event.

+

As a self-employed consultant, speaking at a conference has, at best, intangible benefit to me, and non-trivial costs. You and many other organizers seem to assume the benefit is obvious and the costs negligible. In fact, agreeing to a speaking engagement -- one that requires two non-working days for travel, a non-working day for speaking, and whatever prep time is required to actually put together my talk -- is a tough decision regardless of how much tickets cost, especially for someone without corporate backing or a product to promote. Call me selfish, but I need to be very clear that there's a benefit to me. A high-dollar conference likely to attract almost exclusively corporate-backed attendees with at best a peripheral relationship to my area of expertise is exactly the kind of conference where it's not clear what the benefit will be. Being away from home and work for three days, even in a nice hotel, just doesn't count.

+

--

+

Finally: To all of the TXJS speakers, we heart you unbelievably and stand in awe that when we asked you to present at an event that had never existed before, each and every one of you said yes without batting an eye. I hope you know that we know that even with the open bar and the parties and the shuttle and the venue and all of it, we couldn't have had a conference without you. 

+

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/deferreds-coming-to-jquery.html b/www/blog/deferreds-coming-to-jquery.html new file mode 100644 index 0000000..8d327c9 --- /dev/null +++ b/www/blog/deferreds-coming-to-jquery.html @@ -0,0 +1,248 @@ +Deferreds coming to jQuery 1.5?

rmurphey adventures in javascript

Deferreds coming to jQuery 1.5?

I have updated this post to show the code using the API that was released in jQuery 1.5.0.

+

A few weeks ago, a major rewrite of jQuery's Ajax functionality +landed in the jQuery GitHub repo. Thanks to Julian Aubourg, jQuery looks like it will get a feature +that I've desperately wished it had ever since I started spending time with +Dojo:

+

{% codeblock lang:javascript %} +function doAjax(debug) { + var req = $.ajax({ + url : 'foo.php', + dataType : 'json', + success : function(resp) { + console.log(resp); + } + });

+

if (debug) { + req.success(function(resp) { + console.log("let's see that again!", resp); + }); + }

+

// return the request object so other + // things can bind to it too! + return req; +}

+

doAjax().success(function(resp){ + console.log("Once more, with feeling!", resp); +}); +{% endcodeblock %}

+

Starting with 1.5 (I'm guessing), users will be able to easily attach callbacks +to XHRs ... later! And pass around the return value of $.ajax as an actually +useful object with a familiar API, rather than just getting back the native +XHR! No longer will we have to bundle up callback functionality -- some of +which might be optional, or depend on other code -- inside our success or error +callbacks. So hott.

+

When I heard that these Ajax changes had landed, I got to thinking about how +Dojo provides its ability to belatedly attach success and error handlers to its +XHRs: underlying its XHR methods is +dojo.Deferred. +It allows users to assign callback functions for success and error conditions +for a task that may not complete immediately. Dojo makes use of this for its +XHR stuff, but it's incredibly useful generically, too:

+

{% codeblock lang:javascript %} +function doSomethingAsync() { + var dfd = new dojo.Deferred(); + setTimeout(function() { + dfd.resolve('hello world'); + }, 5000); + return dfd.promise; +};

+

doSomethingAsync().then(function(resp) { + console.log(resp); // logs 'hello world' +}); +{% endcodeblock %}

+

So, Dojo provided the late callback functionality via deferreds. jQuery now had +late callback functionality. Was the deferred functionality hidden in the +jQuery Ajax rewrite, waiting to be exposed? Julian and I and several others got +to talking in the jQuery development IRC channel, and decided it seemed like an +interesting and viable idea. A few days later, Julian's first draft of jQuery.Deferred +landed in a branch on GitHub.

+

It's early days, but there have been a lot of good discussions already about +the proposed API and how it should work. Through all of the conversations I've +been part of, it's become really clear that no one cares about deferreds until +you show them what they actually mean: the ability to register an interest in +the outcome of arbitrary asynchronous behavior, even if the outcome has +already occurred. Even better, you can register your interest in the outcome +of behavior that may or may not be asynchronous.

+

I assure you that once you have experienced this, you will wonder how you lived +without it.

+

{% codeblock lang:javascript %} +var cache = {};

+

function doSomethingMaybeAsync(val) { + if (cache[val]) { + return cache[val]; + }

+

return $.ajax({ + url : 'foo.php', + data : { value : val }, + dataType : 'json', + success : function(resp) { + cache[val] = resp; + } + }); +}

+

$.when(doSomethingMaybeAsync('foo'))
  .then(function(resp){
    alert("The value for foo is " + resp);
  });
{% endcodeblock %}

+

It'll also be possible to do something like you see below. I'm not sure what +the exact API will be for creating a generic deferred instance, but I hope it +will be something along these lines:

+

{% codeblock lang:javascript %} +function doIt() { + var dfd = new $.Deferred();

+

setTimeout(function() { + dfd.resolve('hello world'); + }, 5000);

+

return dfd.promise; +}

+

doIt().then(function(resp) { console.log(resp); }, errorFn); +{% endcodeblock %}

+

These changes are sitting in a branch in the jQuery GitHub repo as we speak, and I think +it's likely we'll see them move to master sooner than later. It's a nice story +of collaboration and community participation that helped make something good -- +the Ajax rewrite -- even better.

+

It's exciting to see jQuery venture a bit more into the abstract. My +experiences with Dojo core so far make me think there are probably more +opportunities for these sorts of utilities that would be of high value for +a substantial number of jQuery users. On the other hand, one of the constant +themes of our conversations about deferreds has been the potential for +confusion with the new methods. Will the API look familiar and jQuery-like, or +will users be confused about the ability to chain methods on something other +than a jQuery selection? Are there bigger-picture considerations when it comes +to adding new constructors to the jQuery namespace? It'll be interesting to see +how these questions sort themselves out, especially if other similar features +appear that don't fall neatly under the well-established +DOM/Ajax/Events/Effects umbrella.

+

The conversation's happening on GitHub -- I hope you'll +join in.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/determine-the-order-of-two-dom-elements.html b/www/blog/determine-the-order-of-two-dom-elements.html new file mode 100644 index 0000000..917aa44 --- /dev/null +++ b/www/blog/determine-the-order-of-two-dom-elements.html @@ -0,0 +1,178 @@ +Determine the order of two DOM elements

rmurphey adventures in javascript

Determine the order of two DOM elements

Inspired by this from PHP, I wanted a +utility function to determine whether a given element came before or after +another element in the DOM.

+

{% codeblock lang:javascript %}
(function($){
  $.order = function($a, $b) {
    $a = $a.eq(0);
    $b = $b.eq(0);

    var c = 'order-test',
        result;

    $a.addClass(c);

    if ($b.hasClass(c)) {
      // elements are the same
      $a.removeClass(c);
      return 0;
    }

    $b.addClass(c);

    // whichever marked element the selector finds first is first in the DOM
    var $elements = $('.' + c);
    $elements.eq(0).addClass(c + '-first');

    if ($a.hasClass(c + '-first')) {
      // $a is first
      result = -1;
    } else if ($b.hasClass(c + '-first')) {
      // $b is first
      result = 1;
    }

    // clean up the temporary marker classes so repeated calls behave
    $a.add($b).removeClass(c + ' ' + c + '-first');

    return result;
  };
})(jQuery);
{% endcodeblock %}
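A quick usage sketch (the #intro and #footer ids here are hypothetical):

{% codeblock lang:javascript %}
// returns -1 if the first element comes first, 1 if the second does,
// and 0 if both selections point at the same element
var order = $.order($('#intro'), $('#footer'));

if (order === -1) {
  console.log('#intro appears before #footer in the DOM');
}
{% endcodeblock %}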

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/dojo-confessions-or-how-i-gave-up-my-jquery-security-blanket-and-lived-to-tell-the-tale.html b/www/blog/dojo-confessions-or-how-i-gave-up-my-jquery-security-blanket-and-lived-to-tell-the-tale.html new file mode 100644 index 0000000..824c6da --- /dev/null +++ b/www/blog/dojo-confessions-or-how-i-gave-up-my-jquery-security-blanket-and-lived-to-tell-the-tale.html @@ -0,0 +1,561 @@ +Dojo Confessions (Or: How I gave up my jQuery Security Blanket and Lived to Tell the Tale)

rmurphey adventures in javascript

Dojo Confessions (Or: How I gave up my jQuery Security Blanket and Lived to Tell the Tale)

This is a reprint of an article that originally appeared in the October +issue of JSMag.

+ +

I recently had the opportunity to architect the front-end of a new web application from scratch, and after years of using jQuery, I decided to give Dojo a try. For a variety of reasons — not least of which was Dojo’s approach to code organization and dependency management — I thought this would be a good project to get some real-world Dojo experience under my belt. What follows is an overview of Dojo from the perspective of an avid jQuery user. I’ve been using jQuery for years. Its simplicity is seductive; after a while, it kind of writes itself. And maybe that was my problem: I was ready to try something new. Many months ago, I wrote on Twitter that I was interested in learning more about Dojo. Rey Bango, jQuery evangelist, wrote back, and put me in touch with Pete Higgins, the Dojo project lead. Pete proceeded to lobby me for months to give his “unified toolkit” a try. I dabbled. I read the docs. Pete plied me with sample code to show me what I was missing, and even drove to North Carolina to lead a Dojo camp. In August, I decided it was time to stop dabbling and dive in. I’d just finished writing some essentially library-less JavaScript for a web-based iPhone application, a task that left me much more confident in my knowledge of JavaScript. With the mobile site out of the way, the client was ready to build the desktop version, and I would be in charge of the front end. It was time to choose a library; this time, I chose Dojo.

+ +

Deciding on Dojo

+ +

The decision to try a new library on a client project was a tough one +— I knew that I could write the basic functionality of the site using +jQuery a whole lot faster than I would write it with Dojo, simply because I +knew my way around jQuery so much better. Using Dojo would mean I’d be +looking up a lot of things, from simple stuff like how to work with DOM +selections, to more complicated tasks like how to structure my code for the +purposes of reuse and abstraction. As my work on the project progressed and +the deadline neared, I had plenty of second thoughts. A few things convinced me +to stick with Dojo when the going got tough:

  • Code organization +patterns: Dojo provides pretty clear guidance on structuring both your features +and your codebase. I’ve given a lot of thought to organizing jQuery code. +I wrote an article on the topic for JSMag and gave a presentation on the topic +at the jQuery conference. I was eager to try a library that explicitly answers +the organization question.
  • Class inheritance: I knew from the start +that I was going to use a lot of interaction patterns over and over. I wanted +to be able to write those patterns in a way that would let me use them across +features while still staying DRY. The class inheritance provided by +dojo.declare() was an elegant, easy-to-use solution to the +problem.
  • Dependency management: Being able to easily manage +dependencies was a huge draw for me; it promotes reuse and abstraction in a big +way. Dojo’s dependency management would also pave the way to easily +building production-ready files that would combine all the necessary +functionality for a given type of page, reducing the number of HTTP requests +required.
  • Service method descriptions: This particular application +relied on XHRs (AJAX) in a big way. Dojo’s service method description +approach would let me manage the URLs and parameters for those requests in a +single place, keeping pieces that might change separate from the core code. +Eventually, theoretically, the server-side code could actually generate this +SMD file automatically. More on this in a bit.
  • Templating: All the XHR +responses were JSON, which I’d need to turn into HTML. jQuery has +templating plugins to solve this problem, so this wasn’t really a +differentiating factor, but nonetheless it was going to make my life easier. I +could maintain the templates for turning JSON into HTML separately from my +JavaScript, and even programmatically choose the template depending on the +situation.
  • The meaning of this: When binding a function +to an event on an element in jQuery, this inside the function +refers to the element that triggered the event. This is arguably desirable for +simple code, but when you start organizing your code into objects with methods +and you want this to refer to the object, not the element, it can get painful. +The dojo.hitch() method lets you cleanly change the meaning of +this for any given function, and it’s transparently rolled into other +methods, such as dojo.connect() for event binding.
  • +
  • Documentation and support: Dojo has a reputation for poor documentation, +and to some extent it’s deserved. Their documentation is a whole lot +harder to use than jQuery’s because, at first glance, it’s quite a +bit more scattered and substantially more API-based than task-based. However, +once I figured out where to +look for the docs I needed, finding answers to my questions was pretty +painless. I also leaned heavily on some experienced Dojo developers for +guidance and support, and dropped in to the ever-helpful #dojo IRC channel on +Freenode if I got stuck.
+ +

Getting Started

+ +

The first step was assembling my new toy. I opted to use the library via +Google’s CDN so I could get up and running as quickly as possible. After +that, it was time to figure out how I’d organize my files. Dojo +actively supports namespaces for components, which means you can put your +application files in one or more directories and associate your namespaces with +those directories. I created a high-level controller file in the root /js +directory; it would be responsible for figuring out which functionality was +required for a given page (a decision I’ll eventually revisit). Then, I +created a directory inside the root /js directory, where I’d put all of +the individual files for the various components.

+ +

Finally, I included a line in my controller file to tell Dojo where to find +the namespace I’d be using: dojo.registerModulePath('myNamespace', +'../js/myNamespace'); Figuring out all of these pieces may have been the +hardest part of making the switch to Dojo — it was a whole lot more setup +than I was used to with jQuery, and though it is all documented, it took a bit +of effort to find the details and to get the paths set up correctly. The time +it took to get everything working properly was time that I spent wondering +whether I’d made a good decision. Once it was working, it was time to +write some actual code and try to answer that question.

+ +

Get Some Elements, Do Something With Them

+ +

Those words sum up the jQuery paradigm. In jQuery, you query the DOM using a +CSS selector, and the result of that query is a jQuery object, which you can +then operate on using method chains. It’s fairly rare in jQuery to work +directly with a DOM element. While Dojo supports this paradigm through its +dojo.query() method and the NodeList it returns, it’s common in Dojo to +work directly with a DOM element rather than a NodeList. My initial +inclination was to stick with what I knew from jQuery, and to use +dojo.query() to get everything I wanted to work with. As I dug in, +though, I discovered that it could actually be just as elegant (and less +expensive) to work directly with DOM elements, even though they didn’t +come with any of the magic of a jQuery object. The syntax for doing so was a +bit different — for example, dojo.addClass(myDomElement, +‘foo’) instead of +$(myDomElement).addClass(‘foo’) — but the more +code I wrote, the more frequently and easily I found myself using the +dojo.addClass syntax instead. Embracing this approach was especially valuable +when it came to methods that returned something. For example, the +dojo.connect() method (used to connect events to elements, similar +to $().bind()) returns a connection object, which can be stored +and disconnected later without having to know which element the event was +attached to. This is, in a word, awesome. It’s also an example of how +Dojo requires you to think somewhat differently about how you write your +JavaScript.
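To make the contrast concrete, a small sketch (the ids and class names are made up):

{% codeblock lang:javascript %}
// the jQuery-ish way: query, then chain methods on the resulting NodeList
dojo.query('#sidebar li.active').addClass('highlight');

// the more Dojo-ish way: work with a plain DOM node directly
var node = dojo.byId('sidebar');
dojo.addClass(node, 'highlight');
dojo.style(node, 'display', 'block');
{% endcodeblock %}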

+ +

Returning a Result For the Win

+ +

Along those lines, I had to get used to the fact that a lot of Dojo methods +returned objects that I could talk to later. For example, +dojo.animateProperty() created an animation object which could +later be play()’d. All of the XHR methods — and asynchronous +methods in general — returned a “deferred” object, to which I +could later add success and failure callbacks. jQuery does return the native +XHR object from its $.ajax() method, so you can technically add +callbacks there too. What I liked about Dojo’s deferred approach is that +it provides a common, simple interface for interacting with all asynchronous +operations, and even lets you define your own. Getting the hang of how to take +advantage of these things took some doing, coming from the more procedural, +chained world of jQuery, where just about everything returns a jQuery object. +Soon, though, I was setting up animations long before I was ready to play them, +and adding callbacks to XHRs after they started.
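A sketch of what that looks like in practice (the node id, URL, and timings are made up):

{% codeblock lang:javascript %}
// set up the animation now; nothing happens until play() is called
var anim = dojo.animateProperty({
  node : 'status-box',
  properties : { opacity : { end : 0 } },
  duration : 500
});

// ... later, when we're ready ...
anim.play();

// XHR methods return a deferred we can attach callbacks to after the fact
var dfd = dojo.xhrGet({ url : '/status', handleAs : 'json' });
dfd.addCallback(function(resp) { console.log(resp); });
{% endcodeblock %}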

+ +

SMDs: A Unified Way to Talk to the Server

+ +

One thing I really wanted to try with Dojo was making use of Service Method +Descriptions, or SMDs. An SMD file basically contains information about all of +the services provided by a resource. In my case, the resource was the +server-side application, which I’d be communicating with to request JSON +data. By creating an SMD file, and then instantiating a new Service based on +that file, I could create a single place for managing all the paths and +parameters I’d use to get what I needed from the server. When I asked the +server for something, the Service I created would return a deferred object, to +which I could attach callbacks. In the background, Dojo was just running an +XHR, but my individual classes didn’t have to worry about the details +— I just had to worry about the name of the method and the parameters it +required, and the Service I’d defined and instantiated would take care of +the rest. For my initial work, I just created the SMD file by hand, but +eventually it’s easy to see how the SMD could be generated by the +server-side application itself. Here's a sample SMD (normally an SMD would +have a lot more services, obviously):

+ +

{% codeblock lang:javascript %} +{ + transport : 'GET', + envelope : 'PATH', + target : '/json',

+

services : { + callouts : { + parameters : [ { name : 'callouts', type : 'string' } ], + returns : 'object' + } + } +} +{% endcodeblock %}

+

And here's some abbreviated code that makes use of the SMD:

+ +

{% codeblock lang:javascript %} +cache : {},

+

services : new dojox.rpc.Service(dojo.moduleUrl('smd', 'json.smd')),

+

postCreate : function() { + // ... +},

+

_lookup : function() { + var val = this.domNode.value;

+

if (this.cache[val]) { + this._handleResult(this.cache[val]); + } else { + var c = this.services.json.callouts({ 'callouts' : val });

+
c.addCallback(dojo.hitch(this, function(resp) {
+  this.cache[val] = resp;
+}));
+
+c.addCallback(dojo.hitch(this, '_handleResult'));
+

} +},

+

_handleResult : function(resp) { + // ... +} +{% endcodeblock %}

+

Dependency Management and Building

+ +

I love jQuery, I do, but this is an area where it is sorely lacking. It offers little guidance on how to structure your codebase, how to ensure that everything you need is loaded, or how to build your code into production-ready files. Dojo really shines here, but again, it takes a bit of getting used to. The base dojo.js includes a ton of basic functionality, but I had to quickly learn to dojo.require() additional functionality as I needed it. In jQuery, you’d do this simply by adding another script tag to your HTML; dojo.require() basically does this for you programmatically, but checks to see that the required code hasn’t already been included first. This means each of your components can require exactly what it needs, and it’s perfectly safe to require the same thing more than once. The flip side of this is that if you forget to require something you need, or if you require it using the wrong name, it’s not always clear exactly where you made the mistake. Each file that will be dojo.require()’d begins with a dojo.provide() statement, telling the dependency management system that it has, indeed, found the right file. After that, the file can go on to dojo.require() its own dependencies, which Dojo loads before proceeding with the rest of the file’s code. The dojo.provide() method also sets up the object chain along the path; for example, if you dojo.provide(‘a.b.c.d’), you can then safely do a.b.c.d.e = { ... }. When it comes time to assemble your individual files into production-ready, combined, minified files, Dojo’s build system is able to parse dojo.require() statements and automatically include the appropriate files; with jQuery, this is a much more manual process that can be difficult to maintain. Creating a build wasn’t as straightforward as I’d hoped it would be, and I stumbled a lot along the way. It took a bit of doing to get all of the paths just right, and to figure out how to have a repeatable build process that we could roll up into our full release process. The payoff was big, though: I could keep my files organized how I wanted them, but only serve one file in production. The build system figured out the steps in between.
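Concretely, the top of one of those files reads something like this (the file path and the dojo.fx dependency are just for illustration):

{% codeblock lang:javascript %}
// js/myNamespace/Thinger.js

// tell the loader -- and, later, the build system -- what this file provides
dojo.provide('myNamespace.Thinger');

// and what it needs; modules that are already loaded aren't fetched twice
dojo.require('dojo.fx');
dojo.require('myNamespace.Toggler');
{% endcodeblock %}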

+ +

Organization, Inheritance and Abstraction

+ +

As I mentioned above, code organization has been a big issue for me with +jQuery. I’ve developed some patterns that I use to keep my code sane, but +plenty of other jQuery developers have not, which can make working with other +people’s code rather painful. While it’s certainly possible to +write procedural, disorganized code with Dojo, dojo.declare() +provides a clear way to avoid it. I talked earlier about creating a namespace +for my code and an associated directory. When it came time to start writing, I +created individual files in that directory for each component on the page. +Inside each file, I indicated which component the file was providing via +dojo.provide(), loaded any dependencies via +dojo.require(), and then created a class in the namespace using +dojo.declare(). The dojo.declare() method takes +three arguments: the name of the class you want to create +(‘myNamespace.Thinger’), other classes you want to +“mix in” to your new class (if any — this argument can be +null, a single class, or an array of classes), and, lastly, an object that +defines the class methods and properties. The result is a class that can be +instantiated using new myNamespace.Thinger(); the object +that’s created encapsulates all the behaviors and states associated with +a particular Thinger, and you can have as many instances of Thinger as you +want. The mixing in thing is huge, because it lets you have a class that +incorporates methods defined in another class. For example, I created a class +called myNamespace.Toggler that would show either the first item in a list or +all of the items in a list; clicking on the first list item would toggle +between the behaviors. Once the myNamespace.Toggler class was created, other +classes could inherit its behavior simply by passing a reference to the +myNamespace.Toggler class as the second argument of +dojo.declare(). I was able to encapsulate the Toggler behavior in +a reusable way, and keep the code for the classes that inherited the Toggler +behavior nice and clean.
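A sketch of the pattern using the names from this article (myNamespace.FilterList and the method bodies are invented for illustration):

{% codeblock lang:javascript %}
// a reusable behavior
dojo.declare('myNamespace.Toggler', null, {
  toggle : function() {
    // show either the first item or all of the items
  }
});

// mix the Toggler behavior into another class
dojo.declare('myNamespace.FilterList', [ myNamespace.Toggler ], {
  constructor : function(args) {
    this.domNode = dojo.byId(args.node);
  }
});

var list = new myNamespace.FilterList({ node : 'my-list' });
list.toggle(); // inherited from myNamespace.Toggler
{% endcodeblock %}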

+ +

Event Management

+ +

I mentioned earlier that Dojo has a slightly different take on event binding +than jQuery. I should also say that you can bind events to entire NodeLists +(the result of dojo.query()) if you want, using +.connect() (or convenience methods like .onclick(), +etc.). However, if you want a reference to the connection for later use, +dojo.connect() is your friend. I created a component using +dojo.declare() that was responsible for managing user interaction +with a star rating UI element. I used dojo.connect() to hook up +some mouseover/mouseout behaviors to the element, and stored the returned +connections as properties of the component. When the element was clicked, I +registered the rating, and I wanted the mouseover/mouseout behaviors to go +away; eliminating them was simply a matter of dojo.disconnect()-ing the stored +connections.
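A sketch of that pattern, with dojo.hitch() keeping this pointed at the component (the class name, node handling, and method names are invented for illustration):

{% codeblock lang:javascript %}
dojo.declare('myNamespace.StarRating', null, {
  constructor : function(node) {
    this.domNode = dojo.byId(node);

    // store the hover connections so we can tear them down later;
    // dojo.hitch() keeps `this` pointed at this component
    this._hoverConnects = [
      dojo.connect(this.domNode, 'onmouseover', dojo.hitch(this, '_highlight')),
      dojo.connect(this.domNode, 'onmouseout', dojo.hitch(this, '_unhighlight'))
    ];

    dojo.connect(this.domNode, 'onclick', dojo.hitch(this, '_handleClick'));
  },

  _highlight : function(evt) { /* ... */ },
  _unhighlight : function(evt) { /* ... */ },

  _handleClick : function(evt) {
    // once a rating is registered, the hover behavior goes away
    dojo.forEach(this._hoverConnects, function(c) { dojo.disconnect(c); });
  }
});
{% endcodeblock %}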

+ +

Publish and Subscribe

+ +

With all of the XHR traffic occurring on the page, I wanted a way to shut it +off if the user was inactive for a little while, but I didn’t want to +write the code for handling that inside every component that used XHR. +Dojo’s pub/sub tools offered the perfect answer. I created a new +component whose sole purpose was to watch for inactivity; when it discovered +inactivity, it would dojo.publish(‘/user/inactive’). +Then, other components — the ones I’d already written, and ones I +write in the future — could subscribe to the /user/inactive topic and +react accordingly. Pub/sub is an excellent way to allow this sort of abstract +communication between components. The component that publishes a +“topic” doesn’t care who’s listening; the component +that subscribes to a topic doesn’t care which component sent it. +It’s another example of how Dojo leads you to think a bit differently +about how you architect your applications — knowing about pub/sub can +help you write much more loosely coupled code.
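A sketch of the pattern (the /user/inactive topic is from this article; the payload and handler are invented):

{% codeblock lang:javascript %}
// a polling component registers its interest without knowing who publishes
var handle = dojo.subscribe('/user/inactive', function(info) {
  console.log('pausing polling; user idle for', info.idleFor, 'seconds');
});

// the activity monitor announces inactivity without knowing who is listening
dojo.publish('/user/inactive', [ { idleFor : 300 } ]);

// a subscription can be torn down whenever the component goes away
dojo.unsubscribe(handle);
{% endcodeblock %}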

+ +

What I Missed from jQuery

+ +

jQuery’s event delegation-related methods .live() and +.is() were hard to live without. There’s a reasonable way to +mimic .is(), but no out-of-the-box replacement for +.live() — you end up writing your event delegation yourself. +Dojo does have dojo.behavior(), which automatically binds event +handlers to elements that match a given selector as they’re added to the +page; however, the event handlers are bound to individual elements, and +there’s no way to provide a context to the selector that you pass to +dojo.behavior(). This may be my noob-ness talking, or maybe +I’m just used to the error messages I’d see when I did something +wrong with jQuery, but I often found myself feeling that the error messages +from Dojo were too cryptic. Worse, sometimes I’d do something wrong and +it would seem to fail silently. I spent a lot more time in Firebug tracking +down the errors of my ways. In general, the thing I really missed from jQuery +was the “it just works” aspect of the library. I expect that with +time I’ll feel that way about Dojo, but in the meantime there are +definitely growing pains. I had to constantly remind myself that the way to +solve a problem in Dojo might not be the same way I’d solve it in jQuery. +More than once I rewrote a slew of code when I discovered some Dojo methodology +or approach I hadn’t known about before.
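For reference, the dojo.behavior() usage I'm describing looks roughly like this (the selector and handler are invented; dojo.behavior is a separate module you have to require):

{% codeblock lang:javascript %}
dojo.require('dojo.behavior');

dojo.behavior.add({
  // handlers get bound to each element matching the selector...
  '#results li' : {
    onclick : function(evt) {
      dojo.toggleClass(evt.target, 'selected');
    }
  }
});

// ...but only when apply() is called, e.g. after new content is added
dojo.behavior.apply();
{% endcodeblock %}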

+ +

Conclusion

+ +

Lest Rey worry that he never should have introduced me to Pete in the first +place, fear not: I don’t expect to give up jQuery anytime soon. If +anything, I’m excited to see how the library and the community mature and +start answering some of the organization and dependency management questions I +mentioned above. As a library, jQuery most definitely has its place; it has +virtually no barriers to entry and it has helped usher in an era where +it’s dead-simple to create rich, interactive websites. Deciding to use +Dojo instead was something of a gamble. I had to convince the project lead that +it was a good decision, which was challenging considering the popularity of the +jQuery library. He asked lots of pointed questions about the maintainability of +the code if I were to leave the project, and those questions were well +deserved. If anything, though, I think that choosing Dojo has actually +increased the maintainability of the code by presenting clear patterns for +organization, abstraction, and dependency management. Did it take a while to +come up to speed with Dojo? For sure. Will a jQuery developer off the street be +able to jump right in to the code I wrote? Possibly not. At the end of the day, +though, it is just JavaScript, and any skilled JavaScript developer should be +able to find their way around. They’ll almost certainly find, given an +hour or two, that the code I wrote is easier to follow than some of the jQuery +code I’ve run into that doesn’t make use of good organizing +principles. In the meantime, I hope to be working on the project for a while +to come, and I expect the trouble I went through to come up to speed on Dojo +will pay big dividends as the application I’m working on grows and +matures.

+ +

Learn More

+

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/enterprisedojo-com-uses-jquery.html b/www/blog/enterprisedojo-com-uses-jquery.html new file mode 100644 index 0000000..0605729 --- /dev/null +++ b/www/blog/enterprisedojo-com-uses-jquery.html @@ -0,0 +1,126 @@ +EnterpriseDojo.com uses jQuery!

rmurphey adventures in javascript

EnterpriseDojo.com uses jQuery!

And you should too ... for your WordPress blog that came with jQuery already installed and probably barely even needs JavaScript in the first place. 

+

Here's an idea: Let's have an honest discussion about when a given solution makes sense and when it doesn't, rather than mocking a contributor to that discussion for choosing a perfectly appropriate tool for a perfectly mundane task. EnterpriseDojo.com is a blog about using Dojo in the enterprise, not a blog about using Dojo on blogs. Snickering at its use of jQuery is approximately as useful as pointing to this graph and saying case closed, leaving more reasonable, thoughtful people to explain all the ways that graph doesn't tell us a damn thing

+

I want to be very clear, as the dust settles around my several recent rants: jQuery has its place, and it is a very, very big place. It is a lovely DOM, Ajax, and events library, and a great way to get certain things done quickly, especially for people who may not have the luxury of learning the inner workings of JavaScript (noble a goal as that is). There was a time when DOM and Ajax and events questions felt like the questions of the day, and jQuery showed us how to answer those questions using a simple, easy-to-learn API that suddenly made JavaScript accessible to the masses. Other libraries solved the same questions but those solutions felt ugly and clunky and goshdarnit, hard, and I give jQuery the utmost credit for making JavaScript suck so much less when all I wanted to do was show or hide or slide a thing or load some content onto my page. I'll even grant that it can be used as a piece of a large application solution, though I question the wisdom of doing so for reasons outlined in another post. Heck, I'll even grant that "enterprises" aren't off their rockers to use it -- for the things it was meant to be used for. 

+

What upsets me is when smart people seem to say that jQuery's victory in an internet-wide popularity contest suggests, well, anything at all when it comes to more complex needs. It upsets me because my clients hear those suggestions, look at that same well-marketed graph, and I am left explaining to them that, yes, jQuery is popular, but it's popular because it answers a small set of questions easily and well -- even for people who don't even know JavaScript! -- and you, dear client, have vastly larger questions than that.

+

At the end of the day, these toolkit decisions ought to be about more than a popularity contest; jQuery may be the right answer, or part of the right answer, but it's imperative, to me, that my clients understand the scope of the question first. It's imperative that they make their decisions based on a full understanding of pros and cons, risk and reward, cost and benefit -- not based on a graph, not based on a popularity contest.

+

So again I say: we, as a community, and especially the influential ones among us, do well when we elevate the conversation beyond that contest and acknowledge that choosing the right toolkit depends on first understanding what you're choosing the toolkit for. We do well when we educate teams and decisionmakers on the lessons we've learned in the time since DOM, Ajax, and events were the big question of the day, on the best practices that have emerged, on the situations where we've, gasp, had to look beyond jQuery -- either to other tools or other toolkits -- for the answers. And we do well when we start showing them how they can do the same.

+

An aside: Some people I like a lot have pointed out that in the midst of all of my complaining, I have not come out and offered a solution. This is fair. Two things: One, I have not wanted this conversation to collapse into Dojo vs. jQuery vs. YUI vs. Ext vs. MooTools vs. whatever, because if it does, I think we've missed the crux of the matter: that different tools do different things, that some set out to answer complex questions and some do not. If anyone has been unclear, which I rather doubt, my personal preference of late has been to use Dojo. I do not recommend using Dojo for everything under the sun, but I find it offers a lot of utility when writing non-trivial applications. I cannot make a compelling argument for using it vs. YUI, simply because I don't know YUI well enough. I can't even make a compelling argument for using it vs. Ext, except for licensing issues that may or may not be relevant in a given scenario. Two, if you're disappointed that I haven't offered a solution, especially a jQuery-based one, I apologize. However, I feel there are too many viable existing solutions out there already, and I haven't come up with a good reason to promote a jQuery-based solution besides jQuery's popularity. And, well, see above for my thoughts on that.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/five-questions.html b/www/blog/five-questions.html new file mode 100644 index 0000000..581eb39 --- /dev/null +++ b/www/blog/five-questions.html @@ -0,0 +1,223 @@ +Five Questions

rmurphey adventures in javascript

Five Questions

I recently started in a new role: I'm the dev lead of a project that was +already in the hands of a group of skilled developers before I showed up, a +project whose scope and technologies extend far beyond the experiences I've had +up until now.

+

As you might imagine, there have been a lot of challenges, but one that's been +particularly interesting has been figuring out how to meaningfully contribute +to decisions about systems I don't intimately understand. It's easy to be +inclined to sit those conversations out: I really don't yet know enough to +participate, and the "who am I to have a say?" instinct is strong.

+

The problem: that attitude will ensure that I never know enough to +participate, and though I am definitely out of my comfort zone, my job -- the +job I asked to do, and the job I have been asked to do -- is to participate, +to learn, and to change the definition of my comfort zone.

+

While I may not have the project-specific experience to lean on, I'm finding +that there are a few questions that help me understand, discuss, and -- ultimately -- consent or object to a technical plan. They're questions that +seem to work well across a spectrum of discussions; they work whether we're +talking about a wholly new system, a migration from an old system, or a +solution to a particularly prickly problem.

+

These questions don't just help me gain a better understanding of a topic, or +help me make better decisions; they've also helped me reframe my understanding +of my role as a lead.

+

Question 1: What are we doing and why?

When I hear the answer, I'm listening for whether the developer is clearly +articulating the problem and the solution. Do we clearly understand the +problem? Is the solution magical, or can we explain why it works? Are we +solving more than the problem, and thereby incurring unnecessary risk? Does the +developer agree that the work is necessary?

+

Question 2: How could it go wrong?

A developer who says nothing can go wrong probably hasn't been a developer +for very long. I want to hear far-fetched scenarios, and an explanation for +why they're far-fetched. I want to hear worst-case scenarios; good developers +have already thought about these plenty, they've worked to avoid them, and +yet they acknowledge their existence. The goal of this question isn't to plan +for everything; rather, the answers provide context for poking at assumptions.

+

Question 3: How will we know if it's going wrong?

This is probably my favorite question. If we're talking about developing a new +system or project, it's a question of how we'll know we're off track, which +leads to clear milestones and check-in points. If it's a migration to a new +system, or a solution to a bad bug, it's a question of how we'll know that +the new state is less good than we thought it would be. If the answer is +"customers will tell us," we're in dangerous territory. For services, I hope to hear answers about automated monitoring, but manual checks will suffice. For +new projects, I hope to hear near-term goals that will help us gauge progress.

+

Question 4: What will we do if it goes wrong?

The answer to this may not always be knowable -- obviously we won't always know +the ways things will go wrong -- but it's a useful exercise nonetheless. The +answer may be "we'll have to revert back to the old system and that will be +very hard," but that at least helps me understand the stakes of the decision. +For new projects, this is a great way to identify the point of no return -- +that is, the point in the project where starting over or changing course +becomes prohibitive.

+

Question 5: Is there an "undo" button?

Sometimes, the worst happens. Do we have an escape hatch? How hard will it be +to add one later vs. adding one now? Again, it may be OK if we don't have a +rollback plan, but knowing that answer should help guide the decision +about whether to proceed.

+
+

I'm learning that a lot of what makes me kind of OK (I hope!) at this dev lead +thing isn't a deep knowledge of the specific technologies that are the +underpinning of the project (though it's certainly important that I be able to +find my way around). Rather, it's my ability to ask these questions, and to +hear and understand the answers, and interpret them into action. I'm thankful +to the team that is giving me the chance.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/fix-for-slow-loading-google-ads.html b/www/blog/fix-for-slow-loading-google-ads.html new file mode 100644 index 0000000..fb48e44 --- /dev/null +++ b/www/blog/fix-for-slow-loading-google-ads.html @@ -0,0 +1,126 @@ +Fix for slow-loading Google ads

rmurphey adventures in javascript

Fix for slow-loading Google ads

Google's AdSense ads, and lots of others, are added to pages using Javascript, and if that Javascript appears early in the page's HTML, it can seriously slow down the rendering of the rest of the page. That's because browsers generally refuse to do any further rendering of the page until they have a requested Javascript file in hand.

+

We ran into this with the ad in the left column of some pages on DailyStrength.org, which appears above the page content in the HTML; when we switched to a new ad provider, which required fetching multiple Javascript files, the issue was even more pronounced.

+

Since these ads only work when Javascript is enabled anyway, I decided to use some DOM manipulation (via jQuery, which is already on the page) to load the ad script in a hidden div at the bottom of the HTML, and then relocate the Javascript-generated iframe containing the ad to an empty, visible div where the ad needed to be:

+

{% codeblock lang:javascript %} +$('#ad_hide').find('iframe').appendTo('#ad'); +{% endcodeblock %}
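For clarity, the assumption is an empty, visible #ad div where the ad belongs and a hidden #ad_hide div at the bottom of the page wrapping the ad network's script; the relocation then runs once the script has generated its iframe (a sketch, handling the timing naively via window load):

{% codeblock lang:javascript %}
// #ad: empty, visible placeholder where the ad should appear
// #ad_hide: hidden div at the bottom of the page containing the ad script
$(window).load(function() {
  // move the Javascript-generated iframe into the visible slot
  $('#ad_hide').find('iframe').appendTo('#ad');
});
{% endcodeblock %}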

+

Now, loading the Javascript doesn't slow down the rendering of the content, and the ad appears right after the page is loaded.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/fix-uneven-line-lengths-in-headlines.html b/www/blog/fix-uneven-line-lengths-in-headlines.html new file mode 100644 index 0000000..beb91ef --- /dev/null +++ b/www/blog/fix-uneven-line-lengths-in-headlines.html @@ -0,0 +1,174 @@ +Fix uneven line lengths in headlines

rmurphey adventures in javascript

Fix uneven line lengths in headlines

I used to work in newspapers, and we were diligent about fixing "bad breaks" in headlines. Nothing looks worse than a headline that's far longer on one line than on another. Desktop publishing software has some intelligence about these bad breaks, but the web, not so much. A little jQuery can clean up the bad breaks, though -- for example, this will clean up all the h2s.

+

{% codeblock lang:javascript %}
$('h2').each(function() {
  var $h2 = $(this);

+

// get the height of the existing element + var height = $h2.height();

+

// get the text of the existing element + var text = $h2.text();

+

// put temporary text in the element + // that will make the element one line tall + $h2.text('temp');

+

// get the height of the one-line element + var singleLineHeight = $h2.height();

+

// check to see if the initial element was + // taller than the one-line element; + // if so, look for the midpoint of the + // initial text and split it there + if (height > singleLineHeight) { + var length = text.length; + var mid = parseInt(length/2); + for (i=mid; i>0; i--) { + if (text[i] == ' ') { + var breakIndex = i; + break; + } + } + var newText = ''; + for (i=0; i'; + } else { + newText = newText + text[i]; + } + } + $h2.html(newText);

+

} else {

+
// otherwise, put the initial text
+// back in the element and move on
+$h2.text(text);
+

} +}); +{% endcodeblock %}

+

So you end up changing this:

+

{% codeblock %} +This is a very long headline that goes on and then breaks +badly +{% endcodeblock %}

+

to something like this:

+

{% codeblock %} +This is a very long headline that +goes on and then breaks badly +{% endcodeblock %}

+

You'll want to change the $('h2') selector to exactly what you need; in the case of this blog, $('div.post h2 a') is actually more appropriate. You're going to be best off if matching elements contain only text, and no HTML.

+

Also, this will only work for elements that are one or two lines long; for headlines of arbitrary length, you'd need to see exactly how the height of the initial element compared to the height of the temporary one-line element, and then split the headline multiple times. Then again, thinking back to my newspaper days where we had to convey a whole story in four words, you might ask yourself whether you actually need a headline that's more than two lines long.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/flying-lessons.html b/www/blog/flying-lessons.html new file mode 100644 index 0000000..8574b15 --- /dev/null +++ b/www/blog/flying-lessons.html @@ -0,0 +1,169 @@ +Flying Lessons

rmurphey adventures in javascript

Flying Lessons

In October of 2008, I'd been unemployed for about four months. I was doing some freelance work, but still feeling entirely uncertain about my ability to make a living. I decided to do what any marginally employed person might do: spend about $7,000 taking lessons to become a private pilot.

+

I have had a lifelong fascination with flying, and I'd taken lessons in gliders when I was a kid -- every four hours of helping at the airfield got me one 15-minute lesson. If you don't know about gliders, they're just airplanes except without an engine. On the up side, that means your engine can't fail; on the down side, if you mess up when you're trying to land, you don't exactly get a second chance. That whole landing thing always terrified me, and I was off to college before I ever managed to "solo."

+

A little more than 13 years later, I found myself in a Cessna 152 rolling down a 3,200-foot grass runway just outside of Durham, N.C., the 100-foot trees at the end of the runway growing ever-closer in the windshield until a dial on the instrument panel said we were going 55 knots -- which is basically like 55 miles per hour, but when you say knots you sound like a pilot (or a sailor, I guess) -- and the instructor said it was time to pull back on the yoke, ever so gently. And the plane lifted off the runway and the windshield was filled with more sky than ground and the trees passed below me and I was flying.

+

The FAA says you can get your license after just [40 hours of flying][ppl], plus a little bit of ground instruction, and on that October day I was sure I'd knock it out in 45 hours, tops. I'd been flying flight simulators since I was like six years old, when -- no joke -- I loaded the program off a cassette player. When I was a kid I went to the airport in my hometown to score expired navigational charts. Plus I had flown gliders, and plus, I was smart. How hard could it be?

+

You know how they say flying is safer than driving? You're pretty safe on that plane that you took to get here -- like, hundreds of times safer than in a car -- but it turns out that little planes flown by private pilots crash all the time. Pilots leave the gas caps unscrewed and all the fuel gets sucked out and they don't even notice until the engine sputters and dies. They overload their plane on a hot day and don't quite clear those trees at the end of that runway. They fly into weather for which their skills are no match, and end up running into a mountain -- euphemistically referred to as CFIT, or "controlled flight into terrain."

+

Lots of private pilots are shining examples of the Dunning-Kruger effect: unskilled and unaware of it, much like myself in those first few lessons. A typical private pilot has fewer than [100 hours][nyt] of flying time -- airline pilots have thousands or even tens of thousands -- but they have that piece of plastic that says they can fly a plane, and gosh darnit, they are going to fly a plane.

+

It would eventually take me six months and more than 60 hours to get my license, and by the end I was in no rush. In one of many sleepless nights during my training, I came to realize a thing: learning to fly wasn't just about learning to take off and land and get from point A to point B. Barring infinite money and infinite time, it was about learning how to be permanently new at a thing that could kill me if I screwed it up.

+

It's been six years since I rolled down that runway, and six years later, it occurs to me that there are a whole lot of parallels between that experience and my life as a developer. I remember showing up to the inaugural JSConf in 2009, feeling pretty secure in my standing as a bit of a jQuery badass, and being promptly blown away by just how large the JavaScript world actually was, even in 2009. I felt intimidated and overwhelmed and, I won't lie, a bit embarrassed at how little I actually knew and understood. Over time, though, I've realized: this is just the permanent state of things. I'd better get used to it.

+

This, then, is a talk about how to be new at a thing.

+
    +
  • not about learning new things, about being new at a thing
  • +
+

Aviate, Navigate, Communicate

    +
  • translation: know your priorities
  • +
  • flying: nowhere to pull over
  • +
  • priorities when flying: 1) do not die, 2) get where you're going
  • +
  • priorities at a new job: balance learning with project delivery.
  • +
  • study, ask, do
  • +
  • on my team, we try to be explicit about this with new folks.
  • +
+

All Available Information

    +
  • FAR §91.103
  • +
  • flying: you are responsible for not dying. you're expected to know about the weather, the airport you're flying to, the route you're taking, your aircraft's limitations, your own limitations, etc. the FAA will occasionally do "ramp checks" to make sure you've done your due diligence.
  • +
  • ultimately, this is all about not making assumptions. just because it's sunny out doesn't mean it will be in a couple of hours. just because you flew to an airport on a half tank of fuel last time doesn't mean it will work out the same way today, when there's a 40-knot headwind.
  • +
  • "all available information" is somewhat preposterous on its face these days; the amount of information available to us is unreal. i think of this more as a challenge: what information could i get that i haven't gotten? could it possibly be useful?
  • +
  • "all available information" means being diligent and methodical about gathering facts before making a decision. this is a lot slower than making decisions based on assumptions. later, when you're not new, you can make decisions based on assumptions -- we might call that instinct. but not when you're new.
  • +
  • when i was hired at bv, they brought me on to improve the organization and maintainability of a project's codebase. i didn't just read the code and get to work tearing it apart; i interviewed every developer on the team to find out where their pain points were, and learned that certain parts of the code, while terrible, weren't worth spending time on.
  • +
  • what information could you get that you haven't gotten? could it possibly be useful?
  • +
+

Climb, Communicate, Confess, Comply

    +
  • sometimes we get lost. it's ok!
  • +
  • step 1 is to realize you're lost
  • +
  • step 2 is to explain what's wrong
  • +
  • step 3 is to ask for help
  • +
  • step 4 is to do what they say

    +
  • +
  • i think a lot of people are reluctant to ask for help because they're afraid of how people will respond when it becomes clear they don't know everything. of course they don't know everything, they are new! i think also though that people don't know how to ask for help. in my experience, people are actually incredibly willing to help -- as long as you've done your due diligence. this means you've read the docs, done your google due diligence, read the surrounding code, explored the problem with debugging tools, and produced a reduced test case that demonstrates your problem.

    +
  • +
  • developing the skills to make a good request for help is essential to being good at being new at a thing.
  • +
+

The Checklist

The Go-Around

    +
  • sometimes, despite our best efforts, we need to start over
  • +
  • this isn't exceptional -- it's an entirely normal maneuver

    +
  • +
  • Your code is not a reflection of you. It isn’t a reflection of your beliefs, your upbringing, or your ability to be a good person. Your code is [...] a reflection of your thinking process at the time that you wrote it. - @rockbot

    +
  • +
+

Trust Your Instruments

    +
  • translation: learn how to tell what's going on when you don't know what's going on
  • +

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/for-only-25-more-this-gift-card-comes-in-green.html b/www/blog/for-only-25-more-this-gift-card-comes-in-green.html new file mode 100644 index 0000000..0c4d580 --- /dev/null +++ b/www/blog/for-only-25-more-this-gift-card-comes-in-green.html @@ -0,0 +1,132 @@ +For only $25 more, this gift card comes in green

rmurphey adventures in javascript

For only $25 more, this gift card comes in green

I was just at the online Apple store to buy an iTunes gift card for my mother (who hopefully will not be reading this blog) -- when you arrive from iTunes, it drops you on a page where you can choose from a variety of gift card designs. On this screen, it seems that there is a 1:1 relationship between colors and denominations -- for example, the $25 card only comes in red.

+

I picked my denomination, and was taken to a page where I could enter a personalized message. Below that, I was offered the opportunity to "change theme":

+
+ +
+ + +

I wasn't much of a fan of the red, so I clicked to choose the green, entered my personalized message, and clicked add to cart -- and only then did I find out that changing the theme also changed my order to a more expensive card. That's either extremely poor design, or extremely sketchy.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/functionality-focused-code-organization.html b/www/blog/functionality-focused-code-organization.html new file mode 100644 index 0000000..f0a1ff6 --- /dev/null +++ b/www/blog/functionality-focused-code-organization.html @@ -0,0 +1,170 @@ +Functionality-Focused Code Organization

rmurphey adventures in javascript

Functionality-Focused Code Organization

I spoke yesterday at the 2010 Boston jQuery Conference about +"Functionality-Focused Code Organization." It was a presentation that +summarized a year of thinking about organizing JavaScript applications, and I +think it went fairly well. It was a treat to see so many people nodding in +understanding and agreement out in the audience, and to talk to people +afterwards who have thought about and struggled with these same problems.

+
    +
  • Slides (Note +that you can download a PDF there if you'd prefer not to view the slides on +SlideShare)
  • +
+

I had one person come up to me afterwards who really left an impression -- I +don't recall his name (I am perfectly terrible with names), but what he said +was that he came from a background of developing desktop applications, and that +the concepts I talked about in fact made perfect sense to him. I think this is +important to remember -- the ideas that I talked about aren't new or novel at +all, but really just long-established patterns of developing applications, +adapted to fit in the world of JavaScript and the browser. I don't come from a +CS background at all -- I came to this whole JavaScript thing via HTML and CSS, +with a tiny smattering of programming mixed in -- and so I've arrived at a lot +of these things the hard way. It's gratifying to hear people tell me I've gotten +it right, and if anything, it makes me want to get the word out to all the +people who are in the same shoes as I was not that long ago.

+

Finally: A big thanks to the jQuery team, and especially Leah Silber, for +putting on the conference. I know it's hard work, and I know there were some +bumps (there always are!), but as a speaker, I feel like I've been treated very +well, which means a lot. Thanks, guys.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/getting-better-at-javascript.html b/www/blog/getting-better-at-javascript.html new file mode 100644 index 0000000..01b6425 --- /dev/null +++ b/www/blog/getting-better-at-javascript.html @@ -0,0 +1,211 @@ +Getting Better at JavaScript

rmurphey adventures in javascript

Getting Better at JavaScript

I seem to be getting a lot of emails these days asking a deceptively simple +question: “How do I get better at JavaScript?” What follows are some +semi-random thoughts on the subject:

+

The thing that I’ve come to realize about these questions is that some things +just take time. I wish I could write down “Ten Things You Need to Know to Make +You Amazing at the JavaScript,” but it doesn’t work that way. Books are +fantastic at exposing you to guiding principles and patterns, but if your brain +isn’t ready to connect them with real-world problems, it won’t.

+

The number one thing that will make you better at writing JavaScript is writing +JavaScript. It’s OK if you cringe at it six months from now. It’s OK if you +know it could be better if you only understood X, Y, or Z a little bit better. +Cultivate dissatisfaction, and fear the day when you aren’t disappointed with +the code you wrote last month.

+

Encounters with new concepts are almost always eventually rewarding, but in the +short term I’ve found they can be downright demoralizing if you’re not aware of +the bigger picture. The first step to being better at a thing is realizing you +could be better at that thing, and initially that realization tends to involve +being overwhelmed with all you don’t know. The first JSConf, in 2009, was +exactly this for me. I showed up eager to learn but feeling pretty cocky about +my skills. I left brutally aware of the smallness of my knowledge, and it was a +transformational experience: getting good at a thing involves seeking out +opportunities to feel small.

+

One of the most helpful things in my learning has been having access to smart +people who are willing to answer my questions and help me when I get stuck. +Meeting these people and maintaining relationships with them is hard work, and +it generally involves interacting with them in real life, not just on the +internet, but the dividends of this investment are unfathomable.

+

To that end, attend conferences. Talk to the speakers and ask them questions. +Write them emails afterwards saying that it was nice to meet them. Subscribe to +their blogs. Pay attention to what they’re doing and evangelize their good +work.

+

Remember, too, that local meetups can be good exposure to new ideas too, even +if on a smaller scale. The added bonus of local meetups is that the people +you’ll meet there are … local! It’s easy to maintain relationships with them +and share in learning with them in real life.

+

(An aside: If your company won’t pay for you to attend any conferences, make +clear how short-sighted your company’s decision is and start looking for a new +job, because your company does not deserve you. Then, if you can, cough up the +money and go anyway. As a self-employed consultant, I still managed to find +something like $10,000 to spend on travel- and conference-related expenses last +year, and I consider every penny of it to be money spent on being better at +what I do. When I hear about big companies that won’t fork over even a fraction +of that for an employee who is raising their hand and saying “help me be better +at what I do!”, I rage.)

+

Make a point of following the bug tracker and repository for an active +open-source project. Read the bug reports. Try the test cases. Understand the +commits. I admit that I have never been able to make myself do this for +extended periods of time, but I try to drop in on certain projects now and then +because it exposes me to arbitrary code and concepts that I might not otherwise +run into.

+

Read the source for your favorite library, and refer to it when you need to +know how a method works. Consult the documentation when there’s some part of +the source you don’t understand. When choosing tools and plugins, read the +source, and see whether there are things you’d do differently.

+

Eavesdrop on communities, and participate when you have something helpful to +add. Lurk on a mailing list or a forum or in an IRC channel, and help other people +solve problems. If you’re not a help vampire — if you give more than you take — +the “elders” of a community will notice, and you will be rewarded with their +willingness to help you when it matters.

+

Finally, books:

+
    +
  • JavaScript: The Good Parts, by Douglas Crockford. It took me more than one +try to get through this not-very-thick book, and it is not gospel. However, +it is mandatory reading for any serious JavaScript developer.
  • +
  • +Eloquent JavaScript, Marijn Haverbeke (also in print). This is another book +that I consider mandatory; you may not read straight through it, but you +should have it close at hand. I like it so much that I actually bought the +print version, and then was lucky enough to get a signed copy from Marijn at +JSConf 2011.
  • +
  • JavaScript Patterns, by Stoyan Stefanov. This was the book that showed me +there were names for so many patterns that I’d discovered purely through +fumbling around with my own code. I read it on the flight to the 2010 Boston +jQuery Conference, and it’s definitely the kind of book that I wouldn’t have +gotten as much out of a year earlier, when I had a lot less experience with +the kinds of problems it addresses.
  • +
  • Object-Oriented JavaScript, by Stoyan Stefanov. It’s been ages since I read +this book, and so I confess that I don’t have a strong recollection of it, +but it was probably the first book I read that got me thinking about +structuring JavaScript code beyond the “get some elements, do something with +them” paradigm of jQuery.
  • +
+

Good luck.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/getting-non-developers-on-the-subversion-bandwagon.html b/www/blog/getting-non-developers-on-the-subversion-bandwagon.html new file mode 100644 index 0000000..6ac169c --- /dev/null +++ b/www/blog/getting-non-developers-on-the-subversion-bandwagon.html @@ -0,0 +1,236 @@ +Getting non-developers on the subversion bandwagon

rmurphey adventures in javascript

Getting non-developers on the subversion bandwagon

Marcus turned me on to subversion when I was working on a project earlier this year and wanted a way to revisit old versions of files. Setting up a subversion repository was easy, and while I did download the Tortoise GUI for Windows, I found the command line was pretty easy too.

+

A few months later, we were wrestling at work with an issue I've confronted at every job I've had: how to maintain a filesystem that ...

+
    +
  • allows everyone to have access to any file they might need
  • +
  • promotes the concept that files are associated with a client, not with a person
  • +
  • facilitates regular and thorough backups
  • +
  • provides versioning more sophisticated than multiple files with names like project_outline_rev2.html +
  • +
  • does all this without intimidating the hell out of people who aren't on good terms with the command line
  • +
+ +

It wasn't a typical use for subversion, which is more normally (in my mind) used for version control with straight-up development projects, but the more I thought about it and talked about it with people who had used subversion, the more it seemed to make sense. We'd be using it to manage an infinite number of concurrent projects related to maintaining a large client's web content, and that meant we'd have to agree on a directory structure beyond the standard branches/tags/trunk. For it to work at all, I was also going to have to convince people -- some of whom were scarred by cvs -- to actually use it. I braced for a full-fledged campaign: sure, it's more complex than no system at all, I would argue, but writing HTML is more complex than writing in Notepad, too, and we seem to handle that OK, right?

+

As it turned out, my campaign lasted all of a day, and wasn't much of a campaign at all. One designer suggested using Dreamweaver or VersionCue to address these concerns, to which I said:

+
one of the things i like about subversion (svn) is that it's a platform/software agnostic tool. if we used dw for site management, everyone would have to use dw -- for the homepage update i just did, i hardly touched dreamweaver, preferring to do most everything in vim/command line because it was just so much easier for the sort of work i was doing.

i know that at least on the pc side, there is a subversion extension for dreamweaver. i also know on the pc side there's a pretty decent gui svn client. i imagine, since macs are getting pretty popular with devs, that there's something similar on that side.

i don't mean to shoot down dw/vc out of hand, but i think that it would introduce some real limitations vs svn, even though it is tempting because it is a tool we already use. another thing that is great about svn is that it's integrated with the whole trac system that stancil has set up, which also includes a wiki and a ticketing system, all of which are linked together. i see huge potential here in terms of project management, documentation, and file management that we aren't taking advantage of, and that wouldn't be available with a dw solution.

...

it's possible that we don't need svn for everything, but honestly i think it's easy enough to use once you get the hang of it that it would be just as easy as the fileserver approach, with a ton of added benefits. perhaps the only true drawback is the disk space that would be required for versioning psd's and such. but disk space is cheap, and i think the extra storage we'd need would be worth its small cost if we had constant access to everything.
+ +

Our sysadmin already had some repositories set up that weren't getting wide use, so setting up another one was trivial. He added a bunch of disk space in anticipation of the new storage demands (while subversion just stores diffs for text files, it stores full copies of each binary file, and we were going to have a lot of those), and I wrote up some instructions for getting people started:

+

Download and install the subversion binaries

+
    +
  • +for mac (click the download button)
  • +
  • +for pc (choose the latest .exe file)
  • +
+ +

Download the GUI

+ + +

Mac folk: update your path to include svn

+
    +
  • open terminal
  • +
  • cd ~
  • +
  • pico .bash_profile
  • +
  • enter the following: +
    +
    PATH="/usr/local/bin:/usr/local/subversion/bin:$PATH"
    +export PATH
    +
    + +
  • +
  • press CTRL-O to save
  • +
  • restart terminal
  • +
+ +

Establish ssl certification

+
    +
  • open a Terminal window (mac) or Start > Run > cmd.exe (windows)
  • +
  • type svn ls https://www.svnserver.com/repository/trunk/ and hit enter (get the exact URL for your repository; this is just an example)
  • +
  • enter user/password +
      +
    • if your username on your machine is the same as your svn username, enter your password.
    • +
    • otherwise, press enter and you'll be prompted for your svn username. enter it.
    • +
    • enter your svn password.
    • +
    +
  • +
+ +

Getting started and basic workflow

+I sat down with a group of initial users to do the initial checkout of the repository and show them the basic workflow. TortoiseSVN makes things especially easy, while scplugin is a bit more rudimentary. Here's the workflow I explained (a command-line sketch of the same steps follows the list):

+
    +
  • +update a folder before you begin work in it; to save time, just update the specific folder, not the whole repo
  • +
  • make your changes
  • +
  • +add any new folders and files you create to the repo -- they won't be added automatically! +
  • +
  • if you change the structure of a dir, or remove a file, you have to do it via svn!
  • +
      +
    • +rename to change a file name or move a file
    • +
    • +delete to remove a file
    • +
    + +

  • +update folder prior to commit to receive others' changes
  • +

  • +commit (check in) whenever you reach a milestone or stop work. enter a message with your commit to indicate what you did.
  • +

+
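
For those more comfortable in a terminal, the same workflow looks roughly like this -- just a sketch, with made-up folder names and a made-up commit message (your repository layout and URL will differ):

svn update client_site/products            # update just the folder you'll be working in
# ... make your changes ...
svn add client_site/products/specials.html # new files and folders aren't added automatically
svn rename about.html about-us.html        # renames and moves have to go through svn
svn delete old-promo.html                  # so do deletions
svn update client_site/products            # pick up others' changes before you commit
svn commit -m "updated the specials page"  # commit with a message describing what you did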

File structure

+Subversion is usually used for managing development projects, but we wanted to use it to manage an infinite number of small, concurrent projects related to our work maintaining the content on a large client's web site. The branches/tags/trunk paradigm didn't quite fit here. We ended up keeping the b/t/t setup, but most people just check out the trunk, which contains the following folders:

+
    +
  • +client_site, with a directory structure that mirrors the structure of our client's web site. This is where we keep most of our files. By mirroring the directory structure of the client's web site, we make it easy to find a file if we know the URL of the file on the site.
  • +
  • +projects, for files related to projects that don't fit neatly into the site structure.
  • +
  • +resources, for image templates, HTML templates, fonts, logos, etc.
  • +
+ +

To do

+Because we are constantly creating new files and adding them to the repository, some directories are getting unduly large, making updates take a long time, and forcing users to store a lot of unnecessary files locally once they do an update. We need to work out a way to age files so they are still available and easy to locate, but don't get picked up as part of the update process.

+

Further reading

+

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/how-i-learned-css.html b/www/blog/how-i-learned-css.html new file mode 100644 index 0000000..ff174e1 --- /dev/null +++ b/www/blog/how-i-learned-css.html @@ -0,0 +1,157 @@ +How I learned CSS

rmurphey adventures in javascript

How I learned CSS

I remember when I first tried to understand how to produce designs for the web -- coming from the paper-based world, it was hard for me to accept everything that was suddenly out of my control. When I first tried to grasp CSS with the help of now-defunct Adobe GoLive, I bailed pretty quickly. Table-based layout and font tags didn't make much sense to me either -- why did I have to slice up a page into a bunch of adjoining cells, instead of just drawing independent boxes like I did in Quark?

+

A couple of years later, I decided to try again, motivated by the realization that my eight-years-younger brother seemed to be better at this web stuff than I was. I spent untold hours trying to wrap my brain around the difference between margin and padding and exactly how to get floated elements to bend to my will. I remember the epiphany that one could use left-floated list items for a horizontal menu, or that the right DOCTYPE can force Internet Explorer to behave more like a real browser.

+

These days, I have an honest-to-god job doing this stuff, and every now and then, someone will ask me how they can learn it too. It all makes so much more sense to me than it used to that it's hard to remember how I got here. In the interest of getting this stuff written down for passing along, though, here are a few thoughts:

+

The Tools

+These are things without which the rest is impossible:

+
    +
  • +A text editor: Notepad will do just fine; for a few bucks, you can get TextMate for Mac or the e Text Editor for PC. If you use Dreamweaver, hide everything but the file navigator panel and the code editing view. You will learn nothing from Dreamweaver's "design" view. +
  • +
  • +Firefox: You'll need to test anything you do in Internet Explorer, but first, you'll get it working in Firefox. Whereas Internet Explorer enjoys mocking web standards, Firefox does its best to adhere to them; plus, it has all sorts of extensions that make it easier to troubleshoot your work.
  • +
  • +Firefox Web Developer Toolbar: This has all sorts of useful tools in it, including a real-time CSS editor that opens in the browser's sidebar so you can try changes to your CSS and see the results immediately.
  • +
  • +Firebug: This is most useful for Javascript debugging, but it has some nice features for debugging CSS as well.
  • +
  • +HTML Validator: Incredibly helpful for finding errors in your HTML.
  • +
+ +

(An aside: A few months ago I booted up an old laptop and found a preview release of Firefox 1.0 installed beside a well-worn Internet Explorer 6; when I abandoned the laptop, I was in the process of abandoning IE too. I can't help but wonder how difficult my learning would have continued to be without the arrival of Firefox, which, with the extensions mentioned above, makes it so much more possible to learn all of this stuff in a very tangible, immediate sort of way.)

+

Learning with Firefox

+Once you have the tools above, open Firefox and start with a page someone else built -- like the one you're on right now -- and see what's inside. It pays to be curious about every web page you visit; if you see something interesting, view the source and figure out how it got there. Some tips:

+
    +
  • +Ctrl-U will show you the HTML for a page, and with the HTML Validator extension, you can "clean up" the HTML so it's easier to read.
  • +
  • +Ctrl-Shift-E will open the Web Developer Toolbar CSS editor, which will show you the CSS for the page, with a tab for each CSS file. You can edit the CSS in the editor and see the effects immediately.
  • +
  • +F12 will open Firebug. In the top left of the panel that opens, click the Inspect menu item, then move your mouse back to the page itself and click on an element to find out more about it. You can also click on the HTML tab to view the HTML and expand and collapse sections of it to see the structure of the page. Hovering over an element in the HTML panel will highlight it on the page; clicking on an element will let you find out more about it in the Style and Layout tabs of Firebug. (Firebug is an incredibly powerful tool that you really need to play with to fully appreciate. It's a completely non-destructive tool -- you can't hurt anything with it unless you try really, really hard -- so don't be afraid to click around and see what happens.)
  • +
  • Remember that these days, lots of page elements are built with Javascript rather than with straight HTML. Ctrl-U will show you only the HTML; Firebug will show you the "generated source," including any elements built with Javascript. Firebug also lets you look at the Javascript on a page, which can be helpful when you're trying to understand how something got there. +
  • +
+ +

POSH

+Plain-old semantic HTML. When you go to make a web page, write the simplest HTML you can, and use standard HTML elements whenever humanly possible. Start by creating HTML that represents the actual sections of the page -- header, navigation, sidebar, content, footer -- and give the elements names that say what they are, not where they go. When you think you're done, view the HTML in a browser, without CSS, and see if it makes sense. Then, and only then, open the browser's CSS editor and start styling the elements. See how far you can get without adding any design-related markup to your HTML. If you find yourself writing convoluted HTML or adding purely presentational markup to make something work, it's time to reconsider your approach. Once you have a good stylesheet started, copy it to your text editor and continue working on it there.

+

Strategies

+It helps to give yourself deadlines, even if they're imaginary. I've learned more about HTML, CSS and Javascript in the past 12 months than I learned in the three years before, and I think that's largely because deadlines have forced me to solve problems rather than pondering them.

+

Don't be afraid to do something less than perfectly; there can be value in just getting it done. I constantly look back at things I did three months ago -- let alone three years ago, and sometimes three weeks ago -- and I cringe when I think how differently I would do them today. But half the reason I know what I know now is precisely because I didn't know it then, and I learned it along the way. Understanding the building blocks of the web is an iterative process, and you'll do better if you remember that you cannot know everything you wish you knew.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/i-have-discovered-a-use-for-dreamweaver-s-design-view-no-really.html b/www/blog/i-have-discovered-a-use-for-dreamweaver-s-design-view-no-really.html new file mode 100644 index 0000000..e56ffd5 --- /dev/null +++ b/www/blog/i-have-discovered-a-use-for-dreamweaver-s-design-view-no-really.html @@ -0,0 +1,122 @@ +I have discovered a use for Dreamweaver's design view (no really)

rmurphey adventures in javascript

I have discovered a use for Dreamweaver's design view (no really)

I just got a request to edit a static page that showed several of a company's employees. Turns out that a bunch of them aren't employees anymore, and the client wanted the page to be updated. I knew before I even opened it that the content was going to be in tables (nested nested tables, in fact), and that rearranging it would be a giant pain.

+

Dreamweaver design view to the rescue. This thing was made for tables, and in no time I was able to delete what needed to be deleted and rearrange the remaining employees without leaving empty table cells. I didn't need to navigate nasty table HTML, and I didn't have to worry about accidentally deleting a crucial td that would bring the page to its knees. A quick preview in the browser verified that no harm had been done in my brief foray into design view, and the job was done.

+

Let it be said: Design view is evil and awful; it seeks to make front-end development accessible to people who have no business going anywhere near front-end development. But it's a good tool to have in the toolbox when you're faced with editing a table-based page, even if you just use it to find your bearings in the mess of tags.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/i-need-to-say-a-few-things.html b/www/blog/i-need-to-say-a-few-things.html new file mode 100644 index 0000000..657845a --- /dev/null +++ b/www/blog/i-need-to-say-a-few-things.html @@ -0,0 +1,132 @@ +I need to say a few things.

rmurphey adventures in javascript

I need to say a few things.

Over the weekend, there was a tweet announcing that Google was going to provide "scholarships" to qualified women to attend JSConf.eu. There was then a tweet by another person calling this "disgusting" and "illegal." Nicole Sullivan has a level-headed and well-articulated roundup of the back-and-forth and some of the surrounding issues, and I suggest you read it.

+ +

I take no position on the scholarships. I question whether they will have any meaningful or lasting effect. I fear the availability of the scholarships will lead to ill feelings about the women who do attend. Simultaneously, I yearn to discover, against hope, that they make it possible for some highly qualified but unknown woman to gain access to the JavaScript community. Whatever. Smarter people than me have a better idea than I do as to how effective they will be, and lawyers can tell you whether they're illegal. I'll stand firmly in the "no" camp on the disgusting count.

+ +

You know what's disgusting? Being groped at a conference after-party by a drunk married man. Opening your hotel door to discover said drunk married man stumbling down the hall, asking himself into your room, and literally having to slam the door in his face. Having a video of you posted on the internet, suggesting that you were engaged in a sexual act with the yayQuery logo. Seeing someone ask, publicly, on Twitter, if anyone knows the name of the hot conference chick. That, dear reader, is disgusting.

+ +

I adore my male friends in the tech community. They have encouraged and supported me and welcomed me into their inner circles. But even they can act like 12-year-old boys sometimes, and while I don't begrudge them that, it is hard, because it's at those moments that I realize how much I am not them, how much I long to have more than the barest assembly of female peers who have any idea what this is like. And then I remember: those peers I long for will have to put up with so much shit to be in that cool kid's club, and you know what? If Google wants to pay them a measly few hundred bucks to put up with it, maybe that's OK. Hell, maybe they ought to pay them more. Perhaps, as ham-handed and questionably productive as the scholarships may be, it's only fair to pay women to look the other way when some asshole treats them like a thing instead of a person.

+ +

I am angry. I have been angry since Saturday, when this all started. I have spent the last year trying to be the thing that I want to see: the woman on stage. I have formed groups to encourage other women to do the same. I have reached out to women who show potential and tried to give them the encouraging nudge they need that no one really gave me. And right this very moment, I feel incredibly selfish. This weekend reminded me what I am asking those women to enter into: a world that presents no tangible barriers, but that will objectify them every step of the way. And if these women have the guts -- well, let's be community-appropriate here -- if they have the balls to speak up and say that it is hard to be a woman in this field, that it takes a thick skin and determination and a willingness to be one of the boys even when that's the last thing in the world they want to do, then they should brace for a chorus of men to rise and tell them they are wrong.

+ +

Men, guys, boys: I am not asking you to give up Star Wars and The Matrix. I'm not even asking you to give up gratuitous phallic references and #twss jokes, though I hope we're all grown-up enough to know that there's a time and a place. And you know what? If you want to DM your friend about trying to hook up with that hot conference chick, well, good luck with that. We're all human. But for the love of all that is good: this being a woman in your world thing, it's not easy, OK? Maybe you can't understand it, and I even believe it when you say you don't mean it. But when you deny it, you just look like an ass.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/in-case-you-thought-ie6-was-going-away.html b/www/blog/in-case-you-thought-ie6-was-going-away.html new file mode 100644 index 0000000..cbc4e63 --- /dev/null +++ b/www/blog/in-case-you-thought-ie6-was-going-away.html @@ -0,0 +1,120 @@ +In case you thought IE6 was going away

rmurphey adventures in javascript

In case you thought IE6 was going away

Microsoft has released yet another version (the third, I think) of their timebombed XP/IE6 image for use with their Virtual PC Console. (Why they keep timebombing it is a mystery to me -- it's as if they think one of these days, approximately six months from whenever they release a new version, there won't be a need for IE6 testing anymore.)

+

I have not a single kind word for IE6, and about the best I can say about IE7 is that a) it sucks less than IE6 and b) some proprietary software I have to use for client work crashes a bit less often in it than in Firefox. That said, both IE6 and IE7 are a fact of life, and it's also a fact of life that Microsoft saw fit to ensure that you can't easily run both side by side -- which is why you have this whole Virtual PC setup. If you can get the Virtual Machine Additions, Shared Networking, and Shared Folders all to work (at the same time, if you're lucky!), then this is a decent kludge to let you test your sites in IE6.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/in-search-of-javascript-developers-a-gist.html b/www/blog/in-search-of-javascript-developers-a-gist.html new file mode 100644 index 0000000..d27b6e0 --- /dev/null +++ b/www/blog/in-search-of-javascript-developers-a-gist.html @@ -0,0 +1,562 @@ +In Search of JavaScript Developers: A Gist

rmurphey adventures in javascript

In Search of JavaScript Developers: A Gist

I posted a gist the other day in an attempt to locate some JavaScript help. I’m finding, lately, that I’m being asked to do more work than I can comfortably take on without having some overflow capacity, but I’ve been a little lax at actually identifying people who can provide that capacity up until now. That meant I was turning down work, and that’s not a good thing.

+ +

I had a great time the last couple of days watching people come up with solutions, and was especially gratified that so many people who weren’t looking for work thought that answering JavaScript questions sounded like fun.

+ +

A lot of people have asked if I’d be providing “answers” to the questions I posed in the gist, so I wanted to try to do that, mixed in with a bit of commentary about what I was looking for.

+ +

Question 1: Writing Readable but Terse Code

+ +

{% codeblock lang:javascript %}
// 1: how could you rewrite the following to make it shorter?
if (foo) {
  bar.doSomething(el);
} else {
  bar.doSomethingElse(el);
}
{% endcodeblock %}

+

Answers to this question told me whether to keep reading; I was looking for something like:

+ +

{% codeblock lang:javascript %}
bar[foo ? 'doSomething' : 'doSomethingElse'](el);

// OR

bar['doSomething' + (foo ? '' : 'Else')](el);
{% endcodeblock %}

+

To be honest, the second one is maybe a little too clever. But generally, I wanted to see submissions that understood using the ternary operator instead of an if statement to create shorter but still-readable code when all that is different is a method name.

+ +

Some people submitted an answer that used a ternary operator, but didn’t take advantage of it to just switch the method name:

+ +

{% codeblock lang:javascript %}
foo ? bar.doSomething(el) : bar.doSomethingElse(el);
{% endcodeblock %}

+

This is an improvement, definitely, but there’s room for more.

+ +

Question 2: Understanding Variable Scope

+ +

{% codeblock lang:javascript %}
var foo = 'hello';

(function() {
  var foo = foo || 'world';
  console.log(foo);
})();
{% endcodeblock %}

+

This was a question where, in hindsight, what I was looking for may not have been super-clear. I got a lot of right-enough answers, but really what I wanted to hear was that the || statement was absurd, because foo would always evaluate to 'world' due to variable hoisting.

+ +

This can be a sort of crazy thing to wrap your head around, but basically JavaScript scans the contents of a function for var statements before it runs the function. Any variable initialized with a var statement inside the function will become local to the function, even if the variable is “used” before it is initialized. Changing the order of the two lines inside the function shows this readily:

+ +

{% codeblock lang:javascript %}
var foo = 'hello';

(function() {
  console.log(foo); // undefined!
  var foo = foo || 'world';
})();
{% endcodeblock %}

+

Some submissions thought I wanted access to the external foo inside the closure — not an unreasonable interpretation of the question.

+ +

{% codeblock lang:javascript %}
var foo = 'hello';

(function(f) {
  var foo = f || 'world';
  console.log(foo);
})(foo);
{% endcodeblock %}

+

Anyway, there were lots of right-enough answers, but anyone who talked about hoisting definitely caught my eye.

+ +

Question 3: Working with Objects and Prototypes

+ +

This question was looking for really basic understanding of prototypes. It also was written to be a tad hard to follow, lumping all the questions into a single paragraph, to simulate a not-atypical client request.

+ +

{% codeblock lang:javascript %}
// 3: given the following code, how would you override the value of the
// bar property for the variable foo without affecting the value of the
// bar property for the variable bim? how would you affect the value of
// the bar property for both foo and bim? how would you add a method to
// foo and bim to console.log the value of each object's bar property? how
// would you tell if the object's bar property had been overridden for the
// particular object?
var Thinger = function() {
  return this;
};

Thinger.prototype = {
  bar : 'baz'
};

var foo = new Thinger(),
    bim = new Thinger();
{% endcodeblock %}

+

The good submissions broke the question down into separate comments, and then showed the answers:

+ +

{% codeblock lang:javascript %}
// override the bar prop for foo w/o affecting bim
foo.bar = 'new value';

// change the bar prop for both foo and bim
// (if it hasn't been overridden locally!)
Thinger.prototype.bar = 'another new value';

// we could delete foo.bar now and it would get
// the prototype value instead
// delete foo.bar;

// add a method to foo and bim to log bar
Thinger.prototype.logger = function() {
  console.log(this.bar);
};

// check if bar has been overridden
foo.hasOwnProperty('bar'); // true
bim.hasOwnProperty('bar'); // false
{% endcodeblock %}

+

Question 4: Iterating over Objects

+ +

This one was pretty basic:

+ +

{% codeblock lang:javascript %}
// 4: given the following code, and assuming that each defined object has
// a 'destroy' method, how would you destroy all of the objects contained
// in the myObjects object?
var myObjects = {
  thinger : new myApp.Thinger(),
  gizmo : new myApp.Gizmo(),
  widget : new myApp.Widget()
};
{% endcodeblock %}

+

Really I just wanted to see people iterate over an object without the use of a helper like jQuery.each. The hasOwnProperty check may seem like overkill, but I was glad when people didn’t leave it out. Adding in the delete statement was another nice touch, though not strictly required by the question.

+ +

{% codeblock lang:javascript %}
for (var obj in myObjects) {
  if (myObjects.hasOwnProperty(obj)) {
    myObjects[obj].destroy();
    delete myObjects[obj];
  }
}
{% endcodeblock %}

+

Question 5: Solving Deceptively Simple Problems

+ +

This question was probably the most fun, because even though it was a dead-simple task, the answers were all over the map. This was the question:

+ +

{% codeblock lang:javascript %}
// 5: given the following array, create an array that contains the
// contents of each array item repeated three times, with a space between
// each item. so, for example, if an array item is 'foo' then the new
// array should contain an array item 'foo foo foo'. (you can assume the
// library of your choice is available)
var myArray = [ 'foo', 'bar', 'baz' ];
{% endcodeblock %}

+

Rather than going through the different answers one at a time, I’m just going to tell you to visit this JSPerf test page to see some of the variations, and their relative performance.

+ +
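
For what it's worth, here's a minimal sketch of one way to approach it -- not one of the submitted answers, just an illustration, assuming jQuery is the library of choice:

{% codeblock lang:javascript %}
var repeated = $.map(myArray, function(item) {
  // join three copies of the item with a space between each
  return [ item, item, item ].join(' ');
});

// repeated is [ 'foo foo foo', 'bar bar bar', 'baz baz baz' ]
{% endcodeblock %}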

This brings up a good question tweeted by Ryan Florence:

+ +

T | F – In #JavaScript, 90% of the time we do stuff only a few times, maybe hundreds. Therefore, 90% of the time Readability > Performance.

+ +

I tend to come down on the side of readability and compression over straight-up perf, precisely because we’re rarely doing anything that’s actually that intensive. On the other hand, we should avoid doing things that are outright stupid; where the line gets drawn depends a lot, I think, on experience.

+ +

Here’s the thing, though: Something that seems like a gratuitous and obscure optimization to a less experienced developer might seem completely readable and obvious to a more experienced developer. How to balance this? Can comments bridge the gap? Should the gap be bridged? I dunno.

+ +

Question 6: Basic jQuery Best Practices and DRY

+ +

I see way too much code in real life that looks like this question.

+ +

{% codeblock lang:javascript %}
// 6: how could you improve the following code?
$(document).ready(function() {
  $('.foo #bar').css('color', 'red');
  $('.foo #bar').css('border', '1px solid blue');
  $('.foo #bar').text('new text!');
  $('.foo #bar').click(function() {
    $(this).attr('title', 'new title');
    $(this).width('100px');
  });

  $('.foo #bar').click();
});
{% endcodeblock %}

+

There are a slew of things wrong in this tiny snippet. First and foremost, making the same selection repeatedly suggests that the author fundamentally doesn’t understand what their code is doing, or the expense they’re incurring in doing it; the selection should be made once, and then the selection should be cached and/or the methods should be chained.

+ +

While it wasn’t imperative, submitters did well to point out that CSS changes should be made via class names instead of hard-coded CSS in JavaScript; they also did well to put the click handler in a named function. Finally, while there may be cases where an ID selector needs to be prefixed by a class, i.e. .foo #bar, I appreciated it if people questioned this.

+ +

{% codeblock lang:javascript %}
$(document).ready(function() {
  var handleClick = function(el) {
      el.attr('title', 'new title')
        .width('100px');
    },

    bar = $('#bar')
      // ideally: use a class for this
      .css({
        color : 'red',
        border : '1px solid blue'
      })
      .text('new text!')
      .click(function(e) {
        handleClick($(e.target));
      });

  handleClick(bar);
});
{% endcodeblock %}

+

Question 7: Asynchronicity

+ +

This is a pretty newbie thing, but I wanted to make sure people understood the basic concept of async requests — that is, you can’t set the value of a variable inside an XHR’s callback and expect that value to be available immediately.

+ +

{% codeblock lang:javascript %}
(function() {
  var foo;

  dojo.xhrGet({
    url : 'foo.php',
    load : function(resp) {
      foo = resp.foo;
    }
  });

  if (foo) {
    // run this important code
  }
})();
{% endcodeblock %}

+

Fixing this just involves waiting for the XHR to complete before running the code that expects foo to be set. (Alternately, you could make the request run synchronously by setting sync : true in the XHR config object.)

+ +

There was one other issue with this code as well: Dojo needs to know the response should be handled as JSON, else it will handle it as text. If a submitter missed this, I didn’t hold it against them — Pete Higgins actually had to point it out to me :) That said, it would become pretty obvious pretty quickly in real code.

+ +

{% codeblock lang:javascript %}
(function() {
  dojo.xhrGet({
    url : 'foo.php',
    handleAs : 'json'
  })
  .addCallback(function(resp) {
    if (resp && resp.foo) {
      // do stuff
    }
  });
})();
{% endcodeblock %}

+

Note that the callback function could also be specified in the XHR config object using the load property; Dojo’s XHRs are great in that while you can specify everything in a config object, you can also attach callbacks to the return value of the XHR methods. You should read more about this because it is very pleasant.
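
For completeness, the same fix with the callback specified in the config object would look something like this (a sketch, reusing the same url and handleAs assumptions as above):

{% codeblock lang:javascript %}
(function() {
  dojo.xhrGet({
    url : 'foo.php',
    handleAs : 'json',
    // the callback can also live right in the config object
    load : function(resp) {
      if (resp && resp.foo) {
        // do stuff
      }
    }
  });
})();
{% endcodeblock %}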

+ +

Question 8: DRY

+ +

Repetitive code is dumb.

+ +

{% codeblock lang:javascript %}
// 8: how could you rewrite the following code to make it shorter?
(function(d, $){
  $('li.foo a').attr('title', 'i am foo');
  $('li.bar a').attr('title', 'i am bar');
  $('li.baz a').attr('title', 'i am baz');
  $('li.bop a').attr('title', 'i am bop');
})(dojo, dojo.query);
{% endcodeblock %}

+

How far you want to go with DRYing this out is debatable, but to me this cries out for improvement. Here’s what I’d do:

+ +

{% codeblock lang:javascript %}
(function(d, $){
  d.forEach(['foo', 'bar', 'baz', 'bop'], function(c) {
    $('li.' + c + ' a').attr('title', 'i am ' + c);
  });
})(dojo, dojo.query);
{% endcodeblock %}

+

I’d be lying if I didn’t mention that I also wanted to show here how easy it is to make Dojo look like jQuery. After all, it’s just JavaScript, right?

+ +

Question 9: DOM Manipulation Best Practices & DRY

+ +

I thought it was well-known that we don’t append 202 things to the DOM one at a time; the good news is, most people did know this. The bad news is, some people did not.

+ +

In addition to doing 202 appends, this code also does 202 selections. To top it off, the iterator i is global because we didn’t prefix it with var.

+ +

{% codeblock lang:javascript %}
// 9: how would you improve the following code?
for (i = 0; i <= 100; i++) {
  $('#thinger').append(
    '<p>i am thinger ' + i + '</p>'
  );
  $('#gizmo').append(
    '<p>i am gizmo ' + i + '</p>'
  );
}
{% endcodeblock %}

Here’s a fix:

+ +

{% codeblock lang:javascript %}
var thingerDom = [], gizmoDom = [],
    tpl = '<p>i am %s %i</p>',
    tplFn = function(str, i) {
      return tpl.replace(/%s/g, str).replace(/%i/g, i);
    },
    i;

for (i = 0; i <= 100; i++) {
  thingerDom.push(tplFn('thinger', i));
  gizmoDom.push(tplFn('gizmo', i));
}

$('#thinger').append(thingerDom.join(''));
$('#gizmo').append(gizmoDom.join(''));
{% endcodeblock %}

+

There’s more that could be done here to DRY this out a bit more, but the fix addresses the main problem of excessive DOM manipulation.

+ +

Question 10: Loose Typing

+ +

Numbers in JavaScript suck, especially when the user enters them.

+ +

{% codeblock lang:javascript %}
// 10: a user enters their desired tip into a text box; the baseTotal,
// tax, and fee values are provided by the application. what are some
// potential issues with the following function for calculating the total?
function calculateTotal(baseTotal, tip, tax, fee) {
  return baseTotal + tip + tax + fee;
}
{% endcodeblock %}

+

How you’d actually deal with this problem would probably depend on the business logic of your application; you may be well-advised to convert everything to integers instead of trying to deal with decimals, because math with floats in JavaScript can have issues.
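
(If the floating-point issue is unfamiliar, the canonical example is worth keeping in mind:)

{% codeblock lang:javascript %}
0.1 + 0.2;           // 0.30000000000000004
0.1 + 0.2 === 0.3;   // false

// doing the math in whole cents sidesteps the problem
10 + 20 === 30;      // true
{% endcodeblock %}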

+ +

What I wanted to see in submissions, though, was an awareness that the tip would come to us as a string, and we couldn’t just add it to the other arguments and expect a useful result. I was more interested in the discussion of this problem, and other problems that could arise, but here’s at least the beginning of a solution:

+ +

{% codeblock lang:javascript %}
function calculateTotal(baseTotal, tip, tax, fee) {
  // convert the tip to a number using base 10;
  // allow for a NaN result from parseFloat
  tip = parseFloat(tip) || 0;

  // don't allow a negative tip
  if (tip < 0) { tip = 0; }

  return baseTotal + tip + tax + fee;
}
{% endcodeblock %}

+

Question 11: Array Mapping

+ +

For this question, I was looking for code that used a map method to return an array by running a function on each item in the array. Some people used a forEach method to iterate over the array instead, and then pushed the results to a new array they created. I guess this is OK, but it’s not my preference.

+ +

{% codeblock lang:javascript %}
// 11: given the following data, write code that returns an array
// containing the name of each item, followed by a comma-separated list of
// the item's extras, if it has any. e.g.
//
// [ "Salad (Chicken, Steak, Shrimp)", ... ]
//
// (you can assume the library of your choice is available)
var menuItems = [
  {
    id : 1,
    name : 'Salad',
    extras : [
      'Chicken', 'Steak', 'Shrimp'
    ]
  },
  {
    id : 2,
    name : 'Potato',
    extras : [
      'Bacon', 'Sour Cream', 'Shrimp'
    ]
  },
  {
    id : 3,
    name : 'Sandwich',
    extras : [
      'Turkey', 'Bacon'
    ]
  },
  {
    id : 4,
    name : 'Bread'
  }
];
{% endcodeblock %}

+

Here’s an answer:

+ +

{% codeblock lang:javascript %}
var newArray = dojo.map(menuItems, function(item) {
  var ret = item.name;
  if (item.extras && item.extras.length) {
    ret += ' (' + item.extras.join(', ') + ')';
  }
  return ret;
});
{% endcodeblock %}

+

Bonus 1: Functional Programming 101

+ +

This was a late addition, courtesy of Andrew Hedges, so not everyone saw it:

+ +

{% codeblock lang:javascript %}
// BONUS: write code such that the following alerts "Hello World"
say('Hello')('World');
{% endcodeblock %}

+

I wanted to see people understand that functions could return other functions, and that the returned function has access to the scope of the wrapper function:

+ +

{% codeblock lang:javascript %}
var say = function(first) {
  return function(second) {
    alert(first + ' ' + second);
  }
};
{% endcodeblock %}

+

Some people got pleasantly carried away with this question; check out this JSFiddle from Colin Snover.

+ +

Bonus 2: Attention to Detail

+ +

This last bonus was riddled with errors, including some that I made accidentally when I wrote it at 2 a.m.

+ +

{% codeblock lang:javascript %}
// BONUS: what is the faulty logic in the following code?
// how would you fix it?
var date = new Date(),
    day = date.getDate(),
    month = date.getMonth(),
    dates = [];

for (var i = 0; i <= 5; i++) {
  dates.push(month + '/' + (day + i));
}

console.log('The next five days are ', dates.join(', '));
{% endcodeblock %}

+

Here’s what you should see:

+ +
    +
  • The for loop will return 6 dates, not 5.
  • +
  • The method for calculating the date for each successive date could end up with nonexistent dates (32, 33, etc.), and it doesn’t change the month when it should.
  • +
  • The getMonth method on the date object returns a zero-indexed month.
  • +
+ + +

Here’s a fix:

+ +

{% codeblock lang:javascript %}
(function() {
  var date = new Date(),
      otherDate = new Date(),
      day = date.getDate(),
      future = 5,
      dates = [],
      newMonth, newDay,
      i;

  for (i = 1; i <= future; i++) {
    otherDate.setDate(day + i);
    newMonth = otherDate.getMonth() + 1;
    newDay = otherDate.getDate();
    dates.push(newMonth + '/' + newDay);
  }
})();
{% endcodeblock %}

+

Postscript

+ +

I want to be really clear that I’m not some super-awesome and infallible JavaScript developer, and more to the point, there was a time in the not-too-distant past where I would have failed my own quiz miserably. While my main goal in putting this together was to find some skilled developers to help me out, I also wanted to provide a tool for exposing up-and-coming developers to some slightly more advanced concepts of JavaScript. I hope that, whatever your skill level, you found it to be at least entertaining, and at best, useful. I also hope you’ll forgive any gross errors I’ve made in the answers above, though I tried really hard to test them all.

+ +

Post-Postscript

+ +
    +
  • To the person who complained that I included code from a library other than jQuery, and who helpfully illustrated their point with the graph that shows jQuery is obviously better than anything that ever was: I’m not sure whether to laugh or cry or just be really snarky. I’m looking for JavaScript developers. If you don’t see the value in knowing more than just jQuery, and if you can’t find your way through relatively trivial non-jQuery code, you need not apply.
  • +
  • If you found the questions vague and requiring a lot of assumptions or guesswork, well, welcome to consulting — if such things make you uncomfortable, we’re not going to be a good fit. I was looking for people to tell me what they know, to impress me, to point out where they saw holes in the questions, to take initiative. This was not a multiple choice test; it was an interview.
  • +
+ + +

License

+ +

If you think the quiz would be useful to you as you’re looking for a JavaScript developer to call your own, it’s licensed under the WTFPL, which you should read just because it’s funny.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/inaugural-north-carolina-jquery-camp.html b/www/blog/inaugural-north-carolina-jquery-camp.html new file mode 100644 index 0000000..2c6f8d6 --- /dev/null +++ b/www/blog/inaugural-north-carolina-jquery-camp.html @@ -0,0 +1,134 @@ +Inaugural North Carolina jQuery Camp

rmurphey adventures in javascript

Inaugural North Carolina jQuery Camp

I'm just back from the inaugural North Carolina jQuery Camp at Viget Labs in Durham, N.C., and a couple of people have asked how it went, so I thought I'd write a quick post. I had a whim a few weeks ago to organize the camp; I envisioned an unstructured day where fellow jQuery developers could get together and talk about how they use the library. I figured that since the jQuery Conference had sold out and had a huge waiting list, getting a couple dozen people together on a Saturday in Durham, N.C. wouldn't be that hard.

+

We had around 25 people show up to the camp today, from novices to experts, including Scott Gonzalez, a contributor to the jQuery UI library. True to my (utter lack of) vision, it was a very unstructured day, but productive and fun I think. My only experience with unconferences was at BarCamp RDU just a few weeks ago, so it took me a bit to get into full go-with-the-flow mode, but when I didn't know what to do, I just asked "what do I do now?" and usually someone would tell me.

+

We started out by writing some topic ideas up on the wall, and quickly had enough to get started. I split off with the novices to give an intro to the library, while the main room dug into the topics that had been suggested, starting with a talk by Scott about stateful plugins. Up next was Brian Landau from Viget, showing off the code for his mapping plugin and giving an overview of ScrewUnit; then, David Eisinger, also of Viget, showed us some simple strategies for improving perceived performance.

+

Lunch -- made possible by the generosity of Rich Orris, FireStream Media, Ignite Social Media, and DesignHammer -- was time for informal conversations and demonstrations. Here's Scott and someone whose name I don't remember doing some quality whiteboarding:

+
+ + +

We came back from lunch with a reprise of my presentation from the jQuery Conference about using objects to organize your code, minus FAST FORWARD but otherwise largely intact. I was grateful to have more than 30 minutes this time, and we ended up having some good conversation about code organization in general.

+

From there it was on to some great show-and-tell -- people are doing excellent things with the library, and doing them with ease -- and then Scott wrapped up the day with an overview of jQuery UI. Probably the biggest hit of the day was Scott's "just one more thing ..." moment, when he showed us a whole new API for using the position method as a setter, coming soon to a plugin near you. Lots of oohs and aahs about that one.

+

I said at the end of the day that this first camp was really just a proof of concept -- yes, I can get 25 people to show up to talk about jQuery. I'm hoping to do another jQuery camp in January, perhaps. There are a few things I'd like to do differently next time. For one, I'd like to be a little bit more intentional about having more than one session that's suitable for beginners -- a lot of the presentations were super-interesting, but way over the heads of people just getting started.

+

Also, there were a number of people who signed up who didn't make it, which is a shame because I ordered food expecting a larger turnout. I'd asked people to let me know if they couldn't make it, but alas only a handful did. Next time, I think I'll charge a token amount to attend -- say $10 or so -- so people will feel a bit more committed. If they don't make it, at least their food will be paid for! Charging a few dollars will also help reduce the need for sponsors -- not that I don't love the sponsors, just that it was a bit of work and stress to line them up.

+

Finally, next time I'd like to be a little bit more intentional about setting up and promoting the event. This time around, I started promoting it before I even had a venue, and shortly after I secured a venue (thank you Viget!) all the slots were filled. Next time around, I might approach that a little bit differently, especially if I can line up a few different venue options.

+

That's my report. For more pictures, visit Flickr.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/insert-jquery-into-any-page-using-a-bookmarklet.html b/www/blog/insert-jquery-into-any-page-using-a-bookmarklet.html new file mode 100644 index 0000000..2e1dd94 --- /dev/null +++ b/www/blog/insert-jquery-into-any-page-using-a-bookmarklet.html @@ -0,0 +1,126 @@ +Insert jQuery into any page using a bookmarklet

rmurphey adventures in javascript

Insert jQuery into any page using a bookmarklet

Ever wish you could use jQuery on a page at the drop of a hat, without having access to the page itself? Well I'll be damned, you can.

+

(This is one of those things that in hindsight seems so painfully obvious that I'm almost embarrassed to admit that it never occurred to me, but admit it I will.)

+

For those working with jQuery 1.2.1, here is an updated Insert jQuery 1.2.1 bookmarklet, and here is the code for the bookmarklet in its entirety:

+

{% codeblock lang:javascript %}
+javascript:void(function(){var s=document.createElement('script'); s.src='http://code.jquery.com/jquery-1.2.1.js'; document.getElementsByTagName('head')[0].appendChild(s);}())
+{% endcodeblock %}

+

I came across this in a recent post at Morethanseven, but the Simon Willison post is from August, so this is probably a bit of old news. I'm still excited though.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/introducing-yayquery-a-jquery-podcast.html b/www/blog/introducing-yayquery-a-jquery-podcast.html new file mode 100644 index 0000000..7d370a3 --- /dev/null +++ b/www/blog/introducing-yayquery-a-jquery-podcast.html @@ -0,0 +1,137 @@ +Introducing yayQuery -- A jQuery podcast

rmurphey adventures in javascript

Introducing yayQuery -- A jQuery podcast

+

+

+

yayQuery 1.0 from yayQuery on Vimeo.

+

If you'd like a download: +mp3 audio (30mb), mp4 video (94mb), ogg video (61mb), Vimeo

+

In this our first episode of the official yayQuery Podcast, Paul Irish, Adam J. Sontag, Alex Sexton and I stayed up way too late on Monday night and had ridiculous amounts of fun talking about:

+
    +
  • +Underscore.js, the new functional programming JavaScript library.
  • +
  • The demise of Thickbox (and some good, modern alternatives).
  • +
  • Using (or not using) jQuery for mobile development.
  • +
  • Paul Irish's antipattern of the week: css(key, value)
  • +
  • $var vs. var (Hungarian Notation)
  • +
+ +

Perhaps because it was so late when we finished, there was also chair dancing. Make sure you don't miss it, but don't fast-forward to the end or you'll miss the good stuff.

+

This is our first try with this, and who knows what will become of it, but we're very grateful for any and all feedback. You can find us on Twitter @yayQuery, or on the #jquery IRC channel. Enjoy!

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/jquery-breakout-women-and-conferences.html b/www/blog/jquery-breakout-women-and-conferences.html new file mode 100644 index 0000000..099224d --- /dev/null +++ b/www/blog/jquery-breakout-women-and-conferences.html @@ -0,0 +1,225 @@ +jQuery Conference Breakout Session: Women & Conferences

rmurphey adventures in javascript

jQuery Conference Breakout Session: Women & Conferences

I decided to organize a breakout session about women and conferences at the +jQuery Conference in Boston this past +weekend, and I couldn't be happier about how it went -- it was probably one of +the more productive sessions of its sort that I've participated in.

+

My best guess, with the help of some t-shirt size numbers from the most-amazing +Leah Silber, is that the conference +attendees were about 10% women. This sounds a bit terrible, but on the +contrary, looking around the room, I felt like women were in fact more +represented than I'm accustomed to at other JavaScript-centric conferences.

+

In setting up the breakout session, I wanted to do two things: one, applaud the +women who did show up, because they are already part of the solution; two, (at +the risk of sounding a bit lame), call upon them to be the change that so many +of us want to see at tech conferences.

+

I started the session with a small intro/lecture/rant: when we have these +conversations, we're talking about a conference for an open-source project, and +this means that we -- community members -- need to take some ownership of the +problem if we want to see more diversity on stage; it shouldn't just be up to +the project or the conference organizers. I've talked to plenty of jQuery team +members, and the problem is certainly not that they are seeking to exclude +non-white-male speakers, nor that they are insensitive to the issue; to the +contrary, they are incredibly interested in diversifying their lineup, but +they're somewhat at a loss as to where to find people who would be good +speakers. My call to action to the people in attendance was to be part of the +solution.

+

My favorite part about the session was that it didn't turn into a big bitchfest +-- I think there was actually some productive discussion that took place. While +the attendees were mostly women, there was a solid contingent of men. Menno van +Slooten, in particular, was great -- he talked about his experience as a +relative outsider whose boss pressured him into submitting a talk to the Bay +Area jQuery Conference earlier this year. His talk at that conference ended up +being so well received that he was back to speak in Boston.

+

At some point during the session, I realized that John Resig was sitting across +from me. I don't want to sound like a giddy schoolgirl here -- John's just a +person, and I don't want to sound like he graced us with his benevolent +presence -- but I can't tell you how much I think it changed the dynamic of the +discussion to know that it was being heard by such an influential +representative. John answered questions directly and honestly, and there was a +serious sense that he was hearing feedback too -- we weren't just talking +amongst ourselves. His presence dispelled any sense that the project wasn't +interested in the topic; I'd encourage other prominent people who care about +these things to make time to take part in similar conversations.

+

I was kind of amazed to hear how simple the questions were about speaking at a +jQuery conference. I have given great credence to issues of anxiety, shyness, +and perceived inadequacy -- and I don't think these issues have gone away -- +but it never occurred to me that people had really basic questions about the +process of speaking at a conference. This was actually sort of great -- while I +can't singlehandedly get a potential speaker to overcome a sense of anxiety or +inadequacy, there were so many questions that could be answered simply and +directly.

+

To that end, I'm working on a FAQ for potential speakers at future jQuery +conferences, addressing basic questions about process, etc., that I hope the +project will incorporate into future calls for speakers. As a decently +connected member of the jQuery community, I know who to ask when I have these +questions, but I naively didn't realize how inaccessible the answers could seem +to "outsiders." Explaining the answers to basic questions is the sort of easy +but meaningful effort that a conference can make to lower the barriers to new +speakers, women or men. This should be a no-brainer for any conference that +wants its lineup to be interesting, fresh, and diverse.

+

Another idea that we talked about was pairing accepted speakers up with a buddy +of sorts. I have tried to be this buddy, informally, to people who are speaking +for the first time, but I think it could be worthwhile to formalize the +process. New speakers could ask their "buddy" questions about what it's like to +speak, how to prepare, what to expect from the audience, etc. The speaker would +be more prepared and comfortable, and the audience would get a better +presentation.

+

There was a tremendous lack of clarity surrounding what's actually involved in +submitting a talk to a conference. While I'm by no means a grizzled veteran of +the conference circuit -- my first major talk was just a year ago -- my +experience so far is that submitting a talk is mostly about having a +well-articulated idea and some semblance of evidence that you can pull it off. +This, perhaps, is where the process becomes most subjective and biased toward +"insiders" -- people known to give good talks are likely to get the opportunity +to give talks again -- but I think there's plenty of room to overcome any bias. +People who have made a point of participating in the community, people who have +made a point of contributing useful content about their chosen topic, will +generally get the benefit of the doubt. Reaching out to a past speaker, or to +an established community member, can be a great help in shepherding your talk +through the process. Again, I'm hoping that the FAQ can address some of this +low-hanging fruit.

+

Regarding jQuery conferences in particular, John made the very good point that +there is a lot of room for talks that aren't specifically about jQuery. There +were actually lots of these sorts of talks at the conference this weekend -- +talks about TDD, HTML5, object-oriented CSS, CouchDB, and more. One of the +lovely things about the jQuery and JavaScript community is that community +members have a potentially wide area of interest -- any topic related to +front-end development is likely to be relevant to the audience. John pointed +out that talks about these tangential topics may be one of the greatest +opportunities for newcomers to the world of speaking: the speaker is likely to +be the only one presenting on the topic, and can potentially establish +themselves as something of a subject matter expert on the topic.

+

Again, this was one of the more productive conversations I've participated in +on this topic. If you'd like to talk to me about it more, don't hesitate to +drop me an email or a tweet or a comment. If you're considering venturing into +the speaking world, I'm happy to talk with you about that too. Thanks to +everyone who came, and especially to Ruthie BenDor for her support, and I look forward +to continued discussion.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/jquery-build-table-from-json-data.html b/www/blog/jquery-build-table-from-json-data.html new file mode 100644 index 0000000..894f680 --- /dev/null +++ b/www/blog/jquery-build-table-from-json-data.html @@ -0,0 +1,180 @@ +jQuery: Build table from JSON data

rmurphey adventures in javascript

jQuery: Build table from JSON data

Given data like:

+

{% codeblock lang:javascript %}
+var data = {
+  "GUEST" : {
+    'visits' : 1734070,
+    'visits_pct' : 74,
+    'users' : 1,
+    'net_pv' : 3432781,
+    'users_pct' : 0,
+    'pv_pct' : 13
+  },
+  'Logged In' : {
+    'visits' : 4240,
+    'visits_pct' : 0,
+    'users' : 177,
+    'net_pv' : 188112,
+    'users_pct' : 0,
+    'pv_pct' : 0
+  }
+}
+{% endcodeblock %}

+

... a little jQuery ditty to make a table out of the data:

+

{% codeblock lang:javascript %}
+$.each(data, function(rowLabel, v) {
+  if (! header) {
+    $table.append('<thead><tr></tr></thead>');
+    var $thead = $('thead tr', $table);
+    $thead.append('<th></th>');
+    $.each(v, function(headerLabel) {
+      $thead.append('<th>' + headerLabel + '</th>');
+    });
+    header = true;
+  }
+  $table.append('<tr></tr>');
+  var $tr = $('tr:last', $table);
+  $tr.append('<th>' + rowLabel + '</th>');
+  $.each(v, function(j, cellData) {
+    $tr.append('<td>' + cellData + '</td>');
+  });
+});
+{% endcodeblock %}
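
The snippet assumes a jQuery-wrapped target table and a header flag already exist in the surrounding scope; a minimal, hypothetical setup might look like this:

{% codeblock lang:javascript %}
// hypothetical setup assumed by the snippet above: an empty target
// table, and a flag tracking whether the header row has been built yet
var $table = $('<table></table>').appendTo('body'),
    header = false;
{% endcodeblock %}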

+

I'll come back and do something more interesting with this eventually, just wanted to jot it down for now.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/jquery-ie7-operation-aborted-error.html b/www/blog/jquery-ie7-operation-aborted-error.html new file mode 100644 index 0000000..4330b86 --- /dev/null +++ b/www/blog/jquery-ie7-operation-aborted-error.html @@ -0,0 +1,123 @@ +jQuery IE7 "Operation Aborted" error

rmurphey adventures in javascript

jQuery IE7 "Operation Aborted" error

I just got done troubleshooting an issue with a page with some jQuery on it. The jQuery was in a script called from the head of the document, and its job was to rearrange some elements into tabs once the page was loaded.

+

In unpredictable cases (sequential reloads had different results), loading the page in IE7 would lead to an "Operation aborted" error that provided no additional information. Through some googling, I found out that this was probably related to IE choking when some script was run against the DOM before the DOM was fully loaded.

+

What puzzled me was that I was using $.ready() to wrap my jQuery, which was supposed to wait for the DOM, but it didn't seem to matter. Then I found this -- it turns out that interacting with the DOM via javascript from within a table can make IE all sorts of angry, while interacting with the DOM via javascript from within a div is just fine.

+

While my case doesn't directly match the example above, it seems very possible that the table-based template I was writing for was the source of the headaches. Changing the base HTML template for the page I was working on wasn't immediately in the cards, so I split the offending js file into two pieces -- one in the head of the document that contains some basic functions but didn't actually do anything, and another right before the closing body tag that does the actual talking to the DOM -- and that seemed to fix it.
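
A rough sketch of that split, with hypothetical function and element names, might look something like this:

{% codeblock lang:javascript %}
// script loaded from the head: define behavior only, touch nothing yet
function buildTabs() {
  // rearrange the existing markup into tabs, e.g.:
  $('#sections > div').hide().first().show();
}

// tiny script placed just before the closing body tag: the DOM work
// happens here, after the (table-heavy) markup has finished parsing
buildTabs();
{% endcodeblock %}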

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/jquery-style-guide-from-benjamin-sterling.html b/www/blog/jquery-style-guide-from-benjamin-sterling.html new file mode 100644 index 0000000..80e9b46 --- /dev/null +++ b/www/blog/jquery-style-guide-from-benjamin-sterling.html @@ -0,0 +1,120 @@ +jQuery Style Guide from Benjamin Sterling

rmurphey adventures in javascript

jQuery Style Guide from Benjamin Sterling

Just wanted to call attention to two excellent posts by Benjamin Sterling: Better jQuery Code 1 and Better jQuery Code 2. He offers some must-have tips on style, form and best practices. Thanks to @foobar2k for the link.

+

One addition to his notes on caching selections by setting up references: I'm partial to giving references a variable name that starts with a dollar sign, like $links. For me, it serves as a good reminder that the variable is a reference to a jQuery object.
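
A quick, hypothetical example of that convention in practice:

{% codeblock lang:javascript %}
// cache the selection once; the $ prefix flags it as a jQuery object
var $links = $('#nav a');

$links.addClass('decorated');
$links.filter('.external').attr('target', '_blank');
{% endcodeblock %}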

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/jquery-validation-and-tinymce.html b/www/blog/jquery-validation-and-tinymce.html new file mode 100644 index 0000000..06bfb33 --- /dev/null +++ b/www/blog/jquery-validation-and-tinymce.html @@ -0,0 +1,133 @@ +jQuery validation and TinyMCE

rmurphey adventures in javascript

jQuery validation and TinyMCE

Just solved a problem where the jQuery validation plugin wasn't playing so nicely with TinyMCE -- the validation plugin was trying to validate the textarea before TinyMCE had a chance to copy the editor contents back to the textarea. I was about to yank TinyMCE out of the page but a little reading through the TinyMCE docs led me to try this:

+

{% codeblock lang:javascript %}
+$('#mySubmitButton').click(function() {
+  var content = tinyMCE.activeEditor.getContent(); // get the content
+  $('#myTextarea').val(content); // put it in the textarea
+});
+
+$('#myForm').validate();
+{% endcodeblock %}

+

And what do you know, it works. One note: it's important to bind the content replacement to the click event of the submit button, not to the actual form submission, or else the validation may try to run before the content gets copied back to the textarea.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/jquery-validation-indicate-that-at-least-one-element-in-a-group-is-required.html b/www/blog/jquery-validation-indicate-that-at-least-one-element-in-a-group-is-required.html new file mode 100644 index 0000000..82ccc83 --- /dev/null +++ b/www/blog/jquery-validation-indicate-that-at-least-one-element-in-a-group-is-required.html @@ -0,0 +1,143 @@ +jQuery validation: Indicate that at least one element in a group is required

rmurphey adventures in javascript

jQuery validation: Indicate that at least one element in a group is required

I had a need today to indicate that at least one of a set of input fields was required. I was hoping there was a direct way to do this in the jQuery validation plugin; while the method isn't quite as straightforward as I was wishing for, it's still fairly simple.

+

To start with, I put class="required_group" on each of the elements in the group. Then, I added a custom validation method:

+

{% codeblock lang:javascript %}
+jQuery.validator.addMethod('required_group', function(val, el) {
+  var $module = $(el).parents('div.panel');
+  return $module.find('.required_group:filled').length;
+});
+{% endcodeblock %}

+

... a custom class rule to take advantage of the new method:

+

{% codeblock lang:javascript %}
+jQuery.validator.addClassRules('required_group', {
+  'required_group' : true
+});
+{% endcodeblock %}

+

... and finally a custom message for the new method:

+

{% codeblock lang:javascript %}
+jQuery.validator.messages.required_group = 'Please fill out at least one of these fields.';
+{% endcodeblock %}

+

What I'd love to see is a way to specify a dependent group without using a custom class rule, but I'm not sure what this would look like, as all validation rules are either keyed off an element's class or the presence of the element's name in the rules object. Thoughts? I'm open to the possibility that there's a far better way to solve this --

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/js-conditionals.html b/www/blog/js-conditionals.html new file mode 100644 index 0000000..546bc4b --- /dev/null +++ b/www/blog/js-conditionals.html @@ -0,0 +1,197 @@ +Two Things about Conditionals in JavaScript

rmurphey adventures in javascript

Two Things about Conditionals in JavaScript

Just a quick post, inspired by Laura Kalbag's post, which included this gem:

+
+

We shouldn’t be fearful of writing about what we know. Even if you write from the most basic point of view, about something which has been ‘around for ages’, you’ll likely be saying something new to someone.

+
+

One: There is no else if

When you write something like this ...

+
function saySomething( msg ) {
+  if ( msg === 'Hello' ) {
+    console.log('Hello there');
+  } else if ( msg === 'Yo' ) {
+    console.log('Yo dawg');
+  }
+}
+
+

... then what you're actually writing is this ...

+
function saySomething( msg ) {
+  if ( msg === 'Hello' ) {
+    console.log('Hello there');
+  } else {
+    if ( msg === 'Yo' ) {
+      console.log('Yo dawg');
+    }
+  }
+}
+
+

That's because there is no else if in JavaScript. You know how you can write an if statement without any curly braces?

+
if ( foo ) bar() // please don't do this if you want your code to be legible
+
+

You're doing the same thing with the else part of the initial if statement when you write else if: you're skipping the curly braces for the second if block, the one you're providing to else. There's nothing wrong with else if per se, but it's worth knowing about what's actually happening.

+

Two: return Means Never Having to Say else

Consider some code like this:

+
function howBig( num ) {
+  if ( num < 10 ) {
+    return 'small';
+  } else if ( num >= 10 && num < 100 ) {
+    return 'medium';
+  } else if ( num >= 100 ) {
+    return 'big';
+  }
+}
+
+

If the number we pass to howBig is less than 10, then our function will return 'small'. As soon as it returns, none of the rest of the function will run -- this means we can skip the else part entirely, which means our code could look like this:

+
function howBig( num ) {
+  if ( num < 10 ) {
+    return 'small';
+  }
+
+  if ( num < 100 ) {
+    return 'medium';
+  }
+
+  if ( num >= 100 ) {
+    return 'big';
+  }
+}
+
+

But wait -- if the first if statement isn't true, and the second if statement isn't true, then we will always return 'big'. That means the third if statement isn't even required:

+
function howBig( num ) {
+  if ( num < 10 ) {
+    return 'small';
+  }
+
+  if ( num < 100 ) {
+    return 'medium';
+  }
+
+  return 'big';
+}
+
+

Note: this post was edited to improve a couple of the examples and to fix some typos.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/karma-webpack-tape-code-coverage.html b/www/blog/karma-webpack-tape-code-coverage.html new file mode 100644 index 0000000..3a8ec78 --- /dev/null +++ b/www/blog/karma-webpack-tape-code-coverage.html @@ -0,0 +1,273 @@ +Browser Testing and Code Coverage with Karma, Tape, and Webpack

rmurphey adventures in javascript

Browser Testing and Code Coverage with Karma, Tape, and Webpack

We recently set up a new project at Bazaarvoice for centralizing common UI modules. We started by using node-tap for unit tests, but given that these are UI modules, we quickly switched to using tape, because it has a fairly easy browser testing story with the help of Karma.

+

One thing that node-tap provided that tape did not provide out of the box was the ability to measure the code coverage of unit tests. Karma does provide this, but getting it hooked up while using Webpack -- which is our build tool of choice these days -- wasn't quite as clear as I would have liked. If you're looking to use Karma, tape, and Webpack, then hopefully this post will help you spend a bit less time than I did.

+

What You'll Need

By the time it was all said and done, I needed to npm install the following modules:

+
    +
  • karma
  • +
  • karma-phantomjs-launcher
  • +
  • karma-chrome-launcher
  • +
  • karma-tap
  • +
  • karma-webpack
  • +
  • karma-coverage
  • +
  • istanbul-instrumenter-loader
  • +
  • tape
  • +
+

The directory structure was simple:

+
    +
  • a root directory, containing karma.conf.js and package.json
  • +
  • a lib subdirectory, containing module files
  • +
  • a test/unit subdirectory, containing the unit tests
  • +
+

An example application file at lib/global/index.js looked like this:

+
/**
+ *  @fileOverview Provides a reference to the global object
+ *
+ *  Functions created via the Function constructor in strict mode are sloppy
+ *  unless the function body contains a strict mode pragma. This is a reliable
+ *  way to obtain a reference to the global object in any ES3+ environment.
+ *  see http://stackoverflow.com/a/3277192/46867
+ */
+'use strict';
+
+module.exports = (new Function('return this;'))();
+
+

An example test in test/unit/global/index.js looked like this:

+
var test = require('tape');
+var global = require('../../../lib/global');
+
+test('Exports window', function (t) {
+  t.equal(global, window);
+  t.end();
+});
+
+

Testing CommonJS Modules in the Browser

The applications that consume these UI modules use Webpack, so we author the modules (and their tests) as CommonJS modules. Of course, browsers can't consume CommonJS directly, so we need to generate files that browsers can consume. There are several tools we can choose for this task, but since we've otherwise standardized on Webpack, we wanted to use Webpack here as well.

+

Since our goal is to load the tests in the browser, we use the test file as the "entry" file. Webpack processes the dependencies of an entry file to generate a new file that contains the entry file's contents as well as the contents of its dependencies. This new file is the one that Karma will load into the browser to run the tests.

+

Getting this to happen is pretty straightforward with the karma-webpack plugin to Karma. The only catch was the need to tell Webpack how to deal with the fs dependency in tape. Here's the initial Karma configuration that got the tests running:

+
var webpack = require('webpack');
+
+module.exports = function(config) {
+  config.set({
+    plugins: [
+      require('karma-webpack'),
+      require('karma-tap'),
+      require('karma-chrome-launcher'),
+      require('karma-phantomjs-launcher')
+    ],
+
+    basePath: '',
+    frameworks: [ 'tap' ],
+    files: [ 'test/**/*.js' ],
+
+    preprocessors: {
+      'test/**/*.js': [ 'webpack' ]
+    },
+
+    webpack: {
+      node : {
+        fs: 'empty'
+      }
+    },
+
+    webpackMiddleware: {
+      noInfo: true
+    },
+
+    reporters: [ 'dots' ],
+    port: 9876,
+    colors: true,
+    logLevel: config.LOG_INFO,
+    autoWatch: true,
+    browsers: ['Chrome'],
+    singleRun: false
+  })
+};
+
+

However, as I mentioned above, I wanted to get code coverage information. Karma offers the karma-coverage plugin, but that alone was insufficient in Webpack land: it would end up instrumenting the whole Webpack output -- including the test code itself! -- and thus reporting highly inaccurate coverage numbers.

+

I ended up reading a karma-webpack issue that told me someone else had already solved this exact problem by creating a Webpack loader to instrument modules at build time. By adjusting our Webpack configuration to only apply this loader to application modules -- not to test code or vendor code -- the Webpack output ends up properly instrumented for the karma-coverage plugin to work with it. Our final Karma config ends up looking like this:

+
var webpack = require('webpack');
+
+module.exports = function(config) {
+  config.set({
+    plugins: [
+      require('karma-webpack'),
+      require('karma-tap'),
+      require('karma-chrome-launcher'),
+      require('karma-phantomjs-launcher'),
+      require('karma-coverage')
+    ],
+
+    basePath: '',
+    frameworks: [ 'tap' ],
+    files: [ 'test/**/*.js' ],
+
+    preprocessors: {
+      'test/**/*.js': [ 'webpack' ]
+    },
+
+    webpack: {
+      node : {
+        fs: 'empty'
+      },
+
+      // Instrument code that isn't test or vendor code.
+      module: {
+        postLoaders: [{
+          test: /\.js$/,
+          exclude: /(test|node_modules)\//,
+          loader: 'istanbul-instrumenter'
+        }]
+      }
+    },
+
+    webpackMiddleware: {
+      noInfo: true
+    },
+
+    reporters: [
+      'dots',
+      'coverage'
+    ],
+
+    coverageReporter: {
+      type: 'text',
+      dir: 'coverage/'
+    },
+
+    port: 9876,
+    colors: true,
+    logLevel: config.LOG_INFO,
+    autoWatch: true,
+    browsers: ['Chrome'],
+    singleRun: false
+  })
+};
+
+

Even with the coverage hiccup, the speed with which I was able to get Karma set up the way I wanted -- and working with TravisCI -- was nothing short of breathtaking. I'm late to the Karma party, but I had no idea it could be this easy. If you haven't checked it out yet, you should.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/lessons-from-a-rewrite.html b/www/blog/lessons-from-a-rewrite.html new file mode 100644 index 0000000..5db29a3 --- /dev/null +++ b/www/blog/lessons-from-a-rewrite.html @@ -0,0 +1,427 @@ +Lessons From a Rewrite

rmurphey adventures in javascript

Lessons From a Rewrite

MVC and friends have been around for decades, but it’s only in the last couple +of years that broad swaths of developers have started applying those patterns +to JavaScript. As that awareness spreads, developers eager to use their +newfound insight are presented with a target-rich environment, and the +temptation to rewrite can be strong.

+
+

There’s a subtle reason that programmers always want to throw away the code +and start over. The reason is that they think the old code is a mess. … The reason +that they think the old code is a mess is because of a cardinal, fundamental +law of programming: It’s harder to read code than to write it. - Joel Spolsky +

+
+

When I started working with Toura Mobile late last year, they already had +a product: a web-based CMS to create the structure of a mobile application and +populate it with content, and a PhoneGap-based application to consume the +output of the CMS inside a native application. Customers were paying, but the +development team was finding that delivering new features was a struggle, and +bug fixes seemed just as likely to break something else as not. They contacted +me to see whether they should consider a rewrite.

+

With due deference to Spolsky, I don’t think it was a lack of readability +driving their inclination to rewrite. In fact, the code wasn’t all that +difficult to read or follow. The problem was that the PhoneGap side of things +had been written to solve the problems of a single-purpose, one-off +application, and it was becoming clear that it needed to be a flexible, +extensible delivery system for all of the content combinations clients could +dream up. It wasn’t an app — it was an app that made there be an app.

+
+

Where a new system concept or new technology is used, one has to build a system +to throw away, for even the best planning is not so omniscient as to get it +right the first time. Hence plan to throw one away; you will, anyhow. - Fred +Brooks, The Mythical Man Month

+
+

By the time I’d reviewed the code and started writing up my findings, the +decision had already been made: Toura was going to throw one away and start +from scratch. For four grueling and exciting months, I helped them figure out +how to do it better the second time around. In the end, I like to think we’ve +come up with a solid architecture that’s going to adapt well to clients’ +ever-changing needs. Here, then, are some of the lessons we learned along the +way.

+

Understand what you’re rewriting

I had spent only a few days with the codebase when we decided that we were +going to rewrite it. In some ways, this was good — I was a fresh set of eyes, +someone who could think about the system in a new way — but in other ways, it +was a major hindrance. We spent a lot of time at the beginning getting me up to +speed on what, exactly, we were making; things that went without saying for +existing team members did not, in fact, go without saying for me.

+

This constant need for explanation and clarification was frustrating at times, +both for me and for the existing team, but it forced us to state the problem in +plain terms. The value of this was incredible — as a team, we were far less +likely to accept assumptions from the original implementation, even assumptions +that seemed obvious.

+

One of the key features of Toura applications is the ability to update them +“over the air” — it’s not necessary to put a new version in an app store in +order to update an app’s content or even its structure. In the original app, +this was accomplished via generated SQL diffs of the data. If the app was at +version 3, and the data in the CMS was at version 10, then the app would +request a patch file to upgrade version 3 to version 10. The CMS had to +generate a diff for all possible combinations: version 3 to version 10, version +4 to version 10, etc. The diff consisted of queries to run against an SQLite +database on the device. Opportunities for failures or errors were rampant, +a situation exacerbated by the async nature of the SQLite interface.

+

In the new app, we replicated the feature with vastly less complexity +— whenever there is an update, we just make the full data available at an +app-specific URL as a JSON file, using the same format that we use to provide +the initial data for the app on the device. The new data is stored on the +device, but it’s also retained in memory while the application is running via +Dojo’s Item File Read Store, which allows us to query it synchronously. The +need for version-by-version diffs has been eliminated.
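
As a rough illustration (not Toura's actual code), loading that JSON into dojo.data.ItemFileReadStore and reading it back from memory might look something like this; the data shape and item fields here are assumptions, using the identifier/items format the store expects:

{% codeblock lang:javascript %}
dojo.require('dojo.data.ItemFileReadStore');

// hypothetical app data in the identifier/items format the store expects
var appData = {
  identifier : 'id',
  items : [
    { id : 'node123', type : 'node', title : 'Exhibit Map' },
    { id : 'node124', type : 'node', title : 'Visitor Info' }
  ]
};

var store = new dojo.data.ItemFileReadStore({ data : appData });

// the data is held in memory, so lookups never touch SQLite
store.fetchItemByIdentity({
  identity : 'node123',
  onItem : function (item) {
    console.log(store.getValue(item, 'title')); // "Exhibit Map"
  }
});
{% endcodeblock %}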

+

Restating the problem led to a simpler, more elegant solution that greatly +reduced the opportunities for errors and failure. As an added benefit, using +JSON has allowed us to meet needs that we never anticipated — the flexibility +it provides has become a valuable tool in our toolbox.

+

Identify pain points

If the point of a rewrite is to make development easier, then an important step +is to figure out what, exactly, is making development hard. Again, this was +a time to question assumptions — as it turned out, there were things that had +come to be accepted burdens that were actually relatively easy to address.

+

One of the biggest examples of this was the time required to develop and test +anything that might behave differently on one operating system versus another. +For example, the Android OS has limited support for the audio and video tags, +so a native workaround is required to play media on Android that is not +required on iOS.

+

In the original code, this device-specific branching was handled in a way that +undoubtedly made sense at the beginning but grew unwieldy over time. Developers +would create Mustache templates, wrapping the template tags in /* */ so the +templates were actually executable, and then compile those templates into plain +JavaScript files for production. Here are a few lines from one of those +templates:

+

{% codeblock lang:javascript %}
+/* {{^android}} */
+var mediaPath = "www/media/" + toura.pages.currentId + "/";
+/* {{/android}} */
+/* {{#android}} */
+var mediaPath = [Toura.getTouraPath(), toura.pages.currentId].join("/");
+/* {{/android}} */
+var imagesList = [], dimensionsList = [], namesList = [], thumbsList = [];
+var pos = -1, count = 0;
+/* {{#android}} */
+var pos = 0, count = 0;
+/* {{/android}} */
+{% endcodeblock %}

+

These templates were impossible to check with a code quality tool like JSHint, +because it was standard to declare the same variable multiple times. Multiple +declarations of the same variable meant that the order of those declarations +was important, which made the templates tremendously fragile. The theoretical +payoff was smaller code in production, but the cost of that byte shaving was +high, and the benefit somewhat questionable — after all, we’d be delivering the +code directly from the device, not over HTTP.

+

In the rewrite, we used a simple configuration object to specify information +about the environment, and then we look at the values in that configuration +object to determine how the app should behave. The configuration object is +created as part of building a production-ready app, but in development we can +alter configuration settings at will. Simple if statements replaced fragile +template tags.
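
A simplified sketch of the idea (the config object's name and properties here are hypothetical, not Toura's actual shape):

{% codeblock lang:javascript %}
// generated at build time for production; hand-edited at will in development
toura.app.Config = {
  os : 'android'
};

// device-specific branching becomes a plain, lintable if/else
var mediaPath;
if (toura.app.Config.os === 'android') {
  mediaPath = [ Toura.getTouraPath(), toura.pages.currentId ].join('/');
} else {
  mediaPath = 'www/media/' + toura.pages.currentId + '/';
}
{% endcodeblock %}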

+

Since Dojo allows specifying code blocks for exclusion based on the settings +you provide to the build process, we could mark code for exclusion if we really +didn’t want it in production.

+

By using a configuration object instead of template tags for branching, we +eliminated a major pain point in day-to-day development. While nothing matches +the proving ground of the device itself, it’s now trivial to effectively +simulate different device experiences from the comfort of the browser. We do +the majority of our development there, with a high degree of confidence that +things will work mostly as expected once we reach the device. If you’ve ever +waited for an app to build and install to a device, then you know how much +faster it is to just press Command-R in your browser instead.

+

Have a communication manifesto

Deciding that you’re going to embrace an MVC-ish approach to an application is +a big step, but only a first step — there are a million more decisions you’re +going to need to make, big and small. One of the widest-reaching decisions to +make is how you’ll communicate among the various pieces of the application. +There are all sorts of levels of communication, from application-wide state +management — what page am I on? — to communication between UI components — when +a user enters a search term, how do I get and display the results?

+

From the outset, I had a fairly clear idea of how this should work based on +past experiences, but at first I took for granted that the other developers +would see things the same way I did, and I wasn’t necessarily consistent +myself. For a while we had several different patterns of communication, +depending on who had written the code and when. Every time you went to use +a component, it was pretty much a surprise which pattern it would use.

+

After one too many episodes of frustration, I realized that part of my job was +going to be to lay down the law about this — it wasn’t that my way was more +right than others, but rather that we needed to choose a way, or else reuse and +maintenance was going to become a nightmare. Here’s what I came up with:

+
    +
  • myComponent.set(key, value) to change state (with the help of setter +methods from Dojo’s dijit._Widget mixin)
  • +
  • myComponent.on<Event>(componentEventData) to announce state changes +and user interaction; Dojo lets us +connect to the +execution of arbitrary methods, so other pieces could listen for these +methods to be executed.
  • +
  • dojo.publish(topic, [ data ]) to announce occurrences of app-wide interest, +such as when the window is resized
  • +
  • myComponent.subscribe(topic) to allow individual components to react to +published topics (see the sketch after this list)
  • +
+
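
To make the convention concrete, here's a rough sketch of how a hypothetical search component might participate; the component, method, and topic names are invented for illustration:

{% codeblock lang:javascript %}
// a state change, via a setter from dijit._Widget
searchBox.set('term', 'jquery');

// announcing user interaction: other code connects to this method's execution
searchBox.onSearch({ term : 'jquery' });

// app-wide occurrences go through publish/subscribe
dojo.publish('/window/resize', [ { width : 320 } ]);
searchResults.subscribe('/window/resize', function (dims) {
  // react to the published topic
});
{% endcodeblock %}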

Once we spelled out the patterns, the immediate benefit +wasn’t maintainability or reuse; rather, we found that we didn’t have to make +these decisions on a component-by-component basis anymore, and we could focus +on the questions that were actually unique to a component. With conventions +we could rely on, we were constantly discovering new ways to abstract and DRY +our code, and the consistency across components meant it was easier to work +with code someone else had written.

+

Sanify asynchronicity

One of the biggest challenges of JavaScript development — well, besides working +with the DOM — is managing the asynchronicity of it all. In the old system, +this was dealt with in various ways: sometimes a method would take a success +callback and a failure callback; other times a function would return an object +and check one of its properties on an interval.

+

{% codeblock lang:javascript %}
+images = toura.sqlite.getMedias(id, "image");
+
+var onGetComplete = setInterval(function () {
+  if (images.incomplete)
+    return;
+
+  clearInterval(onGetComplete);
+  showImagesHelper(images.objs, choice);
+}, 10);
+{% endcodeblock %}

+

The problem here, of course, is that if images.incomplete never gets set to +false — that is, if the getMedias method fails — then the interval will never +get cleared. Dojo and now jQuery (since version 1.5) offer a facility for +handling this situation in an elegant and powerful way. In the new version of +the app, the above functionality looks something like this:

+

{% codeblock lang:javascript %}
+toura.app.Data.get(id, 'image').then(showImages, showImagesFail);
+{% endcodeblock %}

+

The get method of toura.app.Data returns an immutable promise +— the promise’s then method makes the resulting value of the asynchronous get +method available to showImages, but does not allow showImages to alter the +value. The promise returned by the get method can also be stored in a variable, +so that additional callbacks can be attached to it.
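
For instance (the second callback here is hypothetical, just to show attaching more than one):

{% codeblock lang:javascript %}
var imagesPromise = toura.app.Data.get(id, 'image');

imagesPromise.then(showImages, showImagesFail);

// later, another component can hang its own callback off the same promise
imagesPromise.then(function (images) {
  console.log('loaded', images.length, 'images');
});
{% endcodeblock %}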

+

Using promises vastly simplifies asynchronous code, which can be one of the +biggest sources of complexity in a non-trivial application. By using promises, +we got code that was easier to follow, components that were thoroughly +decoupled, and new flexibility in how we responded to the outcome of an +asynchronous operation.

+

Naming things is hard

Throughout the course of the rewrite we were constantly confronted with one of +those pressing questions developers wrestle with: what should I name this +variable/module/method/thing? Sometimes I would find myself feeling slightly +absurd about the amount of time we’d spend naming a thing, but just recently +I was reminded how much power those names have over our thinking.

+

Every application generated by the Toura CMS consists of a set of “nodes,” +organized into a hierarchy. With the exception of pages that are standard +across all apps, such as the search page, the base content type for a page +inside APP is always a node — or rather, it was, until the other day. I was +working on a new feature and struggling to figure out how I’d display a piece +of content that was unique to the app but wasn’t really associated with a node +at all. I pored over our existing code, seeing the word node on what felt like +every other line. As an experiment, I changed that word node to baseObj in +a few high-level files, and suddenly a whole world of solutions opened up to me +— the name of a thing had been limiting my thinking.

+

The lesson here, for me, is that the time we spent (and spend) figuring out +what to name a thing is not lost time; perhaps even more importantly, the goal +should be to give a thing the most generic name that still conveys what the +thing’s job — in the context in which you’ll use the thing — actually is.

+

Never write large apps

I touched on this earlier, but if there is one lesson I take from every large +app I’ve worked on, it is this:

+
+

The secret to building large apps is never build large apps. Break up your +applications into small pieces. Then, assemble those testable, bite-sized +pieces into your big application. - Justin Meyer

+
+

The more tied components are to each other, the less reusable they will be, and +the more difficult it becomes to make changes to one without accidentally +affecting another. Much like we had a manifesto of sorts for communication +among components, we strived for a clear delineation of responsibilities among +our components. Each one should do one thing and do it well.

+

For example, simply rendering a page involves several small, single-purpose +components:

+

{% codeblock lang:javascript %}
+function nodeRoute(route, nodeId, pageState) {
+  pageState = pageState || {};
+
+  var nodeModel = toura.app.Data.getModel(nodeId),
+      page = toura.app.UI.getCurrentPage();
+
+  if (!nodeModel) {
+    toura.app.Router.home();
+    return;
+  }
+
+  if (!page || !page.node || nodeId !== page.node.id) {
+    page = toura.app.PageFactory.createPage('node', nodeModel);
+
+    if (page.failure) {
+      toura.app.Router.back();
+      return;
+    }
+
+    toura.app.UI.showPage(page, nodeModel);
+  }
+
+  page.init(pageState);
+
+  // record node pageview if it is node-only
+  if (nodeId && !pageState.assetType) {
+    dojo.publish('/node/view', [ route.hash ]);
+  }
+
+  return true;
+}
+{% endcodeblock %}

+

The router observes a URL change, parses the parameters for the route from the +URL, and passes those parameters to a function. The Data component gets the +relevant data, and then hands it to the PageFactory component to generate the +page. As the page is generated, the individual components for the page are also +created and placed in the page. The PageFactory component returns the generated +page, but at this point the page is not in the DOM. The UI component receives +it, places it in the DOM, and handles the animation from the old page to the +new one.

+

Every step is its own tiny app, making the whole process tremendously testable. +The output of one step may become the input to another step, but when input and +output are predictable, the questions our tests need to answer are trivial: +“When I asked the Data component for the data for node123, did I get the data +for node123?”

+

Individual UI components are their own tiny apps as well. On a page that +displays a videos node, we have a video player component, a video list +component, and a video caption component. Selecting a video in the list +announces the selection via the list’s onSelect method. Dojo allows us to +connect to the execution of object methods, so in the page controller, we have +this:

+

{% codeblock lang:javascript %}
+this.connect(this.videoList, 'onSelect', function(assetId) {
+  var video = this._videoById(assetId);
+  this.videoCaption.set('content', video.caption || '');
+  this.videoPlayer.play(assetId);
+});
+{% endcodeblock %}

+

The page controller receives the message and passes it along to the other +components that need to know about it — components don’t communicate directly +with one another. This means the component that lists the videos can list +anything, not just videos — its only job is to announce a selection, not to do +anything as a result.

+

Keep rewriting

+

It takes confidence to throw work away … When people first start drawing, +they’re often reluctant to redo parts that aren’t right … they convince +themselves that the drawing is not that bad, really — in fact, maybe they meant +it to look that way. - Paul Graham, “Taste for Makers”

+
+

The blank slate offered by a rewrite allows us to fix old mistakes, but +inevitably we will make new ones in the process. As good stewards of our code, +we must always be open to the possibility of a better way of doing a thing. “It +works” should never be mistaken for “it’s done.”

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/lessons-learned-from-taking-on-a-project-in-crisis.html b/www/blog/lessons-learned-from-taking-on-a-project-in-crisis.html new file mode 100644 index 0000000..4a7723a --- /dev/null +++ b/www/blog/lessons-learned-from-taking-on-a-project-in-crisis.html @@ -0,0 +1,146 @@ +Lessons Learned from Taking On a Project in Crisis

rmurphey adventures in javascript

Lessons Learned from Taking On a Project in Crisis

I just got done with an emergency project for an agency developing a public-facing application for a multinational technology client you've most certainly heard of. I developed the entire front-end -- HTML, CSS, and JavaScript -- for a non-trivial application with a limited spec in just seven days. The experience was so eye-opening that I feel the need to write down some of the things I've learned, in hopes that I can benefit from my experience in the future.

+ +
    +
  • Demand all technical source material up front, such as functional specs, mockups, work that's been done to date, etc. Give the client a fixed amount of time to deliver that source material, and don't make a decision about taking on the project until you've seen it. What the client can deliver in that fixed amount of time will shed a lot of light on the state of the project and whether their expectations are realistic.
  • +
  • Set clear time expectations. Am I willing to work 16 hours a day? Am I expected to? Are there hours during which I'll be expected to be available? Am I willing to work on the weekend?
  • +
  • Find out whether the client expects me to be available after the imminent deadline, and to what extent. The last thing I want is to snatch defeat from the jaws of victory by being unable to support the code I've written.
  • +
  • Do not accept responsibility for commitments made on my behalf. The recruiter said I'd be available six hours a day when I told him four? Not my problem. The client committed to having a feature ready for review without consulting me? They probably won't make that mistake again.
  • +
  • Ascertain the rest of the team's commitment to the project. If I'm expected to work long hours, will they be there during those long hours to get me what I need? Are there any constraints on their availability?
  • +
  • Establish a single point of contact at the client, and make clear I'll be depending on them to get me any information I need and I'll be treating their decisions as final. Insist that they participate in all calls I'm expected to participate in.
  • +
  • Limit the amount of work I do before I have access to all client systems I'll need access to: version control, testing environments, ticketing systems, etc.
  • +
  • Insist on a ticketing system. I'm new to the project and I have a lot to get up to speed on. I don't want emails flying at me from all directions -- decisions and technical requirements need to be documented in a single place that everyone can see. This is my only insurance when someone wants to know why something isn't done, or why it wasn't done the way they expected.
  • +
  • Insist on version control, even if it's something crappy like CVS. I'll need a way to make sure the rest of the team has access to my latest and greatest. FTP blows, especially when I'm FTPing to a server where another developer is constantly deploying a new build, overwriting my work.
  • +
+ +

What other advice do you have?

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/making-sense-of-dojo-when-a-simple-xhr-caching-example.html b/www/blog/making-sense-of-dojo-when-a-simple-xhr-caching-example.html new file mode 100644 index 0000000..c41fe8c --- /dev/null +++ b/www/blog/making-sense-of-dojo-when-a-simple-xhr-caching-example.html @@ -0,0 +1,199 @@ +Making sense of dojo.when: A simple XHR caching example

rmurphey adventures in javascript

Making sense of dojo.when: A simple XHR caching example

Right before Dojo 1.5 came out, the Sitepen blog had a great post about the improvements 1.5 would bring for dojo.Deferred. The part that really caught my eye was dojo.when, a method that lets you pass a value to a function whether that value is available now or as the result of some asynchronous operation. Either way, you get a “promise” that when the value is available, the function you provided will run.

+ +

This is one of those things that was super-neat when I read about it, but it took me a while to incorporate it into my code — it’s only in the last couple of weeks that I’ve had that wonderful moment when I’ve said “oh, I could totally use dojo.when for that!” Moments like these make me very happy.

+ +

It’s pretty common that an application makes an Ajax request for some data, and then caches that data so the request won’t have to happen again; the pattern might look something like this:

+ +

{% codeblock lang:javascript %}
+var myCache = {};
+
+function getSomeStuff(stuffId) {
+  if (myCache[stuffId]) {
+    handleResponse(myCache[stuffId]);
+    return;
+  }
+
+  dojo.xhrGet({
+    url : 'foo.php',
+    content : { id : stuffId },
+    load : function(response) {
+      myCache[stuffId] = response;
+      handleResponse(response);
+    }
+  });
+}
+{% endcodeblock %}

+

Here we have a function that takes an ID; the function looks in the cache to see if there’s a value stored for the ID, and if so, it passes the stored value to a handleResponse function. If not, it does an XHR to get the data; when the XHR succeeds, it stores the data in the cache and, again, passes the value to the handleResponse function.

+ +

There’s nothing strictly wrong with this, but I discovered that some neat abstraction opportunities became more clear when I switched to using dojo.when instead:

+ +

{% codeblock lang:javascript %}
+var myCache = {};
+
+function getSomeStuff(stuffId) {
+  dojo.when(
+    myCache[stuffId] || dojo.xhrGet({
+      url : 'foo.php',
+      content : { id : stuffId },
+      load : function(response) {
+        myCache[stuffId] = response;
+      }
+    }),
+    handleResponse
+  );
+}
+{% endcodeblock %}

+

Now we’re telling our getSomeStuff function to look for a cached value; if it finds one, dojo.when will immediately pass that value to the handleResponse function. If it doesn’t find one, it will run the XHR, and dojo.when will magically pass the XHR’s response to the handleResponse function instead. This is hot.

+ +

This works because dojo.xhrGet returns a “promise” object with a then method. dojo.when looks to see whether it got a promise object as its first argument; if so, it uses the then method of the promise object to attach the callback provided as the second argument to dojo.when. If not, it simply calls the callback immediately on the first argument. The real magic is actually in dojo.Deferred, not in dojo.when itself. Since all of Dojo’s XHR methods return a dojo.Deferred promise, dojo.when will “just work.”
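
Boiled down, the two paths look like this (a minimal sketch, not from the original example):

{% codeblock lang:javascript %}
// plain value: the callback runs immediately
dojo.when(42, function (val) { console.log('got', val); });

// deferred/promise: the callback runs once the value becomes available
var dfd = new dojo.Deferred();
dojo.when(dfd, function (val) { console.log('got', val); });
dfd.callback(42);
{% endcodeblock %}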

+ +

I found that I was going through my application and ripping out instances of the old code, replacing it with the new. And then I had that “oh sh*t I’m copying and pasting, aren’t I …” moment, and saw my way to an abstraction.

+ +

In my application, I was actually caching the responses using the URL from which I’d requested them, which works out to be a perfectly unique ID for the data. (This particular part may or may not work in your application.) My abstraction was an essentially drop-in replacement for dojo.xhrGet calls:

+ +

{% codeblock lang:javascript %}
+var cache = {};
+
+function cacheableXhrGet(settings) {
+  var url = settings.url,
+      req = cache[url] ||
+        dojo.xhrGet(dojo.mixin({
+          // override the load handler
+          load : function(resp) {
+            cache[url] = resp;
+          }
+        }, settings));
+
+  dojo.when(req, settings.load);
+  return req;
+}
+{% endcodeblock %}

+

I can pass a settings object to cacheableXhrGet that looks exactly like the object I’d pass to dojo.xhrGet, but replace the load function before actually passing it to dojo.xhrGet. But before the XHR even has a chance to get set up, I check my cache for a stored response; if I find one, I store it in the req variable, but otherwise I store the XHR there.

+ +

In either case, the function defined at settings.load gets the proper response value via dojo.when. For bonus points, I then return either the cached value or the XHR — which means other code can use the return value of cacheableXhrGet for its own dojo.when. How neat is that?
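
Usage ends up looking something like this (hypothetical caller code; updateSidebar is invented for illustration):

{% codeblock lang:javascript %}
var req = cacheableXhrGet({
  url : 'stuff.php?id=42',
  load : handleResponse
});

// elsewhere, other code can hang its own callback off the same request,
// whether req is the cached value or a pending XHR
dojo.when(req, updateSidebar);
{% endcodeblock %}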

+ +

Conclusion

+ +

Promises and deferreds are really pleasant tools to have in your JavaScript arsenal once you get the hang of them, and dojo.when seems like a great place to start understanding them.

+ +

Out of the box, Dojo makes use of deferreds for all of its XHR functionality, meaning that you can pass around the return value of any Dojo XHR method and do fun things you can’t do with jQuery’s $.ajax, like add more callbacks to a request after you’ve set it up.

+ +

I’ve just recently started realizing when I could incorporate dojo.Deferred functionality into my own code — again, now that I’ve got the hang of it, I’m pretty sure it’s going to dramatically change how I write asynchronous code.

+ +

Disclaimer: This post contains sample code for illustration purposes. In reality it’s all namespaced and these naked functions are actually methods in classes and stuff, and the real code doesn’t even look much like the code you see here. I’ve also completely ignored questions of when to clear or invalidate the cache. You’ve been warned.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/modern-javascript.html b/www/blog/modern-javascript.html new file mode 100644 index 0000000..7dd3637 --- /dev/null +++ b/www/blog/modern-javascript.html @@ -0,0 +1,133 @@ +Modern JavaScript

rmurphey adventures in javascript

Modern JavaScript

My presentation, The jQuery Divide (video here), has been making the rounds on the internet again, six months after I delivered it at JSConf.eu in Berlin, and this time around, a colleague on IRC shared a link with me that drew from it: Is JavaScript the New Perl?

+

Perl has a special place in my heart; it's the first language I used to solve a real-world problem, and I still have the second edition Learning Perl that my good friend Marcus got for me at the time. These days I struggle for it not to look mostly like a lot of gibberish, but in the late 1990s it was funtimes.

+

Anyway. The post that linked to my presentation asked if JavaScript might be going through some of the same pains that Perl has gone through, and linked to an eerily relevant presentation about Modern Perl, a movement that "actively seeks both to teach how to write good code and change perceptions of Perl that still linger from the dot.com 90s." It talks about the void that Perl sought to fill way back in 1987, and then steps through the highs and lows of the intervening 23 years.

+

One thing that struck me, reading the slides, is that Perl -- like other open-source, server-side languages -- has the distinct benefit of being community-driven. While, yes, JavaScript has a wonderful and vibrant community, the language itself is held hostage by browser vendors, some of whom have shown a strong inclination to not give a fuck about owning up to and fixing their egregious mistakes. Using new features of a language like Perl is, at the end of the day, a largely internal problem -- given enough time and money, switching to a new version of the language that offers new features for code organization, testing, and abstraction is a thing a project can do. Indeed, Perl as a community can even make bold decisions like deciding that a new version simply won't be back-compat with a version that came before, throwing away ideas that turned out to be duds; meanwhile, JavaScript web developers often must bend over backwards to ensure back-compat with decade-old technology, and the only way to transition away from that technology is to give up on a set of users entirely.

+

We've already seen what this means for JavaScript as a language: it was years after JavaScript's debut before we really started seeing conversations about what a module should look like in JavaScript, and we're still fighting over it today. Without a solid dependency management system -- something you can take for granted in any 15-year-old community-driven language -- dependency management often means sticking another script tag on the page, and even the most popular JavaScript library on the planet struggles with how to participate in a fledgling ecosystem. With no arbiter of common, tested, community-approved, community-vetted solutions -- see Perl's CPAN -- it's an environment that's ripe for fragmentation, and shining examples of Not Invented Here (NIH) litter the JavaScript landscape. Lacking even an agreed-upon method of expressing dependencies, the findability of good solutions is low, and coalescence only occurs around tools with extremely low barriers to entry and extremely high near-term reward.

+

When Marcus was teaching me Perl, back in the dot com heyday of the late 1990s and before the world temporarily went to hell for a few years, there was great emphasis on TIMTOWTDI: there is more than one way to do it. That mantra made Perl beautiful and elegant and powerful. Too often, it also made it ridiculously hard for the next developer to build upon and maintain, especially as the problems developers were solving got more complicated than copying and pasting some code to support a contact form (sound familiar?). In the end, that mantra meant Perl's reputation suffered, as the consequences of code written by developers with a whole lot of freedom and not so much skill became clear.

+

This, in a nutshell, is what I was talking about in Berlin: that the reputation of this language we love stands to suffer if we don't get around to working together to solve these larger problems, and educating the wider world of JavaScript developers as we figure it out. Unlike with Perl, the language itself isn't going to evolve in time to help us here -- unless and until we're willing to give up on huge swaths of users, we will, generously, be stuck with the browser technology of 2009 for a long time to come. Unlike the Modern Perl movement, the patterns and tools and practices that will form the foundation of Modern JavaScript are going to have to come from outside implementations of the language itself.

+

Realizing that, it becomes clear just how imperative it is that we, as a community, figure out dependency management, modularization, and intentional interoperability so that these patterns, tools, and practices can start to emerge organically. James Burke, the creator of RequireJS, is something of a hero to me, not for creating RequireJS, but for taking on the challenge of interacting calmly and level-headedly with all sorts of stakeholders to try to make AMD modules a viable reality. Tool and library developers need to stop debating whether this is a good idea and get to work on making it happen.
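
For the unfamiliar, here's a minimal sketch of what an AMD module looks like (the module names are made up; RequireJS is one loader that understands this format):

{% codeblock lang:javascript %}
// a module declares its dependencies and receives them as arguments,
// rather than assuming globals or yet another script tag
define([ 'myapp/templates', 'myapp/data' ], function(templates, data) {
  return {
    render : function(node) {
      node.innerHTML = templates.itemList(data.getItems());
    }
  };
});
{% endcodeblock %}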

+

Tools and libraries also need to take seriously the need for modularization -- though I confess I have many misgivings about the NIH aspect of Dustin Diaz's Ender.js, and wish that the considerable effort involved had been directed toward an established project with similar features, I can't help but hope it will pressure libraries like jQuery to make more efforts in the direction of modularization.

+

An equally important aspect of modularization is ensuring minimal duplication of effort. As a community, we need to agree on a core set of functionality that ought to be provided by the language but isn't, and implement that itself as an interchangeable module. A page with both Underscore.js and jQuery on it has tremendous duplication of functionality, for example. Interchangeability will allow end users to roll exactly the tool they need, no more and no less. Eventually, standard toolkits could emerge that draw on the best of all worlds, rather than one-size-fits-all tools that exist in isolation.
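
To make the duplication concrete, here's a tiny, purely illustrative example: both libraries ship their own iteration helper, and a page that loads both pays for that functionality twice:

{% codeblock lang:javascript %}
// Underscore's iterator
_.each([ 'cat', 'dog' ], function(animal) { console.log(animal); });

// jQuery's iterator, doing exactly the same work
$.each([ 'cat', 'dog' ], function(i, animal) { console.log(animal); });
{% endcodeblock %}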

+

While I agree with what Tom Dale wrote in his oddly controversial post -- that "unless it is designed to work well together, it usually won’t" -- the more I think about it, the more I realize that the problem lies in our current inability to reliably isolate functionality and express dependencies across tools. It's not that large tools like Dojo are the One True Way -- it's that large tools like Dojo are incredibly powerful precisely because they take seriously the need for a lightweight core leveraged by components that deliver specific, isolated functionality. JavaScript as a whole will become more powerful by embracing the pattern.

+

The political problems here are obvious and several: such modularization will, by definition, lead to winners and losers; the identities of libraries as we know them stand to be diluted if it becomes trivial to use only parts of them. The emphasis will shift to curated toolkits that assemble best-of-breed solutions, and NIH efforts will compete on merit, not marketing. At the same time, though, trying new things will no longer involve learning a whole new set of tools, and developers won't be as stuck with a solution that made sense once upon a time but not anymore.

+

A final and important piece of the puzzle is actually educating people about the patterns that are enabled when we embrace these tools and practices. The wider community of everyday devs who are just trying to get their job done has hopefully graduated from copying and pasting scripts, but there's a long path ahead, and part of the work of Modern JavaScript needs to be clearing that path for them.

+

I said it in my Berlin talk, and I will say it again: sharing what we know is as important as making new things, even if it's not always quite as fun. All the script loaders, build tools, inheritance systems, array utilities, templating frameworks, and data abstractions in the world are meaningless if we don't help people understand how and why to use them.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/more-on-jquery-selectors.html b/www/blog/more-on-jquery-selectors.html new file mode 100644 index 0000000..cc11384 --- /dev/null +++ b/www/blog/more-on-jquery-selectors.html @@ -0,0 +1,125 @@ +More on jQuery selectors

rmurphey adventures in javascript

More on jQuery selectors

Following up on my 13 seconds of selection hell:

+

jQuery: what are the fastest selectors?

+

Turns out that, as you might expect, $('div.class') is faster than $('.class'), and so is $('div').filter('.class'). Knowing where not to look ("skip anything that's not a div") helps make jQuery faster. To which you might say, "well duh." Regardless, the post above does a nice job of demonstrating it pretty plainly, and it's a good reminder that, if I'd wanted to stick with my attribute-based selection, I could have helped my code a little bit by telling it where I expected to find those attributes so it wouldn't have had to look through every element on a ridiculously huge page.

+

{% codeblock lang:javascript %}
$('#data_entry input, #data_entry select').filter('[name=value]');
{% endcodeblock %}
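
Or, stated in the generic terms of the post linked above (the class and tag names here are only illustrative):

{% codeblock lang:javascript %}
$('.foo');               // every element on the page gets checked
$('div.foo');            // only divs get checked
$('div').filter('.foo'); // grab the divs first, then narrow by class
{% endcodeblock %}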

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/my-case-for-standards-based-web-layouts.html b/www/blog/my-case-for-standards-based-web-layouts.html new file mode 100644 index 0000000..fbd68e5 --- /dev/null +++ b/www/blog/my-case-for-standards-based-web-layouts.html @@ -0,0 +1,157 @@ +My case for standards-based web layouts

rmurphey adventures in javascript

My case for standards-based web layouts

Since 1999, the World Wide Web Consortium (W3C) -- a standards body that tells the world how the web should work -- has urged web sites to use semantic markup for content and cascading stylesheets (CSS) for layout, and to abandon code that combines information and presentation in non-semantic HTML table-based layouts. Semantic markup ...

+
    +
  • puts the most important information in a document at the beginning of the document;
  • +
  • clearly marks headings, paragraphs, lists and other standard elements; and
  • +
  • in general indicates what something is, not what it looks like.
  • +
+ +

It’s this separation of information from presentation that helps Google figure out what a page is about; that lets users make decisions about how they interact with content; that allows content to be reused in ways we didn’t think about when it was first created; that lets us change the look and feel of a whole site by editing a file or two; and that lets us take a site built for a PC-based browser and squeeze it onto an iPhone.

+

All those advantages aside, at the end of the day, just because the W3C says it should be so doesn’t make it so. Plenty of sites haven’t managed to let go of late-1900s practices, and for understandable reasons: There’s a non-trivial learning curve for designers and developers who grew up slicing and dicing layouts into HTML tables, and good semantic markup and CSS takes careful planning at the outset of a project. Combine those challenges with the fact that a dynamic site’s front-end code can be brutally intertwined with back-end processes, and it can be difficult for a company to justify transitioning away from a system that seems to be working just fine.

+

However, a few factors are tipping the balance in the W3C’s favor these days:

+
    +
  • +Search engines like pages they can read, and semantic markup using HTML provides a language search engines can understand. Mark an element as a Level 1 Heading, and search engines will know it’s the most important heading on the page and use that information to figure out what your page is about. Simply using semantic markup can contribute greatly to a site’s search engine optimization.
  • +
  • +Accessibility has come to the forefront of the web development world with the certification of a class-action lawsuit against Target department stores. The suit says that Target has refused to take steps to make the site usable by people who are visually impaired. As a result of a ruling in the lawsuit, California may mandate that sites be accessible to the visually impaired. While sites that use table-based layouts aren’t inaccessible by definition, they are inherently more difficult to make accessible than sites that separate their information from their presentation.
  • +
  • +The Web 2.0 phenomenon relies on content that can be rearranged and reused with ease, and that rearranging and reuse is infinitely easier on sites that separate information from presentation. Web 2.0 technologies rearrange, reuse and repopulate elements on a page based on what the elements are, not on what they look like or where they are on the page. As users come to expect more interactivity and fewer page loads from their web experience, sites that separate their information from presentation will be in a better position to make use of Web 2.0 technologies.
  • +
  • The mobile web has existed for several years now, but the iPhone is reshaping the mobile web landscape and blurring the line between PCs and portable devices. Though the iPhone can browse “normal” web sites, many content providers are offering sites that are HTML-based but customized for the shape and size of the iPhone’s screen. As more portable devices that can browse the “normal” web become available, sites that separate content from presentation will be uniquely positioned to offer mobile users a customized version of their existing pages without having to change the underlying code.
  • +
+ +

Further reading:

+

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/notes-on-handing-off-a-design-to-a-front-end-developer.html b/www/blog/notes-on-handing-off-a-design-to-a-front-end-developer.html new file mode 100644 index 0000000..61a55a2 --- /dev/null +++ b/www/blog/notes-on-handing-off-a-design-to-a-front-end-developer.html @@ -0,0 +1,200 @@ +Notes on handing off a design to a front-end developer

rmurphey adventures in javascript

Notes on handing off a design to a front-end developer

I've been spending a lot of time lately turning other people's designs into working websites, and often there are a few rounds of back-and-forth before I have everything I need. Some notes on must-have pieces before I can begin work:

+

Design

+
    +
  • +Custom fonts. If the design includes fonts that aren't on this list, then either the designer needs to provide a PSD that has all instances of the font being used in the design, or else I'm going to need the font file in order to create them. (There may be licensing issues here, which I leave as an exercise for the reader. In print land, we were allowed to provide the print shop with the fonts required to produce a printed piece, and the print shop was not supposed to retain them once the print run was complete. I imagine something similar applies to web production.)
  • +
  • +Navigation state information. What should navigation elements look like in the up, over, and current states? Ideally I'd like to see all of the buttons in each state, but if the variations are simple then I can probably make them myself.
  • +
  • +Link state information. What color should links be? Should they be underlined all the time, or only on hover? What color should they be when they are hovered? When they've already been visited?
  • +
  • +Form treatment. If the site has any forms on it, what should form elements -- text inputs, submit buttons and input labels -- look like?
  • +
  • +Typography specifications. Decisions about these can dramatically affect the feel of the site, and you may not want to leave them up to me. Specifically: +
      +
    • How big should body copy be?
    • +
    • How much space should there be between body copy lines? (Normally the "line-height" is set at 1.2 times the size of the font; increasing this can make body copy more legible.)
    • +
    • How much space should there be between body copy paragraphs?
    • +
    • How should headings be styled? How big should primary and secondary headlines be relative to the body copy?
    • +
    • How should lists (ordered and bulleted) be styled?
    • +
    +
  • +
  • +Imagery specifications. Like typography specifications, these considerations should be made carefully, as they affect the feel of the site. Specifically: +
      +
    • How should text wrap around an image? Should the image float to the left, with copy wrapping around the right? Float to the right, copy on the left? Leave it up to the user?
    • +
    • If an image is floated to the left or right, how much space should be between it and the text?
    • +
    +
  • +
+Documentation +
    +
  • +Approved content documents. Personally I prefer a single content document per static page that needs to be created, and a content sample for any dynamic pages that will need to be created. If there are any repeating elements in the design that require content, I'd enjoy getting a document that contains their text as well. Lastly, if there are any dynamically generated content items -- for example, a list of latest posts -- I'd like a document that shows me how these should be formatted. This will help me: +
      +
    • Make sure I've accounted for the pieces of the content and how they fit together.
    • +
    • Know that all content has been considered, reviewed and approved, so there shouldn't be too many surprises later.
    • +
    • Reconcile the content documents with any other documentation I've received.
    • +
    +
  • +
  • +Functional spec document. Let me know what should happen when someone fills out a form. Let me know how the items in the sidebar should be gathered. Let me know where that video's going to come from. Basically, anytime I ask "Where does that come from?" or "What does this do?", the functional spec should answer that question. While a functional spec should be initiated long before the design is complete, it is good to review the design and make sure those questions are answered. A functional spec should also include a sitemap.
  • +

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/object-literals.html b/www/blog/object-literals.html new file mode 100644 index 0000000..08ea1a0 --- /dev/null +++ b/www/blog/object-literals.html @@ -0,0 +1,209 @@ +Using object literals for flow control and settings

rmurphey adventures in javascript

Using object literals for flow control and settings

I got an email the other day from someone reading through jQuery Fundamentals -- they'd come across the section about patterns for performance and compression, which is based on a presentation Paul Irish gave back at the 2009 jQuery Conference in Boston.

+

In that section, there's a bit about alternative patterns for flow control -- that is, deciding what a program should do next. We're all familiar with the standard if statement:

+

{% codeblock lang:javascript %}
function isAnimal(thing) {
  if (thing === 'dog' || thing === 'cat') {
    console.log("yes!");
  } else {
    console.log("no");
  }
}
{% endcodeblock %}

+

What stumped the person who emailed me, though, was when the same logic as we see above was written like this:

+

{% codeblock lang:javascript %}
function isAnimal(thing) {
  if (({ cat : 1, dog : 1 })[ thing ]) {
    console.log("yes!");
  } else {
    console.log("no");
  }
}
{% endcodeblock %}

+

What's happening here is that we're using a throwaway object literal to express the conditions under which we will say a thing is an animal. We could have stored the object in a variable first:

+

{% codeblock lang:javascript %}
function isAnimal(thing) {
  var animals = {
    cat : 1,
    dog : 1
  };

  if (animals[ thing ]) {
    console.log("yes!");
  } else {
    console.log("no");
  }
}
{% endcodeblock %}

+

However, that variable's only purpose would be to provide this one lookup, so it can be argued that the version that doesn't bother setting the variable is more economical. Reasonable people can probably disagree about whether this economy of bytes is a good tradeoff for readability -- something like this is perfectly readable to a seasoned developer, but potentially puzzling otherwise -- but it's an interesting example of how we can use literals in JavaScript without bothering to store a value in a variable.

+

The pattern works with an array, too:

+

{% codeblock lang:javascript %}
function animalByIndex(index) {
  return [ 'cat', 'dog' ][ index ];
}
{% endcodeblock %}

+

It's also useful for looking up values generally, which is how I find myself using it most often these days in my work with Toura, where we routinely branch our code depending on the form factor of the device we're targeting:

+

{% codeblock lang:javascript %}
function getBlingLevel(device) {
  return ({
    phone : 100,
    tablet : 200
  })[ device.type ];
}
{% endcodeblock %}

+

As an added benefit, constructs that use this pattern will return the conveniently falsy undefined if you try to look up a value that doesn't have a corresponding property in the object literal.
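
In other words, using the getBlingLevel example from above, a lookup that misses just quietly hands you back nothing to act on:

{% codeblock lang:javascript %}
getBlingLevel({ type : 'phone' });  // 100
getBlingLevel({ type : 'watch' });  // undefined

if (!getBlingLevel(device)) {
  // fall back to some sensible default behavior
}
{% endcodeblock %}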

+

A great way to come across techniques like this is to read the source code of your favorite library (and other libraries too). Unfortunately, once discovered, these patterns can be difficult to decipher, even if you have pretty good Google fu. Just in case your neighborhood blogger isn't available, IRC is alive and well in 2011, and it's an excellent place to get access to smart folks eager to take the time to explain.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/objects-as-arguments.html b/www/blog/objects-as-arguments.html new file mode 100644 index 0000000..11f819c --- /dev/null +++ b/www/blog/objects-as-arguments.html @@ -0,0 +1,333 @@ +Objects as Arguments: Where do you draw the line?

rmurphey adventures in javascript

Objects as Arguments: Where do you draw the line?

I was reviewing some code last week and came across a snippet that looked a lot like this:

+

{% codeblock lang:javascript %}
var someObject = {
  // ...

  onSuccess : function(resp) {
    // ...
    this.someMethod(resp.token, resp.host, resp.key, resp.secret);
  },

  someMethod : function(token, host, key, secret) {
    // ...
  }
};
{% endcodeblock %}

+

My immediate response was to suggest that it didn't make sense to be passing four separate arguments to someMethod, especially when the arguments were being "unpacked" from an already-existing object. Certainly we could just pass the resp object directly to someMethod, and let someMethod unpack it as necessary -- we'd save some bytes, and we'd also leave ourselves some room to grow. "I'm not a big fan of functions that take four arguments," I said in my GitHub comment.

+

To the original author's credit, "because I say so" wasn't sufficient reason to rewrite code that was working just fine, thank you very much. If four arguments was too many, was two arguments too many? Why draw the line at four? Surely the four-argument signature helped indicate to future developers what was required in order for the function to ... function. Right? My hackles momentarily raised, I parried by pointing out that if the arguments were actually required by the function, maybe the function ought to actually check for their presence before using them. Ha! While the original author was distracted by my disarming logic, I fretted over the fact that I use a function that takes four arguments every day: dojo.connect(node, 'click', contextObj, 'handlerMethod'). Ohnoes.
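
For what it's worth, the sort of check I had in mind looks something like this (a sketch, not the actual code under review):

{% codeblock lang:javascript %}
var someObject = {
  // ...

  someMethod : function(token, host, key, secret) {
    // fail loudly if a genuinely required argument is missing
    if (!token || !host || !key || !secret) {
      throw new Error('someMethod requires token, host, key, and secret');
    }
    // ...
  }
};
{% endcodeblock %}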

+

So where do you draw the line? Certainly you could write that dojo.connect call like so:

+

{% codeblock lang:javascript %}
dojo.connect({
  node : node,
  event : 'click',
  context : contextObj,
  method : 'handlerMethod'
});
{% endcodeblock %}

+

This, though, might make you poke your eyes out. It certainly isn't as concise as the four-argument approach, and it makes a lot of things like partial application a lot harder. Clearly there's more to this than "if there's more than four arguments, put them in an object" ... but what are the rules?

+

Optional Arguments

Probably the most compelling reason to use an object is when there are several optional arguments. For example, last fall I was reviewing some code from a potential training client, and I came across this:

+

{% codeblock lang:javascript %}
addBling('#awesome', 'fuchsia', 'slow', null, null, 3, 'done!');
{% endcodeblock %}

+

No one can argue that this is not terrible, and yet every experienced JavaScript developer knows how the developer(s) who wrote it arrived there. At first, the function needed three arguments, and all was good with the world. But then, it seemed like the same function could be used to do another thing by just passing two more arguments -- no big deal, because if those two arguments weren't present, then just the first three would suffice. Five arguments certainly isn't that bad, right? After that, though, things went south: for whatever undoubtedly marketing-department-driven reason, suddenly both the original three-argument case and the later five-argument case needed to receive two more arguments, and these two new arguments were mandatory. Now both cases had seven-argument signatures, and in some cases, two of those seven arguments needed to be null so nothing would break.

+

This case demonstrates the most compelling reason to switch to using an object instead: optional arguments. When the developer discovered that the original, three-argument addBling could be used for the five-argument case as well, it was probably time to refactor:

+

{% codeblock lang:javascript %}
// original
addBling('#awesome', 'fuchsia', 'slow');

// new hotness
addBling('#awesome', {
  color : 'fuchsia',
  speed : 'slow'
});
{% endcodeblock %}

+

Then, the same function could be used while passing it more information about how to behave in the five-argument case:

+

{% codeblock lang:javascript %}
addBling('#omgSoAwesome', {
  color : 'fuchsia',
  speed : 'slow',
  unicorns : 3,
  rainbows : 5
});
{% endcodeblock %}

+

Then, when it came time to add yet more bling, the function signature wouldn't need to change:

+

{% codeblock lang:javascript %}
addBling('#awesome', {
  color : 'fuchsia',
  speed : 'slow',
  timesToBlink : 3,
  alertOnSuccess : 'done!'
});

addBling('#omgSoAwesome', {
  color : 'purple',
  speed : 'fast',
  unicorns : 3,
  rainbows : 5,
  timesToBlink : 9001,
  alertOnSuccess : 'woohoo!'
});
{% endcodeblock %}

+

Extensibility and Future-Proofing

Another case for passing in an object is when you want the flexibility that an object provides, even if your code doesn't require it for now:

+

{% codeblock lang:javascript %}
var Person = function(args) {
  this.firstName = args.firstName;
  this.lastName = args.lastName;
  return this;
};
{% endcodeblock %}

+

For now, you only want to be able to provide the first and last name of the person -- it would work just fine to create a function signature for the Person constructor that took exactly those two arguments, because indeed they are required. On the other hand, though, this is incredibly short-sighted -- while first and last name may be all that you care about now, there's obviously more to a person than those two attributes, and eventually you may want to provide attributes such as age, occupation, etc. Doing this with individual arguments quickly becomes unsustainable. Besides that, though, it also makes assigning instance properties a pain in the ass. By passing an object, we can rewrite the above code as such:

+

{% codeblock lang:javascript %}
var Person = function(args) {
  dojo.mixin(this, args);
  // jQuery: $.extend(this, args);
  return this;
};
{% endcodeblock %}

+

Now -- assuming this is what we want -- we can mix in any settings we provide in the args argument. Dojo, for example, bakes this ability in to anything that inherits from dijit._Widget:

+

{% codeblock lang:javascript %}
var thinger = new my.Thinger({
  title : 'Favorite Animals',
  animals : [ 'Narwhal', 'Lemur', 'Honey Badger' ]
});
{% endcodeblock %}

+

Use Objects for Related Data

An important qualifier here is that all of the properties of an object that we've talked about passing to our Person constructor are related -- they all are saying something about the Person you're creating. What if creating our Person was asynchronous, and we wanted to run a function once our Person was created? In a (contrived) case like that, I think it does make sense to pass in a separate argument:

+

{% codeblock lang:javascript %}
new Person(configObj, fn);
{% endcodeblock %}

+

In this particular example, we still only have two arguments -- we haven't wandered into that muddy realm of four or more. That said, I think this distinction is part of what makes dojo.connect(node, 'click', contextObj, 'handlerMethod') OK: the arguments are four distinctly different types of information. Taken together, they have an almost narrative quality: when this node receives a click, use the context object's handlerMethod. A signature like new Person('Rebecca', 'Murphey', 34, 'web developer', 2 /*cats*/, 2 /*dogs*/) doesn't feel the same as the dojo.connect example -- it's information that's too related to be expressed as independent arguments.

+

Four or More, Time to Refactor?

I think the bottom line here is a) it's complicated, and b) if your function signature has four or more arguments, you should almost certainly consider whether there's a better way to do it. If the arguments are super-related, it may be they belong in an object, so you get the benefit of easy extensibility down the road. If there are optional arguments, you almost certainly want to wrap those in an object to avoid passing null over and over again.
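
One way to get there, sketched here with the made-up addBling from earlier, is to fold whatever options the caller supplies over a set of defaults (dojo.mixin or $.extend both do the merge):

{% codeblock lang:javascript %}
function addBling(selector, opts) {
  // merge caller-supplied options over defaults, so optional
  // arguments never have to be passed as null
  var settings = dojo.mixin({
    color : 'fuchsia',
    speed : 'slow',
    timesToBlink : 0,
    alertOnSuccess : null
  }, opts || {});

  // ... use settings.color, settings.speed, etc.
}
{% endcodeblock %}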

+

Personally, my threshold is actually closer to two arguments -- if I find myself wanting a third argument, I question whether my function is trying to do more than it should be doing -- maybe I should do some pre-processing of the input so I can get away with just passing in two arguments. Every additional argument is an indication of additional complexity, which means an additional opportunity for things to go wrong.

+

Other Considerations

I posed this question to Twitter and got a ton of interesting feedback. Here are some of the highlights that I didn't cover above:

+
    +
  • @raydaly no new nouns is my principle. If unrelated data needs to be passed, diff args.
  • @dadaxl I would pass an obj if I've a dynamic bunch of args containing functions.
  • @sh1mmer omg! Objects for the love of god! No one likes immutable APIs. Just ask @ls_n
  • @MattRogish Rails tends to do required things are named args, optional things are a hash
  • @ryanflorence obfuscation often influences me, objects don't compress as well as multiple args.
  • @getify if more than half of the args are optional...or if there are several boolean params which without names can be confusing
  • @jcoglan When further args are optional, or args.length>3. Need to spot when options merit a refactoring, though.
  • @digitalicarus A combo of sheer length, amount of reuse, if it's an API, and/or if it's designed to be called a variety of ways to a similar end.
  • @BryanForbes If I have to start swapping arguments and type checking, it's time for one object or reworking my function.
  • @myfreeweb I use an object when I start forgetting the order of args ... or there is no logical order like (key, value, callback) at all
  • @zetafleet When many of the arguments are optional or they’re all getting stored or copied directly over to the object.
  • @maravillas I usually don't make an obj just for passing data; if arglist is too long, maybe the function does too much and needs refactoring.
+

Postscript

We ended up leaving the code that spurred this whole conversation exactly as it +was.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/office-hours.html b/www/blog/office-hours.html new file mode 100644 index 0000000..c85d9d8 --- /dev/null +++ b/www/blog/office-hours.html @@ -0,0 +1,123 @@ +Office Hours for Aspiring Speakers

rmurphey adventures in javascript

Office Hours for Aspiring Speakers

Update: Office hours are on hold for now while I settle into a new role at Bazaarvoice.

+

I'm expecting that my 2015 is going to include a bit less speaking than in years past, so I'm hoping I can use some of that newly available time to help new speakers find their way to the stage. To that end, I'm kicking off "office hours" this week: a few slots a week where aspiring and up-and-coming speakers can borrow my ear for a bit to talk about their ideas, their fears, their questions, and their ambitions.

+

This idea isn't mine; I was inspired by a similar effort by Jen Myers, who has been offering mentoring sessions to aspiring speakers since 2013. I'm forever indebted to the folks who helped me get through my first talk, and I've been honored to give a gentle nudge to several other speakers in the years since.

+

If you're interested, you can sign up here. There's no script or agenda, and -- at least to start with -- I'm not going to try to suggest who should or shouldn't sign up. If you think it would be useful to you, go for it! My only ask is that you be seriously interested in giving coherent, informative, engaging talks on technical topics.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/on-conferencing.html b/www/blog/on-conferencing.html new file mode 100644 index 0000000..ae0a9e7 --- /dev/null +++ b/www/blog/on-conferencing.html @@ -0,0 +1,172 @@ +On Conferencing

rmurphey adventures in javascript

On Conferencing

I'm a few hours away from finalizing my slides for the Rich Web Experience in Ft. Lauderdale next month. I'll be presenting on basic tips for refactoring your jQuery; I think it's a decent presentation -- the part of it I've finished, anyway -- and I'll be glad to have it in my collection.

+

By my count, RWX will be the 15th conference or non-trivial event I attend in 2010, including two that I've organized and eight that I've spoken at:

+

Organizer

    +
  • TXJS (Austin, TX)
  • +
  • NCJS (Durham, NC)
  • +
+

Speaker

+

Attendee

    +
  • SXSW (Austin, TX)
  • +
  • Bay Area jQuery Conference (Mountain View, CA)
  • +
  • JSConf US (Washington, DC)
  • +
  • CouchCamp (Walker Creek Ranch, CA)
  • +
  • Dojo Developer Days (Mountain View, CA)
  • +
+

(For what it's worth, and because I was curious enough to count: I've also participated in 11 episodes of yayQuery, written a half-dozen in-depth blog posts, organized at least six local web women meetups, and published an open jQuery training curriculum.)

+

It was sometime in early 2009 that I decided to make a point of speaking at more events. I'd always enjoyed the challenge and the thrill of public speaking, but I also wanted to be part of the solution to the dearth of women speakers in JavaScript land. Something like 18 months later, it seems I might be sort of good at it, if my SpeakerRate is to be believed. This fall I found myself actually turning down requests to speak.

+

It turns out that they forgot to remind me in work-for-yourself school that at the end of the day or week or month, I've still gotta make some money now and then. Putting together presentations and traveling to conferences and meeting the expectations that come along with being a Vaguely Important Person ... it's fucking hard work, and when there's no company to pay you for it, it's also fucking expensive, even if it theoretically brings in work in the long run. While every non-local event I spoke at paid for my hotel and accommodations, I was not otherwise compensated. The events that I organized ended up being pretty much a wash, financially.

+

I spent untold hundreds of hours -- I'm not making that up -- preparing presentations, traveling to conferences, and speaking this year. Looking back at my calendar since late August, when all of this got really and truly insane (most of my speaking has happened since then), the sad size of my business bank account makes stunning sense. We'll leave aside the toll that working all the time and then being gone all the rest of the time takes on one's home life, but suffice to say that the toll is, also, not trivial.

+

Over the last few weeks, it's become pretty clear that I need to take a break. I'm mostly over pre-talk jitters, but these days I find myself thinking "for the love of all that's good, do I really have to get on a plane again?" People ask me about 2011 events and I find myself on the verge of losing my shit, which really isn't fair to anyone. People tell me how they want to be invited to speak at stuff like I am, and I let loose a heavy sigh. People come up to me at events or email me randomly because they think that I'm an "expert" on this thing or that, and can they just ask me a little question? -- and I wonder what, exactly, I have wrought with all of this effort. Worst of all: potential clients ask me about taking on lucrative work, and I must tell them: "Not now, I can't, I'm sorry. I've got this presentation to prepare ..."

+

I might be a terrible person entirely too full of herself, or a drama queen, or whatever else you want to think. You might be certain that if I'd just think about it, there would be lots of efficiencies I could realize, and really I just make this harder than it is. That's OK, and you might be right.

+

I want to be clear that I realize that two years ago I was nobody, and to the extent that I am anybody now, it is largely because I have been afforded so many opportunities to make a name for myself. I am grateful for them. It becomes clear, though, that I've let the pendulum swing way too far.

+

And so as I sit here, about to finish off the last slides of the year, I fantasize about deleting Keynote from my computer. But I also find myself thinking how I'm deciding not to do this thing that I'm kind of good at because I simply can't afford to, and it makes me sad. It kills me to think about missing out on that warm fuzzy it-was-all-worth-it moment when everyone claps at the end. And more than anything, I feel like walking away, for however long, means yet one less woman on stage in a field that is desperate for them, and that makes me saddest of all.

+

There is no point to this post, really, except for me to get some of my thoughts out of my head and for you to know where I've gone if you don't see or hear from me as much. It is, alas, time for me to actually do some of that work that all this effort has brought in. We'll see when I emerge -- maybe I will feel rejuvenated in the new year, who knows! -- and whenever that is, I hope to see you there.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/on-gaining-respect-as-a-front-end-developer.html b/www/blog/on-gaining-respect-as-a-front-end-developer.html new file mode 100644 index 0000000..eedc3d1 --- /dev/null +++ b/www/blog/on-gaining-respect-as-a-front-end-developer.html @@ -0,0 +1,135 @@ +On gaining respect as a front-end developer

rmurphey adventures in javascript

On gaining respect as a front-end developer

Someone wrote me today:

+
Where I work, design is highly valued with the leader of that group being our Creative Director, back end programmers are also highly valued, but front end ... not so much. Partly I think its that I don't toot my horn but I know there are other reasons. At times, my bosses haven't even understood what it is that I do. Back end programmers look down on front end assuming that its trivial or something that should be relegated to compilers.

I was wondering if this is a common thing or more so something that is happening at my particular company, and if you have any advice or pointers on this.
+ +

I thought my response might be worth sharing:

+

I do think this attitude is common but not necessarily the rule. In my experience, I've found that by having a proven value proposition, you can gain converts and respect.

+

Front end developers are in a unique position to improve page performance (perceived and actual) by using best practices such as the YSlow tests. Front end developers are also in a unique position to help develop templating systems and to write thoughtful CSS, both of which help enable the rapid prototyping and rollout of new features. A focus on results and best practices -- demonstrating that you aren't just pushing pixels around -- is the key.

+

Back end developers respect people who think like they do. Be mindful of opportunities for abstraction and reuse. Write object-oriented CSS and JavaScript. Craft solutions that are maintainable and documented. Learn and make use of version control systems. Look for opportunities to participate in developer conversations about new features, and understand what the back end developers are up against. They'll appreciate all of this.

+

Take the time to teach and to learn. Be sure you have at least a passing understanding of the code the back end developers are writing, and leap at opportunities to share your knowledge. I've worked with more than one back end developer who was surprised to discover what all they didn't know about the front end, and through our conversations about how we approached problems, we both learned a lot.

+

Finally: identify opportunities for quick victories, execute on them, and make the results known. Benchmark before and after. Can you reduce the number of HTTP requests on a page, decreasing both the perceived and actual rendering time? Are you keeping your JavaScript out of the <head> as much as possible, preventing pages from stalling while rendering? Can you write JavaScript that is primed for reuse, and demonstrate opportunities for that reuse? Has your carefully crafted CSS allowed the rapid rollout of a new feature? Don't be afraid to tell these stories -- they'll tend to strengthen your position by clarifying the important role the front-end developer plays in a site.

+

Good luck :)

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/on-jquery-large-applications.html b/www/blog/on-jquery-large-applications.html new file mode 100644 index 0000000..fc7b50a --- /dev/null +++ b/www/blog/on-jquery-large-applications.html @@ -0,0 +1,192 @@ +On jQuery & Large Applications

rmurphey adventures in javascript

On jQuery & Large Applications

Update: I’ve written a separate post on the wisdom of rolling your own large application toolkit that incorporates jQuery.

+ +

I’ve been thinking a lot lately about JavaScript applications. As my skills have evolved, I’ve had the privilege of working on more actual applications, and I’ve gotten further and further from clients who want to add a bit of Ajax or bling to an otherwise fairly traditional web site.

+ +

The most interesting applications I work on are client-side intensive: the server is responsible for providing data as JSON to the client, and most everything else — templating, state management, data management, site navigation, and of course user interaction — is left to the client side.

+ +

It’s a lovely way of writing an application. There’s no need for me to touch server-side code; in some cases I work with a server-side developer to decide what the data they send will look like, but in others I just take what an API already provides and make it work. I get to use the same templating framework across projects, regardless of server-side technology, and I can prototype complex interactions before the server side even exists.

+ +

This is a land where HTML, CSS, and JavaScript are almost all you need, and I like it. I’ve become a firm believer in moving giant hunks of functionality that used to belong to the server over to the client. For a variety of reasons, I think it’s clear that this is where most interesting web development is headed, to the extent it’s not already there.

+ +
+ +

This style of building an application changes the front-end development game. In fact, “development” may no longer be an adequate description; we’re moving into the realm of engineering, here. We’re not using JavaScript to add a bit of bling to our sites — a slideshow here, some Ajax there — we’re architecting an application, damnit. We can’t just write some procedural code that binds a bunch of anonymous functions to some events and call it a day; if we do, I can tell you from experience that we’re going to end up with a steaming pile of unmaintainable crap.

+ +

Among a host of questions presented by these sorts of applications, some of the most interesting to me are:

+ +
    +
  • What are the units of functionality that will make up the application?
  • +
  • How will those pieces be organized into units of code?
  • +
  • How will those pieces communicate with each other?
  • +
  • How will dependencies between components be expressed and managed while adhering to the principle of loose coupling?
  • +
  • How will components manifest themselves in the DOM? Do they need to?
  • +
  • How will we persist data across URL and page loads?
  • +
  • How will we manage communication with the server?
  • +
  • How will we make sure users only see the data they’re allowed to see?
  • +
+ + +

At the risk of making a broad generalization, this isn’t the way today’s average JavaScripter learned to think. The mantra of jQuery, the most popular JavaScript library on the internets, is “get some elements, do something with them” — perfectly terrible preparation for analyzing an application from a perspective other than the DOM. And, IMHO, therein lies a tremendous problem.

+ +
+ +

As more and more application logic moves to the browser, I’m eager to see the JavaScript community rise to the challenge, but instead it feels like the opposite is happening. People with little understanding or appreciation of these questions are taking on projects that demand these questions be answered. The result is a land of fragile code that gets the job done while giving the finger to the next developer; a land of code so tightly coupled, so deeply beholden to the DOM, so blatantly not reusable or extensible or maintainable as to render every subsequent commit a complete crapshoot, as liable to cripple the application as not. The viability of the project is threatened, and so is the reputation of JavaScript.

+ +

We are better than this. JavaScript, even that old-fashioned browser kind, is a language worthy of respect, not a thing to be dreaded. But — and here’s the sentence I have struggled 10 months to realize and an hour to write: in order to prove that we are better than this, we must make abundantly clear to the budding developers, to the project managers, to the enterprises, to anyone intending to build a remotely complex JavaScript application, that there’s more to JavaScript than jQuery. The questions are bigger, the answers more complex, and the relevant skills, alas, a bit harder to come by.

+ +

We have to make clear that, in fact, jQuery is but a hammer. When it comes to building these intensively client-side applications, we’re talking about building skyscrapers, for god’s sake. The problems solved by a hammer are the least of our concerns.

+ +
+ +

It was just a few months ago that I gave a presentation on building large jQuery applications. I emphasized jQuery’s role as strictly a DOM and Ajax tool, and demonstrated a few other tools — John Resig’s simple inheritance, James Burke’s RequireJS dependency management and build tool, Jan Lenhardt’s mustache.js — that one would want to bring to the table for such an undertaking.

+ +

But to what end do we assemble said hodgepodge of tools? Is it just so we can continue to “use jQuery”?

+ +

jQuery’s API is, indeed, dead-simple, but we are smart people! We are building skyscrapers! When it’s time to write a complex application, and we need all of these things that jQuery doesn’t offer, can we not learn to use another hammer — learn that dojo.place('<div>I am new!</div>', oldDomElement, 'last') means the same thing as $('<div>I am new!</div>').appendTo(oldDomElement) — if learning it gives us access to legions more functionality than jQuery even aspires to provide?

+ +

Do we assemble this hodgepodge because finding jQuery developers is perceived as an easier task than finding practitioners of another library, even though someone saying they “know jQuery” is little indication that they will know how to work with the assembled solution?

+ +

Do we do it for the plugin ecosystem — full of code of varying quality and maintenance — even though many of the large application needs addressed by those plugins are addressed by other libraries as well, and sometimes better?

+ +

And when we do it, when we assemble this collection of tools ourselves, what risks are we accepting? What price will we pay down the road to maintain three or five or 10 different pieces from three or five or 10 different authors, with different release cycles, no guarantee of compatibility or maintenance, and no central project thoughtfully considering their future?

+ +
+ +

I’ve wrestled with these questions for months, agonizing during sleepless early-morning hours over how to advise clients on the answers. I’m the co-host of yayQuery, a contributor to the jQuery Cookbook, and, I’ll venture to say, a decently respected member of the jQuery community. I did not arrive at this conclusion lightly, and I have few illusions it will be well-received, or even heeded.

+ +

But I’ve grown weary of people championing a tool that simply does not answer the big questions I see in project after intensively client-side project. I’ve grown weary of those same people dismissing tools that answer those questions handily and have been answering them for a while now. I cringe when clients tell me they’ve chosen jQuery because it was “easy,” and then watch them predictably struggle with all of the questions it does not answer. And I’ve found I can’t continue to bite my tongue when people recommend jQuery as an enterprise-grade solution while failing to acknowledge these questions, let alone answer them*.

+ +
+ +

I do not want to see jQuery go away. The simplicity of its API was undeniably instrumental in the rise of JavaScript as a language these last few years. It is a perfect gateway drug, and I greatly enjoy watching people transition from “get some elements, do something with them” to the elegant patterns of JavaScript itself.

+ +

jQuery is an entirely appropriate answer to so many questions, but it falls so short for large applications, forcing you to assemble such a tenuous toolkit of your own, that it simply isn’t a viable answer — or, in my opinion, part of an answer — for large applications. If we hope to continue to gain respect as a community, we ought to admire jQuery’s immense contributions, but we must not be afraid to accept and make very clear its limitations. We do otherwise at our peril.

+ +
+ +

*An aside: To its credit, JupiterIT has put forward JavaScriptMVC, the only substantive attempt I’ve seen at answering these large application questions using jQuery. I applaud them, but fear their efforts will continue to be somewhat isolated without the support and endorsement of the wider jQuery community. If you have read this far and still have your heart set on a jQuery-centric large application solution, you should by all means take a look at JavaScriptMVC.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/on-rolling-your-own.html b/www/blog/on-rolling-your-own.html new file mode 100644 index 0000000..de4e8b7 --- /dev/null +++ b/www/blog/on-rolling-your-own.html @@ -0,0 +1,182 @@ +On Rolling Your Own

rmurphey adventures in javascript

On Rolling Your Own

There’s been a lot of activity around my last post, On jQuery & Large Applications. A number of people have asked me why, exactly, I’m so opposed to using jQuery as part of a roll-your-own solution.

+ +

To answer that, let’s start by looking at (some of) the pieces my ideal large application solution might include:

+ +
    +
  • DOM manipulation tools
  • +
  • Ajax tools
  • +
  • A dependency management and build system
  • +
  • Clear patterns for code organization, such as namespaced modules
  • +
  • An inheritance system, preferably one that offers multiple inheritance, for sharing code across modules and staying DRY
  • +
  • A non-DOM-centric, loosely coupled API for communication between modules
  • +
  • A widget system that makes use of the inheritance system, with lifecycle management (setup/teardown) and templating
  • +
  • A system for maintaining templates separate from JavaScript while interning them into the build to eliminate HTTP requests
  • +
  • A system for abstracting RESTful server communication
  • +
  • For a UI-intensive project, a rich widget system pluggable to arbitrary data sources and designed with an easily-extended API
  • +
  • For an enterprise project, a11y and i18n provisions, as well as clear patterns for file organization
  • +
+ + +

To all of you who have said you can do this yourself, you win. I can’t argue with you without sounding like I’m saying you’re too dumb, and you’re probably not.

+ +

But here’s my question: why? What, exactly — exactly — do you gain by putting all of this together for yourself, rather than using the pieces you need of a toolkit that provides all of this out of the box? Because here are a few things I think you lose:

+ +
    +
  • Integration. Is your abstracted RESTful data API designed to talk to your widget system that’s designed to talk to your template system? That’s hott. Now what about when a new version of one of those components comes out that violates an assumption you made?
  • +
  • Maintenance. I heart the good folks who have put together individual answers to these individual questions, but they have no obligation to continue being the good folks they are, and they certainly have no obligation to do it on any sort of schedule. Remember all those plugins that broke with jQuery 1.4? That sure was fun.
  • +
  • Documentation. I’m going to grant you, right now, that jQuery is one of the best-documented JavaScript libraries out there, hands down. But what about all these other pieces you’re putting together? Especially the ones you really are rolling on your own, like that templated widget thing that communicates so nicely with your abstract data API. There are a wealth of resources for understanding, troubleshooting, and using these pieces in established toolkits. Where will the next developer turn when they have questions about yours?
  • +
  • Experience. Like I said, I get that you’re smart. Possibly smarter than me. That’s cool. But are you smarter than the combined wisdom of a team of people that has been thinking about these questions for years? Are you sure your solution has thought through all the questions they have?
  • +
+ + +

I’ve noticed that, in the conversations I’ve had the last few days, it seems to fall to me to “prove” that a roll-your-own solution that includes jQuery isn’t advisable. Perhaps that’s fair — “you started it!”, you might say, and that I did. But simultaneously, others argue that jQuery never set out to answer these questions, and so it’s not jQuery’s fault that people are trying to use it in ways it wasn’t intended to be used. I have waited in vain to hear a compelling reason why jQuery should be part of a large application solution, to hear why I should recommend a roll-your-own solution that includes jQuery to my clients. The extent of the argument seems to be “because I like it, and it doesn’t force me to think a certain way.”

+ +

No one puts baby in a corner. Got it. But the straw man-ness of this argument has me, literally, chuckling right now. Let’s not confuse a mythical one-size-fits-all solution with a toolkit that provides, well, tools. Tools to do all sorts of things, tools meant to work together, tools developed and tested and maintained by a whole big team of smart people, tools that are, actively, being used in really frigging big, really frigging enterprisey applications.

+ +

I very purposefully didn’t propose a particular alternate solution in the original post, but it’s hardly a secret that my personal favorite, of late, has been Dojo. Not because it purports to solve every problem or prescribes how to solve them, but because it gives me so many tools to use to solve a given problem. Time and again I find that “Dojo already did that” — they already wrote the tool I’m wishing I had. Now I don’t have to write it, and, perhaps more importantly, I know it was written to work with all of the pieces I’m already using, and when I use it I’m not risking duplication of code or a lack of testing, maintenance or support. Win.

+ +

But let’s be very clear: no one’s forcing me to use that component! No one is forcing me to do things a certain way, any more than jQuery is “forcing” me to think of my application entirely in terms of the DOM. I can write my own component if I want, or use someone else’s if I want, or change it a bit if I want! For example, on a current project I pulled in mustache.js because the project had a lot of templates that had already been written to use it. The brilliant thing, though, was that integrating mustache.js into dijit._Templated instead of the standard templating system was trivial. That component, and all the others in Dojo and Dijit, are architected explicitly not to be one-size-fits-all. They provide a rock-solid base for large application development, for getting up and running quickly using a bevy of ready-made solutions, but also provide so many extension points that you can turn those solutions on their head if you want or need.

+ +

Garann Means, whose blog you should be reading, took a bit of issue with my original post in model-view-controller and comfy clothes.

+ +

I do agree that it benefits everyone to be working in the same setup and making use of tools that have been vetted by geniuses whose entire job is to create such things. But I’m dubious about any approach which comes too close to promising one size fits all. If you’ve ever sewn a dress, you understand that one size fits all is technically possible, but some people are going to be left with a lot of excess while others will scarcely be able to breathe.

+ +

Carrying on that metaphor, these pieces provided by Dojo — or any other comprehensive toolkit, for that matter — are but starter patterns, and thread, and scissors, and pins, and a sewing machine, and OK I’m stretching the metaphor, now, but my point is they’re definitely not finished one-size-fits-nobody garments. On the other hand, if I decide to use jQuery in a large application, it can feel like I’ve been given a black marker and some of that crinkly brown paper, and now it’s up to me to draw a pattern and then come up with all those other pieces, too. Intellectually interesting and pleasingly crafty, perhaps, but not particularly efficient, sustainable, repeatable, or maintainable.

+ +

So again I ask, in all seriousness and in hopes of fostering a good discussion: Why? jQuery provides you with DOM, Ajax, and event management tools, but little else. There are tools designed for building large applications, designed to provide all of the pieces I want and so many more it’s not even funny, and they provide you with DOM, Ajax, and event management tools, too. What’s the compelling case for rolling your own solution that includes jQuery instead?

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/on-speaking-at-the-2009-jquery-conference.html b/www/blog/on-speaking-at-the-2009-jquery-conference.html new file mode 100644 index 0000000..2f3339c --- /dev/null +++ b/www/blog/on-speaking-at-the-2009-jquery-conference.html @@ -0,0 +1,134 @@ +On speaking at the 2009 jQuery Conference

rmurphey adventures in javascript

On speaking at the 2009 jQuery Conference

One of my personal goals for this year was to start being part of the solution to the dearth of female speakers at tech events. Though I’ve talked at a couple of smallish local events over the past few months, this past weekend I got to do it in a big way: I presented a talk on using objects to organize your jQuery code to an audience of around 100 people, more by far than I’ve ever spoken to before.

+ +

[This post isn’t so much about the talk itself as my first experience with talking at a conference. If you’re interested in the talk, I encourage you to check out the slides, links, and code at the link above.]

+ +

I decided I wanted to try to talk at the jQuery conference after I saw the initial very smart, very male speaker lineup. I submitted my talk based on an article I wrote earlier this year, and by the time it was all said and done, mine was the second most popular topic and I was slated to have 30 minutes in “the big room.”

+ +

There is something sort of out-of-body about that moment when I am standing in front of a roomful of people right before I talk — I had it when I gave my first Refresh talk, when I taught my first jQuery class, when I spoke at my first BarCamp RDU, and yet again this weekend. For that moment, in my head, I am a complete and utter case, and can’t quite fathom that I thought this was a good idea. And then I start talking, and then it is OK. And then when it’s over, people clap, and I like that part.

+ +

Back when I set out to start speaking more, I decided to take an improv class. For six weeks, we practiced being spontaneously funny, and at the end, we got up on stage in front of a bunch of strangers and tried to do it for real. Knowing what that feels like — what it feels like to run up the aisle like you’re excited when really you’re terrified because you’ve never done this before and in real life you sit at a desk all day and talk to no one and what were you thinking? — makes the thought of talking to a bunch of strangers about what you actually know how to do seem like a completely reasonable thing.

+ +

My experience this weekend was nothing short of excellent — people I barely knew rallied around me throughout the weekend to help me improve my presentation (most notably Chris Williams, organizer of JSConf, to whom I owe many thanks for all the images — especially the Liger). The audience graciously tolerated the part in the middle where I had to leave the podium to (very publicly) blow my nose. People asked great questions, and audience members gently pointed out things I might want to rethink. With the exception of one creepy off-the-wall comment about my “fine-boned features,” the reaction was overwhelmingly positive.

+ +

Reliable sources told me that of 300 attendees, approximately 282 were men. I was the only woman to submit a talk. So this is the part where I encourage other women to do the same. I think women, on the whole (of course there are exceptions), are way more inclined than men to think they aren’t good enough speakers, that they don’t know a topic well enough to tell it to other people. Two truths: one, the speaking skills of the speakers I’ve seen have been all over the map; two, you’d be surprised how much you actually know about a topic, especially given the right audience. Go speak at a small event — a local meetup, a Refresh, even a lunch-and-learn at your office. Get to know the people who do speak at events, and discover that they’re people just like you. Go out on a limb and try something that’s

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/patterns-for-dry-er-javascript.html b/www/blog/patterns-for-dry-er-javascript.html new file mode 100644 index 0000000..b6c38af --- /dev/null +++ b/www/blog/patterns-for-dry-er-javascript.html @@ -0,0 +1,280 @@ +Patterns for DRY-er JavaScript

rmurphey adventures in javascript

Patterns for DRY-er JavaScript

I came across a little code the other day that reminded me I've been meaning to write about JavaScript patterns I take for granted. The code in question was intended to set the value of some fields in a form when a checkbox was selected; when it was deselected, the same fields were to be emptied. It looked not unlike this:

+ +

{% codeblock lang:javascript %}
// config is defined outside of this snippet,
// and may contain more than the properties
// we care about
$('#myCheckbox').click(function() {
  if (this.checked) {
    $('#field_foo').val(config.foo);
    $('#field_bar').val(config.bar);
    $('#field_baz').val(config.baz);
  } else {
    $('#field_foo').val('');
    $('#field_bar').val('');
    $('#field_baz').val('');
  }
});
{% endcodeblock %}

+

This is a wholly readable bit of code -- there's almost no question what's going on here. On the other hand, it's pretty easy to see the rampant repetition; this code isn't interested in "don't repeat yourself" (DRY). We're calling the same method on every selection we make, and our selections are repeated in both the if and else block. When I saw this code, I had an immediate inclination to rewrite it. Here's what I came up with first:

+ +

{% codeblock lang:javascript %}
// config is defined outside of this snippet,
// and may contain more than the properties
// we care about

$('#myCheckbox').click(function() {
  // note whether the checkbox is checked
  var checked = this.checked;

  // iterate over the keys we care about
  $.each(['foo', 'bar', 'baz'], function(i, v) {
    // find the field for the given key
    $('#field_' + v)
      // and set its value either to the string
      // stored for the key, or to an empty string,
      // depending on whether the checkbox was checked
      .val(checked ? config[v] : '');
  });
});
{% endcodeblock %}

+

This looks approximately nothing like the initial code, and without the comments, the code itself would be substantially less readable than the original. The idealistic part of me -- the part that believes people who write JavaScript should understand JavaScript -- says this is an acceptable price to pay. And besides, there's something to be said for explaining the code in a comment that can be stripped by a minifier, rather than explaining the code via the code. In this iteration, we've introduced two patterns for DRY-er code: iterating over an array literal (or, alternately, an object) to achieve repetition without repeating ourselves, and using the ternary operator in place of an if/else statement when the simplicity of our logic allows it. The array literal serves as a list of the fields we care about. When our checkbox is clicked, we iterate over this list, build up a selector for each item in the list, make our selection, and then set the field value using a ternary operator (http://stackoverflow.com/questions/1788917/javascript-ternary-operator). We've gone from 11 lines of code to six, with the added bonus that we have to do a lot less typing if we need our checkbox to affect more fields.

(A side note: Is this premature optimization? I'd argue no, if you've learned to see these patterns before you start writing code. Once you learn how to spot these patterns in a requirement, writing code that embraces them can actually be easier than writing code that takes a more "literal" approach to the problem. For example, what if the checkbox affected 20 other fields instead of one? You'd undoubtedly find yourself copying and pasting code if you took the more "literal" approach to the problem, and that would be your first clue that you were doing something inefficiently.)

The great thing about using a pattern like this is that it rapidly exposes the actual meat of what you're doing, and makes refactoring far less painful. I also find that it helps me see opportunities for reuse that I might not have spotted in the more literal version of the code. Let's say we're feeling all proud of ourselves for DRYing out our code using clever JavaScript that only super-smart people can read. Now there's another checkbox that needs similar behavior, but it's going to use a different config object and a different list of fields. No problem! You've already written this code, so you can just copy and paste it and then change what's different. Sweet. Er ... suddenly you're not looking so DRY after all. This is when another pattern comes into play: creating a function that returns another function with certain variables already baked in (that is, creating a closure). We'll execute this creator function and then use the function it returns in place of the anonymous function we were using previously when we bound to the click event.

+ +

{% codeblock lang:javascript %}
// handleClick accepts a config object
// and a makeSelector function; it returns
// a function that can be bound to
// a click event, using the config object
// and the makeSelector function to react
// appropriately to the click
var handleClick = function(fields, config, makeSelector) {
  return function() {
    var checked = this.checked;

    fields && $.each(fields, function(i, v) {
      // build the selector using the provided
      // makeSelector function
      $(makeSelector(v))
        // set the value using the
        // config object, depending
        // on whether the checkbox
        // is checked
        .val(checked ? config[v] : '');
    });
  };
};

$('#myCheckbox').click(
  // use handleClick to create a function
  // that has these variables baked in;
  // pass the created function as the
  // click handling function
  handleClick(
    ['foo', 'bar', 'baz'],
    myCheckboxConfig,
    function(field) { return '#field_' + field; }
  )
);

$('#myOtherCheckbox').click(
  handleClick(
    ['bim', 'bar', 'bop'],
    myOtherCheckboxConfig,
    function(field) { return 'input[name="' + field + '"]'; }
  )
);
{% endcodeblock %}

+

By creating a function that returns a function, we can isolate what's different about the two different handlers while centralizing the pieces that are the same. If your event handling function was slightly less trivial than this one, or if you were binding to five different checkboxes instead of two, the benefit of consolidating the code would be even more substantial.

JavaScript offers plenty of patterns for writing DRY-er code; it's important to learn to both recognize and use them. It's also important to recognize when you're writing un-DRY code in the first place -- copying and pasting code is one crystal-clear indicator, but others are more subtle and you may not identify them on the first go-round.

For example, take these two functions; each receives a list item as its only argument and returns either the next or previous list item, returning to the beginning or end of the list if there is no next or previous item.

+ +

{% codeblock lang:javascript %}
var getNextItem = function($item) {
  return $item.next().length ?
    $item.next() : $items.first();
};

var getPrevItem = function($item) {
  return $item.prev().length ?
    $item.prev() : $items.last();
};
{% endcodeblock %}

+

This felt repetitive when I first wrote it, but I couldn't quickly come up with a single function that would work. A little thinking about it, though, led me to this single function which gets a second argument: direction. That argument is used to decide whether to run the item's next or previous method, and then whether to run the item's first or last method. Besides combining two functions into one, it also eliminates calling next or prev twice inside each function.

+ +

{% codeblock lang:javascript %}
var getItem = function($item, direction) {
  // run either .next() or .prev() on the item
  var $returnItem = $item[direction]();
  return $returnItem.length ?
    $returnItem :
    // wrap around: .first() for 'next', .last() for 'prev'
    $items[(direction == 'next') ? 'first' : 'last']();
};
{% endcodeblock %}
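
For clarity, here's a minimal usage sketch; it assumes $items is a cached jQuery collection of the list items and that the active item is marked with a (hypothetical) current class:

{% codeblock lang:javascript %}
// hypothetical setup: $items is the cached collection of list items,
// and .current marks the item we're navigating from
var $current = $items.filter('.current');

var $next = getItem($current, 'next'); // wraps to the first item at the end
var $prev = getItem($current, 'prev'); // wraps to the last item at the start
{% endcodeblock %}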

+

Learning about patterns and then discovering opportunities to use them is one of the more pleasing parts of my job. I hope this helps you identify some of those opportunities for yourself :)

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/pausing-office-hours.html b/www/blog/pausing-office-hours.html new file mode 100644 index 0000000..61bc7fa --- /dev/null +++ b/www/blog/pausing-office-hours.html @@ -0,0 +1,123 @@ +Pausing Office Hours

rmurphey adventures in javascript

Pausing Office Hours

I started doing office hours at the beginning of the year; it's been tremendous fun, super eye-opening, and just generally quite rewarding. I won't lie: it's also been a lot of time, especially the first few weeks when I terribly underestimated how many people would sign up.

+

I was out for dinner with a friend tonight and she asked me how it was going, and I had to pause for a second when I realized that we might not be sitting there, right then, if she hadn't signed up for a slot. Things like that -- and getting to see, on stage, speakers who trusted me with their talk idea months ago -- make me super-glad that I let (mostly) strangers put 30 minutes on my calendar a couple-few dozen times these last few months. Hopefully I helped a few people along the way too.

+

In the last couple of weeks I've started a new role at Bazaarvoice: after a few months of working on some fairly independent projects, I'm back to leading a team. I used to get a kick out of being the female lead of an otherwise-all-male team; now I'm honored and humbled to get to work with a team where I am one of five women. The ratio isn't quite 50-50, but it's closer than any team I've worked on before. It's also the first lead role I've had where my responsibilities span beyond the front end, a prospect both exciting and daunting.

+

Which is to say: My hands seem a bit fuller than they did back when I started office hours, so for now, it's time to hit the pause button while I focus on my new team. If you're interested in picking up where I left off, hit me up and I'm happy to spread the word.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/planning-a-wordpress-cms-site.html b/www/blog/planning-a-wordpress-cms-site.html new file mode 100644 index 0000000..c6fb82c --- /dev/null +++ b/www/blog/planning-a-wordpress-cms-site.html @@ -0,0 +1,174 @@ +Planning a Wordpress CMS site

rmurphey adventures in javascript

Planning a Wordpress CMS site

There have been plenty of rumblings lately about how WordPress can be used as a content management system, beyond its core competency as a blogging platform. By harnessing the power of pages and subpages, writing custom Page templates, segmenting posts into category-based content feeds, and using handy little plugins like my brother's Page Link Manager, you can do some pretty neat stuff.

+

Lots of people have caught on, including small advertising and marketing agencies that want to be in the content and design business while staying away from programming. In my work with some of those agencies lately, I've often found that there's a gap between having the idea to use WordPress as a CMS and knowing the inner workings of WordPress that allow it to be used as a CMS. Since I often find myself needing to explain the building blocks of WordPress and how to use them as part of a lightweight CMS, I thought I should write my thoughts down. My goal here is not to get into the nitty gritty of actually implementing a WordPress-as-CMS site; I leave that to skilled WordPress developers ;). Rather, it's to give non-technical people an overview of how WordPress works so they can make the most of it during the site planning process.

+

Note: In doing my homework for this post, I discovered lots of posts that mentioned workarounds for earlier versions of WordPress. Many of these things -- such as setting a particular Page to be your homepage -- are built into the newer versions of WordPress. Make sure if you're reading a WordPress as CMS tutorial that you are reading one for the current version.

+

Content building blocks

+

WordPress offers a few building blocks for content management: posts, categories, pages (better referred to as sections), and custom fields.

+

    +
  • +Posts and Categories are used to organize related content. By assigning a Post to a Category, it can be grouped with other Posts in that Category. For example, you could have a "team members" Category and a "news" Category, and then easily display all team members in one location, and all news items in a completely separate location. You could even have a "blog" Category, which would allow you to have a blog on the site while still using Posts to manage other content as well. (Besides allowing this type of organization, Categories also automatically create Category Archives -- an easy way for your users to browse all content related to a Category, if you choose.)
  • +
  • +Pages are used to manage static content, and are used for the site's navigation. They can also display other content items -- both Posts and Pages -- that match certain criteria. (For the sake of this discussion, pages and subpages may be better thought of as sections and subsections; not every "page" (i.e. URL) on a Wordpress site is managed using a Page in the Wordpress admin, but every section and subsection is managed that way. When I'm talking about a Page with a capital "P", I'm referring to the kind that you manage through the WordPress admin; when I'm talking about a page with a lowercase "p", I mean any part of your site that has a unique URL.) It's important to note that, using the Page Link Manager plugin mentioned above, you can easily exclude any Page from the site navigation, which makes them much more powerful for managing pieces of content that don't need to show up in the navigation.
  • +
  • +Custom fields can be added to both Pages and Posts. They can contain all sorts of extra information related to a content item, which can then be accessed whenever the content item is displayed.
  • +
+Theme building blocks

+

The design of a WordPress site is managed using a theme. Themes include a global header, footer and sidebar. They also include a variety of files for managing the design and structure of the content area, depending on the page you are viewing.

+

A very basic theme will include the following content area templates:

+

    +
  • Latest Posts template
  • +
  • Single Post template
  • +
  • Default Page template
  • +
  • Search results template (used for displaying all Posts and Pages containing a term)
  • +
  • Archives template (used for displaying date-, author- and Category-based Post archives)
  • +
+Harnessing the power of custom templates +

+

This is where things get interesting. On a regular WordPress blog, the default Page template will just display some static content that's entered into the Page using WordPress admin, plus the site's header, footer, and sidebar. This is fine for Pages that don't need to pull in any other content, like, say, an About section. When you start using WordPress as a CMS, you need to start doing more creative things with the content area of your Pages by using custom Page templates.

+

For the News Page of a site, you might create a custom template that would include introductory content entered via the WordPress admin, along with the 10 most recent posts in the "news" Category, ordered from newest to oldest, and a link to view all items in the "news" category. For the Home Page of a site, you might create a more elaborate custom template, pulling Posts from a variety of Categories, pulling static content from a variety of hidden Pages, and displaying it all in a variety of ways: full content, titles only, excerpts, etc. You might even make use of custom fields. Do you want to be able to show a thumbnail with each Post in the "video" category, but only when you're showing those posts on your homepage? Add a custom field to those posts with the URL to the thumbnail, then tell your custom Home Page template to look for that custom field and use it to insert an image tag.

+

You could just as easily create a custom field containing the name of a PHP file to be used as a custom dynamic sidebar for that page; the options here are pretty much limited by your imagination (and your developer).

+

Developing the site architecture

+

In planning a site that will use WordPress as a CMS, it's imperative to think of how your content, sections and subsections fit into the WordPress model. It will save you lots of headaches when it comes time to actually develop the site, and lots of calls and emails from your developer trying to figure out what you had in mind. In fact, I strongly recommend getting a skilled WordPress developer involved early in the process, to help you turn your ideas into a viable spec.

+

Here's the basic process I recommend:

+

    +
  • Develop a simple sitemap for the site. Figure out what the sections and subsections are, and how they're organized.
  • +
  • Identify the pieces of content that will appear on each item in your sitemap, and evaluate how they should be managed. Is the content static and self-contained, such as "About Our Company" text? It should probably be managed through a Page. Is the content some kind of list of related items, such as news items or team members? It should probably be managed through Posts and Categories. Is it a secondary sidebar that appears on several pages? Consider using a hidden page if the sidebar is static or a PHP include if the sidebar uses other WordPress content items.
  • +
  • Identify the Categories you'll use to distribute the Posts to their proper place(s). For each category, evaluate whether you'll need to collect information beyond title, body, and author. If so, identify these as custom fields.
  • +
  • Identify the Pages you'll be using on the site -- including Pages that will be hidden in the navigation and used solely for placing static content on multiple pages.
  • +
  • Decide how you will use your header, sidebar and footer to help users navigate your site. Will you provide links to Category archives? Will you allow your visitors to search your site? Will you show subpages of the Page a visitor is on?
  • +
  • Identify which visible Pages will use a default Page template, and which will require a custom template, and identify the types of custom templates that will be required. Ideally, you will not require a separate custom template for every custom Page; look for similarities among the custom Pages, and try to identify the fewest number of custom templates that will do the job.
  • +
+Plan the design based on the architecture +

+

Once you've completed the site architecture, you are ready to actually design the site. For your own sanity, don't try to do too much design before this point. You'll need the information gathered in the site architecture phase to guide you in developing the design document upon which your template developer will base their work; a design document that doesn't take all of these considerations into account is going to lead your developer in circles as they try to make sense of what you had in mind.

+

Before you begin designing, you may want to draw up some wireframes that show the different elements you'll be designing -- header, footer, sidebar, custom page views, single post views, etc. They can guide the design process and be valuable to the developer who implements your design. When you're finished, your design document should include the following:

+

    +
  • General header. Note any ways in which the header should change according to the context in which it's being displayed -- for example, a breadcrumb or an indication of the current navigation item.
  • +
  • General sidebar (or left and right sidebar). Again, note ways in which the sidebar should change according to the context in which it's being displayed.
  • +
  • General footer.
  • +
  • Latest Posts view. If you are going to give your visitors the ability to view your latest Posts, make sure you design a view for this. The latest Posts view will usually include 10 posts; for each Post, you'll usually show the title, author, date, category, body, and comment count.
  • +
  • Single Post view. Ideally the structure of this design will vary little from one Category to another, though the visual presentation may change. This design will likely include the post's title, body, author, date, category/ies, navigation to the next and previous post, a comment submission form, and user comments.
  • +
  • Archive view. This will be used for viewing all of the Posts by Category, month, year, author, etc. This is only necessary if you will give users the option of viewing posts this way. Usually this view will include multiple Posts. For each post, you'll show the title and an excerpt; you may also choose to show date, author and category information.
  • +
  • Search view. This will be used for displaying user search results. It is usually very much like the Archive view: a list of posts, for which you show the title, an excerpt, and any other relevant information.
  • +
  • Default Page view. This will be used for displaying Pages that do not have custom templates associated with them. Usually this will consist of the Page title and body, and potentially a secondary sidebar.
  • +
  • Custom Page views. For every custom view identified in the site architecture phase, create a corresponding custom design.
  • +
+Giving consideration to how a site will be built in WordPress is a tad more complicated than deciding that it should be built in WordPress and then handing it off to a developer; of course, you can do it that way, but it's not exactly the most cost-effective method. Understanding the system, working with a developer from the start, and doing the planning before you dive into design will give you the results you intend, and probably open your eyes to some connections and possibilities that you can then decide how to take advantage of, rather than leaving it up to your developer to see them and decide whether to bring them to your attention. It's a lot to wrap your brain around at first, perhaps, but I've found that the planning pays for itself by smoothing the rest of the process and truly taking advantage of the CMS features built into Wordpress.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/recent-talks.html b/www/blog/recent-talks.html new file mode 100644 index 0000000..f366ec3 --- /dev/null +++ b/www/blog/recent-talks.html @@ -0,0 +1,140 @@ +Recent Talks

rmurphey adventures in javascript

Recent Talks

A post from Alex Russell reminded me that I've given a number of talks in the last few months, and some of them even have video on the internet.

+

I've been ridiculously spoiled to get to travel all over the place these last few months -- San Francisco, New York, Amsterdam, Berlin, Brighton -- and speak at some truly first-class conferences, sharing the stage, sharing meals, and sharing beers with some seriously amazing folks. My recent news means I'll be doing a lot less travel for the next little bit, but I'm ever-so-grateful for the opportunities I've had and the people I've gotten to see and meet these last few months.

+

Writing Testable JavaScript

This is the first talk I've developed that I've managed to give several times in rapid succession: three times in six days, including at Full Frontal, the online JS Summit, and to a group of developers at the New York Times. There's no video yet, but the slides are here, and there should be video soon, I think.

+

+ +

JS Minty Fresh

A fun talk at Fronteers about eliminating code smells from your JavaScript. The best feedback I got afterwards was from an attendee who said they felt at the beginning of the talk like the material was going to be too basic for them, and by the end of the talk, the material was nearly over their head. "I guess that makes you a good teacher," he said. Aw!

+ + +

Rebecca Murphey | JS Minty Fresh: Identifying and Eliminating Smells in Your Code Base | Fronteers 2012 from Fronteers on Vimeo.

+ +

Slides

+

If you like this, you should also check out the screencasts we released at Bocoup earlier this week.

+

Beyond the DOM: Sane Structure for JS Apps

An update of my code organization talk, delivered at the jQuery Conference in San Francisco. It's fun for me to see how my thinking around code organization has evolved and improved since my first, now-almost-embarrassing talk at the 2009 jQuery Conference in Boston.

+ + +

Slides

+

Johnny Five: Bringing the JavaScript Culture to Hardware

This one was from the New York Times Open Source Science Fair, a fun night of about a dozen folks presenting open-source projects at "booths," followed by short talks about open source by Jeremy Ashkenas, me, and Zach Holman. The slides don't necessarily stand on their own very well, but the short version is: use JavaScript to make things in the real world, because it's ridiculously easy and ridiculously fun.

+

+ +

Getting Better at JavaScript

I put this together as a quickie for the Berlin UpFront user group -- it was the first talk I gave with my broken foot, and the last talk I'd give for weeks because I lost my voice a couple of hours later. There's not a whole lot here, but it was a fun talk and a fun group, and a topic that I get plenty of questions about. Again, no video, but here are the slides:

+

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/remixing-trac-with-jquery.html b/www/blog/remixing-trac-with-jquery.html new file mode 100644 index 0000000..43d647f --- /dev/null +++ b/www/blog/remixing-trac-with-jquery.html @@ -0,0 +1,198 @@ +Remixing trac with jQuery

rmurphey adventures in javascript

Remixing trac with jQuery

I love trac, and I've been doing lots of work lately to customize it so our clients can use it to send us requests. With the configurable workflow option offered in version 0.11, it became the perfect way for us to manage and track client requests. By connecting it to the subversion repository we use for version control of client files, we've virtually eliminated the need for back-and-forth emails -- everything is in one place.

+

Since trac is intended more for managing a development project than for managing an ongoing client relationship, I've had to make some changes to the out-of-the-box version. I don't know Python, and I was having a hard time getting a developer to dive in at the level I needed on a non-billable project. Lucky for me, version 0.11 also includes jQuery (version 1.1.3.1). After a couple of chats with a developer and a few "duh" moments, it became clear that I could do most of what I needed without messing around under the hood.

+

The story so far:

+
    +
  • Our clients use trac for internal approval of a request before it gets sent to us -- a content provider can enter a request, which then gets reviewed by a manager. We needed a way to assign this manager to the ticket, without the manager becoming the "owner" of the ticket. Initially, we had content providers entering their manager's email address into the Cc: field, but that didn't go over well. Now, jQuery reuses the existing Assign to menu to build a manager dropdown, with the usernames of our employees removed. When the ticket is submitted, the manager is added to the Cc: field automatically.

{% codeblock lang:javascript %}
// build the new dropdown
// (the <select>/<option> markup was stripped from this archived copy;
// it's reconstructed here from the IDs used elsewhere in the snippet)
var custom_cc_select = '<select id="custom-cc"></select>';
$('#custom-form').append(custom_cc_select);
$('#field-owner option').each(function() {
  var name = $(this).html();
  if ($.inArray(name, excludedManagers) == -1) {
    $('#custom-cc').append('<option>' + name + '</option>');
  }
});

// add the selected manager to the cc field on ticket submit
$('div.ticket form').submit(function() {
  var oldCc = $('#field-cc').val();
  var manager = $('#custom-cc option:selected').html();
  if (oldCc != '') { $('#field-cc').val(oldCc + ', ' + manager); }
  else { $('#field-cc').val(manager); }
});
{% endcodeblock %}

  • The standard trac ticket fields were exactly what we needed, but the labels for them were a little confusing to client users. Plus, the form was ugly. A few lines of jQuery created a new section in the form, moved the fields to that section without their table-based layout, reordered the fields and relabeled them. For example:

{% codeblock lang:javascript %}
$('#field-component').      // get the component dropdown
  remove().                 // remove it from the DOM but maintain access to it
  appendTo('#custom-form'). // put it at the end of the #custom-form div
  before('<label>Request Type</label>'); // put a new label before it (label markup reconstructed)
{% endcodeblock %}

The new form makes more sense to the client users, reduces errors and omissions, and looks better too.

  • For certain client requests, a more complex form is required. Trac offers custom fields, but that solution seemed too limiting. With a few more lines of jQuery, I set up a system that watches the Component dropdown. When certain components are chosen, the standard submission form is hidden, and a new form is put in its place using jQuery's load() method. The replacement content can include HTML, standard form elements and javascript; when the form is submitted, javascript assembles the field values into a well-formatted description. A rough sketch of that setup follows this list.
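
Here's a rough sketch of what that last piece looks like in practice. The component names, form URLs, and container IDs below are hypothetical stand-ins, not the actual implementation:

{% codeblock lang:javascript %}
// hypothetical component names and form URLs
var customForms = {
  'Design Change': '/static/forms/design-change.html',
  'New Content': '/static/forms/new-content.html'
};

$('#field-component').change(function() {
  var url = customForms[$(this).val()];
  if (url) {
    // hide the standard form and load the replacement in its place
    $('#field-description').hide();
    $('#custom-form-container').load(url);
  } else {
    // otherwise, restore the standard form
    $('#custom-form-container').empty();
    $('#field-description').show();
  }
});
{% endcodeblock %}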

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/remote-javascript-with-document-write-is-killing-me.html b/www/blog/remote-javascript-with-document-write-is-killing-me.html new file mode 100644 index 0000000..42a1d1b --- /dev/null +++ b/www/blog/remote-javascript-with-document-write-is-killing-me.html @@ -0,0 +1,130 @@ +Remote Javascript with document.write() is killing me

rmurphey adventures in javascript

Remote Javascript with document.write() is killing me

I have been coming across way too much remote Javascript that uses document.write() to insert its contents. From ad providers to video hosting services, it's common practice to provide a Javascript tag that includes document.write and tell the site owner to just include it in their page where the ad or content should appear.

+

This is a great least-common-denominator approach for folks who don't want to be bothered with understanding how this stuff works. The problem is that when an ad or a video or any other content loaded via remote Javascript needs to appear near the top of the HTML document, before the content, the site user doesn't see any more content while the browser fetches the file. In the case of one service we tried, the initial script tag actually wrote five more remote script tags, each of which requires a separate HTTP request, amounting to a total delay of more than a second before the content appeared. In the meantime, the user was staring at a near-empty page.

+

There are various hacks for getting around this, which involve loading the content low on the page and using (yet more) Javascript to move it to the appropriate place once the page has loaded, but they are hacky and not necessarily foolproof when it comes to ads. My initial experiments with doing this with some ad providers led to multiple ads appearing on the page, or to the whole page being replaced with just an ad.
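
For illustration, the hack usually looks something like the sketch below (the container IDs are hypothetical): the document.write()-based tag sits in a staging container near the end of the body, and its output gets relocated once the page has loaded.

{% codeblock lang:javascript %}
// hypothetical IDs: #ad-staging holds the document.write()-based ad tag
// near the end of the <body>; #ad-slot is where the ad actually belongs
$(window).load(function() {
  $('#ad-staging').children().appendTo('#ad-slot');
});
{% endcodeblock %}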

+

If these services want people to stop complaining that their sites are loading slowly as a result of adding these tags (and perhaps at least looking for solutions that minimize the number of scripts that need to be loaded), it's time that they expose an API that will allow sites to include their script tag at the end of the page's HTML, where it belongs. Then sites could call a method to insert the ad wherever it's needed, once the content has finished loading.

+

For example:

+

{% codeblock lang:javascript %}
ad.appendTo('#my_ad_container');
{% endcodeblock %}

+

I grant that this would require a bit more code from the ad providers (and that their ads would load a little bit later than they do now), but the improvement in user experience would be tremendous, relative to waiting as much as a second or two for the ad — and thus the rest of the page — to load.
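
To make the idea concrete, a provider-side API might look something like this sketch; the global name and ad URL are hypothetical, and a real implementation would also handle sizing, targeting, and so on:

{% codeblock lang:javascript %}
// hypothetical provider script: instead of calling document.write()
// at parse time, it exposes a method that inserts the ad into a
// container whenever the page is ready for it
window.ad = {
  appendTo: function(selector) {
    var frame = $('<iframe frameborder="0"></iframe>')
      .attr('src', 'http://ads.example.com/serve'); // hypothetical ad URL
    $(selector).append(frame);
  }
};
{% endcodeblock %}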

+

--

+

Update: If you're looking for a fix to this problem, also check out John Resig's document.write() rewrite.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/salivating-over-server-side-javascript.html b/www/blog/salivating-over-server-side-javascript.html new file mode 100644 index 0000000..029befc --- /dev/null +++ b/www/blog/salivating-over-server-side-javascript.html @@ -0,0 +1,123 @@ +Salivating over server-side Javascript

rmurphey adventures in javascript

Salivating over server-side Javascript

I came across The End of Web Frameworks in my dzone RSS feed this morning, and it echoes a thought that runs through my brain as I get more and more comfortable with jQuery: who needs the server when you've got Javascript? Wouldn't it be great if the server just handed out data, and the client figured out what to do with it? After talking directly to the DOM for the last several months, a trip back to PHP left me utterly disoriented for a few minutes -- why, exactly, couldn't I take the anchors from that unordered list I'd just built 50 lines ago and reuse their hrefs for something else?

+

As a couple of commenters pointed out, it's not that easy -- there's still graceful degradation (the yin to progressive enhancement's yang?) to worry about. It's easy to say "oh, I don't have to worry about that," but until the day that's actually true, Javascript on the client side simply can only supplement good, solid, server-generated markup.

+

That's why stuff like this discussion about server-side jQuery has me nearly salivating. I'm imagining getting JSON data from a simple bit of server-side code that's not encumbered by messy markup and then writing some Javascript to build the DOM of my dreams. The added bonus would be server-side code that could serve that clean, pretty JSON data up to anyone I wanted to have it.

+

Of course, that DOM would still need to use semantic HTML and not rely on client-side Javascript for anything -- and not just because a visually or otherwise impaired person might drop by someday and want to use my site. Progressive enhancement is good for SEO, too, (and so are web standards in general). One only wishes that would lead SEO-focused companies to fall all over themselves in pursuit of it :)

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/seeking-a-web-designer.html b/www/blog/seeking-a-web-designer.html new file mode 100644 index 0000000..4faaeb4 --- /dev/null +++ b/www/blog/seeking-a-web-designer.html @@ -0,0 +1,126 @@ +Seeking a web designer

rmurphey adventures in javascript

Seeking a web designer

When I wrote about a 37signals post about the pitfalls of using Photoshop for web design, I didn't know that just a couple of weeks later I'd be helping DailyStrength write a job posting for a senior web designer. I thought a lot about what they'd written, and about the skills I'd want the person who would be working so closely with me to have.

+

We are a health-based social networking site, with users who are largely social networking novices; we are seeking a designer who can help us develop a consistent visual language for the site that will be easy for these users to understand, while at the same time not limiting our power users. This job entails helping us envision and develop new features, as well as refining the current site.

+

We have a framework that allows rapid prototyping of new features, letting developers develop and designers design. That means it’s imperative that you be comfortable working with HTML and CSS, so you can take a rough-but-working prototype and give it the polish it needs. You’ll need to be at least as comfortable in Firebug as you are in Photoshop, and be able to communicate clearly about technical aspects of your designs. You won’t need to write any Javascript, but you should be very capable of understanding how and when it fits into an application.

In all of my jobs, from bartending to desktop publishing to my current role as a front-end engineer, I've always valued it when the person giving me direction has had some understanding of my job. I didn't care for bar owners who overlooked lousy tippers, and I didn't care for the art director I worked for who thought one font worked as well as the next when it came to web sites. So when I found out we were hiring, I jotted down some notes about what I was looking for -- specifically, a designer who knew Firebug and Photoshop, and who could take my lightly-skinned markup and polish it to perfection rather than giving me a picture of how to polish it to perfection for them.

+

We've spent the past several weeks developing a framework for our site that will truly separate content and business logic from our presentation, leaving me largely free from mucking around in PHP sprinkled with HTML. Further, we've spent a lot of time thinking about the core pieces that make up our site, and figuring out how to, say, filter, search and present lists of elements -- any elements -- using largely the same base code, markup and CSS. I'm excited to soon be working with a designer who can embrace that framework and work on further standardizing the site -- a task that, til now, has been made difficult by how intertwined our markup was with our code.

+

If you're interested, pop over to the 37signals job board and check out the rest of the posting.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/selectors-in-jquery.html b/www/blog/selectors-in-jquery.html new file mode 100644 index 0000000..2d682b2 --- /dev/null +++ b/www/blog/selectors-in-jquery.html @@ -0,0 +1,118 @@ +Selectors in jQuery

rmurphey adventures in javascript

Selectors in jQuery

Just a note to self: Quick Tip - Optimizing DOM Traversal. Avoid bare classname selectors like $('.foo') when possible; qualify them with an element type or, ideally, the ID of a parent element: $('div.foo') or $('#bar .foo').
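
In code, the difference looks like this (hedging a bit, since selector performance varies by jQuery version and browser):

{% codeblock lang:javascript %}
// broad: every element may need to be checked for the class
$('.foo').hide();

// narrower: limit the search by tag name, or better, by a parent ID
$('div.foo').hide();
$('#bar .foo').hide();
{% endcodeblock %}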

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/skipping-photoshop-for-web-design.html b/www/blog/skipping-photoshop-for-web-design.html new file mode 100644 index 0000000..0bb5aaa --- /dev/null +++ b/www/blog/skipping-photoshop-for-web-design.html @@ -0,0 +1,124 @@ +Skipping Photoshop for web design

rmurphey adventures in javascript

Skipping Photoshop for web design

Occasionally the folks at 37signals write a blog post that seems to channel thoughts that I've just never put to paper [blog]. This week they did it twice, with two posts about how they prefer to skip Photoshop and work with HTML and CSS when it comes to designing a web site.

+

I've found it incredibly pleasant to work with designers who have demonstrated knowledge and skill when it comes to CSS and HTML -- at my last job, our lead designer was also the guy you went to if you were stumped by a CSS issue. Knowing the tools means that production considerations become an active part of the design process: you don't create an element without giving some thought to how it will be built. In the best of worlds, you weigh the cost of building the element -- in time, in HTTP requests, in excessive markup -- against the benefit of having it on the site, and make educated decisions about what is "worth it."

+

I've never found Photoshop to be an adequate tool for mocking up a design -- it just doesn't "think" the way the web does, it just doesn't observe the constraints that the web imposes, and it just doesn't make accessible the parts of CSS that are truly powerful.

+

While I am no designer, I play one sometimes for freelance clients. When I haven't wanted to go straight to code, I've found that another Adobe product, InDesign, can be incredibly powerful. It's an application intended for print layout (and some of its related assumptions can be a little frustrating, such as its lack of a hex-based color picker/setter), but generally it is exponentially closer to the realities of the web than Photoshop. It thinks in boxes, not pixels, and offers paragraph-, character- and object-level stylesheets that can ... cascade! Just like with CSS, you can rapidly and dramatically alter the appearance of a page once you establish the base, semantic elements of it. It steers you to think about a page's structure, not just its presentation.

+

I'm hoping that the number of web designers who think that Photoshop is a sufficient tool will dwindle, but I'm not sure what the route is to get to that point. Photoshop's inadequacy in this regard is so clear to me -- and to people like the 37signals folks -- that I'm not sure how you persuade someone who still believes otherwise.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/software-without-which-i-d-be-hard-pressed-to-do-my-job.html b/www/blog/software-without-which-i-d-be-hard-pressed-to-do-my-job.html new file mode 100644 index 0000000..5fe8087 --- /dev/null +++ b/www/blog/software-without-which-i-d-be-hard-pressed-to-do-my-job.html @@ -0,0 +1,144 @@ +Software without which I'd be hard-pressed to do my job

rmurphey adventures in javascript

Software without which I'd be hard-pressed to do my job

    +
  • +vim A sophisticated text editor with no GUI -- everything is text-based, and all commands are issued from the keyboard. Frightening at first, and then so ridiculously efficient that nothing else makes sense.
  • +
  • +Firefox Great in and of itself, and the ability to add extensions makes it priceless.
  • +
  • +Firebug One of said Firefox extensions -- as Marcus said recently, it's hard to imagine how Web 2.0 would have happened without it. The Inspect function, the console, the Net view ... without it I'd be utterly lost.
  • +
  • +Filezilla I booted up an old computer of mine and found an FTP client I think I had to either pay for or steal, and it wasn't half as good as this. I wish it had bookmarks for moving around servers easily ... alas.
  • +
  • +Pidgin All my instant messaging personas in one application, with pounces, fine-grained status control and logging. Plus: file transfers seem to work better now. Woo.
  • +
  • +cygwin In an alternate universe, I get to use a Mac. Until that alternate reality arrives, cygwin gives me the power of the terminal in Windows. These days, I always have at least one cygwin window open most of the time, because sometimes, things are just easier on the command line.
  • +

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/solution-for-google-map-contents-not-showing.html b/www/blog/solution-for-google-map-contents-not-showing.html new file mode 100644 index 0000000..7e67d10 --- /dev/null +++ b/www/blog/solution-for-google-map-contents-not-showing.html @@ -0,0 +1,120 @@ +Solution for Google Map contents not showing

rmurphey adventures in javascript

Solution for Google Map contents not showing

I just spent more time than was reasonable trying to figure out why some Javascript I was using to show a Google map on one site wasn't working to show the map on another site. The map controls and copyright were rendering, but there was no actual map.

+

Turns out the problem wasn't with my Javascript; I had a CSS rule that was setting overflow:hidden on all divs within a container, and the divs created by the Google map API were getting that rule too. No good. I wrote a new CSS rule to set overflow:visible on all divs inside the map container -- problem solved.
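
If patching it from script is ever more convenient than a stylesheet rule, the same fix can be applied with a jQuery one-liner; the #map container ID here is hypothetical:

{% codeblock lang:javascript %}
// override the inherited overflow:hidden on the divs the Google Maps
// API creates inside the (hypothetical) #map container
$('#map div').css('overflow', 'visible');
{% endcodeblock %}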

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/solved-ajax-returns-bad-results-in-internet-explorer.html b/www/blog/solved-ajax-returns-bad-results-in-internet-explorer.html new file mode 100644 index 0000000..079967d --- /dev/null +++ b/www/blog/solved-ajax-returns-bad-results-in-internet-explorer.html @@ -0,0 +1,151 @@ +Solved: AJAX returns bad results in Internet Explorer

rmurphey adventures in javascript

Solved: AJAX returns bad results in Internet Explorer

Note to self: IE6 likes to cache AJAX requests, and this can be a bad thing if other data on the page that will affect the result of the request has changed.

+

You'll see this if you have two fields on a page that both contribute to a result, but only send them to the server when one of them has changed.

+

Let's say field1 and field2 get multiplied on the server side, and both start out with values of 1. So, in IE6:

+
    +
  • Set field1 to 1000: Response is 1000.
  • +
  • Set field1 to 100: Response is 100.
  • +
  • Set field2 to 123: Response is 12300
  • +
  • Set field 1 back to 1000: Uh oh! Response is 1000 again! +
  • +
+ +

IE6 has decided that the result from the first time you set field1 to 1000 was good enough, and it doesn't look to see that the new result is different.

+

The solution? Add a timestamp to your requests, so each one will look different to IE even if the real data is the same:

+

{% codeblock lang:javascript %}
// just an example; you'd
// obviously build this from
// the field itself
var data = 'field1=1000';

// build the timestamp
var timestamp = new Date().getTime();
data = data + '&' + timestamp;

// now you can send your data
// using ajax
{% endcodeblock %}
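
In the context of an actual request, the same idea looks something like this sketch (the endpoint, fields, and callback are hypothetical); newer versions of jQuery can also do this for you via the cache: false option of $.ajax, which appends a timestamp parameter to GET requests:

{% codeblock lang:javascript %}
// hypothetical endpoint and fields; the timestamp makes each query
// string unique, so IE6 can't hand back a stale cached response
$.get('/multiply.php', {
  field1: $('#field1').val(),
  field2: $('#field2').val(),
  ts: new Date().getTime()
}, function(response) {
  $('#result').text(response);
});
{% endcodeblock %}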

+

Thankfully, when this happened to me yesterday, I had a coworker who only needed a couple of minutes with the server logs to figure out what was happening. After he figured it out here, I found this thread, which verified the cause, and I was able to solve it in a few minutes.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/standards-for-html-emails.html b/www/blog/standards-for-html-emails.html new file mode 100644 index 0000000..a9f8729 --- /dev/null +++ b/www/blog/standards-for-html-emails.html @@ -0,0 +1,118 @@ +Standards for HTML emails?

rmurphey adventures in javascript

Standards for HTML emails?

After a couple of attempts that ended not-so-well, I gave up on HTML emails. I advise people to work with someone who's an expert with them, or to at least use templates that are known to work, and then I wish them well. At a job interview I actually proclaimed my active dislike of HTML emails and general unwillingness to muck with them -- I'm not sure that went over so well. The news that Outlook 2007 actually took a giant leap backwards in HTML support seals the deal for me. Still, it's good to read that some people are trying to bring some sanity to the situation. I wish them luck.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/suspicious-stumbleupon-bounce-rates.html b/www/blog/suspicious-stumbleupon-bounce-rates.html new file mode 100644 index 0000000..0e5c4b0 --- /dev/null +++ b/www/blog/suspicious-stumbleupon-bounce-rates.html @@ -0,0 +1,125 @@ +Suspicious StumbleUpon Bounce Rates?

rmurphey adventures in javascript

Suspicious StumbleUpon Bounce Rates?

I've been promoting some of my blog posts by submitting them to StumbleUpon, and it's been generating a fair bit of traffic -- my post "How I Learned CSS" has done especially well. I've been watching my site using Google Analytics, and I've noticed that visitors from StumbleUpon have a substantially lower bounce rate than, say, visitors from dzone. So I think, "yay, StumbleUpon" and start submitting anything interesting I write to there.

+

But in poking around in my Google Analytics a bit more, I've also noticed that, on pages that I've promoted through SU, I'm seeing strange navigation patterns: for "How I Learned CSS," 40% of visits to the page came from the page itself (and, as it logically follows, 40% of next pages were the page itself). The page was viewed a total of 4,700 times, but a little more than 2,800 of those pageviews were unique. Since bounce rate basically measures whether a new arrival went to another page before leaving the site, I'm not sure it shouldn't count as a bounce if the second page the user visits is the same as the one they were already on.

+

I'm not seeing this pattern with pages I don't promote on SU, which makes me curious. SU is clearly driving a ton of traffic to my site; it's just not clear that the traffic is actually resulting in the remarkably low bounce rate that Google Analytics shows. If I took out the 40% of people whose next page was the page they were already on, I'm left with not even 1% of people who went to another, different page on the site before leaving -- and suddenly dzone is looking good again.

+

I did a Google search and didn't come up with much except this, which discusses the importance of bounce rate, and this, which suggests using StumbleUpon to reduce your bounce rate.

+

I'm curious whether anyone else is seeing this, and whether it's actually related to StumbleUpon, Google Analytics, or something else. If I had to guess, I'd imagine that this is a result of people using the StumbleUpon toolbar (since it's not happening in all cases), and maybe the toolbar is pre-visiting the page on the user's behalf to make sure it still exists?

+

Update: See the comments and a more in-depth discussion.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/tech-podcast-recording-tips.html b/www/blog/tech-podcast-recording-tips.html new file mode 100644 index 0000000..00d2572 --- /dev/null +++ b/www/blog/tech-podcast-recording-tips.html @@ -0,0 +1,155 @@ +So You're Going on a Podcast

rmurphey adventures in javascript

So You're Going on a Podcast

I got a fair bit of experience recording a podcast back in the yayQuery days; when I decided to start another one, the technical side of it felt pretty familiar, minus the part where yayQuery was crazy enough to also record video. Back then, we mostly were talking to each other, so it was easy for us to all be on the same page about the technical requirements: headphones always, typing never (at least when you're also talking), and buy a good microphone. We also had 20-some episodes to get good at working with each other.

+

I've been recording the TTL Podcast for a few months now; it's a show with a different guest every week, so the tech challenges are different, and each show is completely different from the last. It has been great fun, and I can't believe how lucky I am to get to ask all of these people to talk to me and they keep saying yes.

+

I've learned a few things about how to be a good podcast guest along the way, but I haven't always been good at sharing them ahead of time with guests. This, then, is really just an attempt to write them down in a single place, with the added benefit that maybe they will be useful to others. This is mostly focused on being a guest of a show; I have lots to say about being a host, but I feel like that's a lot more complicated than this.

+

Technical

    +
  • Wear headphones, preferably the best ones you own. The iPhone headphones aren't nice, and actually leak noise like crazy. I alternate between using Klipsch and Shure (sorry, not sure of the model, so no link) in-ear headphones, both of which have a nice silicone seal to keep the sound I'm hearing in my ears and out of my microphone.
  • +
  • Use the best microphone you can. A MacBook's built-in microphone is decent enough in a pinch, but it's probably worth springing for an external microphone. I used the AT2020 for most of the yayQuery episodes, but I stepped up to a Shure SM7B to record TTL at the suggestion of Alex Sexton. The USB mic is just fine and very reasonably priced; the Shure sounds absolutely lovely but is a bit more of an investment. If you don't want to spring for a mic, see if someone in your office has one you can borrow. If you have questions about audio gear, I am mostly clueless beyond what I've written above.
  • +
  • If you're a guest, always plan to record your side of the conversation. (If you're a host, always plan to record all sides of the conversation; I've lost an episode by failing to do this.) On a Mac, Quicktime has a simple audio recording feature. There's also plenty of other software that will do the same.
  • +
+

Preparation

    +
  • Listen to at least one episode of the show before you go on (and possibly before you even agree to go on).
  • +
  • Ask the host what they want to talk to you about, and try to have a decent sense of the outline of the conversation before you start. If the host doesn't have great guidance -- she's almost certainly less familiar with your work than you are -- it's generally very welcome for you to propose an outline yourself.
  • +
  • If you have access to a soundproofed room, consider using it. Avoid large, echo-y rooms, or rooms that will be subject to a lot of hallway or construction noise.
  • +
+

The Show

    +
  • Consider your biological needs before you start recording :) Except for a live show, you're always welcome to pause if you need to step away, but you may find yourself distracted in the meantime. Make sure you have water nearby!
  • +
  • Silence phone notifications (no vibrating phones; silence means silent); on your computer, close Twitter, your mail client, etc.; option-click the Notification Center icon in your Mac toolbar to put it in do-not-disturb mode (thanks Ralph Holzmann for that tip).
  • +
  • Unless it's a live show, feel free to pause and try again if you make a mistake or say something wrong. It's important that you announce that you're starting over, then pause, then start over -- that way it's easy to fix in post-production.
  • +
  • Remember that a podcast is a conversation, not a presentation. Unlike a presentation, you're conversing with a host who knows the audience and can ask you the questions that will help that audience connect with you. Use a video chat so you can watch the host for visual cues that she might want to interject.
  • +
+

That's my list, though undoubtedly I've left things out. If you have stuff to add, please share in the comments —

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/the-future-of-jquery-fundamentals-and-a-confession.html b/www/blog/the-future-of-jquery-fundamentals-and-a-confession.html new file mode 100644 index 0000000..88fe934 --- /dev/null +++ b/www/blog/the-future-of-jquery-fundamentals-and-a-confession.html @@ -0,0 +1,142 @@ +The Future of jQuery Fundamentals (and a confession)

rmurphey adventures in javascript

The Future of jQuery Fundamentals (and a confession)

About 9 months ago, I released jQuery Fundamentals, a free, online training curriculum for people interested in learning jQuery based on material I’d assembled while leading jQuery trainings.

+ +

The response was and has continued to be amazing: not only has the book seen hundreds of thousands of visits, but it has also received content contributions and bug reports from dozens of people. It has become something of a collaborative work, and one of the go-to resources for jQuery and beginning JavaScript learning. It has been used to teach classes internally at companies and at colleges and universities, and it’s been translated into multiple languages. It’s even made me a tad bit of money — I recently granted a license to Webucator to create derivative works for their jQuery class — and landed me near the top of Google’s search results for “jQuery training”.

+ +

And so here is where we get to the confession part: while I’ve stayed very much in touch with the evolution of jQuery these last couple of years, written gobs of sample code in efforts to make people better at using the library, and even played a bit of a role in some of the new features in jQuery 1.5, the last time I chose the library for a project was in the fall of 2008. The last time I used it on a project at all was in the summer of 2010, and in a matter of a few weeks I was gutting the fragile, bug-ridden, DOM-centric code and re-writing the single-page application with — wait for it! — Dojo. jQuery and I have gone from being in a committed relationship to seeing other people to pretty much just saying hi on Facebook now and again.

+ +

This has put me in a strange place with jQuery Fundamentals — I want to be investing my energy supporting projects that I use, and while I can still write jQuery just fine and stay in touch with what’s going on with it, I really don’t … use it. That’s made it increasingly difficult to continue maintaining jQuery Fundamentals as a resource for the jQuery community.

+ +

Burying the Lede

+ +

At the jQuery conference in Boston last fall, John Resig invited me to participate in a conversation about an effort by the project to create a learning resource for the community, and through the course of that and future conversations, jQuery Fundamentals has found its new home.

+ +

I’ve been working actively with jQuery team member (and yayQuery co-host) Adam J. Sontag and community member Dan Heberden to get the book into good shape as it transitions to being “owned” by the jQuery project. I’ve also donated a third of the proceeds of the Webucator licensing arrangement to the jQuery project, to recognize the contributions of the community and to give even a wee bit of financial support to the learning efforts.

+ +

Adam, Dan, and I will be working hard to address some of the open issues with the book in the coming weeks. If you’re interested in helping, drop me an email, hit me up on Twitter, or just submit a pull request (though you may want to talk to us first if the solution to an issue isn’t straightforward). From formatting fixes to writing new content to updating the book to reflect the changes in jQuery 1.5, there’s a lot to be done.

+ +

What’s Next?

+ +

These days I’m working with a fantastic client doing mobile application development with PhoneGap and Dojo. It’s pretty much the most challenging, engaging, rewarding project I’ve had an opportunity to work on in nearly three years of independent consulting. These days, when I get the very inquiries I hoped to get by releasing jQuery Fundamentals in the first place, I direct people to the excellent folks at Bocoup. Slowly, I’m recalibrating my efforts and attention toward the projects that make my day-to-day development life better. As soon as I feel like jQuery Fundamentals is in a good place where I don’t have to worry about its future, you can expect to see a lot more learning-related content coming from me again; just, this time, it probably won’t be about jQuery.

+ +

I hope you’ll stick around.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/this-is-the-cigarette.html b/www/blog/this-is-the-cigarette.html new file mode 100644 index 0000000..8aef701 --- /dev/null +++ b/www/blog/this-is-the-cigarette.html @@ -0,0 +1,123 @@ +This is the Cigarette

rmurphey adventures in javascript

This is the Cigarette

+

This is the cigarette I smoked* on Wednesday after I got out of a meeting in Boston and went to my desk and read my messages and learned that our birthmother "match" had fallen through.

+

The last three weeks have been among the happiest, most exciting, most terrifying times I can remember. Saying that we are sad and disappointed and et cetera doesn't really cover it, but, well, there it is. Our search will continue.

+

* Don't worry, Mom, I don't usually smoke. Desperate times, desperate measures.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/this-is-the-cup-of-coffee.html b/www/blog/this-is-the-cup-of-coffee.html new file mode 100644 index 0000000..3bdea5e --- /dev/null +++ b/www/blog/this-is-the-cup-of-coffee.html @@ -0,0 +1,125 @@ +This is the Cup of Coffee

rmurphey adventures in javascript

This is the Cup of Coffee

+

This is the cup of coffee I was making earlier this week when Melissa gave me a thumbs-up while she talked on the phone to a woman in Pennsylvania who had just finished telling Melissa that yes, indeed, after 10 weeks or three years of waiting depending on how you count, a 29-year-old woman who's due to give birth in Iowa at the beginning of February has decided that Melissa and I should be so lucky as to get to be her baby girl's forever family.

+

Most people get to post ultrasound pictures on Twitter at moments like these, but for now this will suffice to remind me of the moment I found out I would get to be a mom. My head is spinning, and while on the one hand it's a little difficult to fathom that this is all just 10 weeks away, on the other hand I'm counting down the days.

+

Our adoption will be an open one; the meaning of "open" varies widely, but in our case it means we talked to the birth mother before she chose us, we'll be meeting her in a few weeks, we'll do our very best to be in Iowa for the delivery, and we'll stay in touch with letters and pictures afterwards. Melissa and I are grateful that we'll be able to adopt as a couple, though we are saddened that we have to adopt outside of our home state of North Carolina in order to do so. It's important to us that our child have both of us as her legal parents, and I don't hesitate to say that it's downright shitty that we have to jump through significant legal and financial hoops -- and stay in a hotel in Iowa with a newborn for an unknown number of days -- to make it so. It is what it is, and good people are working and voting to make it better, and it can't happen fast enough.

+

I've learned a lot about adoption these past few months, and I know a lot of people have a lot of questions, some of which they're reluctant to ask. If you're interested in learning more, I highly recommend In On It: What Adoptive Parents Would Like You to Know About Adoption. You're also welcome to ask me questions if you see me in real life or on the internets -- I can't promise I'll know the answers, but I promise to do my best.

+

In the meantime, wish us luck :)

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/times-open-science-fair.html b/www/blog/times-open-science-fair.html new file mode 100644 index 0000000..15fe90c --- /dev/null +++ b/www/blog/times-open-science-fair.html @@ -0,0 +1,124 @@ +Show & Tell

rmurphey adventures in javascript

Show & Tell

I spoke at the Times Open Source Science Fair a couple of weeks ago. I'll admit that I was pretty skeptical of the concept when I was first asked, but as someone who used to work as an editor at a tiny newspaper in upstate New York, I wasn't about to say no when the Times asked me to come say hi.

+

A few days before the event, I got an email asking me for information about what I'd be showing off at my booth. Booth? Wat? They weren't kidding about the science fair thing, but what the heck was I going to show at a booth?

+

It turns out this is basically the best idea ever. I recruited my Bocoup colleague Rick Waldron to join me, and together we spent a whirlwind hour showing off robots powered by JavaScript to an endless stream of people walking up to our booth. Rick did a great job of setting up a demo that people could play with, and they took turns moving sliding potentiometers that controlled servos that moved an arm with a gripper at the end, trying to pick up Bocoup stickers. Ours was one of about a dozen booths showing off open-source projects, and the room was a wonderful madhouse.

+

After a break for dinner, Jeremy Ashkenas, Zach Holman, and I each gave 20-minute talks, but the talks were really just icing on the evening. The "science fair" format promoted such intentional interaction, in a way that traditional conferences just can't, no matter how great the hall track or the parties may be. The format invited and encouraged attendees to talk to the presenters -- indeed, if they didn't talk to the presenters, there wasn't much else for them to do. By the time the official talks came around, a super-casual, super-conversational atmosphere had already been established, and the energy that created was tangibly different from any event I've been to before.

+

I love conferences, and the sharing of knowledge that happens there, and there's a whole lot to be said for their speaker-audience format -- don't get me wrong. But I'd also love to see more events figure out how to integrate this show and tell format. "Booths" don't need to mean "vendors trying to sell things" -- they can actually be a great opportunity to facilitate conversation, and to let open source contributors show off their hard work.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/ttl-podcast.html b/www/blog/ttl-podcast.html new file mode 100644 index 0000000..7ae7dad --- /dev/null +++ b/www/blog/ttl-podcast.html @@ -0,0 +1,127 @@ +The TTL Podcast

rmurphey adventures in javascript

The TTL Podcast

Over the past several months, I've been on a few different podcasts, plus I've been having a lot of fun doing office hours, and generally talking a lot with other people who do the kind of work that I do. I've particularly enjoyed talking about a subject that Alex Sexton dubbed Front-End Ops.

+

It has occurred to me that a) I'm really super fortunate to get to have conversations about this stuff with super-smart people; b) there aren't yet a lot of great sources of information about front-end ops in the real world; and c) I used to be on a podcast and that sure was fun.

+

To that end, I threw a tweet out into the world to see who might be willing to talk to me and let me record the conversation. I got enough great responses that I decided to don my podcasting hat again for a little bit, and the result is the TTL Podcast.

+

ttlpodcast.com

+

If you're a mid-level front-end dev looking to level up, I'd humbly suggest that this is very much a show for you -- you'll get to listen in on the thought process of some of the best front-end devs I know. That said, it's not just a show for those aspiring to take the front-end world by storm; it's also a chance for those who are already in the trenches, doing daily battle with WebDriver and trying to shave 10 more milliseconds off page load, to commiserate asynchronously. I know I personally have learned a ton -- in some cases I've seen a new angle on a problem, and in other cases I've had some serious Developer Guilt assuaged.

+

I've released three episodes so far -- conversations with Alex, Burak Yiğit Kaya (Disqus), and Daniel Espeset and Seth Walker (Etsy). More episodes are in the pipeline, including developers from Walmart Labs, Yammer, FT Labs, and The Guardian.

+

While the initial focus has been on front-end ops, I can see the scope widening over time to cover, generally, the tools and challenges of doing front-end dev at significant scale. If you or someone you know would be a good person to talk to about that sort of thing, I hope you'll let me know.

+

While I'm here, I want to give huge and sincere thanks to SauceLabs and Travis CI for their support of the show; to Una Kravets for finding time in her busy life to make me a website; to my sister, who's been kind enough to pitch in with the editing; and to Bazaarvoice for giving me the freedom to take on a project like this.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/txjs-js-bbq-you.html b/www/blog/txjs-js-bbq-you.html new file mode 100644 index 0000000..d339008 --- /dev/null +++ b/www/blog/txjs-js-bbq-you.html @@ -0,0 +1,184 @@ +TXJS: JS + BBQ + You

rmurphey adventures in javascript

TXJS: JS + BBQ + You

If you follow me on Twitter, you've heard about TXJS by now, but let me tell you: now that we've announced Douglas Crockford and John Resig, there is officially no way you should miss this. TXJS is a full-day conference set for June 5 in Austin, Texas, hosted by the yayQuery team and featuring BBQ and a slew of awesome JavaScripters:

+
    +
  • +Brandon Aaron, jQuery team member
  • +
  • +Tim Caswell, Node.js contributor and founder of howtonode.org +
  • +
  • +Douglas Crockford, JavaScript architect at Yahoo! and author of "JavaScript: The Good Parts"
  • +
  • +Andrew Dupont, Prototype core team member
  • +
  • +Peter Higgins, Dojo project lead
  • +
  • +Paul Irish, jQuery team member and yayQuery podcast co-host
  • +
  • +Brian LeRoux, software architect at Nitobi and PhoneGap hacker/contributor
  • +
  • +Joe McCann, senior technologist at Frog Design
  • +
  • +John Resig, creator of the jQuery JavaScript library
  • +
  • +Alex Sexton, front-end developer and yayQuery podcast co-host
  • +
  • +Kyle Simpson, Austin JS organizer and author of LABjs
  • +
  • +Adam Sontag, jQuery UI developer relations team member and yayQuery podcast co-host
  • +
  • +Mike Taylor, HTML5 aficionado and front-end developer at Tunecore
  • +
  • +Juriy Zaytsev (aka @kangax), expert JavaScript wrangler and Prototype core developer
  • +
+ +

Earlybird tickets are just $49 while they last, and regular tickets are just $69, which is about as close to free as we could get. See you there!

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/unit-tests.html b/www/blog/unit-tests.html new file mode 100644 index 0000000..725e712 --- /dev/null +++ b/www/blog/unit-tests.html @@ -0,0 +1,432 @@ +Writing Unit Tests for Existing JavaScript

rmurphey adventures in javascript

Writing Unit Tests for Existing JavaScript

My team at Bazaarvoice has been spending a lot of time lately thinking about quality and how we can have greater confidence that our software is working as it should.

+

We've long had functional tests in place that attempt to ask questions like "When a user clicks a button, will The Widget do The Thing?" These tests tell us a fair amount about the state of our product, but we've found that they're brittle -- even after we abstracted away the CSS selectors that they rely on -- and that they take approximately forever to run, especially if we want to run them in all of the browsers we support. The quality of the tests themselves is all over the map, too -- some of them are in fact unit tests, not really testing anything functional at all.

+

A few months ago we welcomed a new QA lead to our team as part of our renewed focus on quality. Having a team member who is taking a thoughtful, systematic approach to quality is a game-changer -- he's not just making sure that new features work, but rather has scrutinized our entire approach to delivering quality software, to great effect.

+

One of the things he has repeatedly emphasized is the need to push our tests down the stack. Our functional tests should be black-box -- writing them shouldn't require detailed knowledge of how the software works under the hood. Our unit tests, on the other hand, should provide broad and detailed coverage of the actual code base. In an ideal world, functional tests can be few and slow-ish, because they serve as an infrequent smoke test of the application; unit tests should be thorough, but execute quickly enough that we run them all the time.

+

Until now, our unit tests have been entirely focused on utility and framework code -- do we properly parse a URL, for example? -- not on code that's up close and personal with getting The Widget to do The Thing. I'd told myself that this was fine and right and good, but in reality I was pretty terrified of trying to bolt unit tests onto feature code of incredibly varying quality, months or even years after it was first written.

+

A week or so ago, thanks to some coaxing/chiding from fellow team members, I decided to bite the bullet and see just how bad it would be. A week later, I feel like I've taken the first ten steps in a marathon. Of course, taking those first steps involves making the decision to run, and doing enough training ahead of time that you don't die, so in that regard I've come a long way already. Here's what I've done and learned so far.

+

Step 0

I was lucky in that I wasn't starting entirely from scratch, but if you don't already have a unit testing framework in place, don't fret -- it's pretty easy to set up. We use Grunt with Mocha as our test framework and expect.js as our assertion library, but if I were starting over today I'd take a pretty serious look at Intern.
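
If you haven't seen Mocha's BDD interface paired with expect.js before, a test file has roughly this shape -- a deliberately trivial, made-up example rather than anything from our actual suite:

describe('a first unit test', function () {
  // expect.js is loaded by the test runner and available here
  it('should add numbers', function () {
    expect(1 + 1).to.equal(2);
  });
});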

+

Our unit tests are organized into suites. Each suite consists of a number of files, each of which tests a single AMD module. Most of the modules under test when I started down this path were pretty isolated -- they didn't have a ton of dependencies generally, and had very few runtime dependencies. They didn't interact with other modules that much. Almost all of the existing unit test files loaded a module, executed its methods, and inspected the return value. No big deal.

+

Feature-related code -- especially already-written feature-related code -- is a different story. Views have templates. Models expect data. Models pass information to views, and views pass information to models. Some models need parents; others expect children. And pretty much everything depended on a global-ish message broker to pass information around.

+

Since the code was originally written without tests, it was guaranteed to be in various states of testability, but a broad rewrite for testability is of course off the table. We'll rewrite targeted pieces, but doing so comes with great risk. For the most part, our goal will be to write tests for what we have, then refactor cautiously once tests are in place.

+

We decided that the first place to start was with models, so I found the simplest model I could:

+
define([
+  'framework/bmodel',
+  'underscore'
+], function (BModel, _) {
+  return BModel.extend({
+    options : {},
+    name : 'mediaViewer',
+
+    init : function (config, options) {
+      _.extend(this.options, options);
+    }
+  });
+});
+
+

Why do we have a model that does approximately nothing? I'm not going to attempt to answer that, though there are Reasons -- but for the sake of this discussion, it certainly provides an easy place to start.

+

I created a new suite for model tests, and added a file to the suite to test the model. I could tell you that I naively plowed ahead thinking that I could just load the module and write some assertions, but that would be a lie.

+

Mocking: Squire.js

I knew from writing other tests, on this project and projects in the past, that I was going to need to "mock" some of my dependencies. For example, we have a module called ENV that is used for ... well, way too much, though it's better than it used to be. A large portion of ENV isn't used by any given module, but ENV itself is required by essentially every model and view.

+

Squire.js is a really fantastic library for doing mocking in RequireJS-land. It lets you override how a certain dependency will be fulfilled; so, when a module under test asks for 'ENV', you can use Squire to say "use this object that I've hand-crafted for this specific test instead."

+

I created an Injector module that does the work of loading Squire, plus mocking a couple of things that will be missing when the tests are executed in Node-land.

+
define([
+  'squire',
+  'jquery'
+], function (Squire, $) {
+  return function () {
+    var injector;
+
+    if (typeof window === 'undefined') {
+      injector = new Squire('_BV');
+
+      injector.mock('jquery', function () {
+        return $;
+      });
+
+      injector.mock('window', function () {
+        return {};
+      });
+    }
+    else {
+      injector = new Squire();
+    }
+
+    return injector;
+  };
+});
+
+

Next, I wired up the test to see how far I could get without mocking anything. Note that the main module doesn't actually load the thing we're going to test -- first, it sets up the mocks by calling the injector function, and then it uses the created injector to require the module we want to test. Just like a normal require, the injector.require is async, so we have to let our test framework know to wait until it's loaded before proceeding with our assertions.

+
define([
+  'test/unit/injector'
+], function (injector) {
+  injector = injector();
+
+  var MediaViewer;
+
+  describe('MediaViewer Model', function () {
+    before(function (done) {
+      injector.require([
+        'bv/c2013/model/mediaViewer'
+      ], function (M) {
+        MediaViewer = M;
+        done();
+      });
+    });
+
+    it('should be named', function () {
+      var m = new MediaViewer({});
+      expect(m.name).to.equal('mediaViewer');
+    });
+
+    it('should mix in provided options', function () {
+      var m = new MediaViewer({}, { foo : 'bar' });
+      expect(m.options.foo).to.equal('bar');
+    });
+  });
+});
+
+

This, of course, still failed pretty spectacularly. In real life, a model gets instantiated with a component, and a model also expects to have access to an ENV that has knowledge of the component. Creating a "real" component and letting the "real" ENV know about it would be an exercise in inventing the universe, and this is exactly what mocks are for.

+

While the "real" ENV is a Backbone model that is instantiated using customer-specific configuration data, a much simpler ENV suffices for the sake of testing a model's functionality:

+
define([
+  'backbone'
+], function (Backbone) {
+  return function (injector, opts) {
+    injector.mock('ENV', function () {
+      var ENV = new Backbone.Model({
+        componentManager : {
+          find : function () {
+            return opts.component;
+          }
+        }
+      });
+
+      return ENV;
+    });
+
+    return injector;
+  };
+});
+
+

Likewise, a "real" component is complicated and difficult to create, but the pieces of a component that this model needs to function are limited. Here's what the component mock ended up looking like:

+
define([
+  'underscore'
+], function (_) {
+  return function (settings) {
+    settings = settings || {};
+
+    settings.features = settings.features || [];
+
+    return {
+      trigger : function () {},
+      hasFeature : function (refName, featureName) {
+        return _.contains(settings.features, featureName);
+      },
+      getScope : function () {
+        return 'scope';
+      },
+      contentType : settings.contentType,
+      componentId : settings.id,
+      views : {}
+    };
+  };
+});
+
+

In the case of both mocks, we've taken some dramatic shortcuts: the real hasFeature method of a component is a lot more complicated, but in the component mock we create a hasFeature method whose return value can be easily known by the test that uses the mock. Likewise, the behavior of the componentManager's find method is complex in real life, but in our mock, the method just returns the same thing all the time. Our mocks are designed to be configurable by -- and predictable for -- the tests that use them.

+

Knowing what to mock and when and how is a learned skill. It's entirely possible to mock something in such a way that a unit test passes but the actual functionality is broken. We actually have pretty decent tests around our real component code, but not so much around our real ENV code. We should probably fix that, and then I can feel better about mocking ENV as needed.

+

So far, my approach has been: try to make a test pass without mocking anything, and then mock as little as possible after that. I've also made a point of trying to centralize our mocks in a single place, so we aren't reinventing the wheel for every test.

+

Finally: when I first set up the injector module, I accidentally made it so that the same injector would be shared by any test that included the module. This is bad, because you end up sharing mocks across tests -- violating the "only mock what you must" rule. The injector module shown above is correct in that it returns a function that can be used to create a new injector, rather than the injector itself.

+

Here's what the final MediaViewer test ended up looking like:

+
define([
+  // This properly sets up Squire and mocks window and jQuery
+  // if necessary (for running tests from the command line).
+  'test/unit/injector',
+
+  // This is a function that mocks the ENV module.
+  'test/unit/mocks/ENV',
+
+  // This is a function that mocks a component.
+  'test/unit/mocks/component'
+], function (injector, ENVMock, component) {
+  injector = injector();
+
+  // This will become the constructor for the model under test.
+  var MediaViewer;
+
+  // Create an object that can serve as a model's component.
+  var c = component();
+
+  // We also need to mock the ENV module and make it aware of
+  // the fake component we just created.
+  ENVMock(injector, { component : c });
+
+  describe('MediaViewer Model', function () {
+    before(function (done) {
+      injector.require([
+        'bv/c2013/model/mediaViewer'
+      ], function (M) {
+        MediaViewer = M;
+        done();
+      });
+    });
+
+    it('should be named', function () {
+      var m = new MediaViewer({
+        component : c
+      }, {});
+      expect(m.name).to.equal('mediaViewer');
+    });
+
+    it('should mix in provided options', function () {
+      var m = new MediaViewer({
+        component : c
+      }, { foo : 'bar' });
+
+      expect(m.options.foo).to.equal('bar');
+    });
+  });
+});
+
+

Spying: Sinon

After my stunning success with writing 49 lines of test code to test a 13-line model, I was feeling optimistic about testing views, too. I decided to tackle this fairly simple view first:

+
define([
+  'framework/bview',
+  'underscore',
+  'hbs!contentAuthorProfileInline',
+  'mf!bv/c2013/messages/avatar',
+  'bv/util/productInfo',
+  'framework/util/bvtracker',
+  'util/specialKeys'
+], function (BView, _, template, msgPack, ProductInfo, BVTracker, specialKeys) {
+  return BView.extend({
+    name : 'inlineProfile',
+
+    templateName : 'contentAuthorProfileInline',
+
+    events : {
+      'click .bv-content-author-name .bv-fullprofile-popup-target' : 'launchProfile'
+    },
+
+    template : template,
+
+    msgpacks : [msgPack],
+
+    launchProfile : function (e) {
+      // use r&r component outlet to trigger full profile popup component event
+      this.getTopModel().trigger( 'showfullprofile', this.model.get('Author') );
+
+      BVTracker.feature({
+        type : 'Used',
+        name : 'Click',
+        detail1 : 'ViewProfileButton',
+        detail2 : 'AuthorAvatar',
+        bvProduct : ProductInfo.getType(this),
+        productId : ProductInfo.getId(this)
+      });
+    }
+  });
+});
+
+

It turned out that I needed to do the same basic mocking for this as I did for the model, but this code presented a couple of interesting things to consider.

+

First, I wanted to test that this.getTopModel().trigger(...) triggered the proper event, but the getTopModel method was implemented in BView, not the code under test, and without a whole lot of gymnastics, it wasn't going to return an object with a trigger method.

+

Second, I wanted to know that BVTracker.feature was getting called with the right values, so I needed a way to inspect the object that got passed to it, but without doing something terrible like exposing it globally.

+

Enter Sinon and its spies. Spies let you observe methods as they are called. You can either let the method still do its thing while watching how it is called, or simply replace the method with a spy.
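
Here's a toy illustration of those two flavors of spying -- nothing from our code base, just the shape of the API: an anonymous spy standing in for a callback, and a spy wrapped around an existing method.

// a throwaway function to exercise the anonymous spy
var doSomething = function (cb) { cb(42); };

var callback = sinon.spy();
doSomething(callback);
console.log(callback.called);          // true -- it was invoked
console.log(callback.calledWith(42));  // true -- it was invoked with 42

var logger = { warn : function (msg) { /* the real implementation */ } };
sinon.spy(logger, 'warn');             // wrap the existing method with a spy
logger.warn('uh oh');
console.log(logger.warn.calledOnce);   // true
logger.warn.restore();                 // put the original method back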

+

I solved the first problem by defining my own getTopModel method on the view instance, and having it return an object. I gave that object a trigger method that was actually just a spy -- for the sake of my test, I didn't care what trigger did, only how it was called. Other tests [will eventually] ensure that triggering this event has the desired effect on the targeted model, but for the sake of this test, we don't care.

+

Here's what the test looks like:

+
describe('#launchProfile', function () {
+  var spy;
+  var v;
+
+  before(function () {
+    spy = sinon.spy();
+
+    v = new InlineProfile({
+      // model and component are defined elsewhere
+      component : component,
+      model : model
+    });
+
+    model.set('Author', 'author');
+
+    v.getTopModel = function () {
+      return {
+        trigger : spy
+      };
+    };
+  });
+
+  it('should trigger showfullprofile event on top model', function () {
+    v.launchProfile();
+
+    expect(spy.lastCall.args[0]).to.equal('showfullprofile');
+    expect(spy.lastCall.args[1]).to.equal('author');
+  });
+});
+
+

I solved the second problem -- the need to see what's getting passed to BVTracker.feature -- by creating a BVTracker mock where every method is just a spy:

+
// This is a mock for BVTracker that can be used by unit tests.
+define([
+  'underscore'
+], function (_) {
+  return function (injector, opts) {
+    var BVTracker = {};
+
+    injector.mock('framework/util/bvtracker', function () {
+      _([
+        'error',
+        'pageview',
+        'feature'
+      ]).each(function (event) {
+        BVTracker[event] = sinon.spy();
+      });
+    });
+
+    return BVTracker;
+  };
+});
+
+

My test looked at the BVTracker.feature spy to see what it got when the view's launchProfile method was called:

+
it('should send a feature analytics event', function () {
+  v.launchProfile();
+
+  var evt = BVTracker.feature.lastCall.args[0];
+
+  expect(evt.type).to.equal('Used');
+  expect(evt.name).to.equal('Click');
+  expect(evt.detail1).to.equal('ViewProfileButton');
+  expect(evt.detail2).to.equal('AuthorAvatar');
+  expect(evt.bvProduct).to.equal('RatingsAndReviews');
+  expect(evt.productId).to.equal('product1');
+});
+
+

I've barely touched on what you can do with spies, or with Sinon in general. Besides providing simple spy functionality, Sinon delivers a host of functionality that makes tests easier to write -- swaths of which I haven't even begun to explore. One part I have explored is its ability to create fake XHRs and to fake whole servers, allowing you to test how your code behaves when things go wrong on the server. Do yourself a favor and spend some time reading through the excellent docs.
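
For flavor, here's a rough sketch of the fake server API -- the endpoint and response are made up for this example, and the request is made with jQuery:

var server = sinon.fakeServer.create();

server.respondWith('GET', '/api/reviews', [
  500,
  { 'Content-Type' : 'application/json' },
  '{ "error" : "boom" }'
]);

$.getJSON('/api/reviews').fail(function (xhr) {
  console.log('server said', xhr.status);   // 500
});

server.respond();   // flush the queued request to its handler
server.restore();   // put the real XMLHttpRequest back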

+

What to test ... and not

I've written tests now for a tiny handful of models and views. Setting up the mocks was a bit of a hurdle -- and there were plenty of other hurdles that are too specific to our project for me to talk about them in detail -- but overall, the hardest part has been figuring out what, exactly, to test. I crafted the examples above to be pretty straightforward, but reality is a lot more complicated.

+

Writing tests for existing code requires first understanding the code that's being tested and identifying interesting moments in that code. If there's an operation that affects the "public" experience of the module -- for example, if the value of a model attribute changes -- then we need to write a test that covers that operation's side effect(s). If there's code that runs conditionally, we need to test the behavior of that code when that condition is true -- and when it's not. If there are six possible conditions, we need to test them all. If a model behaves completely differently when it has a parent -- and this happens far too often in our code -- then we need to simulate the parent case, and simulate the standalone case.
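
As a contrived illustration of what "cover both branches" looks like in practice -- the Profile model, its getDisplayName method, and the 'nicknames' feature flag are all invented for this example, but the component mock is the one shown earlier:

describe('#getDisplayName', function () {
  it('should include the nickname when the nicknames feature is on', function () {
    var m = new Profile({ component : component({ features : ['nicknames'] }) }, {});
    m.set('Nickname', 'rmurphey');
    expect(m.getDisplayName()).to.contain('rmurphey');
  });

  it('should leave the nickname out when the feature is off', function () {
    var m = new Profile({ component : component() }, {});
    m.set('Nickname', 'rmurphey');
    expect(m.getDisplayName()).to.not.contain('rmurphey');
  });
});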

+

It can be tempting to try to test the implementation details of existing code -- and difficult to realize that you're doing it even when you don't mean to. I try to stay focused on testing how other code might consume and interact with the module I'm testing. For example, if the module I'm testing triggers an event in a certain situation, I'm going to write a test that proves it, because some other code is probably expecting that event to get triggered. However, I'm not going to test that a method of a certain name gets called in a certain case -- that's an implementation detail that might change.

+

The exercise of writing unit tests against existing code proves to be a phenomenal incentive to write better code in the future. One comes to develop a great appreciation of methods that have return values, not side effects. One comes to loathe the person -- often one's past self -- who authored complex, nested conditional logic. One comes to worship small methods that do exactly one thing.

+

So far, I haven't rewritten any of the code I've been testing, even when I've spotted obvious flaws, and even when rewriting would make the tests themselves easier to write. I don't know how long I'll be able to stick to this; there are some specific views and models that I know will be nearly impossible to test without revisiting their innards. When that becomes necessary, I'm hoping I can do it incrementally, testing as I go -- and that our functional tests will give me the cover I need to know I haven't gone horribly wrong.

+

Spreading the love

Our team's next step is to widen the effort to get better unit test coverage of our code. We have something like 100 modules that need testing, and their size and complexity are all over the map. Over the coming weeks, we'll start to divide and conquer.

+

One thing I've done to try to make the effort easier is to create a scaffolding task using Grunt. Running grunt scaffold-test:model:modelName will generate a basic file that includes mocking that's guaranteed to be needed, as well as the basic instantiation that will be required and a couple of simple tests.
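
The task itself isn't anything fancy; a stripped-down sketch of how a task like that can be registered looks something like this (the paths and template are placeholders, not our real ones):

module.exports = function (grunt) {
  grunt.registerTask('scaffold-test', 'Generate a unit test skeleton', function (type, name) {
    // `grunt scaffold-test:model:mediaViewer` passes 'model' and 'mediaViewer' as arguments
    var dest = 'test/unit/' + type + '/' + name + '.js';
    var template = grunt.file.read('test/templates/' + type + '.tmpl');

    grunt.file.write(dest, grunt.template.process(template, { data : { name : name } }));
    grunt.log.ok('Created ' + dest);
  });
};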

+

There's another senior team member who has led an effort in the past to apply unit tests to an existing code base, and he's already warned me to expect a bit of a bumpy road as the team struggles through the inevitable early challenges of trying to write unit tests for existing feature code. I expect there to be a pretty steep hill to climb at first, but at the very least, the work I've done so far has -- hopefully -- gotten us to the top of the vertical wall that had been standing in our way.

+

Further Reading

I'm not exactly the first person to write about this. You may find these items interesting:

+

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/unobtrusive-cross-browser-method-to-add-icons-to-links.html b/www/blog/unobtrusive-cross-browser-method-to-add-icons-to-links.html new file mode 100644 index 0000000..d78b875 --- /dev/null +++ b/www/blog/unobtrusive-cross-browser-method-to-add-icons-to-links.html @@ -0,0 +1,224 @@ +Unobtrusive, cross-browser method to add icons to links

rmurphey adventures in javascript

Unobtrusive, cross-browser method to add icons to links

There are lots of examples of using CSS to add filetype icons to links, but they all rely on advanced CSS selectors, which Internet Explorer 6 doesn't support.

+

If you're looking for a cross-browser method of adding filetype icons to links, you have a couple of choices: you can add a class name to all of your links and target that class name with CSS that IE6 can understand; or you can let Javascript add the icons dynamically based on the extension it finds at the end of each URL. The class name method is fine if you don't mind adding the class to all of your links in your HTML, but the Javascript method will figure out which links get which icon automatically.

+

The Javascript/jQuery method

+This uses my favorite Javascript library, jQuery; you'll need to include the library in your document, and then either put the icon-adding code in another file you include, or put it inline with your HTML. It's good practice to put all your jQuery code inside $(document).ready(), to be executed when the document is ready to be manipulated, but I've left that part out here for brevity.

+

Round 1: Because jQuery supports CSS1-3, you can mimic the CSS rule that wouldn't work in IE6:

+

{% codeblock lang:javascript %}
$('a[href$=".doc"]').
  css({
    // use paddingLeft instead of padding-left;
    // jQuery (and Javascript) use camelCase
    // for CSS attributes instead of hyphenation
    paddingLeft: '18px',
    background: 'transparent url("word-doc.gif") no-repeat center left'
  });
{% endcodeblock %}

+

Round 2: You could do one of these snippets for each file type, but with jQuery, you can take advantage of the $.each() utility method to do a loop, eliminating redundant code:

+

{% codeblock lang:javascript %}
// first, create an object
// that contains information
// about how file extensions
// correspond to images
var fileTypes = {
  // extension: 'image file'
  doc: 'doc.gif',
  xls: 'xls.gif',
  pdf: 'pdf.gif'
};

// then, loop over the object
// using jQuery's $.each()
$.each(fileTypes, function(extension, image) {
  $('a[href$="' + extension + '"]').
    css({
      paddingLeft: '18px',
      background: 'transparent url("' + image + '") no-repeat center left'
    });
});
{% endcodeblock %}

+

Round 3: One problem with this: while jQuery does support these attribute selectors, in my experience they require some pretty heavy lifting to do the pattern matching, which can slow things down significantly. Since the method above would require multiple selections (one for each extension/image combination), it makes sense to try a different way that will require fewer selections and less pattern matching:

+

{% codeblock lang:javascript %}
var fileTypes = {
  doc: 'doc.gif',
  xls: 'xls.gif',
  pdf: 'pdf.gif'
};

// this is like $.each() above, except
// it iterates over the matched elements
$('a').each(function() {

  // get a jQuery object for each anchor found
  var $a = $(this);

  // get the href attribute
  var href = $a.attr('href');

  // get the extension from the href
  var hrefArray = href.split('.');
  var extension = hrefArray[hrefArray.length - 1];

  var image = fileTypes[extension];

  if (image) {
    $a.css({
      paddingLeft: '18px',
      background: 'transparent url("' + image + '") no-repeat center left'
    });
  }

});
{% endcodeblock %}

+

And in fact, in limited testing using Firebug, this second version is faster than the first if you have more than two filetypes (and thus more than two selections).

+

Taking it further

+You can also add a different icon to external links, and only add filetype icons to internal links:

+

{% codeblock lang:javascript %}
var fileTypes = {
  doc: 'doc.gif',
  xls: 'xls.gif',
  pdf: 'pdf.gif'
};

$('a').each(function() {

  var $a = $(this);
  var href = $a.attr('href');

  if (
    (href.match(/^http/)) &&
    (! href.match(document.domain))
  ) {
    // use a special image for external links
    var image = 'external.gif';
  } else {
    // get the extension from the href
    var hrefArray = href.split('.');
    var extension = hrefArray[hrefArray.length - 1];

    var image = fileTypes[extension];
  }

  if (image) {
    $a.css({
      paddingLeft: '18px',
      background: 'transparent url("' + image + '") no-repeat center left'
    });
  }

});
{% endcodeblock %}

+

Or, only add icons to certain links, like a list of files in an unordered list, by changing your selector from $('a') to $('ul#fileList a').

+

Resources

+
    +
  • There are nuances to the CSS you'll want to apply to your links; the above is just an example of something that may or may not work for you. Read more here about how to style the links so your background images appear as you want them to.
  • +
  • If you're using transparent PNGs for your background images, iepngfix will save the day.
  • +
  • +famfamfam has some great icons available for free under the Creative Commons Attribution 2.5 License.
  • + +

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/update-page-using-json-data.html b/www/blog/update-page-using-json-data.html new file mode 100644 index 0000000..89eb769 --- /dev/null +++ b/www/blog/update-page-using-json-data.html @@ -0,0 +1,173 @@ +Update page using JSON data

rmurphey adventures in javascript

Update page using JSON data

updateWithJSON is a jQuery plugin that updates elements on your page based on key/value pairs in a JSON object. Usage:

+

{% codeblock lang:javascript %}
$.updateWithJSON(jsonData)
{% endcodeblock %}

+

Here's a demo, and here's how it works (there's a rough code sketch of this logic right after the list):

+
    +
  • iterate over each property/value combination in the JSON object
  • +
  • look for an element in the DOM that matches the property name +
      +
    • first, look for an element with a matching id attribute
    • +
    • if no element with a matching ID is found, look for input, select or textarea elements with a matching name attribute
    • +
    +
  • +
  • update the value, contents or selection of the matched element(s) based on the value in the JSON object
  • +
+ +
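
In rough code, that lookup boils down to something like the following -- a sketch of the logic described above, not the plugin's actual source:

{% codeblock lang:javascript %}
// sketch of the lookup described above, not the plugin itself
$.each(jsonData, function(name, value) {
  // first preference: an element whose id matches the property name
  var $el = $('#' + name);

  // otherwise, fall back to form fields with a matching name attribute
  if (!$el.length) {
    $el = $('input, select, textarea').filter('[name="' + name + '"]');
  }

  // form fields get their value (or selection) set;
  // anything else gets its contents replaced
  if ($el.is('input, select, textarea')) {
    $el.val(value);
  } else {
    $el.html(value);
  }
});
{% endcodeblock %}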

NOTE: If you have multiple checkboxes with the same name attribute, or a select that allows multiple values, you'll need to pass the values of the selected items in an array:

+

{% codeblock lang:javascript %}
{
  text_input: 'value1',
  checkbox_group: ['value1','value2','value3'],
  multi_select: ['value4','value5','value6']
}
{% endcodeblock %}

+

If you use a name attribute like "input[]" on a set of related checkboxes, remember that you'll need to quote it in the JSON object:

+

{% codeblock lang:javascript %}
{
  'input1[]': ['value1','value2','value3']
}
{% endcodeblock %}

+

The back story

+For a recent project, I needed to update elements on a page based on some server-side calculations. JSON is a handy way to transport data from the server back to the browser, and jQuery's getJSON() method makes it painless. So, for example, I was getting back

+

{% codeblock lang:javascript %}
{ foo: 'bar' }
{% endcodeblock %}

+

with which I needed to update an element corresponding to "foo" and give it the value "bar". Simple enough; on the page I was dealing with, it was easy to put a unique ID on every element that would need to be updated, and I matched that ID with the property name in the JSON object. It was (almost) as simple as this:

+

{% codeblock lang:javascript %}
$.each(data, function(name, value) {
  $('#' + name).val(value);
});
{% endcodeblock %}

+

It's not always the case, though, that you can easily put a unique ID on each element that will need updating; for example, PHP rewards you if you put the same name attribute on related checkboxes. Having checkboxes with the same name but different IDs can quickly get confusing when you're dealing with data on the client side and on the server side. The solution above worked in my particular case, but I wanted something that would work more broadly -- say, on pages that had related checkboxes -- and that's how I came up with the plugin.

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/update-tracking-outbound-clicks-with-google-analytics-and-jquery.html b/www/blog/update-tracking-outbound-clicks-with-google-analytics-and-jquery.html new file mode 100644 index 0000000..7728b63 --- /dev/null +++ b/www/blog/update-tracking-outbound-clicks-with-google-analytics-and-jquery.html @@ -0,0 +1,152 @@ +Tracking outbound clicks with Google Analytics and jQuery

rmurphey adventures in javascript

Tracking outbound clicks with Google Analytics and jQuery

A while back I wrote a post about tracking outbound clicks with Google Analytics; way back then (about 6 months ago), the only event that Google Analytics could track was a pageview. Now that they've introduced the _trackEvent method of the pageTracker object, events that aren't pageviews don't need to count as pageviews anymore; instead, they can be counted as "events," and they can be categorized and labeled.

+

Here's an updated example of how to track outbound clicks using Google Analytics and jQuery. You'll of course need to be including the "new" analytics code (ga.js, not urchin.js) for this to work, as well as the jQuery library.

+

{% codeblock lang:javascript %}
$('a').click(function() {
  var $a = $(this);
  var href = $a.attr('href');

  // see if the link is external
  if ( (href.match(/^http/)) && (! href.match(document.domain)) ) {

    // if so, register an event
    var category = 'outgoing'; // set this to whatever you want
    var event = 'click';       // set this to whatever you want
    var label = href;          // set this to whatever you want

    pageTracker._trackEvent(category, event, label);

  }
});
{% endcodeblock %}

+

You can use the same method to unobtrusively add tracking code to file downloads:

+

{% codeblock lang:javascript %}
var fileTypes = ['doc','xls','pdf','mp3'];

$('a').click(function() {
  var $a = $(this);
  var href = $a.attr('href');
  var hrefArray = href.split('.');
  var extension = hrefArray[hrefArray.length - 1];

  if ($.inArray(extension, fileTypes) != -1) {
    pageTracker._trackEvent('download', extension, href);
  }

});
{% endcodeblock %}

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/using-ems-for-font-sizing-in-css.html b/www/blog/using-ems-for-font-sizing-in-css.html new file mode 100644 index 0000000..f8a3782 --- /dev/null +++ b/www/blog/using-ems-for-font-sizing-in-css.html @@ -0,0 +1,131 @@ +Using ems for font sizing in css

rmurphey adventures in javascript

Using ems for font sizing in css

A List Apart had a great article recently on using 'em' for CSS font-size declarations, which served as great back-up for some conversations I'd been having among coworkers. (It turns out that people who didn't have a former life in print don't necessarily understand what an em is: a self-referential unit of font size measurement, equal to the height of the capital letter M. Back in the typesetting days, it was a unit for measuring space, especially horizontal space, in the form of an "em dash" (rendered now as &mdash;) or an "em space." There was also a sister unit, the "en.")

+

Anyway: I just came across this post, by someone trying to put ems into practice; at the end, he gets into tools for doing conversions from pixels to ems, and it struck a surprising nerve with me.

+

Here's the thing: While I can see how pixel conversion seems useful when you first make the switch from px to em, I think the whole point of using ems instead of pixels is to embrace the concept that, on the web, sizes are relative, not absolute. If you're focused on matching an absolute size in a mockup by converting pixels to ems, the chances seem good that you're dealing with a layout that wasn't intended to work with relatively sized text to begin with, and/or that your ultimate product won't work if the user makes a different decision than you about how they will consume it.

+

If you receive a Photoshop mockup where body copy is 10px tall, I think it's a tremendous waste of time to measure that 10px and convert it to ems -- 10px on my 1024x768 monitor is a far cry from 10px on my 1600x1200 monitor, nevermind on the HDTV sitting across the room.

+

When you're designing and styling text for the web, better to:

+
    +
  • Assume from the start that users will consume your site differently than you do.
  • +
  • Make decisions in your design, CSS and HTML that will consistently convey the content and hierarchy of your site, regardless of how it is consumed.
  • +
  • Assume that the user knows better than you what a good base text size is for them.
  • +
  • Use a reset stylesheet — I like Eric Meyer's CSS Reset Reloaded — to set everything to that base size.
  • +
  • Vary the sizes of elements relatively using ems.
  • +

Comments

comments powered by Disqus
\ No newline at end of file diff --git a/www/blog/using-objects-to-organize-your-code.html b/www/blog/using-objects-to-organize-your-code.html new file mode 100644 index 0000000..6815881 --- /dev/null +++ b/www/blog/using-objects-to-organize-your-code.html @@ -0,0 +1,586 @@ +Using Objects to Organize Your Code

rmurphey adventures in javascript

Using Objects to Organize Your Code

This is a reprint of an article that originally appeared in the March 2009 issue of JSMag.

+ +

When you move beyond simple snippets of jQuery and start developing more complex user interactions, your code can quickly become unwieldy and difficult to debug. This article shows you how to start thinking about these interactions in terms of the bits of behavior the feature comprises, using the object literal pattern.

+ +

In the past few years, JavaScript libraries have given beginning developers the ability to add elaborate interactions to their sites. Some, like jQuery, have a syntax so simple that people with zero programming experience can quickly add bells and whistles to their pages. Adding all those bells and whistles, even some pretty elaborate ones, seems to be just a few Google searches away. A copy here, a paste there, a plugin or a few dozen lines of custom code — the client is duly impressed, and you’re adding jQuery to your resume.

+ +

But wait. Now the requirements have changed. Now the thing that needed to work for three elements needs to work for ten. Now your code needs to be reused for a slightly different application where all the IDs are different. We’ve all seen the snippets that make jQuery (and other libraries) look dead-simple. What those snippets leave out — and hey, they’re just snippets, right? — is how to design your code when your needs go beyond dropping in a plugin or doing some show() and hide().

+ +

Introducing the Object Literal pattern

+ +

The object literal pattern offers a way to organize code by the behaviors it comprises. It’s also a means to keep your code from “polluting the global namespace,” which is a good practice for all projects and imperative for larger ones. It forces you to think at the outset about what your code will do and what pieces need to be in place in order for you to do it. An object literal is a way to encapsulate related behaviors, as shown here:

+ +

{% codeblock lang:javascript %}
var myObjectLiteral = {
    myBehavior1 : function() {
        /* do something */
    },

    myBehavior2 : function() {
        /* do something else */
    }
};
{% endcodeblock %}

+

As an artificially simplistic example, suppose you had the jQuery shown in Listing 2 for showing and hiding content when a list item was clicked.

+ +

{% codeblock lang:javascript %}
$(document).ready(function() {
  $('#myFeature li')
    .append('<div/>')
    .each(function() {
      $(this).find('div')
        .load('foo.php?item=' + $(this).attr('id'));
    })
    .click(function() {
      $(this).find('div').show();
      $(this).siblings().find('div').hide();
    });
});
{% endcodeblock %}

+

Simple enough, and yet even in this example there are several things you might want to change later — for example, the way you determine the URL for loading the content, the destination of the loaded content, or the show and hide behavior. An object literal representation of the feature cleanly separates these aspects. It might look like this:

+ +

{% codeblock lang:javascript %}
var myFeature = {
    config : {
        wrapper : '#myFeature',
        container : 'div',
        urlBase : 'foo.php?item='
    },

    init : function(config) {
        $.extend(myFeature.config, config);
        $(myFeature.config.wrapper).find('li').
            each(function() {
                myFeature.getContent($(this));
            }).
            click(function() {
                myFeature.showContent($(this));
            });
    },

    buildUrl : function($li) {
        return myFeature.config.urlBase + $li.attr('id');
    },

    getContent : function($li) {
        $li.append(myFeature.config.container);
        var url = myFeature.buildUrl($li);
        $li.find(myFeature.config.container).load(url);
    },

    showContent : function($li) {
        $li.find('div').show();
        myFeature.hideContent($li.siblings());
    },

    hideContent : function($elements) {
        $elements.find('div').hide();
    }
};

$(document).ready(function() { myFeature.init(); });
{% endcodeblock %}

+

Because the initial example was incredibly simplistic, the object literal incarnation is longer. Truth be told, the object literal method generally won’t save you lines of code. What it will save is headaches. By using an object literal, we’ve broken our code into its logical parts, making it easy to locate the things we might want to change down the road. We’ve made our feature extendable, by providing the ability to pass in overrides to the default configuration. And, we’ve done some limited self-documentation — it’s easy to see at a glance what the feature does. As your needs grow beyond the simplicity of this example the benefits of the structure will become clearer, as you’ll see below.
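
For instance, reusing the feature against different markup is just a matter of passing overrides to init() -- the selector and URL below are made up for the sake of the example:

{% codeblock lang:javascript %}
// re-use the same feature with different markup and a different data source,
// relying on init()'s $.extend of the default config
$(document).ready(function() {
    myFeature.init({
        wrapper : '#relatedItems',
        urlBase : 'bar.php?item='
    });
});
{% endcodeblock %}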

+ +

Note: For an excellent primer on objects, properties, and methods, check out Object-Oriented JavaScript: Create scalable, reusable high-quality JavaScript applications and libraries by Stoyan Stefanov. You may also want to read up on JSON (JavaScript Object Notation).

+ +

An in-depth example

+ +

Our mission will be to create a UI element that features multiple pieces of content divided into several sections. Clicking on a section will show a list of items in the section; clicking on an item in the left nav will show the item in the content area. Whenever a section is shown, the first item in the section should be shown. The first section should be shown when the page loads.

+ +

Step 1: Crafting the HTML

+ +

Writing good semantic HTML is a crucial prerequisite to writing good JavaScript, so let’s start by thinking about what the HTML for something like this might look like. The HTML should:

+ +
  • Make sense (and work) when JavaScript isn’t available.
  • Provide a predictable DOM to which we can attach JavaScript.
  • Avoid unnecessary IDs and classes (and you might be surprised by how few are necessary).

With those guidelines in mind, we’ll start with this HTML:

+

{% codeblock lang:html %}
<h1>This is My Nifty Feature</h1>

<div id="myFeature">
  <ul class="sections">
    <li>
      <h2><a href="/section/1">Section 1</a></h2>
      <ul>
        <li>
          <h3><a href="/section/1/content/1">Section 1 Title 1</a></h3>
          <p>The excerpt content for Content Item 1</p>
        </li>
        <li>
          <h3><a href="/section/1/content/2">Section 1 Title 2</a></h3>
          <p>The excerpt content for Content Item 2</p>
        </li>
        <li>
          <h3><a href="/section/1/content/3">Section 1 Title 3</a></h3>
          <p>The excerpt content for Content Item 3</p>
        </li>
      </ul>
    </li>

    <li>
      <h2><a href="/section/2">Section 2</a></h2>
      <ul>
        <li>
          <h3><a href="/section/2/content/1">Section 2 Title 1</a></h3>
          <p>The excerpt content for Content Item 1</p>
        </li>
        <li>
          <h3><a href="/section/2/content/2">Section 2 Title 2</a></h3>
          <p>The excerpt content for Content Item 2</p>
        </li>
        <li>
          <h3><a href="/section/2/content/3">Section 2 Title 3</a></h3>
          <p>The excerpt content for Content Item 3</p>
        </li>
      </ul>
    </li>

    <li>
      <h2><a href="/section/3">Section 3</a></h2>
      <ul>
        <li>
          <h3><a href="/section/3/content/1">Section 3 Title 1</a></h3>
          <p>The excerpt content for Content Item 1</p>
        </li>
        <li>
          <h3><a href="/section/3/content/2">Section 3 Title 2</a></h3>
          <p>The excerpt content for Content Item 2</p>
        </li>
        <li>
          <h3><a href="/section/3/content/3">Section 3 Title 3</a></h3>
          <p>The excerpt content for Content Item 3</p>
        </li>
      </ul>
    </li>
  </ul>
</div>
{% endcodeblock %}

+

Note that we haven’t included any markup for the section navigation or the item navigation; those pieces will be added by jQuery, since they’re only useful when JavaScript is available; non-JavaScript users will still get nice semantic markup. (If there’s anything surprising or confusing in that HTML, now would be a good time to read up on POSH (plain-old semantic HTML) and progressive enhancement.)

+

Step 2: Scaffolding the Object

+ +

My first step in creating an object for a feature is to create “stubs” within the object. Stubs are basically placeholders; they’re the outline for the feature we’re going to build. Our object will have the following methods:

+ +
  • myFeature.init() will run on $(document).ready(). It will turn the semantic HTML we start with into a JavaScript-enabled user interface.
  • myFeature.buildSectionNav() will be called by myFeature.init(). It will take a jQuery object that contains all of the sections from the semantic HTML and use those sections to build the top navigation. It will bind the click handlers to the top navigation items so that clicking on them will show the appropriate section.
  • myFeature.buildItemNav() will be called by myFeature.showSection(). It will take a jQuery object that contains all of the items associated with the section from the semantic HTML, and use them to build the side navigation. It will bind the click handlers to the side navigation items so that clicking on them will show the appropriate content.
  • myFeature.showSection() will be called when the user clicks on an item in the top navigation. It will use the navigation item that’s clicked on to figure out which section to show from the semantic HTML.
  • myFeature.showContentItem() will be called when the user clicks on an item in the side navigation. It will use the navigation item that’s clicked on to figure out which content item to show from the semantic HTML.

We’ll also make room for a configuration property, myFeature.config, which will be a single location for setting default values rather than scattering them throughout the code. We’ll include the ability to override the defaults when we define the myFeature.init() method.

+ +

{% codeblock lang:javascript %}
var myFeature = {
    'config' : { },
    'init' : function() { },
    'buildSectionNav' : function() { },
    'buildItemNav' : function() { },
    'showSection' : function() { },
    'showContentItem' : function() { }
};
{% endcodeblock %}

+

Step 3: The Code

+ +

Once we’ve built this skeleton, it’s time to start coding. Let’s start by setting up a simple myFeature.config object and writing the myFeature.init() method:

+ + +

{% codeblock lang:javascript %}
'config' : {
    // default container is #myFeature
    'container' : $('#myFeature')
},

'init' : function(config) {
    // provide for custom configuration via init()
    if (config && typeof(config) == 'object') {
        $.extend(myFeature.config, config);
    }

    // create and/or cache some DOM elements
    // we'll want to use throughout the code
    myFeature.$container = myFeature.config.container;

    myFeature.$sections = myFeature.$container.
        // only select immediate children!
        find('ul.sections > li');

    myFeature.$section_nav = $('<ul/>')
        .attr('id', 'section_nav')
        .prependTo(myFeature.$container);

    myFeature.$item_nav = $('<ul/>')
        .attr('id', 'item_nav')
        .insertAfter(myFeature.$section_nav);

    myFeature.$content = $('<div/>')
        .attr('id', 'content')
        .insertAfter(myFeature.$item_nav);

    // build the section-level nav and
    // "click" the first item
    myFeature.buildSectionNav(myFeature.$sections);
    myFeature.$section_nav.find('li:first').click();

    // hide the plain HTML from sight
    myFeature.$container.find('ul.sections').hide();

    // make a note that the initialization
    // is complete; we don't strictly need this
    // for this iteration, but it can come in handy
    myFeature.initialized = true;
}
{% endcodeblock %}

+

Next we’ll create the myFeature.buildSectionNav() method:

+ +

{% codeblock lang:javascript %}
'buildSectionNav' : function($sections) {

    // iterate over the provided list of sections
    $sections.each(function() {

        // get the section
        var $section = $(this);

        // create a list item for the section navigation
        $('<li/>')
            // use the text of the first h2
            // in the section as the text for
            // the section navigation
            .text($section.find('h2:first').text())

            // add the list item to the section navigation
            .appendTo(myFeature.$section_nav)

            // use data() to store a reference
            // to the original section on the
            // newly-created list item
            .data('section', $section)

            // bind the click behavior
            // to the newly created list item
            // so it will show the section
            .click(myFeature.showSection);
    });
}
{% endcodeblock %}

+

Next we’ll create the myFeature.buildItemNav() method:

+ +

{% codeblock lang:javascript %}
'buildItemNav' : function($items) {

    // iterate over the provided list of items
    $items.each(function() {

        // get the item
        var $item = $(this);

        // create a list item element for the
        // item navigation
        $('<li/>')
            // use the text of the first h3
            // in the item as the text for the
            // item navigation
            .text($item.find('h3:first').text())

            // add the list item to the item navigation
            .appendTo(myFeature.$item_nav)

            // use data() to store a reference
            // to the original item on the
            // newly created list item
            .data('item', $item)

            // bind the click behavior to the
            // newly created list item so it will
            // show the content item
            .click(myFeature.showContentItem);
    });
}
{% endcodeblock %}

+

Finally, we’ll write the methods for showing sections and content items:

+ +

{% codeblock lang:javascript %}
'showSection' : function() {
    // capture the list item that was clicked on
    var $li = $(this);

    // clear out the left nav and content area
    myFeature.$item_nav.empty();
    myFeature.$content.empty();

    // get the jQuery section object from the original HTML,
    // which we stored using data() during buildSectionNav
    var $section = $li.data('section');

    // mark the clicked list item as current
    // and remove the current marker from its siblings
    $li.addClass('current')
        .siblings().removeClass('current');

    // find all of the items related to the section
    var $items = $section.find('ul li');

    // build the item nav for the section
    myFeature.buildItemNav($items);

    // "click" on the first list item in the section's item nav
    myFeature.$item_nav.find('li:first').click();
},

'showContentItem' : function() {
    var $li = $(this);

    // mark the clicked list item as current
    // and remove the current marker from its siblings
    $li.addClass('current')
        .siblings().removeClass('current');

    // get the jQuery item object from the original HTML,
    // which we stored using data() during buildItemNav
    var $item = $li.data('item');

    // use the item's HTML to populate the content area
    myFeature.$content.html($item.html());
}
{% endcodeblock %}

+

All that’s left to do is to call the myFeature.init() method:

+ +

{% codeblock lang:javascript %}
$(document).ready(myFeature.init);
{% endcodeblock %}

+

Step 4: Changing Requirements

+ +

No project is complete without some last-minute change in the requirements, right? Here’s where the object literal approach really shines by making it quick and fairly painless to implement last-minute changes. What if we need to get the content item excerpts via AJAX instead of from the HTML? Assuming the backend is set up to handle it, try this:

+ +

{% codeblock lang:javascript %}
var myFeature = {

    'config' : {
        'container' : $('#myFeature'),

        // configurable function for getting
        // a URL for loading item content
        'getItemURL' : function($item) {
            return $item.find('a:first').attr('href');
        }
    },

    'init' : function(config) {
        // stays the same
    },

    'buildSectionNav' : function($sections) {
        // stays the same
    },

    'buildItemNav' : function($items) {
        // stays the same
    },

    'showSection' : function() {
        // stays the same
    },

    'showContentItem' : function() {
        var $li = $(this);

        $li.addClass('current').
            siblings().removeClass('current');

        var $item = $li.data('item');
        var url = myFeature.config.getItemURL($item);

        // myFeature.$content.html($item.html());
        myFeature.$content.load(url);
    }
};
{% endcodeblock %}

+

Do you need more flexibility? There’s a lot more you can configure (and therefore override) if you really want to make this flexible. For example, you can use myFeature.config to specify how to find and process the title text for each item in the left nav.

+ +

{% codeblock lang:javascript %}
var myFeature = {
    'config' : {
        'container' : $('#myFeature'),

        // specify the default selector
        // for finding the text to use
        // for each item in the item nav
        'itemNavSelector' : 'h3',

        // specify a default callback
        // for "processing" the jQuery object
        // returned by the itemNavSelector selector
        'itemNavProcessor' : function($selection) {
            return 'Preview of ' +
                $selection.eq(0).text();
        }
    },

    'init' : function(config) {
        // stays the same
    },

    'buildSectionNav' : function($sections) {
        // stays the same
    },

    'buildItemNav' : function($items) {
        $items.each(function() {
            var $item = $(this);

            // use the selector and processor
            // from the config
            // to get the text for each item nav
            var myText = myFeature.config.itemNavProcessor(
                $item.find(myFeature.config.itemNavSelector)
            );

            $('<li/>')
                // use the new variable
                // as the text for the nav item
                .text(myText)
                .appendTo(myFeature.$item_nav)
                .data('item', $item)
                .click(myFeature.showContentItem);
        });
    },

    'showSection' : function() {
        // stays the same
    },

    'showContentItem' : function() {
        // stays the same
    }
};
{% endcodeblock %}

+

Once you’ve added defaults to the config object, you can override them when you call myFeature.init():

+ +

{% codeblock lang:javascript %}
$(document).ready(function() {
    myFeature.init({ 'itemNavSelector' : 'h2' });
});
{% endcodeblock %}

+

Beyond the scope of this article (but also interesting to contemplate and much easier with the object literal pattern) is this: making the back button retrace your path through the tabs using the jQuery history plugin. I leave it as an exercise for the reader.

+ +

Conclusion

+ +

If you’ve stepped through the code examples in this column, you should have a basic understanding of the object literal pattern and how it might prove useful to you as you develop more complex features and interactions. You also have access to some code that you can use to build on this basic foundation.

+ +

I encourage you to give this pattern a try the next time you find yourself writing more than a few lines of JavaScript — it forces you to think through the elements and behaviors that make up a complex feature or interaction. Once you become proficient, it provides a sturdy foundation for extending and reusing your code.

+ +

Learn More

+ +

\ No newline at end of file diff --git a/www/blog/when-you-re-building-a-non-trivial-js-application.html b/www/blog/when-you-re-building-a-non-trivial-js-application.html new file mode 100644 index 0000000..3e54d07 --- /dev/null +++ b/www/blog/when-you-re-building-a-non-trivial-js-application.html @@ -0,0 +1,124 @@ +When you're building a non-trivial JS application ...


When you're building a non-trivial JS application ...

I sense another round of discussion of this is about to begin, and 140 characters isn't quite enough to say what I want to say, so:

+

When you're building a non-trivial JS application, you don't want a jQuery developer, or a Dojo developer, or a YUI developer, or, frankly, any developer who chooses their tool before they evaluate the problem. For god's sake, you want a JavaScript developer. Can you roll your own solution with jQuery as the base? Yes! Should you? I don't think so, and I advise my clients against it for reasons I've written about at length, but I'm open to hearing compelling, articulate, fact-based arguments in favor of it!

+

But do me a favor, OK? Don't base your arguments solely on the winner of a popularity contest. Don't tell me how easy it is to find developers familiar with one library or another, because I'll come right back and ask you just how good those developers will be at solving problems that aren't addressed by said library. And please tell me you've at least explored some of the other options besides [insert the library you're advocating here]. 

+

People read what I write about JavaScript libraries and they write me heartfelt tweets and e-mails saying OMG YOU HATE JQUERY NOW WHAT HAPPENEDDDDD? I don't hate jQuery! It is a perfectly viable and valuable tool for so many things! But when people argue not just its viability but its absolute supremacy, when people get defensive and possibly even angry that I suggest there are solutions that are vastly better suited to a certain set of problems, when people contort themselves into pretzels to make their case and their case is "well, it's not that bad" ... well, that smacks of blind loyalty, not a thoughtful weighing of the tradeoffs and challenges we face as developers, and I question how those people would fare if actually confronted with the needs of a non-trivial application. 

+

So, please: Tell me what solutions you've looked at for non-trivial application development. Tell me where they work, tell me where they fall short. Tell me what you're working on and how you chose the tools. Don't tell me why I'm wrong -- tell me why you're right. Deal? Discuss.

\ No newline at end of file diff --git a/www/blog/writing-conference-proposals.html b/www/blog/writing-conference-proposals.html new file mode 100644 index 0000000..d606e0f --- /dev/null +++ b/www/blog/writing-conference-proposals.html @@ -0,0 +1,136 @@ +Writing Conference Proposals


Writing Conference Proposals

I've had several office hours sessions in the last couple of weeks, and one topic that comes up again and again is how to write a talk description.

+

If you think about it, conference organizers don't have a whole lot to go on when they're choosing talks, unless they already know who you are. Even if your name is well-known, though, organizers may still not know who you are -- lots of conferences are taking a blind approach to selecting speakers. That means that no matter who you are, your talk description might be the only thing organizers have on which to base their decision. When you give your talk, you'll need to engage your audience; the abstract is your chance to engage the organizer.

+

After answering the question several times, I've realized that I have a pretty explainable -- some might call it formulaic -- approach to writing abstracts for a certain common type of talk. It works well for talks about how you solved a problem, talks about how you came to learn a thing you didn't know, and even "10 things you didn't know about X" talks. I thought I'd try to explain it here.

+

Paragraph 1: The context

The first paragraph is where you set the scene, and make it clear to your reader that they have been in the situation you're going to talk about. This is where you establish a connection, baiting a hook that you'll set later.

+
+

You've got the hang of this whole JavaScript thing. Your code works on ancient browsers, and positively sings on new ones. AMD, SPA, MVC -- you can do that stuff in your sleep.

+
+

Paragraph 2: Well, actually ...

The second paragraph is where you break the bad news, which savvy readers may already know: the thing you laid out in the first paragraph is more complicated than it seems, or has downsides that people don't realize, or generally is a bad approach ... but only with the benefit of hindsight, which you just happen to have.

+
+

But now your users are trying to type in your Very Important Form, and nothing is showing up; that widget that's supposed to end up in a certain div is showing up somewhere completely different; and, rarely but not never, your app just doesn't load at all. You thought you had the hang of this whole JavaScript thing, but now you're in the world of third-party JavaScript, where all you control is a single script tag and where it's all but impossible to dream up every hostile environment in which your code will be expected to work. "It works on my machine" has never rung quite so hollow.

+
+

Paragraph 3: The promise

You've successfully induced a bit of anxiety in your reader -- and a strong desire to know what they don't know. The hook is set, so the last paragraph is the time to promise to relieve that anxiety -- but only if your talk is chosen!

+
+

In this talk, we'll take a look at some of the delightful bugs we've had to solve at Bazaarvoice while working on the third-party JavaScript app that collects and displays ratings and reviews for some of the world's largest retailers. We'll also look at some strategies for early detection -- and at some scenarios where you are just plain SOL.

+
+

Next

It turns out that in the process of writing your abstract, you've also written the most basic outline for your talk: on stage, you'll want to set the context, explain the complexity, then deliver on your promise. Pretty handy, if you ask me.

\ No newline at end of file diff --git a/www/feed.xml b/www/feed.xml new file mode 100644 index 0000000..5d6f3a2 --- /dev/null +++ b/www/feed.xml @@ -0,0 +1,2623 @@ +Adventures in JavaScripthttp://rmurphey.comWed, 25 Nov 2015 14:00:00 +0000en-UShourly1Building for HTTP/2building-for-http2http://rmurphey.com/2015/11/25/building-for-http2Wed, 25 Nov 2015 14:00:00 +0000Earlier this year, I got the chance to speak with Google's Ilya Grigorik about HTTP/2 for the 1.10 episode of the TTL Podcast. It was a great primer for me on how HTTP/2 works and what it means for how we build the web, but it wasn't until more recently that I started to think about what it means for how we build the web — that is, how we generate and deploy the HTML, CSS, and JS that power web applications.

+

If you're not familiar with HTTP/2, the basics are simultaneously simple and mind-boggling. Whereas its predecessors allowed each connection to a server to serve only one request at a time, HTTP/2 allows a connection to serve multiple requests simultaneously. A connection can also be used for a server to push a resource to a client — a protocol-level replacement for the technique we currently call “inlining.”

+

This is everything-you-thought-you-knew-is-wrong kind of stuff. In an HTTP/2 world, there are few benefits to concatenating a bunch of JS files together, and in many cases the practice will be actively harmful. Domain sharding becomes an anti-pattern. Throwing a bunch of <script> tags in your HTML is suddenly not a laughably terrible idea. Inlining of resources is a thing of the past. Browser caching — and cache busting — can occur on a per-module basis.

+

What does this mean for how we build and deploy applications? Let's start by looking at the state of the art in client-side application deployment prior to HTTP/2.

+

Deploying JavaScript Applications (2013)

In March of 2013, Alex Sexton wrote Deploying JavaScript Applications, and it's what I consider to be the canonical post on the topic for sites and apps that include more than about 50K of client-side code.

+

In his post, Alex describes a deployment that uses a "scout" approach: a small bit of code, included directly in the HTML or loaded via <script> tag.

+

The scout file exists to balance the desire for application resources to be highly cacheable vs. the need for changes to those resources to take effect quickly.

+

To meet that goal, the scout needs a short cache time when it's a file; if the scout is in the HTML, then the HTML itself needs a short cache time. The scout contains information about the location of the file(s) that provide the current version of the application, and the code necessary to load those files.

+

Files loaded by the scout can have extremely long cache times because the scout loads resources from versioned URLs: when a resource is updated, it is hosted at a new URL, and the scout is updated to load the resource from that new URL.

+

Why a scout approach rather than just loading the versioned files using <script> tags directly from the HTML? The scout technique lets you deploy changes to your JavaScript application without requiring a re-deploy of the server-side application. (In an ideal world this might not seem valuable, but in the real world, it often is.) When the scout is served separately from the HTML, it also allows for a different caching strategy for the HTML.

+

In this system, it's typical that the scout would load one or two JavaScript files that were generated by combining the modules needed for the initial state of the application. More code might be loaded later to support additional application behavior; again, that code would typically comprise a set of modules shipped in a single file.
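To make that arrangement concrete, here's a rough sketch of what such a scout might look like; the file names, version numbers, and the loadScript helper are hypothetical rather than taken from Alex's post:

// A sketch of a scout: a small, short-cached file that knows the current
// versioned URLs and loads them. All names here are made up for illustration.
(function() {
  var baseUrl = 'https://static.example.com/myapp/';

  // these URLs change on every deploy; the files they point to are
  // cached for a very long time
  var resources = [
    'vendor-v42.js',
    'application-v42.js'
  ];

  function loadScript(src) {
    var script = document.createElement('script');
    script.src = src;
    script.async = true;
    document.head.appendChild(script);
  }

  for (var i = 0; i < resources.length; i++) {
    loadScript(baseUrl + resources[i]);
  }
}());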

+

There are a few shortcomings inherent to this approach, which are difficult to overcome without upsetting the balance between cacheability and changeability:

+
  • Shipping the application as a large file with a long cache time works great for repeat visitors, but not so well for first-timers who have to wait for the large file to load.
  • All users have to download the whole large file again whenever something changes — even something small.
  • Even when nothing changes, a short cache time means repeat visitors may end up re-downloading the scout frequently.

Adding HTTP/2 to the mix — that is, flipping the switch that gets your server to start speaking HTTP/2 to browsers that understand it — has a nominal positive impact on the performance of an app crafted for maximum performance on HTTP/1. Indeed, the applications most likely to see big improvements without big changes are applications whose deployments were poorly designed in the first place.

+

To see performance gains in a well-engineered deployment, we'll have to re-engineer the deployment itself.

+

Splitting it up

One of the most obvious opportunities is presented by HTTP/2's ability to handle multiple requests over the same connection. Rather than shipping a single large application file over the wire, what if we tell the scout to load the individual modules that make up the application? We would no longer have to invalidate the cache for the whole application every time we make a change.

+

A few reasons come to mind why this might be a bad idea.

+

The first is the concern that compression might suffer if modules are shipped individually. As it turns out, though, combining multiple modules into a single file results in only slightly better compression than if the modules are compressed individually. For example, compressing a file containing minified versions of jQuery, Underscore, and Backbone results in a 42,186-byte file; compressing each minified file individually results in a combined size of 42,975 bytes. The difference is 789 bytes -- barely meaningful.

+

A second concern may be more legitimate: our server or CDN may be unhappy about serving one request per module, and it may be unduly complex to ship a single module per file, especially since any given request might fail for whatever reason. For the sake of discussion, we'll assume that it's reasonable to do some grouping of modules into individual files.

+

How to group those modules is up for debate. One strategy could be to group files according to their likelihood of changing, recognizing that library and framework modules don't change often, while application modules do. Another strategy would be to group files associated with a unit of useful functionality, though this leaves us needing a way to deliver code that's shared across units of functionality.
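As a sketch of the first strategy, a Webpack configuration might emit one bundle for rarely-changing vendor modules and another for application modules, naming each output file with a hash of its contents; the entry points and paths below are hypothetical:

// Sketch: group modules by likelihood of change. Vendor code lands in one
// file, application code in another, each named with a content hash so the
// name only changes when the contents do.
module.exports = {
  entry: {
    vendor: [ 'jquery', 'underscore', 'backbone' ],
    application: './lib/main.js'
  },
  output: {
    path: __dirname + '/dist',
    filename: '[name]-[chunkhash].js'
  }
};

(In a real build, modules shared between bundles would need additional handling so they aren't duplicated across files.)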

+

At Bazaarvoice, we solve this concern via a lightweight require/define system that ships in the scout file, allowing us to share vendor files such as jQuery and Backbone across applications. An application can express a dependency on a vendor file using NAMESPACE.require(), and vendor files declare themselves using NAMESPACE.define(). Once a vendor file has been defined, other modules on the page have access to it immediately via NAMESPACE.require().
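A minimal sketch of that kind of registry might look something like this; the NAMESPACE name and the callback behavior are illustrative, not Bazaarvoice's actual implementation:

// Sketch of a lightweight define/require registry that a scout could ship.
var NAMESPACE = window.NAMESPACE = window.NAMESPACE || {};
NAMESPACE._modules = {};
NAMESPACE._waiting = {};

// A vendor file declares itself once it has loaded.
NAMESPACE.define = function(name, value) {
  NAMESPACE._modules[name] = value;
  (NAMESPACE._waiting[name] || []).forEach(function(cb) { cb(value); });
  delete NAMESPACE._waiting[name];
};

// Application code asks for a module; the callback fires immediately if the
// module is already defined, or later when it arrives.
NAMESPACE.require = function(name, cb) {
  if (NAMESPACE._modules.hasOwnProperty(name)) {
    cb(NAMESPACE._modules[name]);
    return;
  }
  (NAMESPACE._waiting[name] = NAMESPACE._waiting[name] || []).push(cb);
};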

+

Versioning

For HTTP/1.1-friendly builds, we always increment the version of the built application file, and embed a URL pointing to that new version in the scout file. We do this because it is essentially guaranteed that the contents of the application file have changed whenever we do a new build -- otherwise there would be no reason for the build.

+

For HTTP/2-friendly builds, we’re generating many smaller files; we only want to increment their version when something has changed.

+

For example, imagine a build that generates vendor-v1.js and application-v1.js; it also generates a scout that loads these two files. We then make a change to an application file, and we do another build, creating vendor-v2.js and application-v2.js. However, no vendor files have changed; our scout should now load application-v2.js but still load vendor-v1.js. If our scout points to vendor-v2.js, we lose the benefit of being able to cache smaller pieces of our code.

+

This can be solved by using hashes of the file contents rather than version numbers: vendor-d41d8cd98f.js. If a file has not changed, its hash will remain the same. (Notably, inconsequential changes will change the hash -- for example, a new copyright comment that is inserted post-minification.) Plenty of build strategies already use content hashes for versioning; however, many still use integers, dates, or commit hashes, which change even when the contents of a file have not.
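For instance, a build script might derive that name from a hash of the minified file's contents; this is a sketch using Node's crypto module, with a hypothetical file path:

var crypto = require('crypto');
var fs = require('fs');

// Hash the built file so its name changes only when its contents change.
var contents = fs.readFileSync('dist/vendor.min.js');
var hash = crypto.createHash('md5')
  .update(contents)
  .digest('hex')
  .slice(0, 10);

var versionedName = 'vendor-' + hash + '.js'; // e.g. vendor-d41d8cd98f.js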

+

Given files whose names include a hash, our scout can include a manifest that prescribes the file to load for a given resource. The manifest would be generated by the build after all of the resources were generated.

+
module.exports = {
+  baseUrl : 'https://mysite.com/static/',
+  resources : {
+    vendor : 'vendor-d41d8cd98f.js',
+    application : 'application-a32e3ec23d.js'
+  }
+};
+
+

Push: Because you downloaded scout.js, you might also like ...

Another exciting opportunity in an HTTP/2 world is the ability to push a cascade of resources.

+

The first push opportunity is the scout itself: for sites and applications that currently ship the scout inlined in the initial HTML payload, server push affords an opportunity to send the scout as a separate resource when the initial HTML is requested.

+

There’s an interesting dilemma here: If the browser already has the resource cached, and the cache is still valid, it doesn’t need the server to push the resource. Currently, though, there’s no way for the browser to communicate its cache contents to the server. A browser can decline a push, but the server may have already started to send it. We’ve basically introduced a new tradeoff: server push can get the resource to the browser quickly, but we waste bandwidth if the browser doesn’t need it.

+

As discussed at the link above, a smart server could use session information to determine when to push -- for example, if the page is reloaded within a resource’s cache time, there is no need to re-push that resource to the same session -- but this makes push state-dependent, a frightening prospect if we hope to use CDNs to ensure efficient asset delivery.

+

Assuming we've generated a manifest as described above, we have the option of going a step further: we can separate the manifest and the scout, allowing the scout to have a far longer cache time than in a pre-HTTP/2 world. This is possible because the thing that typically changes about the scout is the version of the resources it loads, and it makes the most sense on a site where there are different payloads for different pages or users. For applications that previously included the scout in HTML, we can push the scout and the manifest, and have the scout request the manifest; for applications that loaded the scout as its own JS file, we can push the manifest when the scout file is loaded and, again, have the scout request the manifest.

+

This approach also makes a further case for a standardized scout: application-specific configuration can be shipped in the manifest, and a standardized scout can be shared across applications. This scout could be a file loaded via a script tag, where the script tag itself provides information about the application manifest to use:

+
<script src="/static/shared/js/scout.js"
+  data-manifest="/static/apps/myapp/manifest.js"></script>
+
+

The manifest contains information about the other resources that the scout will request, and can even be used by the server to determine what to push alongside the HTML.
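As a sketch, a shared scout could locate its application-specific manifest by reading its own script tag; this assumes the build also emits the manifest in a form the browser can execute directly, and the element lookup and attribute name simply follow the example above:

// Sketch: find the scout's own <script> tag, read data-manifest, and load
// the manifest it points to like any other script.
var scoutScript = document.currentScript ||
  document.querySelector('script[data-manifest]');
var manifestUrl = scoutScript.getAttribute('data-manifest');

var manifestScript = document.createElement('script');
manifestScript.src = manifestUrl;
// once the manifest has run, the scout can use its baseUrl and resources
// to request the application files (or the server may have pushed them already)
document.head.appendChild(manifestScript);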

+

A manifest could provide these instructions:

+
module.exports = {
+  baseUrl : 'https://mysite.com/static/',
+  resources : {
+    vendor : {
+      version : 'vendor-d41d8cd98f.js',
+      pushWith : [ 'scout' ]
+    },
+    application : {
+      version : 'application-a32e3ec23d.js',
+      pushWith : [ 'scout' ]
+    },
+    secondary : {
+      version : 'secondary-e43b8ad12f.js',
+      pushWith : [ ]
+    }
+  }
+};
+
+

Processing this manifest would require intelligence on the part of the CDN; it may be necessary to replace S3 storage with an actual server that is capable of making these decisions, fronted by a CDN that can intelligently relay responses that include server push.

+

The elephants in the room

There are two notable challenges to the rapid transition to an HTTP/2 world: the continued existence of legacy browsers, especially on mobile; and the requirement that HTTP/2 connections be conducted over TLS. Thankfully, the latter provides a reasonable opportunity to address the former. Let's, then, talk about the TLS requirement first.

+

HTTP/2 is a new protocol, and as such, it is greatly confusing to a large segment of the existing internet: proxies, antivirus software, and the like. During the development of HTTP/2 and SPDY before it, engineers observed that traffic that was transported on an insecure connection would frequently fail. The reason? The proxies, the antivirus software, and all the rest had certain expectations of HTTP traffic; HTTP/2 violated those expectations, and so HTTP/2 traffic was considered unsafe. The software that thwarted insecure HTTP/2 traffic didn't have the ability to inspect secure traffic, and so HTTP/2 traffic over a secure connection passed through just fine. Thus was born the requirement — which is a browser implementation detail, and not part of the HTTP/2 spec — that HTTP/2 web communication be conducted using TLS.

+

The Let's Encrypt project aims to eliminate the high cost of obtaining the certificate that enables secure HTTP communication; there will still be technical hurdles to using that certificate, but those should be surmountable for anyone who cares enough to engineer a performant HTTP/2 deployment.

+

In order for a browser and a server to communicate using HTTP/2, the browser and the server must first agree that they can. The TLS handshake that enables secure communication turns out to be the ideal time to negotiate the communication protocol, as well: no additional round trip is required for the negotiation.

+

When a server is handling a request, it knows whether the browser understands HTTP/2; we can use this information to shape our payload. We can send a legacy browser an HTML file that includes an inlined scout file, and that inlined scout file can include the manifest. The manifest can provide information about how to support legacy browsers:

+
module.exports = {
+  baseUrl : 'https://mysite.com/static/',
+  resources : {
+    // ...
+  },
+  legacyResources : {
+    legacyMain : {
+      initialLoad : true,
+      version : 'legacy-main-c312efa43e.js'
+    },
+    legacySecondary : {
+      version : 'legacy-secondary-a22cf1e2af.js'
+    }
+  }
+};
+
+

For Consideration: HTTP/2-friendly deployments with HTTP/1.1 support

Putting the pieces together, we arrive at a deployment process that does the following:

+
  • Generates files that contain one or more modules, grouped by likelihood of changing, functionality, or another strategy. The file grouping strategy must persist across builds; new groupings would need a new, unique name that had not been used by earlier builds.
  • Generates legacy files, where those files contain modules that are grouped according to their likelihood to change, and according to whether they are required for initial load.
  • Names all files with a content hash.
  • Generates a manifest for the build, where the manifest includes:
      • a baseUrl property whose value is a string that should be used as the base for generating a full URL to a resource, using the pattern <baseUrl>/<resource.version>
      • a resources property whose value is an object that, for each file, provides:
          • the most recent changed version
          • a list of individual files which, when any of the files is requested, should trigger a push of the bundle
      • a legacyResources property whose value is an object that, for each legacy bundle, provides:
          • the most recent changed version
          • an optional initialLoad property whose value is true if the resource should be loaded immediately by the scout
  • Generates an HTTP/2 scout file* that provides the ability to load resources, and that loads a manifest.
  • Generates an HTTP/1 scout file* that provides the ability to load resources, and that includes the manifest.
  • Uploads the static resources.
  • Updates a delivery mechanism (such as a server or a CDN) based on the data in the new manifest.

The versioning and caching of the resources would be as follows:

+
  • manifest: Unversioned. Short cache time, e.g. 10 minutes, to allow for the rapid uptake of new resources for HTTP/2 browsers.
  • scout: Unversioned. Medium cache time, e.g. one day, assuming the contents of this file are considered relatively stable.
  • legacy-scout: Unversioned. Short cache time, e.g. 10 minutes, to allow for the rapid uptake of new resources for legacy browsers.
  • application and vendor files: Versioned. Long cache time, e.g. one year, given that new versions will be picked up when a new manifest is loaded.

* In applications that a) control the initial HTML payload, and b) only use the scout to load other resources, it may not make sense to have a separate scout; it might be sufficient to just load those resources via <script> and <link> tags in the HTML itself. This approach isn't viable for applications that do not control the initial HTML payload, such as third-party applications.

+

Reality check

In several places so far, I’ve talked about the need for a server to make decisions about which resources it delivers, and when and how it delivers them. As I alluded to earlier, this could be profoundly challenging for CDNs, which traditionally simply receive a request and return a single resource in response. It also suggests the need for close collaboration between client and server development teams, and an increased knowledge of server-side technology for client-side developers.

+

CDN support of HTTP/2 in general is rather disappointing, with some major vendors providing nothing more than vague timelines for non-specific support.

+

As of this writing, I'm unaware of CDNs that support any notion of server push, but I'd be happy to find I am ill-informed. Ideally, CDNs need to provide applications with the ability to express how static assets relate to each other -- a task complicated by the fact that those relationships may be situational, such as in the case where an application doesn't want to push an asset that was just pushed to the same client 10 seconds before. One-size-fits-all push could be accomplished by setting a header on a file, indicating that other files should be pushed alongside it, but that doesn't allow for expressing more nuanced rules.

+

Even for applications that just want to split their payload into smaller files to take advantage of HTTP/2, and that don't intend to use server push, there is still a gap when it comes to providing a positive experience for HTTP/1.1 clients. CDNs need to surface the ability to change a response not just based on the URL that is requested, but the protocol of the request. Without this ability, we'll be stuck having to choose which protocol to support.

+

There is also work to be done on tooling, especially if we want to support HTTP/2 without significantly degrading the experience for legacy browsers. Ideally, our build tooling would figure out the optimal combination of files for us, with a knowledge of how the application was bundled previously so as not to squander past caching.

+

The developer story for HTTP/2 also leaves a lot to be desired as of this writing. Front-end developers are among the most likely in an organization to advocate for this new technology, but my experiences over a few weeks of learning about HTTP/2 suggest that the effort required to set up even a local environment will stretch the comfort zone for many. With a working local environment in hand, the tools to understand the differences between HTTP/2 and HTTP/1 behavior are limited and often confusing. Chrome presents information in its network tab that seems to conflict with the wall of text in its net-internals tool, especially when it comes to server push . Charles Proxy doesn't yet speak HTTP/2. Firefox shows pushed resources as an entry in the network tab, but they appear as though they were never received. nghttp2 provides great insight into how an HTTP/2 server is behaving, but it doesn't speak HTTP/1.1, so you can't use it to do comparisons. Measuring performance using a tool like WebPagetest requires a real certificate, which you may not have handy if you're just trying to experiment.

+

Alex wrote his 2013 post to document the product of years of experience in creating performant HTTP/1.1 deployments. HTTP/2 means we need to rethink everything we know about shipping applications to the web, and while the building blocks are there, there's still much to figure out about how we'll use them; the "right" answers are, in many cases, still TBD while we wait for vendors to act.

+

Further Reading

I've been bookmarking useful HTTP/2 resources as I come across them.

+

Thanks

Thanks to the many folks who have talked to me about the ideas in this post, but especially to Lon Ingram, Jake Archibald, and Andy Davies.

+]]>
Five Questionsfive-questionshttp://rmurphey.com/2015/10/08/five-questionsThu, 08 Oct 2015 22:00:00 +0000I recently started in a new role: I'm the dev lead of a project that was already in the hands of a group of skilled developers before I showed up, a project whose scope and technologies extend far beyond the experiences I've had up until now.

+

As you might imagine, there have been a lot of challenges, but one that's been particularly interesting has been figuring out how to meaningfully contribute to decisions about systems I don't intimately understand. It's easy to be inclined to sit those conversations out: I really don't yet know enough to participate, and the "who am I to have a say?" instinct is strong.

+

The problem: that attitude will ensure that I never know enough to participate, and though I am definitely out of my comfort zone, my job -- the job I asked to do, and the job I have been asked to do -- is to participate, to learn, and to change the definition of my comfort zone.

+

While I may not have the project-specific experience to lean on, I'm finding that there are a few questions that help me understand, discuss, and -- ultimately -- consent or object to a technical plan. They're questions that seem to work well across a spectrum of discussions; they work whether we're talking about a wholly new system, a migration from an old system, or a solution to a particularly prickly problem.

+

These questions don't just help me gain a better understanding of a topic, or help me make better decisions; they've also helped me reframe my understanding of my role as a lead.

+

Question 1: What are we doing and why?

When I hear the answer, I'm listening for whether the developer is clearly articulating the problem and the solution. Do we clearly understand the problem? Is the solution magical, or can we explain why it works? Are we solving more than the problem, and thereby incurring unnecessary risk? Does the developer agree that the work is necessary?

+

Question 2: How could it go wrong?

A developer who says nothing can go wrong probably hasn't been a developer for very long. I want to hear far-fetched scenarios, and an explanation for why they're far-fetched. I want to hear worst-case scenarios; good developers have already thought about these plenty, they've worked to avoid them, and yet they acknowledge their existence. The goal of this question isn't to plan for everything; rather, the answers provide context for poking at assumptions.

+

Question 3: How will we know if it's going wrong?

This is probably my favorite question. If we're talking about developing a new system or project, it's a question of how we'll know we're off track, which leads to clear milestones and check-in points. If it's a migration to a new system, or a solution to a bad bug, it's a question of how we'll know that the new state is less good than we thought it would be. If the answer is "customers will tell us," we're in dangerous territory. For services, I hope to hear answers about automated monitoring, but manual checks will suffice. For new projects, I hope to hear near-term goals that will help us gauge progress.

+

Question 4: What will we do if it goes wrong?

The answer to this may not always be knowable -- obviously we won't always know the ways things will go wrong -- but it's a useful exercise nonetheless. The answer may be "we'll have to revert back to the old system and that will be very hard," but that at least helps me understand the stakes of the decision. For new projects, this is a great way to identify the point of no return -- that is, the point in the project where starting over or changing course becomes prohibitive.

+

Question 5: Is there an "undo" button?

Sometimes, the worst happens. Do we have an escape hatch? How hard will it be to add one later vs. adding one now? Again, it may be OK if we don't have a rollback plan, but knowing that answer should help guide the decision about whether to proceed.

+
+

I'm learning that a lot of what makes me kind of OK (I hope!) at this dev lead thing isn't a deep knowledge of the specific technologies that are the underpinning of the project (though it's certainly important that I be able to find my way around). Rather, it's my ability to ask these questions, and to hear and understand the answers, and interpret them into action. I'm thankful to the team that is giving me the chance.

+]]>
Pausing Office Hourspausing-office-hourshttp://rmurphey.com/2015/08/04/pausing-office-hoursTue, 04 Aug 2015 20:00:00 +0000I started doing office hours at the beginning of the year; it's been tremendous fun, super eye-opening, and just generally quite rewarding. I won't lie: it's also been a lot of time, especially the first few weeks when I terribly underestimated how many people would sign up.

+

I was out for dinner with a friend tonight and she asked me how it was going, and I had to pause for a second when I realized that we might not be sitting there, right then, if she hadn't signed up for a slot. Things like that -- and getting to see, on stage, speakers who trusted me with their talk idea months ago -- make me super-glad that I let (mostly) strangers put 30 minutes on my calendar a couple-few dozen times these last few months. Hopefully I helped a few people along the way too.

+

In the last couple of weeks I've started a new role at Bazaarvoice: after a few months of working on some fairly independent projects, I'm back to leading a team. I used to get a kick out of being the female lead of an otherwise-all-male team; now I'm honored and humbled to get to work with a team where I am one of five women. The ratio isn't quite 50-50, but it's closer than any team I've worked on before. It's also the first lead role I've had where my responsibilities span beyond the front end, a prospect both exciting and daunting.

+

Which is to say: My hands seem a bit fuller than they did back when I started office hours, so for now, it's time to hit the pause button while I focus on my new team. If you're interested in picking up where I left off, hit me up and I'm happy to spread the word.

+]]>
So You're Going on a Podcasttech-podcast-recording-tipshttp://rmurphey.com/2015/07/27/tech-podcast-recording-tipsMon, 27 Jul 2015 20:00:00 +0000I got a fair bit of experience recording a podcast back in the yayQuery days; when I decided to start another one, the technical side of it felt pretty familiar, minus the part where yayQuery was crazy enough to also record video. Back then, we mostly were talking to each other, so it was easy for us to all be on the same page about the technical requirements: headphones always, typing never (at least when you're also talking), and buy a good microphone. We also had 20-some episodes to get good at working with each other.

+

I've been recording the TTL Podcast for a few months now; it's a show with a different guest every week, so the tech challenges are different, and each show is completely different from the last. It has been great fun, and I can't believe how lucky I am to get to ask all of these people to talk to me and they keep saying yes.

+

I've learned a few things about how to be a good podcast guest along the way, but I haven't always been good at sharing them ahead of time with guests. This, then, is really just an attempt to write them down in a single place, with the added benefit that maybe they will be useful to others. This is mostly focused on being a guest of a show; I have lots to say about being a host, but I feel like that's a lot more complicated than this.

+

Technical

  • Wear headphones, preferably the best ones you own. The iPhone headphones aren't nice, and actually leak noise like crazy. I alternate between using Klipsch and Shure (sorry, not sure of the model, so no link) in-ear headphones, both of which have a nice silicone seal to keep the sound I'm hearing in my ears and out of my microphone.
  • Use the best microphone you can. A MacBook's built-in microphone is decent enough in a pinch, but it's probably worth springing for an external microphone. I used the AT2020 for most of the yayQuery episodes, but I stepped up to a Shure SM7B to record TTL at the suggestion of Alex Sexton. The USB mic is just fine and very reasonably priced; the Shure sounds absolutely lovely but is a bit more of an investment. If you don't want to spring for a mic, see if someone in your office has one you can borrow. If you have questions about audio gear, I am mostly clueless beyond what I've written above.
  • If you're a guest, always plan to record your side of the conversation. (If you're a host, always plan to record all sides of the conversation; I've lost an episode by failing to do this.) On a Mac, Quicktime has a simple audio recording feature. There's also plenty of other software that will do the same.

Preparation

  • Listen to at least one episode of the show before you go on (and possibly before you even agree to go on).
  • Ask the host what they want to talk to you about, and try to have a decent sense of the outline of the conversation before you start. If the host doesn't have great guidance -- she's almost certainly less familiar with your work than you are -- it's generally very welcome for you to propose an outline yourself.
  • If you have access to a soundproofed room, consider using it. Avoid large, echo-y rooms, or rooms that will be subject to a lot of hallway or construction noise.

The Show

  • Consider your biological needs before you start recording :) Except for a live show, you're always welcome to pause if you need to step away, but you may find yourself distracted in the meantime. Make sure you have water nearby!
  • Silence phone notifications (no vibrating phones; silence means silent); on your computer, close Twitter, your mail client, etc.; option-click the Notification Center icon in your Mac toolbar to put it in do-not-disturb mode (thanks Ralph Holzmann for that tip).
  • Unless it's a live show, feel free to pause and try again if you make a mistake or say something wrong. It's important that you announce that you're starting over, then pause, then start over -- that way it's easy to fix in post-production.
  • Remember that a podcast is a conversation, not a presentation. Unlike a presentation, you're conversing with a host who knows the audience and can ask you the questions that will help that audience connect with you. Use a video chat so you can watch the host for visual cues that she might want to interject.

That's my list, though undoubtedly I've left things out. If you have stuff to add, please share in the comments —

+]]>
Browser Testing and Code Coverage with Karma, Tape, and Webpackkarma-webpack-tape-code-coveragehttp://rmurphey.com/2015/07/20/karma-webpack-tape-code-coverageMon, 20 Jul 2015 13:00:00 +0000We recently set up a new project at Bazaarvoice for centralizing common UI modules. We started by using node-tap for unit tests, but given that these are UI modules, we quickly switched to using tape, because it has a fairly easy browser testing story with the help of Karma.

+

One thing that node-tap provided that tape did not provide out of the box was the ability to measure the code coverage of unit tests. Karma does provide this, but getting it hooked up while using Webpack -- which is our build tool of choice these days -- wasn't quite as clear as I would have liked. If you're looking to use Karma, tape, and Webpack, then hopefully this post will help you spend a bit less time than I did.

+

What You'll Need

By the time it was all said and done, I needed to npm install the following modules:

+
  • karma
  • karma-phantomjs-launcher
  • karma-chrome-launcher
  • karma-tap
  • karma-webpack
  • karma-coverage
  • istanbul-instrumenter-loader
  • tape

The directory structure was simple:

+
  • a root directory, containing karma.conf.js and package.json
  • a lib subdirectory, containing module files
  • a test/unit subdirectory, containing the unit tests

An example application file at lib/global/index.js looked like this:

+
/**
+ *  @fileOverview Provides a reference to the global object
+ *
+ *  Functions created via the Function constructor in strict mode are sloppy
+ *  unless the function body contains a strict mode pragma. This is a reliable
+ *  way to obtain a reference to the global object in any ES3+ environment.
+ *  see http://stackoverflow.com/a/3277192/46867
+ */
+'use strict';
+
+module.exports = (new Function('return this;'))();
+
+

An example test in test/unit/global/index.js looked like this:

+
var test = require('tape');
+var global = require('../../../lib/global');
+
+test('Exports window', function (t) {
+  t.equal(global, window);
+  t.end();
+});
+
+

Testing CommonJS Modules in the Browser

The applications that consume these UI modules use Webpack, so we author the modules (and their tests) as CommonJS modules. Of course, browsers can't consume CommonJS directly, so we need to generate files that browsers can consume. There are several tools we can choose for this task, but since we've otherwise standardized on Webpack, we wanted to use Webpack here as well.

+

Since our goal is to load the tests in the browser, we use the test file as the "entry" file. Webpack processes the dependencies of an entry file to generate a new file that contains the entry file's contents as well as the contents of its dependencies. This new file is the one that Karma will load into the browser to run the tests.

+

Getting this to happen is pretty straightforward with the karma-webpack plugin to Karma. The only catch was the need to tell Webpack how to deal with the fs dependency in tape. Here's the initial Karma configuration that got the tests running:

+
var webpack = require('webpack');
+
+module.exports = function(config) {
+  config.set({
+    plugins: [
+      require('karma-webpack'),
+      require('karma-tap'),
+      require('karma-chrome-launcher'),
+      require('karma-phantomjs-launcher')
+    ],
+
+    basePath: '',
+    frameworks: [ 'tap' ],
+    files: [ 'test/**/*.js' ],
+
+    preprocessors: {
+      'test/**/*.js': [ 'webpack' ]
+    },
+
+    webpack: {
+      node : {
+        fs: 'empty'
+      }
+    },
+
+    webpackMiddleware: {
+      noInfo: true
+    },
+
+    reporters: [ 'dots' ],
+    port: 9876,
+    colors: true,
+    logLevel: config.LOG_INFO,
+    autoWatch: true,
+    browsers: ['Chrome'],
+    singleRun: false
+  })
+};
+
+

However, as I mentioned above, I wanted to get code coverage information. Karma offers the karma-coverage plugin, but that alone was insufficient in Webpack land: it would end up instrumenting the whole Webpack output -- including the test code itself! -- and thus reporting highly inaccurate coverage numbers.

+

I ended up reading a karma-webpack issue that told me someone else had already solved this exact problem by creating a Webpack loader to instrument modules at build time. By adjusting our Webpack configuration to only apply this loader to application modules -- not to test code or vendor code -- the Webpack output ends up properly instrumented for the karma-coverage plugin to work with it. Our final Karma config ends up looking like this:

+
var webpack = require('webpack');
+
+module.exports = function(config) {
+  config.set({
+    plugins: [
+      require('karma-webpack'),
+      require('karma-tap'),
+      require('karma-chrome-launcher'),
+      require('karma-phantomjs-launcher'),
+      require('karma-coverage')
+    ],
+
+    basePath: '',
+    frameworks: [ 'tap' ],
+    files: [ 'test/**/*.js' ],
+
+    preprocessors: {
+      'test/**/*.js': [ 'webpack' ]
+    },
+
+    webpack: {
+      node : {
+        fs: 'empty'
+      },
+
+      // Instrument code that isn't test or vendor code.
+      module: {
+        postLoaders: [{
+          test: /\.js$/,
+          exclude: /(test|node_modules)\//,
+          loader: 'istanbul-instrumenter'
+        }]
+      }
+    },
+
+    webpackMiddleware: {
+      noInfo: true
+    },
+
+    reporters: [
+      'dots',
+      'coverage'
+    ],
+
+    coverageReporter: {
+      type: 'text',
+      dir: 'coverage/'
+    },
+
+    port: 9876,
+    colors: true,
+    logLevel: config.LOG_INFO,
+    autoWatch: true,
+    browsers: ['Chrome'],
+    singleRun: false
+  })
+};
+
+

Even with the coverage hiccup, the speed with which I was able to get Karma set up the way I wanted -- and working with TravisCI -- was nothing short of breathtaking. I'm late to the Karma party, but I had no idea it could be this easy. If you haven't checked it out yet, you should.

+]]>
The TTL Podcastttl-podcasthttp://rmurphey.com/2015/05/11/ttl-podcastMon, 11 May 2015 20:12:00 +0000Over the past several months, I've been on a few different podcasts, plus I've been having a lot of fun doing office hours, and generally talking a lot with other people who do the kind of work that I do. I've particularly enjoyed talking about a subject that Alex Sexton dubbed Front-End Ops.

+

It has occurred to me that a) I'm really super fortunate to get to have conversations about this stuff with super-smart people; b) there aren't yet a lot of great sources of information about front-end ops in the real world; and c) I used to be on a podcast and that sure was fun.

+

To that end, I threw a tweet out into the world to see who might be willing to talk to me and let me record the conversation. I got enough great responses that I decided to don my podcasting hat again for a little bit, and the result is the TTL Podcast.

+

ttlpodcast.com

+

If you're a mid-level front-end dev looking to level up, I'd humbly suggest that this is very much a show for you -- you'll get to listen in on the thought process of some of the best front-end devs I know. That said, it's not just a show for those aspiring to take the front-end world by storm; it's also a chance for those who are already in the trenches, doing daily battle with WebDriver and trying to shave 10 more milliseconds off page load, to commiserate asynchronously. I know I personally have learned a ton -- in some cases I've seen a new angle on a problem, and in other cases I've had some serious Developer Guilt assuaged.

+

I've released three episodes so far -- conversations with Alex, Burak Yiğit Kaya (Disqus), and Daniel Espeset and Seth Walker (Etsy). More episodes are in the pipeline, including developers from Walmart Labs, Yammer, FT Labs, and The Guardian.

+

While the initial focus has been on front-end ops, I can see the scope widening over time to cover, generally, the tools and challenges of doing front-end dev at significant scale. If you or someone you know would be a good person to talk to about that sort of thing, I hope you'll let me know.

+

While I'm here, I want to give huge and sincere thanks to SauceLabs and Travis CI for their support of the show; to Una Kravets for finding time in her busy life to make me a website; to my sister, who's been kind enough to pitch in with the editing; and to Bazaarvoice for giving me the freedom to take on a project like this.

+]]>
Writing Conference Proposalswriting-conference-proposalshttp://rmurphey.com/2015/01/26/writing-conference-proposalsMon, 26 Jan 2015 21:00:00 +0000I've had several office hours sessions in the last couple of weeks, and one topic that comes up again and again is how to write a talk description.

+

If you think about it, conference organizers don't have a whole lot to go on when they're choosing talks, unless they already know who you are. Even if your name is well-known, though, organizers may still not know who you are -- lots of conferences are taking a blind approach to selecting speakers. That means that no matter who you are, your talk description might be the only thing organizers have on which to base their decision. When you give your talk, you'll need to engage your audience; the abstract is your chance to engage the organizer.

+

After answering the question several times, I've realized that I have a pretty explainable -- some might call it formulaic -- approach to writing abstracts for a certain common type of talk. It works well for talks about how you solved a problem, talks about how you came to learn a thing you didn't know, and even "10 things you didn't know about X" talks. I thought I'd try to explain it here.

+

Paragraph 1: The context

The first paragraph is where you set the scene, and make it clear to your reader that they have been in the situation you're going to talk about. This is where you establish a connection, baiting a hook that you'll set later.

+
+

You've got the hang of this whole JavaScript thing. Your code works on ancient browsers, and positively sings on new ones. AMD, SPA, MVC -- you can do that stuff in your sleep.

+
+

Paragraph 2: Well, actually ...

The second paragraph is where you break the bad news, which savvy readers may already know: the thing you laid out in the first paragraph is more complicated than it seems, or has downsides that people don't realize, or generally is a bad approach ... but only with the benefit of hindsight, which you just happen to have.

+
+

But now your users are trying to type in your Very Important Form, and nothing is showing up; that widget that's supposed to end up in a certain div is showing up somewhere completely different; and, rarely but not never, your app just doesn't load at all. You thought you had the hang of this whole JavaScript thing, but now you're in the world of third-party JavaScript, where all you control is a single script tag and where it's all but impossible to dream up every hostile environment in which your code will be expected to work. "It works on my machine" has never rung quite so hollow.

+
+

Paragraph 3: The promise

You've successfully induced a bit of anxiety in your reader -- and a strong desire to know what they don't know. The hook is set, so the last paragraph is the time to promise to relieve that anxiety -- but only if your talk is chosen!

+
+

In this talk, we'll take a look at some of the delightful bugs we've had to solve at Bazaarvoice while working on the third-party JavaScript app that collects and displays ratings and reviews for some of the world's largest retailers. We'll also look at some strategies for early detection -- and at some scenarios where you are just plain SOL.

+
+

Next

It turns out that in the process of writing your abstract, you've also written the most basic outline for your talk: on stage, you'll want to set the context, explain the complexity, then deliver on your promise. Pretty handy, if you ask me.

+]]>
Office Hours for Aspiring Speakersoffice-hourshttp://rmurphey.com/2015/01/11/office-hoursSun, 11 Jan 2015 21:00:00 +0000Update: Office hours are on hold for now while I settle into a new role at Bazaarvoice.

+

I'm expecting that my 2015 is going to include a bit less speaking than in years past, so I'm hoping I can use some of that newly available time to help new speakers find their way to the stage. To that end, I'm kicking off "office hours" this week: a few slots a week where aspiring and up-and-coming speakers can borrow my ear for a bit to talk about their ideas, their fears, their questions, and their ambitions.

+

This idea isn't mine; I was inspired by a similar effort by Jen Myers, who has been offering mentoring sessions to aspiring speakers since 2013. I'm forever indebted to the folks who helped me get through my first talk, and I've been honored to give a gentle nudge to several other speakers in the years since.

+

If you're interested, you can sign up here. There's no script or agenda, and -- at least to start with -- I'm not going to try to suggest who should or shouldn't sign up. If you think it would be useful to you, go for it! My only ask is that you be seriously interested in giving coherent, informative, engaging talks on technical topics.

+]]>
Flying Lessonsflying-lessonshttp://rmurphey.com/2014/11/01/flying-lessonsSat, 01 Nov 2014 0:00:00 +0000In October of 2008, I'd been unemployed for about four months. I was doing some freelance work, but still feeling entirely uncertain about my ability to make a living. I decided to do what any marginally employed person might do: spend about $7,000 taking lessons to become a private pilot.

+

I have had a lifelong fascination with flying, and I'd taken lessons in gliders when I was a kid -- every four hours of helping at the airfield got me one 15-minute lesson. If you don't know about gliders, they're just airplanes except without an engine. On the up side, that means your engine can't fail; on the down side, if you mess up when you're trying to land, you don't exactly get a second chance. That whole landing thing always terrified me, and I was off to college before I ever managed to "solo."

+

A little more than 13 years later, I found myself in a Cessna 152 rolling down a 3,200-foot grass runway just outside of Durham, N.C., the 100-foot trees at the end of the runway growing ever-closer in the windshield until a dial on the instrument panel said we were going 55 knots -- which is basically like 55 miles per hour, but when you say knots you sound like a pilot (or a sailor, I guess) -- and the instructor said it was time to pull back on the yoke, ever so gently. And the plane lifted off the runway and the windshield was filled with more sky than ground and the trees passed below me and I was flying.

+

The FAA says you can get your license after just 40 hours of flying, plus a little bit of ground instruction, and on that October day I was sure I'd knock it out in 45 hours, tops. I'd been flying flight simulators since I was like six years old, when -- no joke -- I loaded the program off a cassette player. When I was a kid I went to the airport in my hometown to score expired navigational charts. Plus I had flown gliders, and plus, I was smart. How hard could it be?

+

You know how they say flying is safer than driving? You're pretty safe on that plane that you took to get here -- like, hundreds of times safer than in a car -- but it turns out that little planes flown by private pilots crash all the time. Pilots leave the gas caps unscrewed and all the fuel gets sucked out and they don't even notice until the engine sputters and dies. They overload their plane on a hot day and don't quite clear those trees at the end of that runway. They fly into weather for which their skills are no match, and end up running into a mountain -- euphemistically referred to as CFIT, or "controlled flight into terrain."

+

Lots of private pilots are shining examples of the Dunning-Kruger effect: unskilled and unaware of it, much like myself in those first few lessons. A typical private pilot has fewer than 100 hours of flying time -- airline pilots have thousands or even tens of thousands -- but they have that piece of plastic that says they can fly a plane, and gosh darnit, they are going to fly a plane.

+

It would eventually take me six months and more than 60 hours to get my license, and by the end I was in no rush. In one of many sleepless nights during my training, I came to realize a thing: learning to fly wasn't just about learning to take off and land and get from point A to point B. Barring infinite money and infinite time, it was about learning how to be permanently new at a thing that could kill me if I screwed it up.

+

It's been six years since I rolled down that runway, and six years later, it occurs to me that there are a whole lot of parallels between that experience and my life as a developer. I remember showing up to the inaugural JSConf in 2009, feeling pretty secure in my standing as a bit of a jQuery badass, and being promptly blown away by just how large the JavaScript world actually was, even in 2009. I felt intimidated and overwhelmed and, I won't lie, a bit embarrassed at how little I actually knew and understood. Over time, though, I've realized: this is just the permanent state of things. I'd better get used to it.

+

This, then, is a talk about how to be new at a thing.

+
    +
  • not about learning new things, about being new at a thing
  • +
+

Aviate, Navigate, Communicate

    +
  • translation: know your priorities
  • +
  • flying: nowhere to pull over
  • +
  • priorities when flying: 1) do not die, 2) get where you're going
  • +
  • priorities at a new job: balance learning with project delivery.
  • +
  • study, ask, do
  • +
  • on my team, we try to be explicit about this with new folks.
  • +
+

All Available Information

    +
  • FAR §91.103
  • +
  • flying: you are responsible for not dying. you're expected to know about the weather, the airport you're flying to, the route you're taking, your aircraft's limitations, your own limitations, etc. the FAA will occasionally do "ramp checks" to make sure you've done your due diligence.
  • +
  • ultimately, this is all about not making assumptions. just because it's sunny out doesn't mean it will be in a couple of hours. just because you flew to an airport on a half tank of fuel last time doesn't mean it will work out the same way today, when there's a 40-knot headwind.
  • +
  • "all available information" is somewhat preposterous on its face these days; the amount of information available to us is unreal. i think of this more as a challenge: what information could i get that i haven't gotten? could it possibly be useful?
  • +
  • "all available information" means being diligent and methodical about gathering facts before making a decision. this is a lot slower than making decisions based on assumptions. later, when you're not new, you can make decisions based on assumptions -- we might call that instinct. but not when you're new.
  • +
  • when i was hired at bv, they brought me on to improve the organization and maintainability of a project's codebase. i didn't just read the code and get to work tearing it apart; i interviewed every developer on the team to find out where their pain points were, and learned that certain parts of the code, while terrible, weren't worth spending time on.
  • +
  • what information could you get that you haven't gotten? could it possibly be useful?
  • +
+

Climb, Communicate, Confess, Comply

    +
  • sometimes we get lost. it's ok!
  • +
  • step 1 is to realize you're lost
  • +
  • step 2 is to explain what's wrong
  • +
  • step 3 is to ask for help
  • +
  • step 4 is to do what they say

    +
  • +
  • i think a lot of people are reluctant to ask for help because they're afraid of how people will respond when it becomes clear they don't know everything. of course they don't know everything, they are new! i think also though that people don't know how to ask for help. in my experience, people are actually incredibly willing to help -- as long as you've done your due diligence. this means you've read the docs, done your google due diligence, read the surrounding code, explored the problem with debugging tools, and produced a reduced test case that demonstrates your problem.

    +
  • +
  • developing the skills to make a good request for help is essential to being good at being new at a thing.
  • +
+

The Checklist

The Go-Around

    +
  • sometimes, despite our best efforts, we need to start over
  • +
  • this isn't exceptional -- it's an entirely normal maneuver

    +
  • +
  • Your code is not a reflection of you. It isn’t a reflection of your beliefs, your upbringing, or your ability to be a good person. Your code is [...] a reflection of your thinking process at the time that you wrote it. - @rockbot

    +
  • +
+

Trust Your Instruments

    +
  • translation: learn how to tell what's going on when you don't know what's going on
  • +
+]]>
Writing Unit Tests for Existing JavaScriptunit-testshttp://rmurphey.com/2014/07/13/unit-testsSun, 13 Jul 2014 20:21:00 +0000My team at Bazaarvoice has been spending a lot of time lately thinking about quality and how we can have greater confidence that our software is working as it should.

+

We've long had functional tests in place that attempt to ask questions like "When a user clicks a button, will The Widget do The Thing?" These tests tell us a fair amount about the state of our product, but we've found that they're brittle -- even after we abstracted away the CSS selectors that they rely on -- and that they take approximately forever to run, especially if we want to run them in all of the browsers we support. The quality of the tests themselves is all over the map, too -- some of them are in fact unit tests, not really testing anything functional at all.

+

A few months ago we welcomed a new QA lead to our team as part of our renewed focus on quality. Having a team member who is taking a thoughtful, systematic approach to quality is a game-changer -- he's not just making sure that new features work, but rather has scrutinized our entire approach to delivering quality software, to great effect.

+

One of the things he has repeatedly emphasized is the need to push our tests down the stack. Our functional tests should be black-box -- writing them shouldn't require detailed knowledge of how the software works under the hood. Our unit tests, on the other hand, should provide broad and detailed coverage of the actual code base. In an ideal world, functional tests can be few and slow-ish, because they serve as an infrequent smoke test of the application; unit tests should be thorough, but execute quickly enough that we run them all the time.

+

Until now, our unit tests have been entirely focused on utility and framework code -- do we properly parse a URL, for example? -- not on code that's up close and personal with getting The Widget to do The Thing. I'd told myself that this was fine and right and good, but in reality I was pretty terrified of trying to bolt unit tests onto feature code of incredibly varying quality, months or even years after it was first written.

+

A week or so ago, thanks to some coaxing/chiding from fellow team members, I decided to bite the bullet and see just how bad it would be. A week later, I feel like I've taken the first ten steps in a marathon. Of course, taking those first steps involves making the decision to run, and doing enough training ahead of time that you don't die, so in that regard I've come a long way already. Here's what I've done and learned so far.

+

Step 0

I was lucky in that I wasn't starting entirely from scratch, but if you don't already have a unit testing framework in place, don't fret -- it's pretty easy to set up. We use Grunt with Mocha as our test framework and expect.js as our assertion library, but if I were starting over today I'd take a pretty serious look at Intern.

+

Our unit tests are organized into suites. Each suite consists of a number of files, each of which tests a single AMD module. Most of the modules under test when I started down this path were pretty isolated -- they didn't have a ton of dependencies generally, and had very few runtime dependencies. They didn't interact with other modules that much. Almost all of the existing unit test files loaded a module, executed its methods, and inspected the return value. No big deal.
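To make that concrete, here's a minimal sketch of the kind of test I'm describing -- the util/parseUrl module and its behavior are invented for illustration, but the shape (load the module, call a method, inspect the return value) is the real pattern:

define([
  'util/parseUrl'
], function (parseUrl) {
  describe('parseUrl', function () {
    it('should extract the hostname', function () {
      var parsed = parseUrl('http://example.com/path?q=1');
      expect(parsed.hostname).to.equal('example.com');
    });

    it('should extract query parameters', function () {
      var parsed = parseUrl('http://example.com/path?q=1');
      expect(parsed.params.q).to.equal('1');
    });
  });
});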

+

Feature-related code -- especially already-written feature-related code -- is a different story. Views have templates. Models expect data. Models pass information to views, and views pass information to models. Some models need parents; others expect children. And pretty much everything depended on a global-ish message broker to pass information around.

+

Since the code was originally written without tests, it's guaranteed to be in various states of testability, but a broad rewrite for testability is of course off the table. We'll rewrite targeted pieces, but doing so comes with great risk. For the most part, our goal will be to write tests for what we have, then refactor cautiously once tests are in place.

+

We decided that the first place to start was with models, so I found the simplest model I could:

+
define([
+  'framework/bmodel',
+  'underscore'
+], function (BModel, _) {
+  return BModel.extend({
+    options : {},
+    name : 'mediaViewer',
+
+    init : function (config, options) {
+      _.extend(this.options, options);
+    }
+  });
+});
+
+

Why do we have a model that does approximately nothing? I'm not going to attempt to answer that, though there are Reasons -- but for the sake of this discussion, it certainly provides an easy place to start.

+

I created a new suite for model tests, and added a file to the suite to test the model. I could tell you that I naively plowed ahead thinking that I could just load the module and write some assertions, but that would be a lie.

+

Mocking: Squire.js

I knew from writing other tests, on this project and projects in the past, that I was going to need to "mock" some of my dependencies. For example, we have a module called ENV that is used for ... well, way too much, though it's better than it used to be. A large portion of ENV isn't used by any given module, but ENV itself is required by essentially every model and view.

+

Squire.js is a really fantastic library for doing mocking in RequireJS-land. It lets you override how a certain dependency will be fulfilled; so, when a module under test asks for 'ENV', you can use Squire to say "use this object that I've hand-crafted for this specific test instead."

+

I created an Injector module that does the work of loading Squire, plus mocking a couple of things that will be missing when the tests are executed in Node-land.

+
define([
+  'squire',
+  'jquery'
+], function (Squire, $) {
+  return function () {
+    var injector;
+
+    if (typeof window === 'undefined') {
+      injector = new Squire('_BV');
+
+      injector.mock('jquery', function () {
+        return $;
+      });
+
+      injector.mock('window', function () {
+        return {};
+      });
+    }
+    else {
+      injector = new Squire();
+    }
+
+    return injector;
+  };
+});
+
+

Next, I wired up the test to see how far I could get without mocking anything. Note that the main module doesn't actually load the thing we're going to test -- first, it sets up the mocks by calling the injector function, and then it uses the created injector to require the module we want to test. Just like a normal require, the injector.require is async, so we have to let our test framework know to wait until it's loaded before proceeding with our assertions.

+
define([
+  'test/unit/injector'
+], function (injector) {
+  injector = injector();
+
+  var MediaViewer;
+
+  describe('MediaViewer Model', function () {
+    before(function (done) {
+      injector.require([
+        'bv/c2013/model/mediaViewer'
+      ], function (M) {
+        MediaViewer = M;
+        done();
+      });
+    });
+
+    it('should be named', function () {
+      var m = new MediaViewer({});
+      expect(m.name).to.equal('mediaViewer');
+    });
+
+    it('should mix in provided options', function () {
+      var m = new MediaViewer({}, { foo : 'bar' });
+      expect(m.options.foo).to.equal('bar');
+    });
+  });
+});
+
+

This, of course, still failed pretty spectacularly. In real life, a model gets instantiated with a component, and a model also expects to have access to an ENV that has knowledge of the component. Creating a "real" component and letting the "real" ENV know about it would be an exercise in inventing the universe, and this is exactly what mocks are for.

+

While the "real" ENV is a Backbone model that is instantiated using customer-specific configuration data, a much simpler ENV suffices for the sake of testing a model's functionality:

+
define([
+  'backbone'
+], function (Backbone) {
+  return function (injector, opts) {
+    injector.mock('ENV', function () {
+      var ENV = new Backbone.Model({
+        componentManager : {
+          find : function () {
+            return opts.component;
+          }
+        }
+      });
+
+      return ENV;
+    });
+
+    return injector;
+  };
+});
+
+

Likewise, a "real" component is complicated and difficult to create, but the pieces of a component that this model needs to function are limited. Here's what the component mock ended up looking like:

+
define([
+  'underscore'
+], function (_) {
+  return function (settings) {
+    settings = settings || {};
+
+    settings.features = settings.features || [];
+
+    return {
+      trigger : function () {},
+      hasFeature : function (refName, featureName) {
+        return _.contains(settings.features, featureName);
+      },
+      getScope : function () {
+        return 'scope';
+      },
+      contentType : settings.contentType,
+      componentId : settings.id,
+      views : {}
+    };
+  };
+});
+
+

In the case of both mocks, we've taken some dramatic shortcuts: the real hasFeature method of a component is a lot more complicated, but in the component mock we create a hasFeature method whose return value can be easily known by the test that uses the mock. Likewise, the behavior of the componentManager's find method is complex in real life, but in our mock, the method just returns the same thing all the time. Our mocks are designed to be configurable by -- and predictable for -- the test that uses them.

+

Knowing what to mock and when and how is a learned skill. It's entirely possible to mock something in such a way that a unit test passes but the actual functionality is broken. We actually have pretty decent tests around our real component code, but not so much around our real ENV code. We should probably fix that, and then I can feel better about mocking ENV as needed.

+

So far, my approach has been: try to make a test pass without mocking anything, and then mock as little as possible after that. I've also made a point of trying to centralize our mocks in a single place, so we aren't reinventing the wheel for every test.

+

Finally: when I first set up the injector module, I accidentally made it so that the same injector would be shared by any test that included the module. This is bad, because you end up sharing mocks across tests -- violating the "only mock what you must" rule. The injector module shown above is correct in that it returns a function that can be used to create a new injector, rather than the injector itself.
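To illustrate the difference, here are two contrived module definitions side by side -- not our actual code, just a sketch of the broken shared-instance version versus the factory version shown above:

// Broken: every test that requires this module gets the *same* injector,
// so mocks registered in one test leak into every other test.
define([ 'squire' ], function (Squire) {
  return new Squire();
});

// Fixed: each test calls the returned function to get its own injector,
// so mocks stay scoped to the test that created them.
define([ 'squire' ], function (Squire) {
  return function () {
    return new Squire();
  };
});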

+

Here's what the final MediaViewer test ended up looking like:

+
define([
+  // This properly sets up Squire and mocks window and jQuery
+  // if necessary (for running tests from the command line).
+  'test/unit/injector',
+
+  // This is a function that mocks the ENV module.
+  'test/unit/mocks/ENV',
+
+  // This is a function that mocks a component.
+  'test/unit/mocks/component'
+], function (injector, ENVMock, component) {
+  injector = injector();
+
+  // This will become the constructor for the model under test.
+  var MediaViewer;
+
+  // Create an object that can serve as a model's component.
+  var c = component();
+
+  // We also need to mock the ENV module and make it aware of
+  // the fake component we just created.
+  ENVMock(injector, { component : c });
+
+  describe('MediaViewer Model', function () {
+    before(function (done) {
+      injector.require([
+        'bv/c2013/model/mediaViewer'
+      ], function (M) {
+        MediaViewer = M;
+        done();
+      });
+    });
+
+    it('should be named', function () {
+      var m = new MediaViewer({
+        component : c
+      }, {});
+      expect(m.name).to.equal('mediaViewer');
+    });
+
+    it('should mix in provided options', function () {
+      var m = new MediaViewer({
+        component : c
+      }, { foo : 'bar' });
+
+      expect(m.options.foo).to.equal('bar');
+    });
+  });
+});
+
+

Spying: Sinon

After my stunning success with writing 49 lines of test code to test a 13-line model, I was feeling optimistic about testing views, too. I decided to tackle this fairly simple view first:

+
define([
+  'framework/bview',
+  'underscore',
+  'hbs!contentAuthorProfileInline',
+  'mf!bv/c2013/messages/avatar',
+  'bv/util/productInfo',
+  'framework/util/bvtracker',
+  'util/specialKeys'
+], function (BView, _, template, msgPack, ProductInfo, BVTracker, specialKeys) {
+  return BView.extend({
+    name : 'inlineProfile',
+
+    templateName : 'contentAuthorProfileInline',
+
+    events : {
+      'click .bv-content-author-name .bv-fullprofile-popup-target' : 'launchProfile'
+    },
+
+    template : template,
+
+    msgpacks : [msgPack],
+
+    launchProfile : function (e) {
+      // use r&r component outlet to trigger full profile popup component event
+      this.getTopModel().trigger( 'showfullprofile', this.model.get('Author') );
+
+      BVTracker.feature({
+        type : 'Used',
+        name : 'Click',
+        detail1 : 'ViewProfileButton',
+        detail2 : 'AuthorAvatar',
+        bvProduct : ProductInfo.getType(this),
+        productId : ProductInfo.getId(this)
+      });
+    }
+  });
+});
+
+

It turned out that I needed to do the same basic mocking for this as I did for the model, but this code presented a couple of interesting things to consider.

+

First, I wanted to test that this.getTopModel().trigger(...) triggered the proper event, but the getTopModel method was implemented in BView, not the code under test, and without a whole lot of gymnastics, it wasn't going to return an object with a trigger method.

+

Second, I wanted to know that BVTracker.feature was getting called with the right values, so I needed a way to inspect the object that got passed to it, but without doing something terrible like exposing it globally.

+

Enter Sinon and its spies. Spies let you observe methods as they are called. You can either let the method still do its thing while watching how it is called, or simply replace the method with a spy.
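For reference, here's roughly what the two flavors of spy look like; the obj.save example is made up, but the calls are standard Sinon:

// An anonymous spy: a function that does nothing but record its calls.
var spy = sinon.spy();
spy('hello');
spy.calledOnce;          // true
spy.lastCall.args[0];    // 'hello'

// Wrapping an existing method: obj.save still does its real work, but
// Sinon records how it was called; restore() puts the original back.
var obj = { save : function (data) { /* real work happens here */ } };
sinon.spy(obj, 'save');
obj.save({ id : 1 });
obj.save.calledWith({ id : 1 });  // true
obj.save.restore();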

+

I solved the first problem by defining my own getTopModel method on the model instance, and having it return an object. I gave that object a trigger method that was actually just a spy -- for the sake of my test, I didn't care what trigger did, only how it was called. Other tests [will eventually] ensure that triggering this event has the desired effect on the targeted model, but for the sake of this test, we don't care.

+

Here's what the test looks like:

+
describe('#launchProfile', function () {
+  var spy;
+  var v;
+
+  before(function () {
+    spy = sinon.spy();
+
+    v = new InlineProfile({
+      // model and component are defined elsewhere
+      component : component,
+      model : model
+    });
+
+    model.set('Author', 'author');
+
+    v.getTopModel = function () {
+      return {
+        trigger : spy
+      };
+    };
+  });
+
+  it('should trigger showfullprofile event on top model', function () {
+    v.launchProfile();
+
+    expect(spy.lastCall.args[0]).to.equal('showfullprofile');
+    expect(spy.lastCall.args[1]).to.equal('author');
+  });
+});
+
+

I solved the second problem -- the need to see what's getting passed to BVTracker.feature -- by creating a BVTracker mock where every method is just a spy:

+
// This is a mock for BVTracker that can be used by unit tests.
+define([
+  'underscore'
+], function (_) {
+  return function (injector, opts) {
+    var BVTracker = {};
+
+    injector.mock('framework/util/bvtracker', function () {
+      _([
+        'error',
+        'pageview',
+        'feature'
+      ]).each(function (event) {
+        BVTracker[event] = sinon.spy();
+      });
+
+      // Hand back the spy-filled object so it serves as the mocked module,
+      // matching the way the ENV mock returns its fake ENV.
+      return BVTracker;
+    });
+
+    return BVTracker;
+  };
+});
+
+

My test looked at the BVTracker.feature spy to see what it got when the view's launchProfile method was called:

+
it('should send a feature analytics event', function () {
+  v.launchProfile();
+
+  var evt = BVTracker.feature.lastCall.args[0];
+
+  expect(evt.type).to.equal('Used');
+  expect(evt.name).to.equal('Click');
+  expect(evt.detail1).to.equal('ViewProfileButton');
+  expect(evt.detail2).to.equal('AuthorAvatar');
+  expect(evt.bvProduct).to.equal('RatingsAndReviews');
+  expect(evt.productId).to.equal('product1');
+});
+
+

I've barely touched on what you can do with spies, or with Sinon in general. Besides providing simple spy functionality, Sinon delivers a host of functionality that makes tests easier to write -- swaths of which I haven't even begun to explore. One part I have explored is its ability to create fake XHRs and to fake whole servers, allowing you to test how your code behaves when things go wrong on the server. Do yourself a favor and spend some time reading through the excellent docs.
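As a taste, here's a rough sketch of the fake-server flow -- the URL and response are invented, but create, respondWith, respond, and restore are the Sinon API:

var server = sinon.fakeServer.create();

// Tell the fake server how to answer a request the code under test will make.
server.respondWith('GET', '/api/reviews', [
  500,
  { 'Content-Type' : 'application/json' },
  '{ "error" : "boom" }'
]);

// Exercise the code under test here; it makes its XHR as usual ...
// fetchReviews(onError);   // hypothetical

// ... then flush the queued responses and assert on how the code reacted.
server.respond();

server.restore();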

+

What to test ... and not

I've written tests now for a tiny handful of models and views. Setting up the mocks was a bit of a hurdle -- and there were plenty of other hurdles that are too specific to our project for me to talk about them in detail -- but overall, the hardest part has been figuring out what, exactly, to test. I crafted the examples above to be pretty straightforward, but reality is a lot more complicated.

+

Writing tests for existing code requires first understanding the code that's being tested and identifying interesting moments in that code. If there's an operation that affects the "public" experience of the module -- for example, if the value of a model attribute changes -- then we need to write a test that covers that operation's side effect(s). If there's code that runs conditionally, we need to test the behavior of that code when that condition is true -- and when it's not. If there are six possible conditions, we need to test them all. If a model behaves completely differently when it has a parent -- and this happens far too often in our code -- then we need to simulate the parent case, and simulate the standalone case.

+

It can be tempting to try to test the implementation details of existing code -- and difficult to realize that you're doing it even when you don't mean to. I try to stay focused on testing how other code might consume and interact with the module I'm testing. For example, if the module I'm testing triggers an event in a certain situation, I'm going to write a test that proves it, because some other code is probably expecting that event to get triggered. However, I'm not going to test that a method of a certain name gets called in a certain case -- that's an implementation detail that might change.

+

The exercise of writing unit tests against existing code proves to be a phenomenal incentive to write better code in the future. One comes to develop a great appreciation of methods that have return values, not side effects. One comes to loathe the person -- often one's past self -- who authored complex, nested conditional logic. One comes to worship small methods that do exactly one thing.

+

So far, I haven't rewritten any of the code I've been testing, even when I've spotted obvious flaws, and even when rewriting would make the tests themselves easier to write. I don't know how long I'll be able to stick to this; there are some specific views and models that I know will be nearly impossible to test without revisiting their innards. When that becomes necessary, I'm hoping I can do it incrementally, testing as I go -- and that our functional tests will give me the cover I need to know I haven't gone horribly wrong.

+

Spreading the love

Our team's next step is to widen the effort to get better unit test coverage of our code. We have something like 100 modules that need testing, and their size and complexity are all over the map. Over the coming weeks, we'll start to divide and conquer.

+

One thing I've done to try to make the effort easier is to create a scaffolding task using Grunt. Running grunt scaffold-test:model:modelName will generate a basic file that includes mocking that's guaranteed to be needed, as well as the basic instantiation that will be required and a couple of simple tests.
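I won't reproduce the real task here, but a stripped-down scaffolding task might look something like this -- a guess at the general shape, with invented paths and template names, not our actual implementation:

// In the Gruntfile: `grunt scaffold-test:model:mediaViewer` passes
// 'model' and 'mediaViewer' to the task as arguments.
grunt.registerTask('scaffold-test', 'Generate a skeleton unit test', function (type, name) {
  var dest = 'test/unit/' + type + '/' + name + '.js';
  var tmpl = grunt.file.read('test/templates/' + type + '.tmpl');

  grunt.file.write(dest, grunt.template.process(tmpl, {
    data : { name : name }
  }));

  grunt.log.ok('Created ' + dest);
});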

+

There's another senior team member who has led an effort in the past to apply unit tests to an existing code base, and he's already warned me to expect a bit of a bumpy road as the team struggles through the inevitable early challenges of trying to write unit tests for existing feature code. I expect there to be a pretty steep hill to climb at first, but at the very least, the work I've done so far has -- hopefully -- gotten us to the top of the vertical wall that had been standing in our way.

+

Further Reading

I'm not exactly the first person to write about this. You may find these items interesting:

+ +]]>
Two Things about Conditionals in JavaScriptjs-conditionalshttp://rmurphey.com/2012/12/10/js-conditionalsMon, 10 Dec 2012 21:40:00 +0000Just a quick post, inspired by Laura Kalbag's post, which included this gem:

+
+

We shouldn’t be fearful of writing about what we know. Even if you write from the most basic point of view, about something which has been ‘around for ages’, you’ll likely be saying something new to someone.

+
+

One: There is no else if

When you write something like this ...

+
function saySomething( msg ) {
+  if ( msg === 'Hello' ) {
+    console.log('Hello there');
+  } else if ( msg === 'Yo' ) {
+    console.log('Yo dawg');
+  }
+}
+
+

... then what you're actually writing is this ...

+
function saySomething( msg ) {
+  if ( msg === 'Hello' ) {
+    console.log('Hello there');
+  } else {
+    if ( msg === 'Yo' ) {
+      console.log('Yo dawg');
+    }
+  }
+}
+
+

That's because there is no else if in JavaScript. You know how you can write an if statement without any curly braces?

+
if ( foo ) bar() // please don't do this if you want your code to be legible
+
+

You're doing the same thing with the else part of the initial if statement when you write else if: you're skipping the curly braces for the second if block, the one you're providing to else. There's nothing wrong with else if per se, but it's worth knowing about what's actually happening.

+

Two: return Means Never Having to Say else

Consider some code like this:

+
function howBig( num ) {
+  if ( num < 10 ) {
+    return 'small';
+  } else if ( num >= 10 && num < 100 ) {
+    return 'medium';
+  } else if ( num >= 100 ) {
+    return 'big';
+  }
+}
+
+

If the number we pass to howBig is less than 10, then our function will return 'small'. As soon as it returns, none of the rest of the function will run -- this means we can skip the else part entirely, which means our code could look like this:

+
function howBig( num ) {
+  if ( num < 10 ) {
+    return 'small';
+  }
+
+  if ( num < 100 ) {
+    return 'medium';
+  }
+
+  if ( num >= 100 ) {
+    return 'big';
+  }
+}
+
+

But wait -- if the first if statement isn't true, and the second if statement isn't true, then we will always return 'big'. That means the third if statement isn't even required:

+
function howBig( num ) {
+  if ( num < 10 ) {
+    return 'small';
+  }
+
+  if ( num < 100 ) {
+    return 'medium';
+  }
+
+  return 'big';
+}
+
+

Note: this post was edited to improve a couple of the examples and to fix some typos.

+]]>
This is the Cigarettethis-is-the-cigarettehttp://rmurphey.com/2012/12/09/this-is-the-cigaretteSun, 09 Dec 2012 19:40:00 +0000

+

This is the cigarette I smoked* on Wednesday after I got out of a meeting in Boston and went to my desk and read my messages and learned that our birthmother "match" had fallen through.

+

The last three weeks have been among the happiest, most exciting, most terrifying times I can remember. Saying that we are sad and disappointed and et cetera doesn't really cover it, but, well, there it is. Our search will continue.

+

* Don't worry, Mom, I don't usually smoke. Desperate times, desperate measures.

+]]>
On Choosing a Syntax Highlighting Scheme for Your Next Presentationchoosing-presentation-color-schemehttp://rmurphey.com/2012/11/29/choosing-presentation-color-schemeThu, 29 Nov 2012 20:20:00 +0000This is a projector screen:

+

+

You will notice that it is white, or some reasonable approximation thereof. It is probably made of a reflective material that sparkles a bit when light shines on it. Still: white.

+

Do you know what color this screen is when you use a projector to display this image onto it?

+

+

It is still white. Crazy, I know! The thing is, projectors cannot project black; they can only not project any light on a region that you intend to be black.

+

Chances are you are reading this on an LCD screen of some sort, where the rules are completely different: they usually start out essentially black, not white, and pixels are brightened as required. The pixels that start out dark can generally stay pretty dark.

+

On a projection screen, on the other hand, the appearance of black is nothing more than an optical illusion, made possible by the projector projecting brightness everywhere else.

+

What does this mean? Lots of things, but in particular, it means that you should never, ever, ever use a color scheme with a dark background -- no matter how high-contrast and good it looks on your monitor -- if you will be presenting using a projector that is projecting onto a white screen. At least, assuming that you intend for your audience to be able to actually read the code.

+

Presentation Color Schemes That I Have Loved

    +
  • Ben Alman's TextMate Theme: Ben has tailored this to be incredible for presenting about JS code.
  • +
  • Tomorrow Theme: The light-background flavor is decent, but could probably stand to be higher-contrast, at least for some languages.
  • +
+]]>
Show & Telltimes-open-science-fairhttp://rmurphey.com/2012/11/25/times-open-science-fairSun, 25 Nov 2012 20:20:00 +0000I spoke at the Times Open Source Science Fair a couple of weeks ago. I'll admit that I was pretty skeptical of the concept when I was first asked, but as someone who used to work as an editor at a tiny newspaper in upstate New York, I wasn't about to say no when the Times asked me to come say hi.

+

A few days before the event, I got an email asking me for information about what I'd be showing off at my booth. Booth? Wat? They weren't kidding about the science fair thing, but what the heck was I going to show at a booth?

+

It turns out this is basically the best idea ever. I recruited my Bocoup colleague Rick Waldron to join me, and together we spent a whirlwind hour showing off robots powered by JavaScript to an endless stream of people walking up to our booth. Rick did a great job of setting up a demo that people could play with, and they took turns moving sliding potentiometers that controlled servos that moved an arm with a gripper at the end, trying to pick up Bocoup stickers. Ours was one of about a dozen booths showing off open-source projects, and the room was a wonderful madhouse.

+

After a break for dinner, I, Jeremy Ashkenas, and Zach Holman each gave 20-minute talks, but the talks were really just icing on the evening. The "science fair" format promoted such intentional interaction, in a way that traditional conferences just can't, no matter how great the hall track or the parties may be. The format invited and encouraged attendees to talk to the presenters -- indeed, if they didn't talk to the presenters, there wasn't much else for them to do. By the time the official talks came around, a super-casual, super-conversational atmosphere had already been established, and the energy that created was tangibly different from any event I've been to before.

+

I love conferences, and the sharing of knowledge that happens there, and there's a whole lot to be said for their speaker-audience format -- don't get me wrong. But I'd also love to see more events figure out how to integrate this show and tell format. "Booths" don't need to mean "vendors trying to sell things" -- they can actually be a great opportunity to facilitate conversation, and to let open source contributors show off their hard work.

+]]>
Recent Talksrecent-talkshttp://rmurphey.com/2012/11/21/recent-talksWed, 21 Nov 2012 10:40:00 +0000A post from Alex Russell reminded me that I've given a number of talks in the last few months, and some of them even have video on the internet.

+

I've been ridiculously spoiled to get to travel all over the place these last few months -- San Francisco, New York, Amsterdam, Berlin, Brighton -- and speak at some truly first-class conferences, sharing the stage, sharing meals, and sharing beers with some seriously amazing folks. My recent news means I'll be doing a lot less travel for the next little bit, but I'm ever-so-grateful for the opportunities I've had and the people I've gotten to see and meet these last few months.

+

Writing Testable JavaScript

This is the first talk I've developed that I've managed to give several times in rapid succession: three times in six days, including at Full Frontal, the online JS Summit, and to a group of developers at the New York Times. There's no video yet, but the slides are here, and there should be video soon, I think.

+

+ +

JS Minty Fresh

A fun talk at Fronteers about eliminating code smells from your JavaScript. The best feedback I got afterwards was from an attendee who said they felt at the beginning of the talk like the material was going to be too basic for them, and by the end of the talk, the material was nearly over their head. "I guess that makes you a good teacher," he said. Aw!

+ + +

Rebecca Murphey | JS Minty Fresh: Identifying and Eliminating Smells in Your Code Base | Fronteers 2012 from Fronteers on Vimeo.

+ +

Slides

+

If you like this, you should also check out the screencasts we released at Bocoup earlier this week.

+

Beyond the DOM: Sane Structure for JS Apps

An update of my code organization talk, delivered at the jQuery Conference in San Francisco. It's fun for me to see how my thinking around code organization has evolved and improved since my first, now-almost-embarrassing talk at the 2009 jQuery Conference in Boston.

+ + +

Slides

+

Johnny Five: Bringing the JavaScript Culture to Hardware

This one was from the New York Times Open Source Science Fair, a fun night of about a dozen folks presenting open-source projects at "booths," followed by short talks about open source by Jeremy Ashkenas, me, and Zach Holman. The slides don't necessarily stand on their own very well, but the short version is: use JavaScript to make things in the real world, because it's ridiculously easy and ridiculously fun.

+

+ +

Getting Better at JavaScript

I put this together as a quickie for the Berlin UpFront user group -- it was the first talk I gave with my broken foot, and the last talk I'd give for weeks because I lost my voice a couple of hours later. There's not a whole lot here, but it was a fun talk and a fun group, and a topic that I get plenty of questions about. Again, no video, but here are the slides:

+

]]>
This is the Cup of Coffeethis-is-the-cup-of-coffeehttp://rmurphey.com/2012/11/14/this-is-the-cup-of-coffeeWed, 14 Nov 2012 10:40:00 +0000

+

This is the cup of coffee I was making earlier this week when Melissa gave me a thumbs-up while she talked on the phone to a woman in Pennsylvania who had just finished telling Melissa that yes, indeed, after 10 weeks or three years of waiting depending on how you count, a 29-year-old woman who's due to give birth in Iowa at the beginning of February has decided that Melissa and I should be so lucky as to get to be her baby girl's forever family.

+

Most people get to post ultrasound pictures on Twitter at moments like these, but for now this will suffice to remind me of the moment I found out I would get to be a mom. My head is spinning, and while on the one hand it's a little difficult to fathom that this is all just 10 weeks away, on the other hand I'm counting down the days.

+

Our adoption will be an open one; the meaning of "open" varies widely, but in our case it means we talked to the birth mother before she chose us, we'll be meeting her in a few weeks, we'll do our very best to be in Iowa for the delivery, and we'll stay in touch with letters and pictures afterwards. Melissa and I are grateful that we'll be able to adopt as a couple, though we are saddened that we have to adopt outside of our home state of North Carolina in order to do so. It's important to us that our child have both of us as her legal parents, and I don't hesitate to say that it's downright shitty that we have to jump through significant legal and financial hoops -- and stay in a hotel in Iowa with a newborn for an unknown number of days -- to make it so. It is what it is, and good people are working and voting to make it better, and it can't happen fast enough.

+

I've learned a lot about adoption these past few months, and I know a lot of people have a lot of questions, some of which they're reluctant to ask. If you're interested in learning more, I highly recommend In On It: What Adoptive Parents Would Like You to Know About Adoption. You're also welcome to ask me questions if you see me in real life or on the internets -- I can't promise I'll know the answers, but I promise to do my best.

+

In the meantime, wish us luck :)

+]]>
Using object literals for flow control and settingsobject-literalshttp://rmurphey.com/2011/07/24/object-literalsSun, 24 Jul 2011 0:00:00 +0000I got an email the other day from someone reading through jQuery Fundamentals -- they'd come across the section about patterns for performance and compression, which is based on a presentation Paul Irish gave back at the 2009 jQuery Conference in Boston.

+

In that section, there's a bit about alternative patterns for flow control -- that is, deciding what a program should do next. We're all familiar with the standard if statement:

+

{% codeblock lang:javascript %}
function isAnimal(thing) {
  if (thing === 'dog' || thing === 'cat') {
    console.log("yes!");
  } else {
    console.log("no");
  }
}
{% endcodeblock %}

+

What stumped the person who emailed me, though, was when the same logic as we see above was written like this:

+

{% codeblock lang:javascript %}
function isAnimal(thing) {
  if (({ cat : 1, dog : 1 })[ thing ]) {
    console.log("yes!");
  } else {
    console.log("no");
  }
}
{% endcodeblock %}

+

What's happening here is that we're using a throwaway object literal to express the conditions under which we will say a thing is an animal. We could have stored the object in a variable first:

+

{% codeblock lang:javascript %}
function isAnimal(thing) {
  var animals = {
    cat : 1,
    dog : 1
  };

  if (animals[ thing ]) {
    console.log("yes!");
  } else {
    console.log("no");
  }
}
{% endcodeblock %}

+

However, that variable's only purpose would be to provide this one lookup, so it can be argued that the version that doesn't bother setting the variable is more economical. Reasonable people can probably disagree about whether this economy of bytes is a good tradeoff for readability -- something like this is perfectly readable to a seasoned developer, but potentially puzzling otherwise -- but it's an interesting example of how we can use literals in JavaScript without bothering to store a value in a variable.

+

The pattern works with an array, too:

+

{% codeblock lang:javascript %}
function animalByIndex(index) {
  return [ 'cat', 'dog' ][ index ];
}
{% endcodeblock %}

+

It's also useful for looking up values generally, which is how I find myself using it most often these days in my work with Toura, where we routinely branch our code depending on the form factor of the device we're targeting:

+

{% codeblock lang:javascript %}
function getBlingLevel(device) {
  return ({
    phone : 100,
    tablet : 200
  })[ device.type ];
}
{% endcodeblock %}

+

As an added benefit, constructs that use this pattern will return the conveniently falsy undefined if you try to look up a value that doesn't have a corresponding property in the object literal.
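For instance, with the getBlingLevel function above, an unknown device type simply falls out as undefined, which makes supplying a default trivial (the watch example is contrived):

{% codeblock lang:javascript %}
getBlingLevel({ type : 'tablet' }); // 200
getBlingLevel({ type : 'watch' });  // undefined

// undefined is falsy, so a fallback is a one-liner:
var bling = getBlingLevel(device) || 0;
{% endcodeblock %}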

+

A great way to come across techniques like this is to read the source code of your favorite library (and other libraries too). Unfortunately, once discovered, these patterns can be difficult to decipher, even if you have pretty good Google fu. Just in case your neighborhood blogger isn't available, IRC is alive and well in 2011, and it's an excellent place to get access to smart folks eager to take the time to explain.

+]]>
Lessons From a Rewritelessons-from-a-rewritehttp://rmurphey.com/2011/07/06/lessons-from-a-rewriteWed, 06 Jul 2011 9:50:00 +0000MVC and friends have been around for decades, but it’s only in the last couple of years that broad swaths of developers have started applying those patterns to JavaScript. As that awareness spreads, developers eager to use their newfound insight are presented with a target-rich environment, and the temptation to rewrite can be strong.

+
+

There’s a subtle reason that programmers always want to throw away the code and start over. The reason is that they think the old code is a mess. … The reason that they think the old code is a mess is because of a cardinal, fundamental law of programming: It’s harder to read code than to write it. - Joel Spolsky

+
+

When I started working with Toura Mobile late last year, they already had a product: a web-based CMS to create the structure of a mobile application and populate it with content, and a PhoneGap-based application to consume the output of the CMS inside a native application. Customers were paying, but the development team was finding that delivering new features was a struggle, and bug fixes seemed just as likely to break something else as not. They contacted me to see whether they should consider a rewrite.

+

With due deference to Spolsky, I don’t think it was a lack of readability driving their inclination to rewrite. In fact, the code wasn’t all that difficult to read or follow. The problem was that the PhoneGap side of things had been written to solve the problems of a single-purpose, one-off application, and it was becoming clear that it needed to be a flexible, extensible delivery system for all of the content combinations clients could dream up. It wasn’t an app — it was an app that made there be an app.

+
+

Where a new system concept or new technology is used, one has to build a system to throw away, for even the best planning is not so omniscient as to get it right the first time. Hence plan to throw one away; you will, anyhow. - Fred Brooks, The Mythical Man Month

+
+

By the time I’d reviewed the code and started writing up my findings, the decision had already been made: Toura was going to throw one away and start from scratch. For four grueling and exciting months, I helped them figure out how to do it better the second time around. In the end, I like to think we’ve come up with a solid architecture that’s going to adapt well to clients’ ever-changing needs. Here, then, are some of the lessons we learned along the way.

+

Understand what you’re rewriting

I had spent only a few days with the codebase when we decided that we were going to rewrite it. In some ways, this was good — I was a fresh set of eyes, someone who could think about the system in a new way — but in other ways, it was a major hindrance. We spent a lot of time at the beginning getting me up to speed on what, exactly, we were making; things that went without saying for existing team members did not, in fact, go without saying for me.

+

This constant need for explanation and clarification was frustrating at times, both for me and for the existing team, but it forced us to state the problem in plain terms. The value of this was incredible — as a team, we were far less likely to accept assumptions from the original implementation, even assumptions that seemed obvious.

+

One of the key features of Toura applications is the ability to update them “over the air” — it’s not necessary to put a new version in an app store in order to update an app’s content or even its structure. In the original app, this was accomplished via generated SQL diffs of the data. If the app was at version 3, and the data in the CMS was at version 10, then the app would request a patch file to upgrade version 3 to version 10. The CMS had to generate a diff for all possible combinations: version 3 to version 10, version 4 to version 10, etc. The diff consisted of queries to run against an SQLite database on the device. Opportunities for failures or errors were rampant, a situation exacerbated by the async nature of the SQLite interface.

+

In the new app, we replicated the feature with vastly less complexity +— whenever there is an update, we just make the full data available at an +app-specific URL as a JSON file, using the same format that we use to provide +the initial data for the app on the device. The new data is stored on the +device, but it’s also retained in memory while the application is running via +Dojo’s Item File Read Store, which allows us to query it synchronously. The +need for version-by-version diffs has been eliminated.
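
To make that concrete, here’s a rough sketch of the kind of lookup this enables. The item shape, field names, and ids below are made up for illustration — the real payload is whatever the CMS emits — but the store API is Dojo’s dojo.data.ItemFileReadStore:

{% codeblock lang:javascript %}
dojo.require("dojo.data.ItemFileReadStore");

// Hypothetical shape of the per-app JSON payload.
var appData = {
  identifier: "id",
  items: [
    { id: "node123", type: "node", name: "Exhibit Hall" },
    { id: "node456", type: "node", name: "Gift Shop" }
  ]
};

var store = new dojo.data.ItemFileReadStore({ data: appData });

// With the data held in memory, lookups complete without any async I/O.
store.fetchItemByIdentity({
  identity: "node123",
  onItem: function (item) {
    console.log(store.getValue(item, "name")); // "Exhibit Hall"
  }
});
{% endcodeblock %}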

+

Restating the problem led to a simpler, more elegant solution that greatly +reduced the opportunities for errors and failure. As an added benefit, using +JSON has allowed us to meet needs that we never anticipated — the flexibility +it provides has become a valuable tool in our toolbox.

+

Identify pain points

If the point of a rewrite is to make development easier, then an important step +is to figure out what, exactly, is making development hard. Again, this was +a time to question assumptions — as it turned out, there were things that had +come to be accepted burdens that were actually relatively easy to address.

+

One of the biggest examples of this was the time required to develop and test +anything that might behave differently on one operating system versus another. +For example, the Android OS has limited support for the audio and video tags, +so a native workaround is required to play media on Android that is not +required on iOS.

+

In the original code, this device-specific branching was handled in a way that +undoubtedly made sense at the beginning but grew unwieldy over time. Developers +would create Mustache templates, wrapping the template tags in /* */ so the +templates were actually executable, and then compile those templates into plain +JavaScript files for production. Here are a few lines from one of those +templates:

+

{% codeblock lang:javascript %}
/* {{^android}} */
var mediaPath = "www/media/" + toura.pages.currentId + "/";
/* {{/android}} */
/* {{#android}} */
var mediaPath = [Toura.getTouraPath(), toura.pages.currentId].join("/");
/* {{/android}} */
var imagesList = [], dimensionsList = [], namesList = [], thumbsList = [];
var pos = -1, count = 0;
/* {{#android}} */
var pos = 0, count = 0;
/* {{/android}} */
{% endcodeblock %}

+

These templates were impossible to check with a code quality tool like JSHint, +because it was standard to declare the same variable multiple times. Multiple +declarations of the same variable meant that the order of those declarations +was important, which made the templates tremendously fragile. The theoretical +payoff was smaller code in production, but the cost of that byte shaving was +high, and the benefit somewhat questionable — after all, we’d be delivering the +code directly from the device, not over HTTP.

+

In the rewrite, we used a simple configuration object to specify information +about the environment, and then we look at the values in that configuration +object to determine how the app should behave. The configuration object is +created as part of building a production-ready app, but in development we can +alter configuration settings at will. Simple if statements replaced fragile +template tags.
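
A minimal sketch of the idea, mirroring the template example above — the configuration object’s name and shape here are illustrative, not the actual Toura code:

{% codeblock lang:javascript %}
// Hypothetical config: generated at build time for production,
// hand-editable during development.
toura.app.Config = {
  device: { os: "android", version: "2.2" }
};

// A plain if statement replaces the fragile template tags.
var mediaPath;

if (toura.app.Config.device.os === "android") {
  mediaPath = [ Toura.getTouraPath(), toura.pages.currentId ].join("/");
} else {
  mediaPath = "www/media/" + toura.pages.currentId + "/";
}
{% endcodeblock %}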

+

Since Dojo allows specifying code blocks for exclusion based on the settings +you provide to the build process, we could mark code for exclusion if we really +didn’t want it in production.
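
For reference, the Dojo build system does this with exclude pragmas; the pragma name and build flag below are invented, but the mechanism looks roughly like this:

{% codeblock lang:javascript %}
//>>excludeStart("devOnly", kwArgs.production);
// Development-only helpers, stripped from the built output when the
// (hypothetical) "production" flag is set in the build profile.
console.log("running with config:", toura.app.Config);
//>>excludeEnd("devOnly");
{% endcodeblock %}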

+

By using a configuration object instead of template tags for branching, we +eliminated a major pain point in day-to-day development. While nothing matches +the proving ground of the device itself, it’s now trivial to effectively +simulate different device experiences from the comfort of the browser. We do +the majority of our development there, with a high degree of confidence that +things will work mostly as expected once we reach the device. If you’ve ever +waited for an app to build and install to a device, then you know how much +faster it is to just press Command-R in your browser instead.

+

Have a communication manifesto

Deciding that you’re going to embrace an MVC-ish approach to an application is +a big step, but only a first step — there are a million more decisions you’re +going to need to make, big and small. One of the widest-reaching decisions to +make is how you’ll communicate among the various pieces of the application. +There are all sorts of levels of communication, from application-wide state +management — what page am I on? — to communication between UI components — when +a user enters a search term, how do I get and display the results?

+

From the outset, I had a fairly clear idea of how this should work based on +past experiences, but at first I took for granted that the other developers +would see things the same way I did, and I wasn’t necessarily consistent +myself. For a while we had several different patterns of communication, +depending on who had written the code and when. Every time you went to use +a component, it was pretty much a surprise which pattern it would use.

+

After one too many episodes of frustration, I realized that part of my job was going to be to lay down the law about this — it wasn’t that my way was more right than others, but rather that we needed to choose a way, or else reuse and maintenance were going to become a nightmare. Here’s what I came up with (a brief sketch of these patterns in action follows the list):

+
  • myComponent.set(key, value) to change state (with the help of setter methods from Dojo’s dijit._Widget mixin)
  • myComponent.on<Event>(componentEventData) to announce state changes and user interaction; Dojo lets us connect to the execution of arbitrary methods, so other pieces could listen for these methods to be executed.
  • dojo.publish(topic, [ data ]) to announce occurrences of app-wide interest, such as when the window is resized
  • myComponent.subscribe(topic) to allow individual components to react to published topics
+
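
Here’s a rough sketch of those patterns in play. The component names (searchBox, resultsList), the onSearch method, and the Data.search call are all hypothetical, but the set/connect/publish/subscribe calls are the Dojo facilities the list describes:

{% codeblock lang:javascript %}
// Inside a hypothetical search component (a dijit._Widget subclass),
// reacting to user input:
var terms = this.searchInput.value;

this.set("query", terms);          // state change via set(), dispatching to any custom setter
this.onSearch({ terms: terms });   // announcement other pieces can observe

// In a page controller, listen for that announcement ...
this.connect(this.searchBox, "onSearch", function (data) {
  this.resultsList.set("results", toura.app.Data.search(data.terms));
});

// ... and use publish/subscribe for occurrences of app-wide interest.
dojo.publish("/window/resize", [ { width: 320, height: 480 } ]);

this.subscribe("/window/resize", function (dimensions) {
  // re-layout this component in response to the resize
});
{% endcodeblock %}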

Once we spelled out the patterns, the immediate benefit +wasn’t maintainability or reuse; rather, we found that we didn’t have to make +these decisions on a component-by-component basis anymore, and we could focus +on the questions that were actually unique to a component. With conventions +we could rely on, we were constantly discovering new ways to abstract and DRY +our code, and the consistency across components meant it was easier to work +with code someone else had written.

+

Sanify asynchronicity

One of the biggest challenges of JavaScript development — well, besides working +with the DOM — is managing the asynchronicity of it all. In the old system, +this was dealt with in various ways: sometimes a method would take a success +callback and a failure callback; other times a function would return an object +and check one of its properties on an interval.

+

{% codeblock lang:javascript %}
images = toura.sqlite.getMedias(id, "image");

var onGetComplete = setInterval(function () {
  if (images.incomplete)
    return;

  clearInterval(onGetComplete);
  showImagesHelper(images.objs, choice);
}, 10);
{% endcodeblock %}

+

The problem here, of course, is that if images.incomplete never gets set to +false — that is, if the getMedias method fails — then the interval will never +get cleared. Dojo and now jQuery (since version 1.5) offer a facility for +handling this situation in an elegant and powerful way. In the new version of +the app, the above functionality looks something like this:

+

{% codeblock lang:javascript %}
toura.app.Data.get(id, 'image').then(showImages, showImagesFail);
{% endcodeblock %}

+

The get method of toura.app.Data returns an immutable promise +— the promise’s then method makes the resulting value of the asynchronous get +method available to showImages, but does not allow showImages to alter the +value. The promise returned by the get method can also be stored in a variable, +so that additional callbacks can be attached to it.
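
As a sketch of what that looks like on the producing side — the store and the internals of the method are invented here, but the Deferred API is Dojo’s (1.5+):

{% codeblock lang:javascript %}
toura.app.Data.get = function (id, type) {
  var dfd = new dojo.Deferred();

  // this._store is a stand-in for however the data is actually held.
  this._store.fetch({
    query: { node: id, type: type },
    onComplete: function (items) { dfd.resolve(items); },
    onError: function (err) { dfd.reject(err); }
  });

  // Hand back only the promise: callers can observe the outcome,
  // but can't resolve or reject it themselves.
  return dfd.promise;
};

// The promise can be stored, and given as many callbacks as we like.
var images = toura.app.Data.get(id, 'image');
images.then(showImages, showImagesFail);
images.then(function (imgs) { console.log(imgs.length + ' images'); });
{% endcodeblock %}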

+

Using promises vastly simplifies asynchronous code, which can be one of the +biggest sources of complexity in a non-trivial application. By using promises, +we got code that was easier to follow, components that were thoroughly +decoupled, and new flexibility in how we responded to the outcome of an +asynchronous operation.

+

Naming things is hard

Throughout the course of the rewrite we were constantly confronted with one of +those pressing questions developers wrestle with: what should I name this +variable/module/method/thing? Sometimes I would find myself feeling slightly +absurd about the amount of time we’d spend naming a thing, but just recently +I was reminded how much power those names have over our thinking.

+

Every application generated by the Toura CMS consists of a set of “nodes,” organized into a hierarchy. With the exception of pages that are standard across all apps, such as the search page, the base content type for a page inside an app is always a node — or rather, it was, until the other day. I was working on a new feature and struggling to figure out how I’d display a piece of content that was unique to the app but wasn’t really associated with a node at all. I pored over our existing code, seeing the word node on what felt like every other line. As an experiment, I changed that word node to baseObj in a few high-level files, and suddenly a whole world of solutions opened up to me — the name of a thing had been limiting my thinking.

+

The lesson here, for me, is that the time we spent (and spend) figuring out +what to name a thing is not lost time; perhaps even more importantly, the goal +should be to give a thing the most generic name that still conveys what the +thing’s job — in the context in which you’ll use the thing — actually is.

+

Never write large apps

I touched on this earlier, but if there is one lesson I take from every large +app I’ve worked on, it is this:

+
+

The secret to building large apps is never build large apps. Break up your +applications into small pieces. Then, assemble those testable, bite-sized +pieces into your big application. - Justin Meyer

+
+

The more tied components are to each other, the less reusable they will be, and +the more difficult it becomes to make changes to one without accidentally +affecting another. Much like we had a manifesto of sorts for communication +among components, we strived for a clear delineation of responsibilities among +our components. Each one should do one thing and do it well.

+

For example, simply rendering a page involves several small, single-purpose +components:

+

{% codeblock lang:javascript %}
function nodeRoute(route, nodeId, pageState) {
  pageState = pageState || {};

  var nodeModel = toura.app.Data.getModel(nodeId),
      page = toura.app.UI.getCurrentPage();

  if (!nodeModel) {
    toura.app.Router.home();
    return;
  }

  if (!page || !page.node || nodeId !== page.node.id) {
    page = toura.app.PageFactory.createPage('node', nodeModel);

    if (page.failure) {
      toura.app.Router.back();
      return;
    }

    toura.app.UI.showPage(page, nodeModel);
  }

  page.init(pageState);

  // record node pageview if it is node-only
  if (nodeId && !pageState.assetType) {
    dojo.publish('/node/view', [ route.hash ]);
  }

  return true;
}
{% endcodeblock %}

+

The router observes a URL change, parses the parameters for the route from the +URL, and passes those parameters to a function. The Data component gets the +relevant data, and then hands it to the PageFactory component to generate the +page. As the page is generated, the individual components for the page are also +created and placed in the page. The PageFactory component returns the generated +page, but at this point the page is not in the DOM. The UI component receives +it, places it in the DOM, and handles the animation from the old page to the +new one.

+

Every step is its own tiny app, making the whole process tremendously testable. +The output of one step may become the input to another step, but when input and +output are predictable, the questions our tests need to answer are trivial: +“When I asked the Data component for the data for node123, did I get the data +for node123?”
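
A framework-agnostic sketch of what one of those questions looks like as a test — the node id and assertions are purely illustrative:

{% codeblock lang:javascript %}
var model = toura.app.Data.getModel('node123');

console.assert(model && model.id === 'node123',
  'Data component returned the data for node123');

var page = toura.app.PageFactory.createPage('node', model);

console.assert(page && !page.failure,
  'PageFactory produced a page for the node model');
{% endcodeblock %}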

+

Individual UI components are their own tiny apps as well. On a page that +displays a videos node, we have a video player component, a video list +component, and a video caption component. Selecting a video in the list +announces the selection via the list’s onSelect method. Dojo allows us to +connect to the execution of object methods, so in the page controller, we have +this:

+

{% codeblock lang:javascript %}
this.connect(this.videoList, 'onSelect', function(assetId) {
  var video = this._videoById(assetId);
  this.videoCaption.set('content', video.caption || '');
  this.videoPlayer.play(assetId);
});
{% endcodeblock %}

+

The page controller receives the message and passes it along to the other +components that need to know about it — components don’t communicate directly +with one another. This means the component that lists the videos can list +anything, not just videos — its only job is to announce a selection, not to do +anything as a result.

+

Keep rewriting

+

It takes confidence to throw work away … When people first start drawing, +they’re often reluctant to redo parts that aren’t right … they convince +themselves that the drawing is not that bad, really — in fact, maybe they meant +it to look that way. - Paul Graham, “Taste for Makers”

+
+

The blank slate offered by a rewrite allows us to fix old mistakes, but +inevitably we will make new ones in the process. As good stewards of our code, +we must always be open to the possibility of a better way of doing a thing. “It +works” should never be mistaken for “it’s done.”

+]]>
A new chapter [a-new-chapter] http://rmurphey.com/2011/05/31/a-new-chapter (Tue, 31 May 2011 0:00:00 +0000)

It was three years ago this summer that I got the call, bought the Yuengling, smoked the cigarettes, and began life as an independent consultant. It’s been (almost) three years of ups and downs, and, eventually, among the most rewarding experiences of my life. Day by day, I wrote my own job description, found my own clients, set my own schedule, and set my own agenda.

+ +

Starting tomorrow, it’s time for a new chapter in my working life: I’ll be joining Toura Mobile full-time as their lead JavaScript developer, continuing my work with them on creating a PhoneGap- and Dojo-based platform for the rapid creation of content-rich mobile applications.

+ +

I’ve been working with Toura for about six months now, starting shortly after I met Matt Rogish, their director of development, at a JavaScript event in New York. They brought me on as a consultant to review their existing application, and the eventual decision was to rewrite it from the ground up, using the lessons learned and knowledge gained from the first version to inform the second. It was a risky decision, but it’s paid off: earlier this year, Toura started shipping apps built with the rewritten system, and the care we took to create modular, loosely coupled components from the get-go has paid off immensely, meeting current needs while making it easier to develop new features. With the rewrite behind us, these days we’re using the solid foundation we built to allow users of the platform to create ever more customized experiences in their applications.

+ +

If you know me at all, you know that I’ve been pretty die-hard about being an independent consultant, so you might think this was a difficult decision. Oddly, it wasn’t — I’ve enjoyed these last several months immensely, the team I work with is fantastic, and I’ve never felt more proud of work I’ve done. Whenever I found myself wondering whether Toura might eventually tire of paying my consulting rates, I’d get downright mopey. Over the course of three years, I’ve worked hard for all of my clients, but this is the first time I’ve felt so invested in a project’s success or failure, like there was a real and direct correlation between my efforts and the outcome. It’s a heady feeling, and I hope and expect it to continue for a while.

+ +

By the way, I’ll be talking about the rewrite at both TXJS and GothamJS in the next few weeks.

+ +

Also: we’re hiring :)

]]>
Getting Better at JavaScript [getting-better-at-javascript] http://rmurphey.com/2011/05/20/getting-better-at-javascript (Fri, 20 May 2011 0:00:00 +0000)

I seem to be getting a lot of emails these days asking a deceptively simple question: “How do I get better at JavaScript?” What follows are some semi-random thoughts on the subject:

+

The thing that I’ve come to realize about these questions is that some things +just take time. I wish I could write down “Ten Things You Need to Know to Make +You Amazing at the JavaScript,” but it doesn’t work that way. Books are +fantastic at exposing you to guiding principles and patterns, but if your brain +isn’t ready to connect them with real-world problems, it won’t.

+

The number one thing that will make you better at writing JavaScript is writing JavaScript. It’s OK if you cringe at it six months from now. It’s OK if you know it could be better if you only understood X, Y, or Z a little bit better. Cultivate dissatisfaction, and fear the day when you aren’t disappointed with the code you wrote last month.

+

Encounters with new concepts are almost always eventually rewarding, but in the short term I’ve found they can be downright demoralizing if you’re not aware of the bigger picture. The first step to being better at a thing is realizing you could be better at that thing, and initially that realization tends to involve being overwhelmed with all you don’t know. The first JSConf, in 2009, was exactly this for me. I showed up eager to learn but feeling pretty cocky about my skills. I left brutally aware of the smallness of my knowledge, and it was a transformational experience: getting good at a thing involves seeking out opportunities to feel small.

+

One of the most helpful things in my learning has been having access to smart +people who are willing to answer my questions and help me when I get stuck. +Meeting these people and maintaining relationships with them is hard work, and +it generally involves interacting with them in real life, not just on the +internet, but the dividends of this investment are unfathomable.

+

To that end, attend conferences. Talk to the speakers and ask them questions. +Write them emails afterwards saying that it was nice to meet them. Subscribe to +their blogs. Pay attention to what they’re doing and evangelize their good +work.

+

Remember, too, that local meetups can be good exposure to new ideas too, even +if on a smaller scale. The added bonus of local meetups is that the people +you’ll meet there are … local! It’s easy to maintain relationships with them +and share in learning with them in real life.

+

(An aside: If your company won’t pay for you to attend any conferences, make +clear how short-sighted your company’s decision is and start looking for a new +job, because your company does not deserve you. Then, if you can, cough up the +money and go anyway. As a self-employed consultant, I still managed to find +something like $10,000 to spend on travel- and conference-related expenses last +year, and I consider every penny of it to be money spent on being better at +what I do. When I hear about big companies that won’t fork over even a fraction +of that for an employee who is raising their hand and saying “help me be better +at what I do!”, I rage.)

+

Make a point of following the bug tracker and repository for an active +open-source project. Read the bug reports. Try the test cases. Understand the +commits. I admit that I have never been able to make myself do this for +extended periods of time, but I try to drop in on certain projects now and then +because it exposes me to arbitrary code and concepts that I might not otherwise +run into.

+

Read the source for your favorite library, and refer to it when you need to +know how a method works. Consult the documentation when there’s some part of +the source you don’t understand. When choosing tools and plugins, read the +source, and see whether there are things you’d do differently.

+

Eavesdrop on communities, and participate when you have something helpful to add. Lurk on a mailing list or a forum or in an IRC channel, and help other people solve problems. If you’re not a help vampire — if you give more than you take — the “elders” of a community will notice, and you will be rewarded with their willingness to help you when it matters.

+

Finally, books:

+
  • JavaScript: The Good Parts, by Douglas Crockford. It took me more than one try to get through this not-very-thick book, and it is not gospel. However, it is mandatory reading for any serious JavaScript developer.
  • Eloquent JavaScript, by Marijn Haverbeke (also in print). This is another book that I consider mandatory; you may not read straight through it, but you should have it close at hand. I like it so much that I actually bought the print version, and then was lucky enough to get a signed copy from Marijn at JSConf 2011.
  • JavaScript Patterns, by Stoyan Stefanov. This was the book that showed me there were names for so many patterns that I’d discovered purely through fumbling around with my own code. I read it on the flight to the 2010 Boston jQuery Conference, and it’s definitely the kind of book that I wouldn’t have gotten as much out of a year earlier, when I had a lot less experience with the kinds of problems it addresses.
  • Object-Oriented JavaScript, by Stoyan Stefanov. It’s been ages since I read this book, and so I confess that I don’t have a strong recollection of it, but it was probably the first book I read that got me thinking about structuring JavaScript code beyond the “get some elements, do something with them” paradigm of jQuery.
+

Good luck.

+]]>
\ No newline at end of file diff --git a/www/index.html b/www/index.html new file mode 100644 index 0000000..14776f7 --- /dev/null +++ b/www/index.html @@ -0,0 +1,118 @@ +Adventures in JavaScript Development

rmurphey adventures in javascript

Latest: Building for HTTP/2

Earlier this year, I got the chance to speak with Google's Ilya Grigorik about HTTP/2 for the 1.10 episode of the TTL Podcast. It was a great primer for me on how HTTP/2 works and what it means for how we build the web, but it wasn't until more recently that I started to think about what it means for how we build the web — that is, how we generate and deploy the HTML, CSS, and JS that power web applications.

+

If you're not familiar with HTTP/2, the basics are simultaneously simple and mind-boggling. Whereas its predecessors allowed each connection to a server to serve only one request at a time, HTTP/2 allows a connection to serve multiple requests simultaneously. A connection can also be used for a server to push a resource to a client — a protocol-level replacement for the technique we currently call “inlining.”

+

This is everything-you-thought-you-knew-is-wrong kind of stuff. In an HTTP/2 world, there are few benefits to concatenating a bunch of JS files together, and in many cases the practice will be actively harmful. Domain sharding becomes an anti-pattern. Throwing a bunch of <script> tags in your HTML is suddenly not a laughably terrible idea. Inlining of resources is a thing of the past. Browser caching — and cache busting — can occur on a per-module basis.

Read the rest of this entry »

\ No newline at end of file diff --git a/www/js/highlight.min.js b/www/js/highlight.min.js new file mode 100644 index 0000000..ec66d7e --- /dev/null +++ b/www/js/highlight.min.js @@ -0,0 +1 @@ +var hljs=new function(){function j(v){return v.replace(/&/gm,"&").replace(//gm,">")}function t(v){return v.nodeName.toLowerCase()}function h(w,x){var v=w&&w.exec(x);return v&&v.index==0}function r(w){var v=(w.className+" "+(w.parentNode?w.parentNode.className:"")).split(/\s+/);v=v.map(function(x){return x.replace(/^lang(uage)?-/,"")});return v.filter(function(x){return i(x)||/no(-?)highlight/.test(x)})[0]}function o(x,y){var v={};for(var w in x){v[w]=x[w]}if(y){for(var w in y){v[w]=y[w]}}return v}function u(x){var v=[];(function w(y,z){for(var A=y.firstChild;A;A=A.nextSibling){if(A.nodeType==3){z+=A.nodeValue.length}else{if(A.nodeType==1){v.push({event:"start",offset:z,node:A});z=w(A,z);if(!t(A).match(/br|hr|img|input/)){v.push({event:"stop",offset:z,node:A})}}}}return z})(x,0);return v}function q(w,y,C){var x=0;var F="";var z=[];function B(){if(!w.length||!y.length){return w.length?w:y}if(w[0].offset!=y[0].offset){return(w[0].offset"}function E(G){F+=""}function v(G){(G.event=="start"?A:E)(G.node)}while(w.length||y.length){var D=B();F+=j(C.substr(x,D[0].offset-x));x=D[0].offset;if(D==w){z.reverse().forEach(E);do{v(D.splice(0,1)[0]);D=B()}while(D==w&&D.length&&D[0].offset==x);z.reverse().forEach(A)}else{if(D[0].event=="start"){z.push(D[0].node)}else{z.pop()}v(D.splice(0,1)[0])}}return F+j(C.substr(x))}function m(y){function v(z){return(z&&z.source)||z}function w(A,z){return RegExp(v(A),"m"+(y.cI?"i":"")+(z?"g":""))}function x(D,C){if(D.compiled){return}D.compiled=true;D.k=D.k||D.bK;if(D.k){var z={};var E=function(G,F){if(y.cI){F=F.toLowerCase()}F.split(" ").forEach(function(H){var I=H.split("|");z[I[0]]=[G,I[1]?Number(I[1]):1]})};if(typeof D.k=="string"){E("keyword",D.k)}else{Object.keys(D.k).forEach(function(F){E(F,D.k[F])})}D.k=z}D.lR=w(D.l||/\b[A-Za-z0-9_]+\b/,true);if(C){if(D.bK){D.b="\\b("+D.bK.split(" ").join("|")+")\\b"}if(!D.b){D.b=/\B|\b/}D.bR=w(D.b);if(!D.e&&!D.eW){D.e=/\B|\b/}if(D.e){D.eR=w(D.e)}D.tE=v(D.e)||"";if(D.eW&&C.tE){D.tE+=(D.e?"|":"")+C.tE}}if(D.i){D.iR=w(D.i)}if(D.r===undefined){D.r=1}if(!D.c){D.c=[]}var B=[];D.c.forEach(function(F){if(F.v){F.v.forEach(function(G){B.push(o(F,G))})}else{B.push(F=="self"?D:F)}});D.c=B;D.c.forEach(function(F){x(F,D)});if(D.starts){x(D.starts,C)}var A=D.c.map(function(F){return F.bK?"\\.?("+F.b+")\\.?":F.b}).concat([D.tE,D.i]).map(v).filter(Boolean);D.t=A.length?w(A.join("|"),true):{exec:function(F){return null}}}x(y)}function c(T,L,J,R){function v(V,W){for(var U=0;U";V+=aa+'">';return V+Y+Z}function N(){if(!I.k){return j(C)}var U="";var X=0;I.lR.lastIndex=0;var V=I.lR.exec(C);while(V){U+=j(C.substr(X,V.index-X));var W=E(I,V);if(W){H+=W[1];U+=w(W[0],j(V[0]))}else{U+=j(V[0])}X=I.lR.lastIndex;V=I.lR.exec(C)}return U+j(C.substr(X))}function F(){if(I.sL&&!f[I.sL]){return j(C)}var U=I.sL?c(I.sL,C,true,S):e(C);if(I.r>0){H+=U.r}if(I.subLanguageMode=="continuous"){S=U.top}return w(U.language,U.value,false,true)}function Q(){return I.sL!==undefined?F():N()}function P(W,V){var U=W.cN?w(W.cN,"",true):"";if(W.rB){D+=U;C=""}else{if(W.eB){D+=j(V)+U;C=""}else{D+=U;C=V}}I=Object.create(W,{parent:{value:I}})}function G(U,Y){C+=U;if(Y===undefined){D+=Q();return 0}var W=v(Y,I);if(W){D+=Q();P(W,Y);return W.rB?0:Y.length}var X=z(I,Y);if(X){var 
V=I;if(!(V.rE||V.eE)){C+=Y}D+=Q();do{if(I.cN){D+=""}H+=I.r;I=I.parent}while(I!=X.parent);if(V.eE){D+=j(Y)}C="";if(X.starts){P(X.starts,"")}return V.rE?0:Y.length}if(A(Y,I)){throw new Error('Illegal lexeme "'+Y+'" for mode "'+(I.cN||"")+'"')}C+=Y;return Y.length||1}var M=i(T);if(!M){throw new Error('Unknown language: "'+T+'"')}m(M);var I=R||M;var S;var D="";for(var K=I;K!=M;K=K.parent){if(K.cN){D=w(K.cN,"",true)+D}}var C="";var H=0;try{var B,y,x=0;while(true){I.t.lastIndex=x;B=I.t.exec(L);if(!B){break}y=G(L.substr(x,B.index-x),B[0]);x=B.index+y}G(L.substr(x));for(var K=I;K.parent;K=K.parent){if(K.cN){D+=""}}return{r:H,value:D,language:T,top:I}}catch(O){if(O.message.indexOf("Illegal")!=-1){return{r:0,value:j(L)}}else{throw O}}}function e(y,x){x=x||b.languages||Object.keys(f);var v={r:0,value:j(y)};var w=v;x.forEach(function(z){if(!i(z)){return}var A=c(z,y,false);A.language=z;if(A.r>w.r){w=A}if(A.r>v.r){w=v;v=A}});if(w.language){v.second_best=w}return v}function g(v){if(b.tabReplace){v=v.replace(/^((<[^>]+>|\t)+)/gm,function(w,z,y,x){return z.replace(/\t/g,b.tabReplace)})}if(b.useBR){v=v.replace(/\n/g,"
")}return v}function p(A){var B=r(A);if(/no(-?)highlight/.test(B)){return}var y;if(b.useBR){y=document.createElementNS("http://www.w3.org/1999/xhtml","div");y.innerHTML=A.innerHTML.replace(/\n/g,"").replace(//g,"\n")}else{y=A}var z=y.textContent;var v=B?c(B,z,true):e(z);var x=u(y);if(x.length){var w=document.createElementNS("http://www.w3.org/1999/xhtml","div");w.innerHTML=v.value;v.value=q(x,u(w),z)}v.value=g(v.value);A.innerHTML=v.value;A.className+=" hljs "+(!B&&v.language||"");A.result={language:v.language,re:v.r};if(v.second_best){A.second_best={language:v.second_best.language,re:v.second_best.r}}}var b={classPrefix:"hljs-",tabReplace:null,useBR:false,languages:undefined};function s(v){b=o(b,v)}function l(){if(l.called){return}l.called=true;var v=document.querySelectorAll("pre code");Array.prototype.forEach.call(v,p)}function a(){addEventListener("DOMContentLoaded",l,false);addEventListener("load",l,false)}var f={};var n={};function d(v,x){var w=f[v]=x(this);if(w.aliases){w.aliases.forEach(function(y){n[y]=v})}}function k(){return Object.keys(f)}function i(v){return f[v]||f[n[v]]}this.highlight=c;this.highlightAuto=e;this.fixMarkup=g;this.highlightBlock=p;this.configure=s;this.initHighlighting=l;this.initHighlightingOnLoad=a;this.registerLanguage=d;this.listLanguages=k;this.getLanguage=i;this.inherit=o;this.IR="[a-zA-Z][a-zA-Z0-9_]*";this.UIR="[a-zA-Z_][a-zA-Z0-9_]*";this.NR="\\b\\d+(\\.\\d+)?";this.CNR="(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)";this.BNR="\\b(0b[01]+)";this.RSR="!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~";this.BE={b:"\\\\[\\s\\S]",r:0};this.ASM={cN:"string",b:"'",e:"'",i:"\\n",c:[this.BE]};this.QSM={cN:"string",b:'"',e:'"',i:"\\n",c:[this.BE]};this.PWM={b:/\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such)\b/};this.CLCM={cN:"comment",b:"//",e:"$",c:[this.PWM]};this.CBCM={cN:"comment",b:"/\\*",e:"\\*/",c:[this.PWM]};this.HCM={cN:"comment",b:"#",e:"$",c:[this.PWM]};this.NM={cN:"number",b:this.NR,r:0};this.CNM={cN:"number",b:this.CNR,r:0};this.BNM={cN:"number",b:this.BNR,r:0};this.CSSNM={cN:"number",b:this.NR+"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",r:0};this.RM={cN:"regexp",b:/\//,e:/\/[gim]*/,i:/\n/,c:[this.BE,{b:/\[/,e:/\]/,r:0,c:[this.BE]}]};this.TM={cN:"title",b:this.IR,r:0};this.UTM={cN:"title",b:this.UIR,r:0}}();hljs.registerLanguage("bash",function(b){var a={cN:"variable",v:[{b:/\$[\w\d#@][\w\d_]*/},{b:/\$\{(.*?)\}/}]};var d={cN:"string",b:/"/,e:/"/,c:[b.BE,a,{cN:"variable",b:/\$\(/,e:/\)/,c:[b.BE]}]};var c={cN:"string",b:/'/,e:/'/};return{aliases:["sh","zsh"],l:/-?[a-z\.]+/,k:{keyword:"if then else elif fi for break continue while in do done exit return set declare case esac export exec",literal:"true false",built_in:"printf echo read cd pwd pushd popd dirs let eval unset typeset readonly getopts source shopt caller type hash bind help sudo",operator:"-ne -eq -lt -gt -f -d -e -s -l -a"},c:[{cN:"shebang",b:/^#![^\n]+sh\s*$/,r:10},{cN:"function",b:/\w[\w\d_]*\s*\(\s*\)\s*\{/,rB:true,c:[b.inherit(b.TM,{b:/\w[\w\d_]*/})],r:0},b.HCM,b.NM,d,c,a]}});hljs.registerLanguage("ruby",function(f){var j="[a-zA-Z_]\\w*[!?=]?|[-+~]\\@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?";var i="and false then defined module in return redo if BEGIN retry end for true self when next until do begin unless END rescue nil else break undef not 
super class case require yield alias while ensure elsif or include attr_reader attr_writer attr_accessor";var b={cN:"yardoctag",b:"@[A-Za-z]+"};var c={cN:"value",b:"#<",e:">"};var k={cN:"comment",v:[{b:"#",e:"$",c:[b]},{b:"^\\=begin",e:"^\\=end",c:[b],r:10},{b:"^__END__",e:"\\n$"}]};var d={cN:"subst",b:"#\\{",e:"}",k:i};var e={cN:"string",c:[f.BE,d],v:[{b:/'/,e:/'/},{b:/"/,e:/"/},{b:"%[qw]?\\(",e:"\\)"},{b:"%[qw]?\\[",e:"\\]"},{b:"%[qw]?{",e:"}"},{b:"%[qw]?<",e:">"},{b:"%[qw]?/",e:"/"},{b:"%[qw]?%",e:"%"},{b:"%[qw]?-",e:"-"},{b:"%[qw]?\\|",e:"\\|"},{b:/\B\?(\\\d{1,3}|\\x[A-Fa-f0-9]{1,2}|\\u[A-Fa-f0-9]{4}|\\?\S)\b/}]};var a={cN:"params",b:"\\(",e:"\\)",k:i};var h=[e,c,k,{cN:"class",bK:"class module",e:"$|;",i:/=/,c:[f.inherit(f.TM,{b:"[A-Za-z_]\\w*(::\\w+)*(\\?|\\!)?"}),{cN:"inheritance",b:"<\\s*",c:[{cN:"parent",b:"("+f.IR+"::)?"+f.IR}]},k]},{cN:"function",bK:"def",e:" |$|;",r:0,c:[f.inherit(f.TM,{b:j}),a,k]},{cN:"constant",b:"(::)?(\\b[A-Z]\\w*(::)?)+",r:0},{cN:"symbol",b:f.UIR+"(\\!|\\?)?:",r:0},{cN:"symbol",b:":",c:[e,{b:j}],r:0},{cN:"number",b:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",r:0},{cN:"variable",b:"(\\$\\W)|((\\$|\\@\\@?)(\\w+))"},{b:"("+f.RSR+")\\s*",c:[c,k,{cN:"regexp",c:[f.BE,d],i:/\n/,v:[{b:"/",e:"/[a-z]*"},{b:"%r{",e:"}[a-z]*"},{b:"%r\\(",e:"\\)[a-z]*"},{b:"%r!",e:"![a-z]*"},{b:"%r\\[",e:"\\][a-z]*"}]}],r:0}];d.c=h;a.c=h;var g=[{b:/^\s*=>/,cN:"status",starts:{e:"$",c:h}},{cN:"prompt",b:/^\S[^=>\n]*>+/,starts:{e:"$",c:h}}];return{aliases:["rb","gemspec","podspec","thor","irb"],k:i,c:[k].concat(g).concat(h)}});hljs.registerLanguage("diff",function(a){return{aliases:["patch"],c:[{cN:"chunk",r:10,v:[{b:/^\@\@ +\-\d+,\d+ +\+\d+,\d+ +\@\@$/},{b:/^\*\*\* +\d+,\d+ +\*\*\*\*$/},{b:/^\-\-\- +\d+,\d+ +\-\-\-\-$/}]},{cN:"header",v:[{b:/Index: /,e:/$/},{b:/=====/,e:/=====$/},{b:/^\-\-\-/,e:/$/},{b:/^\*{3} /,e:/$/},{b:/^\+\+\+/,e:/$/},{b:/\*{5}/,e:/\*{5}$/}]},{cN:"addition",b:"^\\+",e:"$"},{cN:"deletion",b:"^\\-",e:"$"},{cN:"change",b:"^\\!",e:"$"}]}});hljs.registerLanguage("javascript",function(a){return{aliases:["js"],k:{keyword:"in if for while finally var new function do return void else break catch instanceof with throw case default try this switch continue typeof delete let yield const class",literal:"true false null undefined NaN Infinity",built_in:"eval isFinite isNaN parseFloat parseInt decodeURI decodeURIComponent encodeURI encodeURIComponent escape unescape Object Function Boolean Error EvalError InternalError RangeError ReferenceError StopIteration SyntaxError TypeError URIError Number Math Date String RegExp Array Float32Array Float64Array Int16Array Int32Array Int8Array Uint16Array Uint32Array Uint8Array Uint8ClampedArray ArrayBuffer DataView JSON Intl arguments require module console window document"},c:[{cN:"pi",b:/^\s*('|")use strict('|")/,r:10},a.ASM,a.QSM,a.CLCM,a.CBCM,a.CNM,{b:"("+a.RSR+"|\\b(case|return|throw)\\b)\\s*",k:"return throw case",c:[a.CLCM,a.CBCM,a.RM,{b:/;/,r:0,sL:"xml"}],r:0},{cN:"function",bK:"function",e:/\{/,eE:true,c:[a.inherit(a.TM,{b:/[A-Za-z$_][0-9A-Za-z$_]*/}),{cN:"params",b:/\(/,e:/\)/,c:[a.CLCM,a.CBCM],i:/["'\(]/}],i:/\[|%/},{b:/\$[(.]/},{b:"\\."+a.IR,r:0}]}});hljs.registerLanguage("lua",function(b){var a="\\[=*\\[";var e="\\]=*\\]";var c={b:a,e:e,c:["self"]};var d=[{cN:"comment",b:"--(?!"+a+")",e:"$"},{cN:"comment",b:"--"+a,e:e,c:[c],r:10}];return{l:b.UIR,k:{keyword:"and break do else elseif end false for if in local nil not or repeat return then true until while",built_in:"_G _VERSION assert collectgarbage 
dofile error getfenv getmetatable ipairs load loadfile loadstring module next pairs pcall print rawequal rawget rawset require select setfenv setmetatable tonumber tostring type unpack xpcall coroutine debug io math os package string table"},c:d.concat([{cN:"function",bK:"function",e:"\\)",c:[b.inherit(b.TM,{b:"([_a-zA-Z]\\w*\\.)*([_a-zA-Z]\\w*:)?[_a-zA-Z]\\w*"}),{cN:"params",b:"\\(",eW:true,c:d}].concat(d)},b.CNM,b.ASM,b.QSM,{cN:"string",b:a,e:e,c:[c],r:5}])}});hljs.registerLanguage("xml",function(a){var c="[A-Za-z0-9\\._:-]+";var d={b:/<\?(php)?(?!\w)/,e:/\?>/,sL:"php",subLanguageMode:"continuous"};var b={eW:true,i:/]+/}]}]}]};return{aliases:["html","xhtml","rss","atom","xsl","plist"],cI:true,c:[{cN:"doctype",b:"",r:10,c:[{b:"\\[",e:"\\]"}]},{cN:"comment",b:"",r:10},{cN:"cdata",b:"<\\!\\[CDATA\\[",e:"\\]\\]>",r:10},{cN:"tag",b:"|$)",e:">",k:{title:"style"},c:[b],starts:{e:"",rE:true,sL:"css"}},{cN:"tag",b:"|$)",e:">",k:{title:"script"},c:[b],starts:{e:"<\/script>",rE:true,sL:"javascript"}},{b:"<%",e:"%>",sL:"vbscript"},d,{cN:"pi",b:/<\?\w+/,e:/\?>/,r:10},{cN:"tag",b:"",c:[{cN:"title",b:/[^ \/><\n\t]+/,r:0},b]}]}});hljs.registerLanguage("markdown",function(a){return{aliases:["md","mkdown","mkd"],c:[{cN:"header",v:[{b:"^#{1,6}",e:"$"},{b:"^.+?\\n[=-]{2,}$"}]},{b:"<",e:">",sL:"xml",r:0},{cN:"bullet",b:"^([*+-]|(\\d+\\.))\\s+"},{cN:"strong",b:"[*_]{2}.+?[*_]{2}"},{cN:"emphasis",v:[{b:"\\*.+?\\*"},{b:"_.+?_",r:0}]},{cN:"blockquote",b:"^>\\s+",e:"$"},{cN:"code",v:[{b:"`.+?`"},{b:"^( {4}|\t)",e:"$",r:0}]},{cN:"horizontal_rule",b:"^[-\\*]{3,}",e:"$"},{b:"\\[.+?\\][\\(\\[].*?[\\)\\]]",rB:true,c:[{cN:"link_label",b:"\\[",e:"\\]",eB:true,rE:true,r:0},{cN:"link_url",b:"\\]\\(",e:"\\)",eB:true,eE:true},{cN:"link_reference",b:"\\]\\[",e:"\\]",eB:true,eE:true}],r:10},{b:"^\\[.+\\]:",rB:true,c:[{cN:"link_reference",b:"\\[",e:"\\]:",eB:true,eE:true,starts:{cN:"link_url",e:"$"}}]}]}});hljs.registerLanguage("css",function(a){var b="[a-zA-Z-][a-zA-Z0-9_-]*";var c={cN:"function",b:b+"\\(",rB:true,eE:true,e:"\\("};return{cI:true,i:"[=/|']",c:[a.CBCM,{cN:"id",b:"\\#[A-Za-z0-9_-]+"},{cN:"class",b:"\\.[A-Za-z0-9_-]+",r:0},{cN:"attr_selector",b:"\\[",e:"\\]",i:"$"},{cN:"pseudo",b:":(:)?[a-zA-Z0-9\\_\\-\\+\\(\\)\\\"\\']+"},{cN:"at_rule",b:"@(font-face|page)",l:"[a-z-]+",k:"font-face page"},{cN:"at_rule",b:"@",e:"[{;]",c:[{cN:"keyword",b:/\S+/},{b:/\s/,eW:true,eE:true,r:0,c:[c,a.ASM,a.QSM,a.CSSNM]}]},{cN:"tag",b:b,r:0},{cN:"rules",b:"{",e:"}",i:"[^\\s]",r:0,c:[a.CBCM,{cN:"rule",b:"[^\\s]",rB:true,e:";",eW:true,c:[{cN:"attribute",b:"[A-Z\\_\\.\\-]+",e:":",eE:true,i:"[^\\s]",starts:{cN:"value",eW:true,eE:true,c:[c,a.CSSNM,a.QSM,a.ASM,a.CBCM,{cN:"hexcolor",b:"#[0-9A-Fa-f]+"},{cN:"important",b:"!important"}]}}]}]}]}});hljs.registerLanguage("http",function(a){return{i:"\\S",c:[{cN:"status",b:"^HTTP/[0-9\\.]+",e:"$",c:[{cN:"number",b:"\\b\\d{3}\\b"}]},{cN:"request",b:"^[A-Z]+ (.*?) 
HTTP/[0-9\\.]+$",rB:true,e:"$",c:[{cN:"string",b:" ",e:" ",eB:true,eE:true}]},{cN:"attribute",b:"^\\w",e:": ",eE:true,i:"\\n|\\s|=",starts:{cN:"string",e:"$"}},{b:"\\n\\n",starts:{sL:"",eW:true}}]}});hljs.registerLanguage("php",function(b){var e={cN:"variable",b:"(\\$|->)+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*"};var a={cN:"preprocessor",b:/<\?(php)?|\?>/};var c={cN:"string",c:[b.BE,a],v:[{b:'b"',e:'"'},{b:"b'",e:"'"},b.inherit(b.ASM,{i:null}),b.inherit(b.QSM,{i:null})]};var d={v:[b.BNM,b.CNM]};return{aliases:["php3","php4","php5","php6"],cI:true,k:"and include_once list abstract global private echo interface as static endswitch array null if endwhile or const for endforeach self var while isset public protected exit foreach throw elseif include __FILE__ empty require_once do xor return parent clone use __CLASS__ __LINE__ else break print eval new catch __METHOD__ case exception default die require __FUNCTION__ enddeclare final try switch continue endfor endif declare unset true false trait goto instanceof insteadof __DIR__ __NAMESPACE__ yield finally",c:[b.CLCM,b.HCM,{cN:"comment",b:"/\\*",e:"\\*/",c:[{cN:"phpdoc",b:"\\s@[A-Za-z]+"},a]},{cN:"comment",b:"__halt_compiler.+?;",eW:true,k:"__halt_compiler",l:b.UIR},{cN:"string",b:"<<<['\"]?\\w+['\"]?$",e:"^\\w+;",c:[b.BE]},a,e,{cN:"function",bK:"function",e:/[;{]/,eE:true,i:"\\$|\\[|%",c:[b.UTM,{cN:"params",b:"\\(",e:"\\)",c:["self",e,b.CBCM,c,d]}]},{cN:"class",bK:"class interface",e:"{",eE:true,i:/[:\(\$"]/,c:[{bK:"extends implements"},b.UTM]},{bK:"namespace",e:";",i:/[\.']/,c:[b.UTM]},{bK:"use",e:";",c:[b.UTM]},{b:"=>"},c,d]}});hljs.registerLanguage("python",function(a){var f={cN:"prompt",b:/^(>>>|\.\.\.) /};var b={cN:"string",c:[a.BE],v:[{b:/(u|b)?r?'''/,e:/'''/,c:[f],r:10},{b:/(u|b)?r?"""/,e:/"""/,c:[f],r:10},{b:/(u|r|ur)'/,e:/'/,r:10},{b:/(u|r|ur)"/,e:/"/,r:10},{b:/(b|br)'/,e:/'/},{b:/(b|br)"/,e:/"/},a.ASM,a.QSM]};var d={cN:"number",r:0,v:[{b:a.BNR+"[lLjJ]?"},{b:"\\b(0o[0-7]+)[lLjJ]?"},{b:a.CNR+"[lLjJ]?"}]};var e={cN:"params",b:/\(/,e:/\)/,c:["self",f,d,b]};var c={e:/:/,i:/[${=;\n]/,c:[a.UTM,e]};return{aliases:["py","gyp"],k:{keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda nonlocal|10 None True False",built_in:"Ellipsis NotImplemented"},i:/(<\/|->|\?)/,c:[f,d,b,a.HCM,a.inherit(c,{cN:"function",bK:"def",r:10}),a.inherit(c,{cN:"class",bK:"class"}),{cN:"decorator",b:/@/,e:/$/},{b:/\b(print|exec)\(/}]}});hljs.registerLanguage("sql",function(a){var b={cN:"comment",b:"--",e:"$"};return{cI:true,i:/[<>]/,c:[{cN:"operator",bK:"begin end start commit rollback savepoint lock alter create drop rename call delete do handler insert load replace select truncate update set show pragma grant merge describe use explain help declare prepare execute deallocate savepoint release unlock purge reset change stop analyze cache flush optimize repair kill install uninstall checksum restore check backup",e:/;/,eW:true,k:{keyword:"abs absolute acos action add adddate addtime aes_decrypt aes_encrypt after aggregate all allocate alter analyze and any are as asc ascii asin assertion at atan atan2 atn2 authorization authors avg backup before begin benchmark between bin binlog bit_and bit_count bit_length bit_or bit_xor both by cache call cascade cascaded case cast catalog ceil ceiling chain change changed char_length character_length charindex charset check checksum checksum_agg choose close coalesce coercibility collate collation 
collationproperty column columns columns_updated commit compress concat concat_ws concurrent connect connection connection_id consistent constraint constraints continue contributors conv convert convert_tz corresponding cos cot count count_big crc32 create cross cume_dist curdate current current_date current_time current_timestamp current_user cursor curtime data database databases datalength date_add date_format date_sub dateadd datediff datefromparts datename datepart datetime2fromparts datetimeoffsetfromparts day dayname dayofmonth dayofweek dayofyear deallocate declare decode default deferrable deferred degrees delayed delete des_decrypt des_encrypt des_key_file desc describe descriptor diagnostics difference disconnect distinct distinctrow div do domain double drop dumpfile each else elt enclosed encode encrypt end end-exec engine engines eomonth errors escape escaped event eventdata events except exception exec execute exists exp explain export_set extended external extract fast fetch field fields find_in_set first first_value floor flush for force foreign format found found_rows from from_base64 from_days from_unixtime full function get get_format get_lock getdate getutcdate global go goto grant grants greatest group group_concat grouping grouping_id gtid_subset gtid_subtract handler having help hex high_priority hosts hour ident_current ident_incr ident_seed identified identity if ifnull ignore iif ilike immediate in index indicator inet6_aton inet6_ntoa inet_aton inet_ntoa infile initially inner innodb input insert install instr intersect into is is_free_lock is_ipv4 is_ipv4_compat is_ipv4_mapped is_not is_not_null is_used_lock isdate isnull isolation join key kill language last last_day last_insert_id last_value lcase lead leading least leaves left len lenght level like limit lines ln load load_file local localtime localtimestamp locate lock log log10 log2 logfile logs low_priority lower lpad ltrim make_set makedate maketime master master_pos_wait match matched max md5 medium merge microsecond mid min minute mod mode module month monthname mutex name_const names national natural nchar next no no_write_to_binlog not now nullif nvarchar oct octet_length of old_password on only open optimize option optionally or ord order outer outfile output pad parse partial partition password patindex percent_rank percentile_cont percentile_disc period_add period_diff pi plugin position pow power pragma precision prepare preserve primary prior privileges procedure procedure_analyze processlist profile profiles public publishingservername purge quarter query quick quote quotename radians rand read references regexp relative relaylog release release_lock rename repair repeat replace replicate reset restore restrict return returns reverse revoke right rlike rollback rollup round row row_count rows rpad rtrim savepoint schema scroll sec_to_time second section select serializable server session session_user set sha sha1 sha2 share show sign sin size slave sleep smalldatetimefromparts snapshot some soname soundex sounds_like space sql sql_big_result sql_buffer_result sql_cache sql_calc_found_rows sql_no_cache sql_small_result sql_variant_property sqlstate sqrt square start starting status std stddev stddev_pop stddev_samp stdev stdevp stop str str_to_date straight_join strcmp string stuff subdate substr substring subtime subtring_index sum switchoffset sysdate sysdatetime sysdatetimeoffset system_user sysutcdatetime table tables tablespace tan temporary terminated tertiary_weights then time time_format 
time_to_sec timediff timefromparts timestamp timestampadd timestampdiff timezone_hour timezone_minute to to_base64 to_days to_seconds todatetimeoffset trailing transaction translation trigger trigger_nestlevel triggers trim truncate try_cast try_convert try_parse ucase uncompress uncompressed_length unhex unicode uninstall union unique unix_timestamp unknown unlock update upgrade upped upper usage use user user_resources using utc_date utc_time utc_timestamp uuid uuid_short validate_password_strength value values var var_pop var_samp variables variance varp version view warnings week weekday weekofyear weight_string when whenever where with work write xml xor year yearweek zon",literal:"true false null",built_in:"array bigint binary bit blob boolean char character date dec decimal float int integer interval number numeric real serial smallint varchar varying int8 serial8 text"},c:[{cN:"string",b:"'",e:"'",c:[a.BE,{b:"''"}]},{cN:"string",b:'"',e:'"',c:[a.BE,{b:'""'}]},{cN:"string",b:"`",e:"`",c:[a.BE]},a.CNM,a.CBCM,b]},a.CBCM,b]}});hljs.registerLanguage("handlebars",function(b){var a="each in with if else unless bindattr action collection debugger log outlet template unbound view yield";return{aliases:["hbs","html.hbs","html.handlebars"],cI:true,sL:"xml",subLanguageMode:"continuous",c:[{cN:"expression",b:"{{",e:"}}",c:[{cN:"begin-block",b:"#[a-zA-Z- .]+",k:a},{cN:"string",b:'"',e:'"'},{cN:"end-block",b:"\\/[a-zA-Z- .]+",k:a},{cN:"variable",b:"[a-zA-Z-.]+",k:a}]}]}});hljs.registerLanguage("perl",function(c){var d="getpwent getservent quotemeta msgrcv scalar kill dbmclose undef lc ma syswrite tr send umask sysopen shmwrite vec qx utime local oct semctl localtime readpipe do return format read sprintf dbmopen pop getpgrp not getpwnam rewinddir qqfileno qw endprotoent wait sethostent bless s|0 opendir continue each sleep endgrent shutdown dump chomp connect getsockname die socketpair close flock exists index shmgetsub for endpwent redo lstat msgctl setpgrp abs exit select print ref gethostbyaddr unshift fcntl syscall goto getnetbyaddr join gmtime symlink semget splice x|0 getpeername recv log setsockopt cos last reverse gethostbyname getgrnam study formline endhostent times chop length gethostent getnetent pack getprotoent getservbyname rand mkdir pos chmod y|0 substr endnetent printf next open msgsnd readdir use unlink getsockopt getpriority rindex wantarray hex system getservbyport endservent int chr untie rmdir prototype tell listen fork shmread ucfirst setprotoent else sysseek link getgrgid shmctl waitpid unpack getnetbyname reset chdir grep split require caller lcfirst until warn while values shift telldir getpwuid my getprotobynumber delete and sort uc defined srand accept package seekdir getprotobyname semop our rename seek if q|0 chroot sysread setpwent no crypt getc chown sqrt write setnetent setpriority foreach tie sin msgget map stat getlogin unless elsif truncate exec keys glob tied closedirioctl socket readlink eval xor readline binmode setservent eof ord bind alarm pipe atan2 getgrent exp time push setgrent gt lt or ne m|0 break given say state when";var f={cN:"subst",b:"[$@]\\{",e:"\\}",k:d};var g={b:"->{",e:"}"};var a={cN:"variable",v:[{b:/\$\d/},{b:/[\$\%\@](\^\w\b|#\w+(\:\:\w+)*|{\w+}|\w+(\:\:\w*)*)/},{b:/[\$\%\@][^\s\w{]/,r:0}]};var e={cN:"comment",b:"^(__END__|__DATA__)",e:"\\n$",r:5};var h=[c.BE,f,a];var 
b=[a,c.HCM,e,{cN:"comment",b:"^\\=\\w",e:"\\=cut",eW:true},g,{cN:"string",c:h,v:[{b:"q[qwxr]?\\s*\\(",e:"\\)",r:5},{b:"q[qwxr]?\\s*\\[",e:"\\]",r:5},{b:"q[qwxr]?\\s*\\{",e:"\\}",r:5},{b:"q[qwxr]?\\s*\\|",e:"\\|",r:5},{b:"q[qwxr]?\\s*\\<",e:"\\>",r:5},{b:"qw\\s+q",e:"q",r:5},{b:"'",e:"'",c:[c.BE]},{b:'"',e:'"'},{b:"`",e:"`",c:[c.BE]},{b:"{\\w+}",c:[],r:0},{b:"-?\\w+\\s*\\=\\>",c:[],r:0}]},{cN:"number",b:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",r:0},{b:"(\\/\\/|"+c.RSR+"|\\b(split|return|print|reverse|grep)\\b)\\s*",k:"split return print reverse grep",r:0,c:[c.HCM,e,{cN:"regexp",b:"(s|tr|y)/(\\\\.|[^/])*/(\\\\.|[^/])*/[a-z]*",r:10},{cN:"regexp",b:"(m|qr)?/",e:"/[a-z]*",c:[c.BE],r:0}]},{cN:"sub",bK:"sub",e:"(\\s*\\(.*?\\))?[;{]",r:5},{cN:"operator",b:"-\\w\\b",r:0}];f.c=b;g.c=b;return{aliases:["pl"],k:d,c:b}});hljs.registerLanguage("coffeescript",function(c){var b={keyword:"in if for while finally new do return else break catch instanceof throw try this switch continue typeof delete debugger super then unless until loop of by when and or is isnt not",literal:"true false null undefined yes no on off",reserved:"case default function var void with const let enum export import native __hasProp __extends __slice __bind __indexOf",built_in:"npm require console print module global window document"};var a="[A-Za-z$_][0-9A-Za-z$_]*";var f=c.inherit(c.TM,{b:a});var e={cN:"subst",b:/#\{/,e:/}/,k:b};var d=[c.BNM,c.inherit(c.CNM,{starts:{e:"(\\s*/)?",r:0}}),{cN:"string",v:[{b:/'''/,e:/'''/,c:[c.BE]},{b:/'/,e:/'/,c:[c.BE]},{b:/"""/,e:/"""/,c:[c.BE,e]},{b:/"/,e:/"/,c:[c.BE,e]}]},{cN:"regexp",v:[{b:"///",e:"///",c:[e,c.HCM]},{b:"//[gim]*",r:0},{b:/\/(?![ *])(\\\/|.)*?\/[gim]*(?=\W|$)/}]},{cN:"property",b:"@"+a},{b:"`",e:"`",eB:true,eE:true,sL:"javascript"}];e.c=d;return{aliases:["coffee","cson","iced"],k:b,i:/\/\*/,c:d.concat([{cN:"comment",b:"###",e:"###"},c.HCM,{cN:"function",b:"(^\\s*|\\B)("+a+"\\s*=\\s*)?(\\(.*\\))?\\s*\\B[-=]>",e:"[-=]>",rB:true,c:[f,{cN:"params",b:"\\([^\\(]",rB:true,c:[{b:/\(/,e:/\)/,k:b,c:["self"].concat(d)}]}]},{cN:"class",bK:"class",e:"$",i:/[:="\[\]]/,c:[{bK:"extends",eW:true,i:/[:="\[\]]/,c:[f]},f]},{cN:"attribute",b:a+":",e:":",rB:true,eE:true,r:0}])}});hljs.registerLanguage("nginx",function(c){var b={cN:"variable",v:[{b:/\$\d+/},{b:/\$\{/,e:/}/},{b:"[\\$\\@]"+c.UIR}]};var a={eW:true,l:"[a-z/_]+",k:{built_in:"on off yes no true false none blocked debug info notice warn error crit select break last permanent redirect kqueue rtsig epoll poll /dev/poll"},r:0,i:"=>",c:[c.HCM,{cN:"string",c:[c.BE,b],v:[{b:/"/,e:/"/},{b:/'/,e:/'/}]},{cN:"url",b:"([a-z]+):/",e:"\\s",eW:true,eE:true,c:[b]},{cN:"regexp",c:[c.BE,b],v:[{b:"\\s\\^",e:"\\s|{|;",rE:true},{b:"~\\*?\\s+",e:"\\s|{|;",rE:true},{b:"\\*(\\.[a-z\\-]+)+"},{b:"([a-z\\-]+\\.)+\\*"}]},{cN:"number",b:"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d{1,5})?\\b"},{cN:"number",b:"\\b\\d+[kKmMgGdshdwy]*\\b",r:0},b]};return{aliases:["nginxconf"],c:[c.HCM,{b:c.UIR+"\\s",e:";|{",rB:true,c:[{cN:"title",b:c.UIR,starts:a}],r:0}],i:"[^\\s\\}]"}});hljs.registerLanguage("json",function(a){var e={literal:"true false null"};var d=[a.QSM,a.CNM];var c={cN:"value",e:",",eW:true,eE:true,c:d,k:e};var b={b:"{",e:"}",c:[{cN:"attribute",b:'\\s*"',e:'"\\s*:\\s*',eB:true,eE:true,c:[a.BE],i:"\\n",starts:c}],i:"\\S"};var f={b:"\\[",e:"\\]",c:[a.inherit(c,{cN:null})],i:"\\S"};d.splice(d.length,0,b,f);return{c:d,k:e,i:"\\S"}});hljs.registerLanguage("apache",function(a){var 
b={cN:"number",b:"[\\$%]\\d+"};return{aliases:["apacheconf"],cI:true,c:[a.HCM,{cN:"tag",b:""},{cN:"keyword",b:/\w+/,r:0,k:{common:"order deny allow setenv rewriterule rewriteengine rewritecond documentroot sethandler errordocument loadmodule options header listen serverroot servername"},starts:{e:/$/,r:0,k:{literal:"on off all"},c:[{cN:"sqbracket",b:"\\s\\[",e:"\\]$"},{cN:"cbracket",b:"[\\$%]\\{",e:"\\}",c:["self",b]},b,a.QSM]}}],i:/\S/}});hljs.registerLanguage("cpp",function(a){var b={keyword:"false int float while private char catch export virtual operator sizeof dynamic_cast|10 typedef const_cast|10 const struct for static_cast|10 union namespace unsigned long throw volatile static protected bool template mutable if public friend do return goto auto void enum else break new extern using true class asm case typeid short reinterpret_cast|10 default double register explicit signed typename try this switch continue wchar_t inline delete alignof char16_t char32_t constexpr decltype noexcept nullptr static_assert thread_local restrict _Bool complex _Complex _Imaginary",built_in:"std string cin cout cerr clog stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set bitset multiset multimap unordered_set unordered_map unordered_multiset unordered_multimap array shared_ptr abort abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf"};return{aliases:["c","h","c++","h++"],k:b,i:""]',k:"include",i:"\\n"},a.CLCM]},{cN:"stl_container",b:"\\b(deque|list|queue|stack|vector|map|set|bitset|multiset|multimap|unordered_map|unordered_set|unordered_multiset|unordered_multimap|array)\\s*<",e:">",k:b,c:["self"]},{b:a.IR+"::"}]}});hljs.registerLanguage("makefile",function(a){var b={cN:"variable",b:/\$\(/,e:/\)/,c:[a.BE]};return{aliases:["mk","mak"],c:[a.HCM,{b:/^\w+\s*\W*=/,rB:true,r:0,starts:{cN:"constant",e:/\s*\W*=/,eE:true,starts:{e:/$/,r:0,c:[b]}}},{cN:"title",b:/^[\w]+:\s*$/},{cN:"phony",b:/^\.PHONY:/,e:/$/,k:".PHONY",l:/[\.\w]+/},{b:/^\t+/,e:/$/,r:0,c:[a.QSM,b]}]}}); \ No newline at end of file diff --git a/www/search.html b/www/search.html new file mode 100644 index 0000000..f263871 --- /dev/null +++ b/www/search.html @@ -0,0 +1,119 @@ +Search

rmurphey adventures in javascript

Search

    \ No newline at end of file diff --git a/www/tag/business/index.html b/www/tag/business/index.html new file mode 100644 index 0000000..cc1670d --- /dev/null +++ b/www/tag/business/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

    rmurphey adventures in javascript

    Tagged with "business"

      \ No newline at end of file diff --git a/www/tag/code/index.html b/www/tag/code/index.html new file mode 100644 index 0000000..a17afdf --- /dev/null +++ b/www/tag/code/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

      rmurphey adventures in javascript

      Tagged with "code"

        \ No newline at end of file diff --git a/www/tag/personal/index.html b/www/tag/personal/index.html new file mode 100644 index 0000000..e09346f --- /dev/null +++ b/www/tag/personal/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

        rmurphey adventures in javascript

        Tagged with "personal"

          \ No newline at end of file diff --git a/www/tag/web/index.html b/www/tag/web/index.html new file mode 100644 index 0000000..aca59b9 --- /dev/null +++ b/www/tag/web/index.html @@ -0,0 +1,116 @@ +Adventures in JavaScript

          rmurphey adventures in javascript

          Tagged with "web"

            \ No newline at end of file