From c1c7d01bcde70f5b9116408a88880f3aa69078ba Mon Sep 17 00:00:00 2001 From: Mark Reid Date: Tue, 20 Jan 2009 13:13:31 +1100 Subject: [PATCH] Transferred URLs to .name. Slight CSS tweaks for comment counts. New posts --- _scripts/import_comments.rb | 49 ++- _scripts/transfer_urls.rb | 78 ++++ css/screen.css | 1 + ...6-information-divergence-and-risk.markdown | 37 ++ ...16-ml-and-stats-people-on-twitter.markdown | 78 ++++ inductio.csv | 158 ------- inductio.sql | 392 ------------------ inductio.sql.zip | Bin 228587 -> 0 bytes 8 files changed, 225 insertions(+), 568 deletions(-) create mode 100644 _scripts/transfer_urls.rb create mode 100644 iem/_posts/2009-01-06-information-divergence-and-risk.markdown create mode 100644 iem/_posts/2009-01-16-ml-and-stats-people-on-twitter.markdown delete mode 100644 inductio.csv delete mode 100644 inductio.sql delete mode 100644 inductio.sql.zip diff --git a/_scripts/import_comments.rb b/_scripts/import_comments.rb index b9fea94..54a1b98 100644 --- a/_scripts/import_comments.rb +++ b/_scripts/import_comments.rb @@ -25,6 +25,8 @@ DB_USER = 'root' DB_NAME = 'inductio' +TARGET_URL = 'http://mark.reid.dev/iem/' + # Gets the first forum key associated with USER_KEY def forum_key forum_list = get('get_forum_list', :user_api_key => USER_KEY) @@ -53,37 +55,38 @@ def convert(row) } end -def clean(comment) - comment.gsub!(/<\/?p>/,'') - comment +# Remove extraneous paragraph separators. Disqus interprets double +# newline as paragraphs +def clean(comment) + comment.gsub(/<\/?p>/,'') end +# Compute the URL for the given comment based on the DB entry def url(row) date_path = row[:comment_date_gmt].strftime("%Y/%m/%d") - "http://mark.reid.dev/iem/#{row[:post_name]}.html" + "#{TARGET_URL}#{row[:post_name]}.html" end +# Get the Disqus thread ID for the comment in the DB row def thread(row) - ident_str = "test-#{row[:post_name]}" + ident_str = "#{row[:post_name]}" data = { :forum_api_key => FORUM_KEY, :title => row[:post_title], :identifier => ident_str } - puts "Getting thread #{ident_str}..." response = JSON.parse( DISQUS['thread_by_identifier'].post(data) ) + unless response['succeeded'] + raise "Bad response to get thread ID for #{ident_str}" + end - # puts response.to_yaml - # puts "--- (end thread response)" - - raise "Bad response to get thread ID for #{ident_str}" unless response['succeeded'] - - puts "Thread [#{ident_str}] has title '#{response['message']['thread']['title']}'" + puts "Set thread [#{ident_str}] title to '#{response['message']['thread']['title']}'" response['message']['thread']['id'] end +# Set the URL of the Disqus thread to the given value def update(thread_id, url) data = { :forum_api_key => FORUM_KEY, @@ -95,9 +98,11 @@ def update(thread_id, url) response = JSON.parse( DISQUS['update_thread'].post(data) ) end -# Converts and sends a comment from the DB to Disqus with the given thread ID @unconverted = [] @threads = {} +# Converts and sends a comment from the DB to Disqus with the given thread ID +# Failed conversions are stored in @unconverted and the thread_id to URL mapping +# in @threads is updated def post(row, thread_id) data = convert(row) data[:forum_api_key] = FORUM_KEY @@ -111,28 +116,36 @@ def post(row, thread_id) @threads[thread_id] = url(row) else puts "\tWARNING: Could not post comment by #{data[:author_name]} on #{data[:created_at]}" + puts data.to_yaml + "\n---" @unconverted << data end end # Processing begins here... 
DB_PASS = ENV['DB_PASS'] -DB = Sequel.mysql(DB_NAME, :user=>DB_USER, :password=>DB_PASS, :host=>'localhost') +DB = Sequel.mysql(DB_NAME, + :user=>DB_USER, :password=>DB_PASS, :host=>'localhost', :encoding => 'utf8' +) USER_KEY = ENV['DISQUS_KEY'] FORUM_KEY = forum_key -LIMIT = "limit 10" -QUERY = "select * from wp_comments, wp_posts where wp_comments.comment_post_ID = wp_posts.ID and comment_type != 'pingback' #{LIMIT}" +QUERY = "select * from wp_comments, wp_posts where wp_comments.comment_post_ID = wp_posts.ID and comment_type != 'pingback'" DB[QUERY].each do |row| puts "Processing #{row[:comment_type]} comment #{row[:comment_ID]}..." thread_id = thread(row) post(row, thread_id) end -puts "Number of failures: #{@unconverted.length}" - +# Update all of the threads with the correct URL @threads.each do |tid,url| update(tid,url) end +# Print unconverted data to STDOUT as YAML +puts "Number of failures: #{@unconverted.length}" +puts "\n\n***UNCONVERTED POSTS***" +@unconverted.each do |data| + puts data.to_yaml + puts "***" +end diff --git a/_scripts/transfer_urls.rb b/_scripts/transfer_urls.rb new file mode 100644 index 0000000..772373a --- /dev/null +++ b/_scripts/transfer_urls.rb @@ -0,0 +1,78 @@ +require 'rubygems' +require 'rest_client' +require 'json' + +DISQUS_BASE = 'http://disqus.com/api/' +DISQUS = RestClient::Resource.new DISQUS_BASE + +SOURCE_URL = 'http://mark.reid.dev/iem/' +TARGET_URL = 'http://mark.reid.name/iem/' + +THREADS = { +10211725 => 'http://mark.reid.name/iem/behold-jensens-inequality.html', +10211748 => 'http://mark.reid.name/iem/feed-bag-a-simple-rss-archiver.html', +10211737 => 'http://mark.reid.name/iem/visualising-reading.html', +10211738 => 'http://mark.reid.name/iem/snuck-flied-and-wedded.html', +10211739 => 'http://mark.reid.name/iem/super-crunchers.html', +10211728 => 'http://mark.reid.name/iem/colt-2008-highlights.html', +10211784 => 'http://mark.reid.name/iem/staying-organised-with-citeulike-and-bibdesk.html', +10211740 => 'http://mark.reid.name/iem/constructive-and-classical-mathematics.html', +10211730 => 'http://mark.reid.name/iem/the-earth-is-round.html', +10211753 => 'http://mark.reid.name/iem/information-divergence-and-risk.html', +10211742 => 'http://mark.reid.name/iem/ml-and-stats-people-on-twitter.html', +10211720 => 'http://mark.reid.name/iem/a-meta-index-of-data-sets.html', +10211710 => 'http://mark.reid.name/iem/introducing-inductio-ex-machina.html', +10211755 => 'http://mark.reid.name/iem/artificial-ai.html', +10211733 => 'http://mark.reid.name/iem/machine-learning-summer-school-2009.html', +10211711 => 'http://mark.reid.name/iem/clarity-and-mathematics.html', +10211713 => 'http://mark.reid.name/iem/a-cute-convexity-result.html', +} + +# Gets the first forum key associated with USER_KEY +def forum_key + forum_list = get('get_forum_list', :user_api_key => USER_KEY) + forum_id = forum_list[0]['id'] + get('get_forum_api_key', :user_api_key => USER_KEY, :forum_id => forum_id) +end + +# Encapsulates the request, JSON parsing and error checking of a REST call to Disqus +def get(command, args) + path = command + '?' 
+ args.map {|k,v| "#{k}=#{v}"}.join('&') + response = JSON.parse( DISQUS[path].get ) + raise "Bad response to #{path}" unless response['succeeded'] + response['message'] +end + +def threads + thread_list = get('get_thread_list', :forum_api_key => FORUM_KEY) +end + +# Set the URL of the Disqus thread to the given value +def update(thread_id, url) + data = { + :forum_api_key => FORUM_KEY, + :thread_id => thread_id, + :url => url + } + + puts "Updating thread #{thread_id} with URL = #{url}" + response = JSON.parse( DISQUS['update_thread'].post(data) ) +end + +USER_KEY = ENV['DISQUS_KEY'] +FORUM_KEY = forum_key + +# Set the new URLs +# threads.each do |t| +# url = THREADS[t['id'].to_i] +# next if url.nil? +# update(t['id'], url) +# puts "Set thread #{t['id']} to #{url}" +# end + +# Check everything worked +threads.each do |t| + url = THREADS[t['id'].to_i] + next if url.nil? + puts "Thread #{t['id']} has #{url}" +end diff --git a/css/screen.css b/css/screen.css index a93174e..75c0b2e 100644 --- a/css/screen.css +++ b/css/screen.css @@ -96,6 +96,7 @@ hr { .left.inset { margin-left: 0 !important; } .list .title { font-weight: bold; } +.comments { font-size: smaller; display: block; float: right; color: silver;} .excerpt { color: black; } blockquote { diff --git a/iem/_posts/2009-01-06-information-divergence-and-risk.markdown b/iem/_posts/2009-01-06-information-divergence-and-risk.markdown new file mode 100644 index 0000000..d65e25b --- /dev/null +++ b/iem/_posts/2009-01-06-information-divergence-and-risk.markdown @@ -0,0 +1,37 @@ +--- +layout: post + +title: Information, Divergence and Risk for Binary Experiments +excerpt: A summary of a recent paper Bob and I posted to arXiv. +location: Canberra, Australia + +wordpress_url: http://conflate.net/inductio/?p=175 +wordpress_id: 175 +--- +[Bob Williamson][bob] and I have finished a [report][] outlining what we have been looking at for the last year or so and uploaded it to the arXiv. Weighing in at 89 pages, it covers a lot of ground in an attempt to unify a number of different classes of measures for problems that can be expressed as binary experiments. That is, where instances are drawn from two distributions. This includes binary classification, class probability estimation, and hypothesis testing. + +We show that many of the usual measures of difficulty for these problems — divergence, information and Bayes risk — are very closely related. We also look at ways in which members of each class of measure can be expressed in terms of "primitive" members of those classes. In particular, Fisher-consistent losses (also known as proper scoring rules) can be written as weighted sums of cost-sensitive losses while all f-divergences can be written as weighted sums of something akin to cost-sensitive variational divergence. These "Choquet representations" make it easy to derive Pinsker-like bounds for arbitrary f-divergences (not just KL divergence) as well as results similar to those of Bartlett et al. in their "[Convexity, classification and Risk Bounds][bartlett]". + +It should be made clear that many of these results are not new. However, what I like about our approach is that almost all of the results in the paper stem from two observations about convex functions: they are invariant under the Legendre-Fenchel bidual, and they have a second-order integral Taylor expansion with non-negative weights. + +If any of this sounds interesting, you should grab the full paper from the [arXiv][report]. 
Here's the abstract: + +> We unify f-divergences, Bregman divergences, surrogate loss bounds (regret bounds), +> proper scoring rules, matching losses, cost curves, ROC-curves and information. We +> do this by systematically studying integral and variational representations of these +> objects and in so doing identify their primitives which all are related to cost-sensitive +> binary classification. As well as clarifying relationships between generative and +> discriminative views of learning, the new machinery leads to tight and more general +> surrogate loss bounds and generalised Pinsker inequalities relating f-divergences to +> variational divergence. The new viewpoint illuminates existing algorithms: it provides a +> new derivation of Support Vector Machines in terms of divergences and relates +> Maximum Mean Discrepancy to Fisher Linear Discriminants. It also suggests new +> techniques for estimating f-divergences. + +Now that we have a good understanding of binary experiments the aim is to build on these results and extend this type of work to other forms of machine learning problems. High on the list are multi-category classification, ranking and regression problems. + +Questions, criticism, suggestions and pointers to related work we may have missed are all welcome. + +[bartlett]: http://www.citeulike.org/user/mdreid/article/510440 +[report]: http://arxiv.org/abs/0901.0356 +[bob]: http://axiom.anu.edu.au/~williams/ \ No newline at end of file diff --git a/iem/_posts/2009-01-16-ml-and-stats-people-on-twitter.markdown b/iem/_posts/2009-01-16-ml-and-stats-people-on-twitter.markdown new file mode 100644 index 0000000..bc5b7a7 --- /dev/null +++ b/iem/_posts/2009-01-16-ml-and-stats-people-on-twitter.markdown @@ -0,0 +1,78 @@ +--- +layout: post + +title: ML and Stats People on Twitter +excerpt: Wherein I compile a list of interesting people who use Twitter to discuss machine learning and statistics. +location: Canberra, Australia + +wordpress_url: http://conflate.net/inductio/?p=171 +wordpress_id: 171 +--- +I started using the social, "micro-blogging" service [Twitter][] in February this year simply because I had been seeing so much commentary about it — both good and bad. Since then, I've posted [800+ updates][me], amassed over 100 [followers][] and [follow][] nearly that many myself. + +[twitter]: http://twitter.com/ +[me]: http://twitter.com/mdreid/ +[follow]: http://twitter.com/mdreid/friends +[followers]: http://twitter.com/mdreid/followers + +What has surprised me about Twitter is how many people I have found on there who are active, or at least interested, in machine learning and statistics. The day-to-day discussions, questions, advice and pointers I've got via Twitter have been illuminating and fun. + +In an effort to get to know some of these people a bit better I followed the links they provided in their respective profiles to see what they had to say about themselves. The descriptions below are based only on those links as I don't find Google-stalking very friendly. + +So, in no particular order, here they are: + +Students +---------- +* [Tim Danford](http://twitter.com/arthegall) +A computer science [Ph.D. 
student at MIT](http://people.csail.mit.edu/tdanford/) + +* [Mark James Adams](http://twitter.com/mja) +"[I am a student of quantitative genetics and a temperamental psychologist](http://affinity.raysend.com/record/about/author)" + +* Dave Warde-Farley +[Computer science Masters student at Toronto](http://www.cs.toronto.edu/~dwf/) working in machine learning + +* [Amir massoud Farahmand](http://twitter.com/SoloGen) +Ph.D. student looking at manifold learning (amongst other things) at the [University of Alberta](http://www.cs.ualberta.ca/~amir/). Runs the blog [thesilog](http://thesilog.sologen.net/). + +* [Markus Weimer](http://twitter.com/markusweimer) +Graduate student working on "[applications of machine learning to eLearning](http://weimo.de/about)". Also runs a [blog](http://weimo.de/). + +* [Ryan Rosario](http://twitter.com/DataJunkie) +Statistics and computer science graduate student. + +* [A.M. Santos](http://twitter.com/ansate) +Maths and statistics graduate student. + +Non-students +--------------- +* [Neal Richter](http://twitter.com/nealrichter) +Runs the blog [aicoder](http://aicoder.blogspot.com/) + +* [Brendan O'Connor](http://twitter.com/brendan642) +[Research assistant](http://anyall.org/) in NLP at Stanford and consultant at [Dolores Labs](http://blog.doloreslabs.com/) + +* [Daniel Tunkelang](http://twitter.com/dtunkelang) +Chief scientist at the information retrieval company Endeca and owner of the blog [The Noisy Channel](http://thenoisychannel.com/) + +* [Jason Adams](http://twitter.com/ealdent) +Computational linguist working on sentiment analysis. Runs the blog [The Mendicant Bug](http://mendicantbug.com/). + +* [Mikio Braun](http://twitter.com/mikiobraun) +Post-doc at Technische Universität Berlin and a machine learning blogger at [Marginally Interesting](http://mikiobraun.blogspot.com/). + +* [Daniel Lemire](http://twitter.com/lemire) +Professor of computer science at the University of Quebec at Montreal and [blogger](http://www.daniel-lemire.com/blog/). + +* [Jason H. Moore](http://twitter.com/moorejh) +Professor of Genetics, Director of Bioinformatics at Dartmouth Medical School. Works on the [Multi-factor Dimensionality Reduction](http://sourceforge.net/projects/mdr/) software MDR and blogs at [Epistasis](http://compgen.blogspot.com/). + +* [Pete Skomoroch](http://twitter.com/peteskomoroch) +Director of analytics at Juice Analytics and [Data Wrangling](http://www.datawrangling.com/) blogger. + +* [Alex Smola](http://twitter.com/smolix) +Principal Researcher at Yahoo! Research and ex-colleague of mine at [NICTA](http://nicta.com.au) and the [ANU](http://anu.edu.au), a.k.a. "Mr. Kernel" + +If you are not on this list but think you should be, leave a comment below and I'll update this list. Conversely, if I've put you on this list and you don't wish to be associated with these sorts of people, leave a comment or send me an email and I'll remove you. + +Of course, feel free to follow [me][] if you'd like to keep up with what I'm doing. \ No newline at end of file diff --git a/inductio.csv b/inductio.csv deleted file mode 100644 index 939eed1..0000000 --- a/inductio.csv +++ /dev/null @@ -1,158 +0,0 @@ -id title slug time_modified date excerpt status content -2 About about 2008-06-17 08:52:28 2007-09-11 11:30:09 publish Inductio Ex Machina is [Mark Reid][me]'s machine learning research blog. \n \nI'm a [research fellow][me_anu] with the Statistical Machine Learning group at the [Australian National University](http://anu.edu.au) in Canberra. 
\n \nCurrently, I'm investigating representations of learning problems and looking at connections between them. The aim is to (eventually) build a conceptual map of machine learning in its many and varied flavours. That's still a long way off yet - I'm still orienting myself and scanning for landmarks. More details can be found at my [academic site](http://rsise.anu.edu.au/~mreid/). \n \nIf you want to keep up with what I'm reading you can check out my [CiteULike](http://www.citeulike.org/user/mdreid) profile or you may want to join the [Statistical Machine Learning](http://www.citeulike.org/groupfunc/3808/home) group I started there. \n \nYou can find out more about me at my [eponymous website](http://mark.reid.name). \n \n[me]: http://mark.reid.name \n[me_anu]: http://users.rsise.anu.edu.au/~mreid -12 Introducing Inductio Ex Machina introducing-inductio-ex-machina 2007-09-24 05:06:51 2007-09-22 07:45:48 publish Welcome to my machine learning research blog. \n \nI'm a newly minted Ph.D. graduate who has taken up a post-doctoral research fellowship with the \n[Statistical Machine Learning][sml] group within the [Computer Sciences Laboratory][csl] at \nthe Australian National University. \n \n[sml]: http://csl.rsise.anu.edu.au/sml \n[csl]: http://csl.cecs.anu.edu.au/ \n \nThe plan here is to present for discussion any papers, questions or ideas related to machine \nlearning. In particular, I'll probably concentrate on my past and present interests, including: \nstatistical learning theory, dimensional reduction, transfer learning, rule learning and the \nphilosophy of induction. \n \nAs well as being an introduction, this post also plays a second role, which is to claim this blog \nas part of my Technorati Profile \nthrough the inclusion of that previous link. -15 Anti-Learning anti-learning 2007-10-03 03:41:54 2007-10-03 03:03:53 publish Last week I saw an interesting PhD monitoring [presentation][] by [Justin Bedo][] on the \ncounter-intuitive phenomenon of "anti-learning". For certain datasets, learning a classifier from a small number of samples and inverting its predictions performs much better than the original classifier. Most of the theoretical results Justin mentioned are discussed in a recent [paper][] and [video lecture][] by [Adam Kowalczyk][]. These build on [earlier work][] presented at ALT 2005. As John notes in his [blog post][] from a couple of years ago, the strangeness of anti-learning is due to our assumption that proximity implies similarity. \n \nThis anti-learning effect has been observed in naturally occurring esophageal adenocarcinoma data: a binary classification problem with nearly 10,000 features. In his talk, Justin presented evidence that the effect was real (by constructing a null hypothesis through repeated shuffling of the data labels) and relatively invariant to choice of learning algorithm. \n \nLike any good scientist, Justin and his colleagues replicated the phenomenon in a simpler, \nsynthetic model in order to better understand what might be happening. The model proposed is one that models competition between features: if one feature has a large value the others are small but in the opposite direction and examples from different classes have distinct large features pointing in opposite directions. This results in examples from opposite classes being more similar (_i.e._, they have a larger positive inner product) than examples from the same class. 
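To make that construction concrete, here is a small toy sketch in Ruby (my own illustration, not the actual model from the talk or paper): each example has one large "own" feature and small opposite-signed values elsewhere, with the signs flipped between classes, so cross-class inner products come out positive while within-class ones are negative.

    # Toy competing-features data (illustrative only): example i has a large
    # value on its own feature and small opposite-signed values elsewhere.
    def example(own, dim, sign)
      Array.new(dim) { |j| j == own ? sign * 10.0 : sign * -1.0 }
    end

    # Inner product of two feature vectors.
    def dot(u, v)
      u.zip(v).map { |a, b| a * b }.sum
    end

    dim = 4
    pos = [example(0, dim, 1), example(1, dim, 1)]    # class +1
    neg = [example(2, dim, -1), example(3, dim, -1)]  # class -1

    puts dot(pos[0], pos[1])  # => -18.0 (same class, dissimilar)
    puts dot(pos[0], neg[0])  # => 18.0  (opposite class, similar)

A similarity-based classifier trained on data like this will systematically side with the wrong class, which is the anti-learning behaviour described above.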
At a stretch, this model is also biologically plausible if features are expressions of competing entities in a cell. \n \nThe algorithm proposed to deal with anti-learning uses some of the data available at training \ntime to test whether it has anti-learning characteristics and, if so, inverts the resulting \nclassifier. This "burns" some of the information in the training data but can dramatically \nimprove performance when anti-learning is correctly identified. \n \nIt's an interesting example of a trade-off that can be made between data and background \nknowledge. With relatively few examples and the knowledge that you are in an anti-learning situation, you can flip classifications and do very well. As the amount of data available increases, the learning algorithm will converge to a good classifier, the assumption is less valuable and flipping classifications is costly. \n \n[adam kowalczyk]: http://users.rsise.anu.edu.au/~akowalczyk/ \n[paper]: http://adamk.antilearning.googlepages.com/ecml07.pdf \n[video lecture]: http://videolectures.net/mlss06au_kowalczyk_al/ \n[justin bedo]: http://holly.ath.cx/ \n[blog post]: http://hunch.net/?p=35 \n[presentation]: http://cecs.anu.edu.au/seminars/showone.pl?SID=523 \n[earlier work]: http://www.springerlink.com/content/e3ey7r6yxu68fye6/ -37 Visualising ROC and Cost Curve Duality visualising-roc-and-cost-curve-duality 2008-04-21 05:57:42 2008-04-21 05:57:42 Discussion of the point-line duality between Drummond and Holte's cost curves and ROC curves. An applet is provided to help visualise this relationship. publish I've been looking into the relationships between losses, divergences and other measures of predictors and problems recently and came across a 2006 paper by Drummond and Holte entitled Cost Curves: An improved method for visualizing classifier performance. This paper describes a representation of classifier performance that is very closely related to the usual ROC curve. However, unlike ROC plots of (False Positive Rate, True Positive Rate)-points for various operating conditions of the classifier, cost curves show (cost, risk)-points. That is, for each cost plotted on the x axis, the y co-ordinate shows the cost-weighted loss for the classifier. \n \nAs explained in Drummond and Holte's paper, there is a simple point-line duality between ROC space and Cost-Loss space based on the definition of cost-weighted loss. If [tex](FP,TP)[/tex] is a point in ROC space then the cost-loss relationship [tex](c, L)[/tex] is linear and satisfies \n
\n[tex] \displaystyle L = (1-\pi) c FP + \pi (1-c) (1 - TP) [/tex] \n
\nwhere [tex]c[/tex] is the cost of a false positive and [tex]\\pi[/tex] the prior probability of the positive class[^1]. \n \nGiven a specific [tex]\\pi[/tex] this relationship is completely invertible. A point [tex](c,L)[/tex] in cost-loss space corresponds to the following line in ROC space \n
\n[tex]\displaystyle TP = \frac{(1-\pi) c}{\pi(1-c)} FP + \frac{\pi(1-c) - L}{\pi(1-c)}.[/tex] \n
\n \nMy ability to intuitively grasp this duality relationship was not that great so I hacked together the following applet to help. On the right is a black curve in ROC space representing five (False Positive, True Positive) rates for some imaginary classifier. The points are (0,0), (0.1, 0.5), (0.3, 0.8), (0.7, 0.95) and (1,1). The diagonal grey line on the ROC plot represents the performance of random classifiers - each increase in True Positive rate is matched by an equal increase in False Positive rate. \n \nThe left plot, entitled "Cost Space", shows the (cost,loss) duals of both the black and grey curves from the right-hand plot. The grey diagonal on the right corresponds to a "tent" on the left that represents the best performance of a classifier that constantly predicts a single class. \n \n[Embedded Java applet: an interactive, side-by-side visualisation of cost space (left) and ROC space (right). Requires the Java plug-in.]
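For readers without the Java plug-in, here is a minimal sketch (in Ruby, with names of my own choosing) of the point-to-line conversion the applet performs, using the cost-weighted loss relationship above:

    # Map an ROC point (fp, tp) to its dual line in cost space:
    # L(c) = (1 - pi) * c * fp + pi * (1 - c) * (1 - tp).
    def cost_line(fp, tp, pi)
      lambda { |c| (1 - pi) * c * fp + pi * (1 - c) * (1 - tp) }
    end

    line = cost_line(0.1, 0.5, 0.5)   # the ROC point (0.1, 0.5) above
    puts line.call(0.5).round(3)      # => 0.15, the loss at cost c = 0.5

Sweeping c over [0,1] for each ROC point traces out exactly the lines the applet draws in the left-hand plot.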
\n \nIf you click in the applet area you can get a feel for the relationship between these two representations. When you move your mouse over ROC space you will see the corresponding line in cost space. Conversely, when you move your mouse over the cost space plot you will see the dual line in ROC space. \n \nThe bar at the bottom of the two plots controls the prior probability [tex]\pi[/tex]. You can see how the dual curve in cost space changes as this parameter is modified. \n \nThe code for this applet is available through [GitHub](http://github.com). The visualisation aspects are written in [Processing](http://processing.org) and are [available here](http://github.com/mreid/siroc/). This relies on some [Java code](http://github.com/mreid/geovex/) I also wrote that does the point-line conversions. \n \n[Chris Drummond](http://www.site.uottawa.ca/~cdrummon/) has also created an [applet](http://www.site.uottawa.ca/~cdrummon/rocconversion.html) to do the same kind of conversion. The one here can be seen as complementary since his version allows the user to add data points and construct curves whereas mine just aims to make the key relationship interactive. \n \n[^1]: My description here differs slightly from Drummond and Holte's in that I am keeping priors and costs separate and not normalising the loss. -16 Antihubrisines antihubrisines 2007-10-03 05:39:36 2007-10-03 05:39:36 publish In keeping with the "Anti-" theme from my [last post][] I thought I'd share something I found in the treasure trove of [rants][] that [J. Michael Steele][] has put on the web for our edification.\n\nAntihubrisines, according to John W. Tukey in his 1986 paper, [Sunset Salvo][], are little pearls of wisdom to keep in mind if you suspect you are being afflicted by hubris. They are to "suffering philosophy" what antihistamines are to suffering sinuses:\n> To statisticians, hubris should mean the kind of pride that fosters \n> an inflated idea of one's powers and thereby keeps one from being \n> more than marginally helpful to others. ... The feeling of "Give me\n> (or more likely even, give my assistant) the data, and I will tell\n> you what the real answer is!" is one we must all fight against again\n> and again, and yet again.\n\nIncluded in Tukey's prescription are a number of strains of advice, both qualitative and quantitative. Among my favourites is this very bracing tonic that should be administered whenever you plan to start number crunching:\n> The data may not contain the answer. The combination of some data \n> and an aching desire for an answer does not ensure that a \n> reasonable answer can be extracted from a given body of data.\n\n[last post]: http://conflate.net/inductio/theory/anti-learning\n[rants]: http://www-stat.wharton.upenn.edu/~steele/Rants.htm\n[j. michael steele]: http://www-stat.wharton.upenn.edu/~steele/\n[sunset salvo]: http://www-stat.wharton.upenn.edu/~steele/HoldingPen/SunsetSalvo.pdf -18 The Mathematical Grue the-mathematical-grue 2007-10-19 22:54:58 2007-10-19 06:57:15 publish A [discussion over at God Plays Dice][discussion] had me nodding in agreement: proving a theorem is like playing an adventure game. As Isabel puts it \n \n> You are in a maze of twisty little equations, all alike \n \nalluding to a particularly fiendish puzzle in the text adventure [Colossal Cave][]. \n \nHaving recently grappled with some tricky proofs I was wondering how they might play out as a piece of interactive fiction... \n \n You are sitting before a particularly thorny conjecture. 
\n Possible proofs lead away from here in several directions. \n \n > inventory \n \n You are carrying the following items: \n A ream of blank paper \n A pencil \n The Cauchy-Schwarz inequality \n Some half-remembered undergraduate mathematics \n \n > look conjecture \n \n You stare blankly at the conjecture. You think it might \n have something to do with convexity. \n \n > w \n \n You surf over to Wikipedia and read up on sub-tangents. \n The notation makes you confused. \n \n There is a lemma here. \n \n > take lemma \n \n Taken. \n \n > e \n \n You wander off to go get a bite to eat and some coffee. \n \n You see a colleague here. \n \n > talk colleague \n \n After explaining your conjecture your colleague mutters \n that it was probably proven in the 50s by a Russian. \n \n > s \n \n You sit back down at your desk and spend half an hour \n reading pages linked to from reddit. \n \n You see an unproved conjecture here. \n \n > use lemma \n [on the conjecture] \n \n With a bit of manipulation you turn the equation into one \n involving the expectation of a product. \n \n > use Cauchy-Schwarz \n [on the conjecture] \n \n Hooray! You now have a tight bound on a key quantity, \n proving your conjecture. \n \n > generalise assumptions \n \n Your theorem was eaten by a Grue. \n \n[discussion]: http://godplaysdice.blogspot.com/2007/10/you-are-in-maze-of-twisty-little.html \n[colossal cave]: http://en.wikipedia.org/wiki/Colossal_Cave_Adventure#Maze_of_twisty_little_passages -47 Scoring Rules and Prediction Markets 2008-08-11 11:59:12 0000-00-00 00:00:00 draft [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n \n[robin hanson]: http://hanson.gmu.edu/ \n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/ \n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule \n \nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area. Consequently, what I present here will be -- for my sake -- a very simplified view. \n \nTrading Cash for Probability \n------------------------------- \nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n \nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair: \n \n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n \nIf I'm 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. 
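To make the arithmetic explicit, a quick sketch (nothing here beyond the numbers above) of how a believed probability values each contract:

    # Expected value of a $1 contingent contract under a believed probability.
    def value(prob)
      1.0 * prob
    end

    puts value(0.5)                  # contract A (or B) to me: rain is 50/50
    puts value(0.3)                  # contract A to you, with a 30% belief in rain
    puts (value(0.7) - 0.5).round(2) # your expected profit buying B at $0.50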
\n \nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n \nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n \nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either. \n \nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10. \n \nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain. \n \nProper Scoring Rules \n------------------------ \nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as "proper scoring rules" was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4] \n \nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a "report" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. \n \nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]\langle s(r), w \rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]\langle s(r), w \rangle = s_1(r)[/tex] and similarly the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex]. \n \nIf you know the scoring rule I use in advance then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \n \nIn order to ensure you report what you really believe to be the true probabilities I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then \n
\n[tex] \n\\displaystyle \n\\max_{r} \\mathbb{E}_p \\langle s(r), w \\rangle = \\mathbb{E}_p \\langle s(p), w \\rangle . \n[/tex] \n
\nScoring rules that meet this criterion are described as "proper" or "Fisher consistent". \n \nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \n
\n[tex] \n\\displaystyle \n\\mathbb{E}_p \\langle s(r), w \\rangle = \\langle s(r), \\mathbb{E}_p w \\rangle = \\langle s(r), p \\rangle \n[/tex] \n
\nsince [tex]\\mathbb{E}_p w = p[/tex]. If everything is suitably differentiable the Fisher consistency (or "properness") condition requires that the derivatives of the scoring rule satisfy, for all [tex]p[/tex], \n
\n[tex] \n\\displaystyle \n\\langle \\frac{\\partial}{\\partial r_i} s(p), p \\rangle = 0. \n[/tex] \n
\nThat means the derivatives of the scoring rule must be orthogonal to [tex]p[/tex]. \n \nMarket Scoring Rules \n------------------------ \n \nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's. \n \nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here. \n \n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/ \n \n[This leads to a telescoping rule for MSRs] \n \nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc. \n \nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out. \n \nPrediction Markets in the Wild \n---------------------------------- \n \nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago. \n \nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage. \n \n[hubdub]: http://www.hubdub.com/ \n[david pennock]: http://dpennock.com/ \n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/ \n[ec08]: http://www.sigecom.org/ec08/ \n \nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making). \n \nResearch shows that in the areas where they have been used, prediction markets are [powerful][]. \n \n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election. \n \n[powerful]: http://artificialmarkets.com/ \n[electoralmarkets]: http://www.electoralmarkets.com/ \n[john]: http://hunch.net/?p=396 \n[intrade]: http://www.intrade.com/ \n \nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance. 
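Returning to the properness condition above, here is a small numerical sketch (my own example using the quadratic, or Brier, scoring rule — not anything from Hanson's talk) showing that the expected payoff is maximised by reporting the true probability:

    # Quadratic (Brier) scoring rule for a binary event: the payment depends
    # on the reported probability r1 and on whether the event occurred.
    def expected_payoff(p1, r1)
      s1 = 1 - (1 - r1)**2  # payment if the event occurs
      s0 = 1 - r1**2        # payment if it does not
      p1 * s1 + (1 - p1) * s0
    end

    true_p1 = 0.3
    best = (0..100).map { |i| i / 100.0 }.max_by { |r1| expected_payoff(true_p1, r1) }
    puts best  # => 0.3, the truthful report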
\n \n[pam]: http://dpennock.com/pam.html \n \n \n[book and market maker]: http://blog.commerce.net/?p=251 \n \nReferences \n------------ \n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003). \n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956). \n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971). \n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -20 Principles of Learning Problem Design principles-of-learning-problem-design 2007-11-20 05:32:36 2007-11-20 05:32:36 publish Things have been a little quiet around here of late, mainly because I've been working on a submission for the NIPS 2007 Workshop on [Principles of Learning Problem Design][nipsws] in early December. \n \nI'm pleased to say that I'll be presenting some recent results that [Bob][] and [I][me] have been working on under the heading of "Representations in Learning Task Design". The focus is on finding *primitives* and *combinators* for describing learning tasks (Aside: "problems" are what you are trying to solve, "tasks" are what you give to computers to solve them). \n \nUnsurprisingly, [cost-sensitive losses][csl] are one such primitive and when combined using weighted integration they can represent a variety of losses for a range of learning tasks including classification, regression and class probability estimation. \n \nSince this is a workshop paper, most of the results are still fairly preliminary and build on a lot of work by others. That said, I think it's a good approach as several previously known results are subsumed and simplified. I'll post our paper and slides once they are completed. \n \nLet me know if you are attending NIPS and we can try to catch up. Hope to see you at [Whistler][]. \n \n[nipsws]: http://hunch.net/~learning-problem-design/ \n[bob]: http://users.rsise.anu.edu.au/~williams/ \n[me]: http://users.rsise.anu.edu.au/~mreid/ \n[csl]: http://www-cse.ucsd.edu/users/elkan/rescale.pdf \n[whistler]: http://nips.cc/Conferences/2007/Program/schedule.php?Session=Workshops -21 A Crash Course in Convex Analysis a-crash-course-in-convex-analysis 2007-12-18 23:20:43 2007-12-18 23:20:43 publish I've been attempting to read an interesting [NIPS 2007][] paper entitled [Estimating divergence functionals and the likelihood ratio by convex risk minimization][Nguyen et al 2007] and realised my knowledge of convex analysis was sketchy at best. \n \nFortunately, [Wikipedia][] pointed me to an excellent [summary of the Legendre-Fenchel transformation][LF transform] by [Hugo Touchette][]. A bit more digging around Hugo's site led me to a great [cheat sheet][] for convex analysis, covering many of the concepts that were causing me trouble. \n \nGreat stuff! 
\n \n[Nguyen et al 2007]: http://books.nips.cc/papers/files/nips20/NIPS2007_0782.pdf \n[Wikipedia]: http://en.wikipedia.org/wiki/Convex_conjugate \n[LF transform]: https://www.maths.qmul.ac.uk/~ht/archive/lfth2.pdf \n[Hugo Touchette]: http://www.maths.qmul.ac.uk/~ht/index.html \n[cheat sheet]: http://www.maths.qmul.ac.uk/~ht/archive/convex1.pdf \n[NIPS 2007]: http://books.nips.cc/nips20.html -22 NIPS 2007 Highlights nips-2007-highlights 2008-07-17 23:41:43 2007-12-21 14:50:03 private I attended my first NIPS conference this month and had a great time and was \nable to put faces to many names I've encountered on papers recently. \n \nThe quantity and quality of work was somewhat overwhelming but I hear from \nmore seasoned NIPS attendees that this is par for the course. What follows are a \nfew topics and presentations that caught my attention during the conference and \nworkshop sessions this year. \n \n \n \n \nThe Conference \n-------------- \n \n \nThe Workshops \n------------- \nI attended two of the 19 workshops: \n[Principles of Learning Problem Design][plpd] (where I gave a talk on some of \nthe [representations][] work [Bob][] and I have been working on) and \n[Representations and Inference on Probability Distributions][ripd]. \n \n[Bob]: http://axiom.anu.edu.au/~williams/ \n[representations]: http://users.rsise.anu.edu.au/~mreid/research/ \n[plpd]: http://hunch.net/~learning-problem-design/ \n[ripd]: http://nips2007.kyb.tuebingen.mpg.de/ \n \nThere was a wide variety of work in the first of these workshops but the one \nthat I found most interesting was the poster that [Steve Hanneke][] talked me \nthrough on [Asymptotic Active Learning][aal]. Previous results show that active \nlearning is not any more powerful than standard supervised learning in terms of \nthe number of labelled samples required to beat a particular error rate with \nconfidence. The new result here is that a slight weakening of the usual PAC \nmodel - only requiring the learner find a good hypothesis rather than requiring \nthat the learner also "know" it is a good hypothesis - leads to sample \ncomplexities that are polylogarithmic (rather than linear) in the reciprocal of \nthe error rate. \n \n[Steve Hanneke]: http://www.cs.cmu.edu/~shanneke/ \n[aal]: http://hunch.net/~learning-problem-design/gamma.pdf \n \nIn the Representations and Inference on Probability Distributions workshop there \nwere several talks and posters I found interesting. I really enjoyed both the \ncontent and presentation of [Andrew McGregor][]'s talk on \n[Sketching and Streaming for Distributions][ssfd] \n \n[Andrew McGregor]: http://talk.ucsd.edu/andrewm/ \n[ssfd]: http://talk.ucsd.edu/andrewm/slides/07-nipsslides.pdf \n \n* Conjugate Projection Limits: \n [Peter Orbanz][] has some work looking at how to extend distribution updates \n followed by projections in infinite dimensional spaces. \n \n[Peter Orbanz]: https://www.ml.inf.ethz.ch/people/phdstudents/porbanz \n \n* Fourier decompositions of distributions over permutations: \n Very cool group theory-based work to define how you can represent \n distributions over permutations in terms of group characters. \n \n* Efficiently estimating divergences between distributions: \n Andrew McGregor gives some results on "sketching" distributions and uses \n results from communication theory to show that only L1 and L2 divergences \n can be (space) efficiently learnt from data streams (no revisiting data). 
\n \n* Fréchet derivatives(?): \n Used in Maya Gupta's talk on extending divergences beyond vectors. \n \nInsights \n-------- \nMaya Gupta and colleagues' presentation started from the observation that the \nmean of a set of points is the least-squares minimiser and others' results \nshowing that Bregman divergences can be seen as the quantity to be minimised \nwhen other summary statistics are used. Using a nice observation that the \ndefinition of a Bregman divergence for F, $d_F(x,y)$ can be seen as the tail of \na Taylor series expansion \n\[ \n\tF(x) = F(y) + \nabla F(y)(x - y) + d_F(x,y) \n\] \nthey are able to generalise divergences to objects other than vectors \n(e.g., functions). \n \nLe Song's presentation on the use of RKHSs to estimate expectations of functions \nfrom samples was neat. The trick here was that the mean of the samples in the \nHilbert space can "encode" information about the higher order moments of the \nsamples in the original space. The expectation of a function with respect to the \nsamples can be written as the inner product of the function and the mean in the \nHilbert space. They show that approximating the sample mean well by an estimate \nof the density function in the Hilbert space will give good approximations in the \noriginal space of the expectation (w.r.t. the sample distribution) of any \nfunction. This differs from the use of moment generating functions in that \ninfinitely many moments of the sample distribution can be estimated \nsimultaneously. -23 A Cute Convexity Result a-cute-convexity-result 2008-02-04 22:35:54 2008-02-04 04:06:18 publish Just when I thought I was starting to get my head around the multitudinous uses of convexity in statistics I was thrown by the following definition: \n \n> A function f over the interval (a,b) is convex if, for all choices of {x,y,z} \n> satisfying a < x < y < z < b the determinant \n> \n>
[tex] \\displaystyle \\left| \\begin{array}{ccc} 1 & 1 & 1 \\\\ x & y & z \\\\ f(x) & f(y) & f(z) \\end{array}\\right|[/tex]
\n> \n> is non-negative. \n \nAfter expanding the determinant and some algebraic twiddling I realised that this is just a very compact way of requiring that \n
\n[tex]\\displaystyle\\frac{z-y}{z-x} f(x) + \\frac{y-x}{z-x}f(z) \\geq f(y)[/tex] \n
\nwhich, after noticing that (z-y) + (y-x) = (z-x), of course is the more traditional way of saying a function is convex. \n \nWhat's neat about this determinant representation is that it extends nicely to what are known as kth-order convex functions (ones whose derivatives up to order k are convex). Specifically, f is k-convex whenever [tex]\\{x_i\\}_{i=0}^{k+1}[/tex] satisfy [tex]a < x_0 < \\ldots < x_{k+1} < b [/tex] and \n
\n[tex] \displaystyle \left| \n \begin{array}{ccc} \n 1 & \cdots & 1 \\ \n x_0 & \cdots & x_{k+1} \\ \n x_0^2 & \cdots & x_{k+1}^2 \\ \n \vdots & \ddots & \vdots \\ \n x_0^k & \cdots & x_{k+1}^k \\ \n f(x_0) & \cdots & f(x_{k+1}) \n \end{array} \right| \geq 0.[/tex] \n \n \nWhile it is arguably less transparent than explicitly writing out all the convexity inequalities for each of the derivatives of f, it certainly makes up for it with compactness. -24 Staying Organised with CiteULike and BibDesk staying-organised-with-citeulike-and-bibdesk 2008-02-07 06:42:05 2008-02-07 06:42:05 publish I recently started using [CiteULike][] to keep track of papers I read. For those not familiar with it, it deems itself to be "a free online service to organise your academic papers". In contrast to my offline bibliography organising tool, [BibDesk][], a service like this has at least three main advantages: \n \n* Reduced data entry: If someone else has already entered the article's details or CiteULike can scrape them from a journal's web page I don't have to enter them myself. \n \n* Easy access: The bibliography is stored on the web making access away from my machine or by others straightforward. It's also possible to upload copies of papers for personal use in case you're not able to get to your university's reverse proxy. \n \n* Social networks: When I see that someone else has read a paper I'm interested in I can easily see what else that person has read. I can also look at all the other papers that people have associated with a particular tag. \n \nLike Yaroslav, who also uses CiteULike as part of his [larger strategy][yaroslav] for staying organised, I have started using the Notes field for entries to keep track of important theorems, equations and cross-references of papers that I go over more than once. \n \nOf course, once you've collected a bunch of papers you can also export your bibliography as BibTeX or RIS so you can include citations in your papers. This is especially convenient with BibDesk. All I do is open a "New External File Group" in BibDesk and point it to the URL for my CiteULike account: `http://www.citeulike.org/bibtex/user/mdreid`. BibDesk keeps track of which external entries have or haven't been imported into your offline BibTeX file making it easy to quickly build a conference-specific bibliography. \n \nI find this BibDesk and CiteULike combination the best of both worlds as it reduces the amount of data entry I need to do while still making it easy to push citations to [TextMate][] or [LyX][] when I'm writing. \n \n[citeulike]: http://citeulike.org/user/mdreid \n[bibdesk]: http://bibdesk.sourceforge.net/ \n[yaroslav]: http://yaroslavvb.blogspot.com/2008/02/strategies-for-organizing-literature.html \n[textmate]: http://macromates.com/ \n[lyx]: http://www.lyx.org/ -25 Clarity and Mathematics clarity-and-mathematics 2008-03-06 23:06:21 2008-02-19 10:36:34 publish John Langford has diagnosed a [complexity illness][] that afflicts research in academia. One of its symptoms is what he calls "Math++": the use of unnecessary and obfuscatory mathematics to improve the author's chance of publication. \n \nHaving recently ploughed through a large number of math-heavy articles during the preparation of a [COLT][] paper I have started to worry whether the illness is contagious. At present there is a rash of awkward notation breaking out in some sections of my draft. While I don't think I can completely get rid of it I'm hoping that I can at least tidy it up and turn it into something presentable. \n \nWanting to tidy up awkward mathematical expression is definitely not the same as wanting to remove it completely. To switch [analogies][], maths is akin to a communications channel. 
The aim of the encoder is to cram information down the line so it can be decoded at the other end. Good mathematical notation encodes frequently occurring concepts with short, memorable terms and takes advantage of relationships between concepts. Using a side-channel -- e.g., the English text of the paper -- to ease the burden of decoding is also a good strategy. \n \nJohn also suggests treating Math++ (and other forms of complexity) with education. This doesn't necessarily mean giving a lecture on your research; any attempt at communication counts. I've found that attempting to describe what I'm working on over lunch - and without a whiteboard - can be a good way to focus on the story of your research rather than the technicalities. I find technical details of a paper much easier to understand when I understand their motivation. \n \nEven if I don't completely cure my paper of Math++, I take some solace from Fernando Pereira who [points out][pereira] that research is a form of dialogue and that dialogue is inherently messy, which is sometimes the reason mathematical exposition is less than perfect. It's only through repeated attempts to communicate ideas that one is able to figure out what is important. \n \n[pereira]: http://earningmyturns.blogspot.com/2008/02/complexity-illness.html \n[complexity illness]: http://hunch.net/?p=316 \n[COLT]: http://www.learningtheory.org/ \n[analogies]: http://apperceptual.wordpress.com/2007/12/20/readings-in-analogy-making/ \n -26 A Meta-index of Data Sets a-meta-index-of-data-sets 2008-02-22 04:43:37 2008-02-22 04:40:00 publish I had to go hunting around for some data to try some new ideas on recently. \nAs [handy][google results] as Google is, there's still a fair bit of \nchaff from which to sort the wheat. \n \n[google results]: http://google.com/search?q=machine+learning+data+sets \n \nFortunately, there is a lot of good stuff out there including well-organised \nindexes of data sets for various purposes. For my future reference (and for \nanyone else that may be interested) here are some of the better data set lists \nI found. \n \n*\t**UCI Repositories**: \n\tNo list of lists would be complete without this perennial [collection][uci] \n\tof machine learning data sets hosted by the University of California, \n\tIrvine. They also have a [repository of large data sets][kdd] for \n\tknowledge discovery in databases (KDD). \n \n[kdd]: http://kdd.ics.uci.edu/ \n[uci]: http://archive.ics.uci.edu/ml/ \n \n*\t**The Info**: \n\tThis [site][theinfo] "for people with large data sets" has a community \n\teditable [list of data sets][theinfo data] organised by topic. The \n\tcollection here has a web/text focus. \n \n[theinfo]: http://theinfo.org \n[theinfo data]: http://theinfo.org/get/data \n \n*\t**Text Retrieval**: \n\tThis [list][trec] kept by NIST has data sets for each of the various \n\ttracks at the Text Retrieval Conference, including data sets for \n\t[spam detection](http://trec.nist.gov/data/spam.html), \n\t[genomics](http://trec.nist.gov/data/genomics.html), \n\tand a [terabyte](http://trec.nist.gov/data/terabyte.html) track \n\t(although the data sets aren't quite up to a terabyte yet). \n \n[trec]: http://trec.nist.gov/data.html \n \n*\t**Time Series Data Library**: \n\tThis [collection][tsdl] has a large number of time-varying data sets from \n\tfinance, demography, physics, sport and ecology.
\n \n[tsdl]: http://www-personal.buseco.monash.edu.au/~hyndman/TSDL/ \n \n*\t**DMOZ Directory of Data Sets**: \n\tThis is a good [starting point][dmoz] for more lists of data sets for \n\tmachine learning. \n\t \n\tParts of DMOZ itself are [available in RDF][dmoz data] as a data set for \n\tresearchers. There is also a [processed version][dmoz processed] made \n\tavailable as part of the PASCAL [Ontology Learning Challenge][]. \n \n[dmoz]: http://www.dmoz.org/Computers/Artificial_Intelligence/Machine_Learning/Datasets/ \n[dmoz data]: http://rdf.dmoz.org/ \n[dmoz processed]: http://olc.ijs.si/dmozReadme.html \n[Ontology Learning Challenge]: http://olc.ijs.si/ \n \n*\t**Royal Statistical Society**: \n\tThis [collection][rss data] contains data sets used in research published in \n\tthe [journal of the Royal Statistical Society][rss]. This is an admirable \n\tidea that I wish more journals would take up. \n \n[rss data]: http://www.blackwellpublishing.com/rss/ \n[rss]: http://www.rss.org.uk/ \n \nAs well as the above institution or community organised lists, I also came \nacross some maintained by individuals. \n \n*\t**Daniel Lemire**: \n\tDaniel Lemire's "[Data for Database Research][lemire]" is organised by \n\tapplication areas, including data for earthquakes, weather, finance, climate \n\tand blogs. \n \n[lemire]: http://www.daniel-lemire.com/blog/data-for-data-mining/ \n \n*\t**Peter Skomoroch**: \n\tThe [list of data sets][skomoroch] over at [Data Wrangling][] is similar \n\tin spirit to the one here. \n \n[skomoroch]: http://www.datawrangling.com/some-datasets-available-on-the-web.html \n[Data Wrangling]: http://www.datawrangling.com/ \n \nA few specific data sets caught my eye, some new, and some I just hadn't seen \nbefore. \n \n*\t**Freebase Wikipedia Extraction**: \n\tThe [Wikipedia WEX][wex] data set is \n\tessentially a large (57 GB) graph of articles from Wikipedia. \n \n[wex]: http://download.freebase.com/wex/ \n\t \n*\t**Enron Email**: \n\tThis [collection of email][enron] (400 Mb compressed) between Enron staff \n\tcontains about half a million messages organised into folders. It was \n\treleased publicly as part of the investigation into Enron and has been \n\tused by William Cohen and others as part of the CALO project. \n \n[enron]: http://www.cs.cmu.edu/~enron/ \n \n*\t**Freeway Traffic Analysis**: \n\tThis fairly [large data set][freeway] is a record of traffic flow on \n\tseveral lanes of the I-880 freeway in California, collected in order to \n\tstudy the effect of roving tow-trucks on clearing congestion caused by \n\ttraffic incidents. \n \n[freeway]: http://ipa.eecs.berkeley.edu/~pettyk/FSP/ \n \nIf all else fails and you still cannot find a suitable data set for your \nresearch, you can always invoke the social web and trawl through bookmarks \non services like [del.icio.us](http://del.icio.us). The global \n[data set tag][global tag] can throw up some interesting hits occasionally but \nthere might be a higher wheat-to-chaff ratio in a particular user's bookmarks, \nsuch as [Peter Skomoroch][skomoroch tag]. [Mine][] is not nearly as \ncomprehensive yet. \n \n[global tag]: http://del.icio.us/tag/dataset \n[skomoroch tag]: http://del.icio.us/pskomoroch/dataset \n[mine]: http://del.icio.us/mreid/dataset \n \nIt would be interesting to do a meta-analysis of all these data sets to see how \nour ability as a discipline to deal with larger and more complex data sets has \nincreased over time. 
As Daniel Lemire pointed out with some surprise recently, \n[processing a terabyte of data][small terabyte] isn't that uncommon. \n \n[small terabyte]: http://www.daniel-lemire.com/blog/archives/2008/02/21/when-a-terabyte-is-small/ -27 JMLR Discussion On Boosting jmlr-discussion-on-boosting 2008-03-03 06:50:47 2008-03-03 06:50:47 publish The upcoming [Volume 9][v9] of the [Journal of Machine Learning Research][jmlr] dedicates a chunk of its pages to a paper entitled "[Evidence Contrary to the Statistical View of Boosting][mease08a]" by David Mease and Abraham Wyner. Following this are a number of responses by heavyweights including [boosting][]'s earliest proponents, Freund and Schapire, as well as Mease and Wyner's [rejoinder][mease08b] to the responses. The whole conversation is also available in a [single PDF][]. \n \nI've seen this format of argument, response and rejoinder a couple of times before in the statistical literature and I think it works really well. It brings the wealth of expert views that are usually found only at workshop or conference panel discussions but adds the benefits of written expression: careful thinking, less time pressure and access to reference material. \n \nI'm familiar with [AdaBoost][] but haven't really kept up with the recent research surrounding it. It seems that the crux of the discussion concerns some of the widely held beliefs about the statistical interpretation of boosting (stumps are better than small trees as weak learners, LogitBoost is better than AdaBoost on noisy data). Simple experiments are described which, often surprisingly, contradict the prevailing wisdom. \n \nAlthough I have only had time to skim the entire discussion, one thing I've found really impressive about the contrary evidence Mease and Wyner provide is that all the R code for the experiments is [available][r code]. As can be seen in the subsequent discussion, this provides the responders with concrete reference points and several use them to refine or debate some of the interpretations. This is a perfect example of putting science back into Herbert Simon's [Sciences of the Artificial][sota], in which he argues that \n> Even when the programs themselves are only moderately large and intricate ... \n> too little is known about their task environments to permit accurate prediction of \n> how well they will perform. ... Here again theoretical analysis must be \n> accompanied by large amounts of \n> experimental work. \n \nNow that I'm back in the world of academic research, it's high time I revisited some of these foundational algorithms in machine learning. I'm hoping that by reading this discussion on boosting and playing with the experiments I can quickly get up to speed with the area. \n \n[sota]: http://www.librarything.com/work/253126 \n[r code]: http://www.davemease.com/contraryevidence/ \n[boosting]: http://www.boosting.org/ \n[v9]: http://jmlr.csail.mit.edu/papers/v9/ \n[jmlr]: http://jmlr.org/ \n[mease08a]: http://www.jmlr.org/papers/volume9/mease08a/mease08a.pdf \n[mease08b]: http://www.jmlr.org/papers/volume9/mease08b/mease08b.pdf \n[single pdf]:http://www.jmlr.org/papers/volume9/mease08a/mease08a_with_discussion.pdf \n[adaboost]: http://en.wikipedia.org/wiki/AdaBoost -28 Feed Bag: A Simple RSS Archiver feed-bag-a-simple-rss-archiver 2008-03-13 05:33:07 2008-03-13 05:33:07 publish One thing my [recent survey of freely available data sets][data] did not uncover was a collection of archived RSS feeds.
This surprised me a little since I would imagine aggregators like [Bloglines](http://bloglines.com/), [Google Reader](http://google.com/reader) and [AideRSS](http://aiderss.com/) must have large databases of hundreds of thousands of RSS feeds. \n \n[data]: http://conflate.net/inductio/application/a-meta-index-of-data-sets/ \n \nHaving seen how easy it is to [create an RSS aggregator in ruby][igvita], I figured it should be just as easy to collect feeds in the same way and write them to a database via one of the many ORM (Object-Relational Mapping) layers available in ruby. The excellent [FeedNormalizer][] library makes the first part trivial and avoids having to worry whether a feed is RSS1, RSS2, Atom, etc. For the second part I thought I'd try something new and give the ORM library [Sequel][] a go and, in the interests of simplicity, have it talk to an [SQLite][] database. \n \n[igvita]: http://www.igvita.com/2007/03/22/agile-rss-aggregator-in-ruby/ \n[feednormalizer]: http://code.google.com/p/feed-normalizer/ \n[sequel]: http://code.google.com/p/ruby-sequel/ \n[sqlite]: http://www.sqlite.org/ \n \nThe part I liked most was how easy Sequel makes setting up database schemas. This is the executable ruby code that defines the two tables I use in Feed Bag: \n \n class Feed < Sequel::Model(:feeds) \n set_schema do \n primary_key :id \n text :name \n text :url \n time :last_checked \n time :created \n end \n end \n \n class Entry < Sequel::Model(:entries) \n set_schema do \n primary_key :id \n text :url \n text :title \n text :content \n text :description \n time :time \n \n foreign_key :feed_id, :table => :feeds \n index :url \n end \n end \n \nUsing it is just as easy. From the ruby side, if you have a feed `f` you get its associated entries using `f.entries` and once you have an entry `e` you can get its URL or title using `e.url` or `e.title`. Given how easy that is, there's little reason to resort to flat text file formats such as CSV when dealing with this sort of data. \n \nI've called the resulting ruby script "Feed Bag" and have [made it available][feedbag] on my academic website along with instructions for using it. Without comments, the script weighs in at about 130 lines of code and only took a few hours to write and debug, most of which was spent learning how to use FeedNormalizer and Sequel. \n \nI've been running Feed Bag on my machine since mid-January, collecting international news feeds from the BBC, New York Times, Washington Post, and 7 others without any trouble. So far it's collected over 25,000 feed items and stashed them in a 38 MB SQLite database. If anyone is interested, I've made a bzip2 compressed version of an SQL dump of the database available for [download][] (3.4 MB). \n \nPlease let me know if you use the data for anything, or if you use Feed Bag to collect your own data set. \n \n[feedbag]: http://users.rsise.anu.edu.au/~mreid/code/feed_bag.html \n[download]: http://users.rsise.anu.edu.au/~mreid/files/data/IntlNews.sql.bz2 -29 ROC Curves for Machine Learning roc-curves-for-ml 2008-04-08 11:30:43 0000-00-00 00:00:00 draft I've recently been attempting to understand Receiver Operating Characteristic (ROC) curves and their relationship to loss, information and divergences. I've decided that the best way to understand this stuff is to attempt to explain it.
So, in the spirit of [John Armstrong][]'s expository posts on category theory and integration, as well as Mark Chu-Carroll's "[Basics][]" series at Good Math, Bad Math, I plan to write a series of posts explaining some facts about ROC curves. \n \n[John Armstrong]: http://unapologetic.wordpress.com/ \n[Basics]: http://scienceblogs.com/goodmath/goodmath/basics/ \n \nThere are already plenty of good introductions and tutorials on ROC curves on the web but they tend to be from a medical diagnosis perspective. I'll try to focus on the material that is relevant to machine learning including the use of ROC analysis in classification, probability estimation and ranking. My aim is to provide a reasonably self-contained set of posts that emphasise some of the more important and recent properties and relationships regarding ROC curves. \n \nPostmodern Classification \n----------------------------- \nSuppose you wanted some way of deciding whether a particular book was [postmodern][] or not. \nWhat you're after is some procedure that takes in a whole lot of details about a particular text and returns a "yes" or "no" answer to the question "Is this text postmodern?" \n \nWe can think about this type of procedure abstractly as a function [tex]r[/tex] from a set [tex]\\mathcal{X}[/tex] of _observations_ about books to the set [tex]\\{0,1\\}[/tex] of _labels_ where 1 indicates membership in the positive class (i.e., the book is postmodern) and 0 indicates non-membership. \n \nROC graphs give us a way of visually assessing sets of binary _classifiers_. These are functions that assign one of two labels to each observation [tex]x[/tex] in the set [tex]\\mathcal{X}[/tex]. We'll use 1 for the positive label and 0 for the negative label so that a classifier is a function \n[tex] \n r : \\mathcal{X} \\to \\{0,1\\}. \n[/tex] \nFor example, each observation in [tex]\\mathcal{X}[/tex] provides details of a particular text (book, film, TV show, _etc_.) and the classifier returns a 1 to indicate the text is [postmodern][] and returns 0 otherwise. \n \n[postmodern]: http://www-stat.wharton.upenn.edu/~steele/Resources/Writing%20Projects/PostmodernStatistics.htm \n \n \n -30 Algorithms, Programs and Similarity 2008-04-20 08:09:43 0000-00-00 00:00:00 draft Via [God Plays Dice](http://godplaysdice.blogspot.com/2008/04/smales-problems.html): In Smale's discussion of the Poincaré conjecture, after pointing out that a big part of the importance of the Poincaré conjecture is that it helped make manifolds respectable objects to study in their own right, he states: \n \n> I hold the conviction that there is a comparable phenomenon today in the notion of a "polynomial \n> time algorithm". Algorithms are becoming worthy of analysis in their own right, not merely as a \n> means to solve other problems. Thus I am suggesting that as the study of the set of solutions of an \n> equation (e.g. a manifold) played such an important role in 20th century mathematics, the study of \n> finding the solutions (e.g. an algorithm) may play an equally important role in the next century. \n \nIn light of [this paper][equal algs], it seems there are difficulties in defining what "finding the solution" might mean. The authors restrict their attention to non-interactive, small-step algorithms and argue that even in this restricted setting it is difficult to define what "equivalent" might mean. \n \nAn interesting example (Example 8) from that paper concerns the composition of two functions _f_ and _g_, implemented by programs _P_ and _Q_.
Both programs take strings as input and output strings in linear time and logarithmic space. One definition of composition (call Q then pass its output to P as input) takes linear space (since the output of Q is created and stored) and linear time, while another definition (essentially a type of lazy evaluation) takes logarithmic space (computing the output of Q one character at a time without storing it) and quadratic time. \n \nI don't think this is as big an issue as the authors make it out to be: it's well known that time and space complexity can be traded off so defining equivalence of programs with respect to input-output behaviour *and* complexity is too fine-grained. \n \n[equal algs]: http://research.microsoft.com/research/pubs/view.aspx?type=Technical%20Report&id=1434 \n \nThe authors finish with a few analogies: one saying that clustering without a decision function is similarly ill-defined, another that "equivalence of ideas" is an even more difficult beast to grasp but is somehow part of the answer to the question "when are two algorithms different?". When they express different ideas? \n \nThe question is a good one but there does not seem to be any easy resolution. \n \nWhat does this have to do with induction? As Goodman argued, "similarity without respects is a quack, an impostor and a fraud" since you cannot reasonably define similarity without providing some accepted terms of reference or _respects_. Certain "natural" types of similarity are natural only because of convention or human perception (colours, faces, shapes). I don't hold out hope for there being a "natural", universal sense of similarity for something as abstract as algorithms. That's not to say we can't build consensus piece by piece. \n \nPeter Turney's readings in analogy: http://apperceptual.wordpress.com/2007/12/20/readings-in-analogy-making/ \n \nImplications for patents on algorithms... -31 Private private 2008-04-08 00:43:31 2008-04-08 00:43:31 private This page is a collection of notes and sub-pages for things I'm not yet ready to show the world. \n \n \n -32 Questions questions 2008-04-08 01:28:55 2008-04-08 00:51:47 private Some research questions to expand on: \n \n### ROC and Families of Tasks \n \nWe know that a class probability estimation task is equivalent to a family of cost-sensitive classification tasks. How is this related to ROC curves? Given a probability estimator, applying a threshold gives a family of classifiers and an ROC curve. \n \n### Maximal AUC and Divergences \n \nThe maximal AUC obtainable for a given learning problem measures the separation of the label-conditional distributions, much like an f-divergence. However, it seems that maximal AUC and f-divergences are different in the sense that there is no _f_ such that maximal AUC is an f-divergence for that _f_. \n \nWhat kind of divergence is maximal AUC? Is it "special" in some way or is there a whole class of them? Fawcett's "[ROC Graphs with Instance-Varying Costs][rociv]" (2006) and Xie and Priebe's "[A Weighted Generalization of the Mann-Whitney-Wilcoxon Statistic][gmww]" both suggest (equivalent, I think) families based on transforming the axes of the ROC graph. \n \n[rociv]: http://www.citeulike.org/user/mdreid/article/2614937 \n[gmww]: http://www.citeulike.org/user/mdreid/article/2639710 \n \n### Clustering \n \nClustering seems to be a more primitive problem than classification or class probability estimation.
In some sense, clustering takes a single distribution over observations and transforms it into a collection of label-conditional distributions. In this sense, clustering considers the collection of functions _F_ from observations to some discrete set of finite cardinality. \n \nAs Papadimitriou argues in his "[Algorithms, Games and the Internet][agi]" (STOC 2001), clustering is not really well-defined: \n \n> There are far too many criteria for the goodness of a clustering . . . and far too little guidance \n> about choosing among them. . . . The criterion for choosing the best clustering scheme cannot be \n> determined unless the decision-making framework that drives it is made explicit \n \nThis would suggest that understanding the collection _F_ must be done in concert with some other family _G_ from labels output by functions in _F_ to predictions in some other set. The structure of _G_ and the type of loss incurred will determine how best to choose an element from _F_ (cf. Baxter's "Learning to Learn" and the choice of bias). \n \n[agi]: http://www.citeulike.org/user/mdreid/article/326513 -33 The Earth Is Round (p < 0.05) the-earth-is-round 2008-04-09 06:36:13 2008-04-09 06:36:13 publish I love finding old essays on statistics. The philosophical and methodological wars that rage within that discipline make for fun reading. Particularly enjoyable are those essays - inevitably written by older, well-respected researchers - that make a strong point with beautiful rhetorical flourish and no small amount of barbed humour. \n \nThe title of a [journal article][teir] ([PDF][teirpdf]) by Jacob Cohen (and this post) is a classic example. As you may have guessed, its main aim is to rant against the misuse of p-values for null hypothesis significance testing (NHST). As well as including some extremely amusing quotes by Cohen and others, the paper does a fantastic job of curing the reader of any doubt regarding the correct interpretation of p-values. \n \n[teir]: http://www.citeulike.org/user/mdreid/article/2643653 \n[teirpdf]: http://web.math.umt.edu/wilson/Math444/Handouts/Cohen94_earth%20is%20round.pdf \n \nRepeat after me: "the p-value is NOT the probability the null hypothesis is true given the observed data". Or, as Cohen puts it: \n> What's wrong with NHST? Well, among many other things, it does not tell us what we want to \n> know, and we so much want to know what we want to know that, out of desperation, we \n> nevertheless believe that it does! What we want to know is "Given these data, what is the \n> probability that H0 is true?" But as most of us know, what it tells us is "Given \n> that H0 is true, what is the probability of these (or more extreme) data?" \n> These are not the same... \n \nMany people make this mistake as it's so easy to erroneously reason about conditional probabilities. The particular fallacy that occurs when p-values are interpreted as the probability the null hypothesis is true is assuming that Prob(H0|Data) = Prob(Data|H0). Cohen argues that we are confused by the intuitive appeal of reasoning with rare events as though they were impossible events. He highlights why this intuition can lead us astray with a wonderful example. A low p-value is erroneously reasoned about as follows: \n> If the null hypothesis is correct, then these data are highly unlikely. \n> These data have occurred. \n> Therefore, the null hypothesis is highly unlikely.
\n \nThis seems, at first glance, to be analogous to the non-probabilistic syllogism (namely _modus tollens_): \n> If a person is a Martian, then he is not a member of Congress. \n> This person is a member of Congress. \n> Therefore, he is not a Martian. \n \nAbsolutely nothing wrong with that. It's watertight. Now see what happens when the first line becomes a statement with very high probability instead of strictly true: \n> If a person is an American, then he is probably not a member of Congress. (TRUE, RIGHT?) \n> This person is a member of Congress. \n> Therefore, he is probably not an American. \n \nOuch! That last deduction should have made your eyes water. This is exactly what is going wrong when people misinterpret p-values. It's what you get for using Bayes' rule without knowing something more _unconditionally_ about the probability of being a member of Congress. This is a rare event and its rarity must be factored in when doing the probabilistic equivalent of implication. Similarly, without knowing anything about the prior probability of the null hypothesis you cannot say anything about its posterior probability. \n \nCohen nicely sums up the danger of treating deduction and induction analogously with a quote attributed to Morris Raphael Cohen: \n> All logic texts are divided into two parts. In the first part, on deductive logic, the fallacies are explained; in the second part, on inductive logic, they are committed. \n \n \n -36 Archives archives 2008-04-10 03:43:11 2008-04-10 03:39:14 publish Posts placed here for posterity. Sliced and diced for your convenience. -38 Research-Changing Books research-changing-books 2008-06-14 13:25:29 2008-05-26 06:45:44 In response to a post by Peter Turney, I list the books I feel shaped my research career. publish A [recent post by Peter Turney][turney] lists the books that have influenced his research. As well as compiling a great list of books that are now on my mental "must read one day" list, he makes a crucial point about assembling such a list: \n> If a reader cannot point to some tangible outcome from reading a book, \n> then the reader may be overestimating the personal impact of the book. \n \n[turney]: http://apperceptual.wordpress.com/2008/05/25/the-book-that-changed-my-life/ \n \nWith that in mind I tried to think of which books had a substantial impact on my research career. \n \nAlthough I can barely remember any of it now, the [manual][vic20] that came with the Commodore Vic 20 computer, which I read when I was around seven, got me hooked on programming. In primary and secondary school it was that book and the subsequent Commodore 64 and Amiga manuals that set me on the road to studying computer science and maths. \n \n[vic20]: http://www.geocities.com/rmelick/prg.txt \n \nIn my second year at university I had the great fortune of being recommended Hofstadter's "[Gödel, Escher, Bach][geb]" by a fellow student. It is centrally responsible for getting me to start thinking about thinking and subsequently doing a PhD in machine learning. The fanciful but extremely well written detours into everything from genetics to Zen Buddhism also broadened my horizons immensely. \n \n[geb]: http://www.librarything.com/work/5619/book/12512722 \n \nI. J. Good's "[The Estimation of Probabilities][good]" was the tiny 1965 monograph I bought second-hand for $2 that made my thesis take a huge change in direction by giving it a Bayesian flavour.
I now realise that a lot of that work has since been superseded by much more sophisticated Bayesian methods but sometimes finding a theory before it has been over-polished means that there is much more expository writing to aid intuition. It also helps that Good is a fabulous technical writer. \n \n[good]: http://www.librarything.com/work/2542774/book/12420041 \n \nPhilosophically, Nelson Goodman's "[Fact, Fiction and Forecast][fff]" also shaped my thinking about induction quite a lot. His ideas on the "virtuous circle" of basing current induction on the successes and failures of the past provided me with a philosophical basis for the transfer learning aspects of my research. I found his views a refreshing alternative to Popper's (also personally influential) take on induction in "[The Logic of Scientific Discovery][losd]". Whereas Popper beautifully characterises the line between metaphysical and scientific theories, Goodman tries to give an explanation of *how* we might practically come up with new theories in the first place given that there will be, in general, countless theories that adequately fit the available data. In a nutshell, his theory of "entrenchment" says that we accrete a network of terms by induction and use these terms as features for future induction depending on how successful they were when used in past inductive leaps. This is a view of induction in line with Hume's "habits of the mind" and one I find quite satisfying. \n \n[fff]: http://www.librarything.com/work/70761/book/12419989 \n[losd]: http://www.librarything.com/work/68144/book/31001290 \n \nWhile not directly related to machine learning or computer science, there are a few other books that helped me form opinions on the process of research in general. I read Scott's "[Write to the Point][wttp]" over a decade ago now but it still makes me stop, look at my writing and simplify it. My attitude to presenting technical ideas was also greatly influenced by reading Feynman's "[QED][]" lectures. They are a perfect example of communicating extremely deep and difficult ideas to a non-technical audience without condescension or misrepresentation. Finally, I read Kennedy's "[Academic Duty][ad]" just as I started my current post-doc and found it immensely insightful. I plan to reread it as I (hopefully) hit various milestones in my academic career. \n \n[wttp]: http://www.librarything.com/work/1093218/book/31001976 \n[qed]: http://www.librarything.com/work/27937/book/12512712 \n[ad]: http://www.librarything.com/work/252530/book/20392830 \n \nOf course, like Peter, there are innumerable other books, papers and web pages that have shaped my thinking but the ones above are the ones that leap to mind when I think about how my research interests have developed over time. -39 Mark Reid me_shorthairdark_bw 2008-06-02 07:55:56 2008-06-02 07:55:56 Your Host inherit Photo of yours truly. Someone should really get better lighting in here... -40 Visualising 19th Century Reading in Australia visualising-reading 2008-12-09 12:05:57 2008-06-17 03:10:34 A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project. publish **** \n_Update - 9 Dec 2008_: Julieanne and I presented a much improved version of this visualisation at the [Resourceful Reading][] conference held at the University of Sydney on the 5th of December. Those looking for the application I presented there: stay tuned, I will post the updated version here shortly.
\n**** \n[Resourceful Reading]: http://conferences.arts.usyd.edu.au/index.php?cf=20 \n \nI've recently spent a bit of time collaborating with my wife on a research project. Research collaboration by couples is not new but given that Julieanne is a [lecturer in the English program][j] and I'm part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual. \n \nThe rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. The tool itself is based on a simple application of linear Principal Component Analysis (PCA). I'll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool. \n \n[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php \n[csl]: http://csl.cecs.anu.edu.au/ \n \nThe Australian Common Reader Project \n-------------------------------------------- \nOne of Julieanne's research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers' relationship with books and periodicals. \n \n[acrp]: http://www.api-network.com/hosted/acrp/ \n \nEver on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When we asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions. \n \n[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/ \n[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin \n[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor \n \nBooks and Borrowers \n------------------------ \nAfter an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. \nThe full database contains 99,692 loans of 7,078 different books from 11 libraries by 2,642 different people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed one of these books. \nThis distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people.
Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.

| Book ID | Borrower 1 | Borrower 2 | ... | Borrower 2,473 |
|---------|------------|------------|-----|----------------|
| 1       | 1          | 0          | ... | 1              |
| 2       | 1          | 1          | ... | 0              |
| 3       | 0          | 0          | ... | 1              |
| ...     | ...        | ...        | ... | ...            |
| 1,616   | 1          | 1          | ... | 1              |
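A table like this is straightforward to build from the raw loan records. As a rough sketch in ruby (for consistency with the other code on this site; the names here are invented for illustration and this is not the actual project code):

    # Build a 0/1 vector per book from [book_id, borrower_id] loan pairs.
    # Repeat loans simply leave the entry at 1, matching the table's definition.
    def book_vectors(loans, num_borrowers)
      table = Hash.new { |h, book| h[book] = Array.new(num_borrowers, 0) }
      loans.each do |book_id, borrower_id|
        table[book_id][borrower_id - 1] = 1  # assumes borrower IDs start at 1
      end
      table
    end

    vectors = book_vectors([[1, 1], [2, 1], [2, 2]], 2_473)
    vectors[2].first(3)  # => [1, 1, 0]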
\n \nConceptually, each cell in the table contains a 1 if the person associated with the cell's column borrowed the book associated with the cell's row. If there was no such loan between a given book and borrower the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473. \n \nBook Similarity \n----------------- \nThe table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this a notion of what "similar books" means is required. \n \nMathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\\mathbf{b}_2 = (1,1,\\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex]. \n \nA crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common. That is, the number of columns that have a `1` in the row for book 1 and the row for book 2. \n \nIn terms of the vector representation, this similarity measure is simply the "[inner product][]" between [tex]\\mathbf{b}_1[/tex] and [tex]\\mathbf{b}_2[/tex] and is written [tex]\\left<\\mathbf{b}_1,\\mathbf{b}_2\\right> = b_{1,1}b_{2,1} + \\cdots + b_{1,N}b_{2,N}[/tex] where N = 2,473 is the total number of borrowers. \n \n[inner product]: http://en.wikipedia.org/wiki/Inner_product_space \n \nIt turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to "normalise" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the "size" of each of the vectors for those books. \n \nMathematically, we will denote the size of a book vector [tex]\\mathbf{b}_i[/tex] as [tex]\\|\\mathbf{b}_i\\| = \\sqrt{\\left<\\mathbf{b}_i,\\mathbf{b}_i\\right>}[/tex]. The similarity between two books then becomes:
\n[tex]\\displaystyle \n \\text{sim}(\\mathbf{b}_i,\\mathbf{b}_j) \n = \\frac{\\left<\\mathbf{b}_i,\\mathbf{b}_j\\right>}{\\|\\mathbf{b}_i\\|\\|\\mathbf{b}_j\\|} \n[/tex] \n
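In code, this normalised similarity is only a few lines. Here is a minimal sketch in ruby (the language of the other scripts on this site); `similarity` is a hypothetical helper, not part of the actual analysis code:

    require 'matrix'

    # Normalised similarity of two books, each given as an array of 0s
    # and 1s (one entry per borrower), as in the rows of Table 1.
    def similarity(book_i, book_j)
      b_i = Vector[*book_i]
      b_j = Vector[*book_j]
      b_i.inner_product(b_j) / (b_i.norm * b_j.norm)
    end

    similarity([1, 0, 1, 1], [1, 1, 0, 1])  # => 0.666... (2 shared borrowers of 3 each)

A book compared with itself gives 1 and two books with no borrowers in common give 0, which is exactly the normalisation described above.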
\n \nPrincipal Component Analysis \n--------------------------------- \nNow that we have a similarity measure between books the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart. \n \nA standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique aims to find a way of reducing the number of coordinates in each book vector in such a way that when the similarity between two books is computed using these smaller vectors, it is as close as possible to the original similarity. That is, PCA creates a new table that represents books in terms of only two columns. \n \n[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis
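For the technically inclined, the reduction can be sketched in a few lines of ruby using the standard library's `Matrix` class. This is only a rough, hypothetical illustration of the idea; as noted at the end of this post, the actual reduction was done in R:

    require 'matrix'

    # Reduce an array of book vectors (rows like those of Table 1) to
    # one [x, y] pair per book using the top two principal components.
    # Naive and slow for 2,473 columns, but it shows the idea.
    def pca_2d(rows)
      n = rows.size.to_f
      dims = rows.first.size

      # Centre each column on its mean
      means = (0...dims).map { |j| rows.sum { |r| r[j] } / n }
      centred = rows.map { |r| Vector[*r.each_with_index.map { |v, j| v - means[j] }] }

      # Covariance matrix of the centred data
      x = Matrix.rows(centred.map(&:to_a))
      cov = (x.transpose * x) / (n - 1)

      # The two eigenvectors with the largest eigenvalues are the principal axes
      eig = cov.eigen
      order = eig.eigenvalues.each_index.sort_by { |i| -eig.eigenvalues[i] }
      pc1, pc2 = eig.eigenvectors[order[0]], eig.eigenvectors[order[1]]

      # Project each book onto those axes, giving the rows of Table 2
      centred.map { |b| [b.inner_product(pc1), b.inner_product(pc2)] }
    end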
Table 2: A portion of the book table after PCA. The values in the two new columns (PCA IDs) can be used to plot the books.

| Book ID | PCA 1 | PCA 2 |
|---------|-------|-------|
| 1       | -8.2  | 2.3   |
| 2       | 0.4   | -4.3  |
| 3       | -1.3  | -3.7  |
| ...     | ...   | ...   |
| 1,616   | 2.2   | -5.6  |
\n \nTable 2 gives an example of the book table after a PCA that reduces the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be interpreted as easily as the borrower columns in Table 1 but their values are such that the similarities of books computed from Table 2 are roughly the same as those computed from Table 1. That is, if [tex]\\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\\mathbf{c}_2=(0.4,-4.3)[/tex] are the vectors \nfor the first two rows of Table 2 then [tex]\\text{sim}(\\mathbf{c}_1,\\mathbf{c}_2)[/tex] \nwould be close to [tex]\\text{sim}(\\mathbf{b}_1,\\mathbf{b}_2)[/tex], the similarity of the \nfirst two rows in Table 1.[^1] \n \n[^1]: Technically, the guarantee of the "closeness" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair's \nsimilarity is estimated well. \n \nVisualising the Data \n---------------------- \nFigure 1 shows a plot of the PCA reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2] \n \n[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle's colour.
[Figure 1: A PCA plot of all the books in the ACRP database coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.]
\n \nOne immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. This means it is likely that two voracious readers who frequent the same library will read the same books. This, in turn, will mean the similarity of two books from the same library will be higher than that of books from different libraries as there are very few borrowers who use more than one library. \n \nDrilling Down and Interacting \n--------------------------------- \nTo get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners' and Mechanics' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data. \n \n[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales \n \nThere are a total of 789 books in the Lambton Institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books. \n \nTo make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers. \n \nClicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet]. \n \n[applet]: /inductio/wp-content/public/acrp/
[Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.]
\n \nInstructions describing how to use the tool can be found below it. \nIn a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the "Borrowers" bar will only show books with at least that many borrowers; and altering the "Similarity" bar will only draw lines to books with at least that proportion of borrowers in common. \n \nFuture Work and Distant Reading \n------------------------------------- \nJulieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls "distant reading" -- looking at books as objects and how they are read rather than the "close reading" of the text of individual books. \n \n[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti \n \nNow that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on the habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to the questions we might ask. \n \nInstead we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers, their text, as well as other historical and cultural factors about them. \n \nData and Code \n---------------- \nFor the technically minded, I have made the code I used to do the visualisation available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location. \n \n[github]: http://github.com/mreid/acrp/tree/master \n[SQL]: http://en.wikipedia.org/wiki/SQL \n[R]: http://www.r-project.org/ \n[Processing]: http://processing.org/ \n \nAccess to the data that the code acts upon is not mine to give, so the code is primarily to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here. \n \n -41 ACRP Visualisation acrp 2008-06-10 11:13:43 2008-06-10 11:13:43 Applet showing neighbours of a book inherit A screen grab of the applet showing the neighbours of a selected book. -42 Constructive and Classical Mathematics constructive-and-classical-mathematics 2008-06-16 05:46:56 2008-06-12 02:08:12 publish I have a (very) amateur interest in the philosophy of mathematics. My interest was recently piqued again after finishing the very readable "[Introducing Philosophy of Mathematics][ipom]" by Michèle Friend. Since then, I've been a lot more aware of terms like "constructivist", "realist", and "formalist" as they apply to mathematics. \n \nToday, I was flicking through the entry on "[Constructivist Mathematics][cm]" in the [Stanford Encyclopedia of Philosophy][seop] and found a simple example of some of the problems with a non-constructive take on what disjunction means in mathematical statements.
The article calls it "well-worn" but I hadn't seen it before. \n \nConsider the statement: \n> There exist irrational numbers a and b such that [tex]a^b[/tex] is rational. \n \nThe article gives a slick proof that this statement is true by invoking the [law of the excluded middle][lem] (LEM). That is, every number must be either rational or irrational. \n \nNow consider [tex]\\sqrt{2}^\\sqrt{2}[/tex]. By the LEM, this must be rational or irrational: \n \n * Case 1: If it is rational then we have proved the statement since we know [tex]a = b = \\sqrt{2}[/tex] is irrational. \n \n * Case 2: If [tex]\\sqrt{2}^\\sqrt{2}[/tex] is irrational then choosing [tex]a = \\sqrt{2}^\\sqrt{2}[/tex] and [tex]b = \\sqrt{2}[/tex] as our two irrational numbers gives [tex]{\\sqrt{2}^{\\sqrt{2}^\\sqrt{2}}} = {\\sqrt{2}^2} = 2[/tex] -- a rational number. \n \nEither way, we've proven the existence of two irrational numbers yielding a rational one. \nThe problem is that this argument is non-constructive and so we don't know which of case 1 and case 2 is true; we only know that one of them must be[^1]. This is a simple case of reductio ad absurdum in disguise. \n \nAs a born-again computer scientist (my undergraduate degree was pure maths and my PhD in computer science) I've become increasingly suspicious of these sorts of proofs and more [constructivist][] -- even [intuitionist][] -- in my tastes. I think the seed of doubt was planted during the awkward discussions of the [Axiom of Choice][] in my functional analysis lectures. The sense of unease is summed up nicely in the following joke: \n \n> The Axiom of Choice is obviously true, the well-ordering principle obviously false, \n> and who can tell about Zorn's lemma? \n \nOf course, all those concepts are equivalent but that's far from intuitive. \n \nI don't think I'm extremist enough to take a wholeheartedly computational view of mathematics -- denying all but the computable real numbers and functions, thereby making [all functions continuous][] -- but it is a tempting view of the subject. \n \nIn machine learning, I think there is a fairly pragmatic take on the philosophy of mathematics. For example, classical theorems from functional analysis are used to derive results involving kernels but when it comes to implementation, estimations and approximations are used with abandon. In my opinion, this is a [healthy way for the theory in this area to proceed][lemire]. As in physics, if the experimental work reveals inconsistencies with a theory, revisit the maths. If that doesn't work, [talk to the philosophers][dim]. \n \n[ipom]: http://www.librarything.com/work/3362656/book/17581191 \n[cm]: http://plato.stanford.edu/entries/mathematics-constructive/ \n[seop]: http://plato.stanford.edu/ \n[lem]: http://en.wikipedia.org/wiki/Law_of_the_excluded_middle \n[intuitionist]: http://en.wikipedia.org/wiki/Intuitionism \n[constructivist]: http://en.wikipedia.org/wiki/Constructivism_%28mathematics%29 \n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice \n[all functions continuous]: http://math.andrej.com/2006/03/27/sometimes-all-functions-are-continuous/ \n[lemire]: http://www.daniel-lemire.com/blog/archives/2008/06/05/why-pure-theory-is-wasteful/ \n[dim]: http://diveintomark.org/archives/2008/06/11/purity \n \n[^1]: It turns out, by [Gelfond's Theorem](http://en.wikipedia.org/wiki/Gelfond's_theorem), that [tex]\\sqrt{2}^\\sqrt{2}[/tex] is transcendental, and therefore irrational, so the second case alone proves the statement.
However, I'm not sure what machinery is required to prove Gelfond's theorem. \n \n -43 PCA of All Libraries all_libraries 2008-06-17 02:07:10 2008-06-17 02:07:10 Plot of the books across all libraries in the ACRP database inherit -44 ICML Discussion Site icml-discussion-site 2008-07-01 15:40:23 2008-07-01 15:40:23 publish A little while ago, John Langford [suggested][jl1] that a discussion site be set up for ICML that allows attendees and others to talk about the accepted papers. \n \nHaving played around with various wiki systems and discussion sites in the past, I volunteered to help set something up. As John has [noted on his blog][jl2] the discussion site is now [up and running][icml]. \n \nThe main aim with this first attempt was to provide basic functionality: papers can be browsed by author, title and keyword; each paper has a discussion thread where anyone can leave comments. There are no comments at the time of writing this but I'm hoping this will change once the conference gets underway. \n \nProvided there are no disasters, the site will remain up for as long as it is useful. Ultimately, I'd like to add earlier conference proceedings to the site and ensure future conferences can be added as well. We will see how it goes this year and incorporate any feedback into future versions of the site. \n \nFor those interested in the technical details, I used [DokuWiki](http://wiki.splitbrain.org/wiki:dokuwiki) as the engine for the site along with a number of plugins, most importantly the [discussion plugin](http://wiki.splitbrain.org/plugin:discussion). \n \n[jl1]: http://hunch.net/?p=327 \n[jl2]: http://hunch.net/?p=335 \n[icml]: http://conflate.net/icml -45 Evaluation Methods for Machine Learning evaluation-methods-for-machine-learning 2008-07-21 11:18:19 2008-07-21 11:18:19 Some thoughts on the workshop on evaluation methods that I attended as part of ICML 2008 in Helsinki. publish Although I wasn't able to attend the talks at [ICML 2008][] I was able to participate in the [Workshop on Evaluation Methods for Machine Learning][emml] run by William Klement, [Chris Drummond][], and [Nathalie Japkowicz][]. \n \n[icml 2008]: http://icml2008.cs.helsinki.fi/ \n[emml]: http://www.site.uottawa.ca/ICML08WS/ \n[nathalie japkowicz]: http://www.site.uottawa.ca/~nat/ \n[chris drummond]: http://www.site.uottawa.ca/~cdrummon/ \n \nThis workshop at ICML was a continuation of previous workshops held at AAAI that aim to cast a critical eye on the methods used in machine learning to experimentally evaluate the performance of algorithms. \n \nIt kicked off with a series of mini debates with Nathalie and Chris articulating the opposing sides. The questions included the following: \n \n * Should we change how evaluation is done? \n * Is evaluation central to empirical work? \n * Are statistical tests critical to evaluation? \n * Are the UCI data sets sufficient for evaluation? \n \nThere were three papers I particularly liked: [Janez Demsar][]'s talk "[On the Appropriateness of Statistical Tests in Machine Learning][appropriateness]", [Edith Law][]'s "[The Problem of Accuracy as an Evaluation Criterion][accuracy]", and [Chris Drummond][]'s call for a mild-mannered revolution "[Finding a Balance between Anarchy and Orthodoxy][anarchy]".
\n \n[janez demsar]: http://www.ailab.si/janez/ \n[appropriateness]: http://www.site.uottawa.ca/ICML08WS/papers/J_Demsar.pdf \n[edith law]: http://www.cs.cmu.edu/~elaw/ \n[accuracy]: http://www.site.uottawa.ca/ICML08WS/papers/E_Law.pdf \n[anarchy]: http://www.site.uottawa.ca/ICML08WS/papers/C_Drummond.pdf \n \nJanez's talk touched on a number of criticisms that [I had found in Jacob Cohen's paper "The Earth is Round (p < 0.05)"][round earth], making the case that people often incorrectly report and incorrectly interpret p-values for statistical tests. Unfortunately, as Janez points out, since machine learning is a discipline that (rightly) places emphasis on results it is difficult as a reviewer to reject a paper that presents an ill-motivated and confusing idea if its authors have shown that, statistically, it outperforms similar approaches. \n \n[round earth]: http://conflate.net/inductio/2008/04/the-earth-is-round/ \n \nEdith's talk argued that accuracy is sometimes a poor measure of performance, making all this concern over whether we are constructing statistical tests for it (or AUC) moot. In particular, for tasks like salient region detection in images, language translation and music tagging there is no single correct region, translation or tag. Whether a particular region/translation/tag is "correct" is impossible to determine independent of the more difficult tasks of image recognition/language understanding/music identification. Solving these for the purposes of evaluation would make a solution to the smaller tasks redundant. Instead of focusing on evaluation of the smaller tasks, Edith suggests ways in which games that humans play on the web -- such as the [ESP Game][] -- can be used to evaluate machine performance on these tasks by playing learning algorithms against humans. \n \n[esp game]: http://www.espgame.org/ \n \nFinally, Chris's talk made the bold claim that the way we approach evaluation in machine learning is an "impoverished realization of a controversial methodology", namely statistical hypothesis testing. "Impoverished" because when we do do hypothesis testing it is in the narrowest of senses, mainly to test that my algorithm is better than yours on this handful of data sets. "Controversial" since many believe science to have social, exploratory and accidental aspects --- much more than just the clinical proposing of hypotheses for careful testing. \n \nWhat these papers and the workshop as a whole showed me was how unresolved my position is on these and other questions regarding evaluation. On the one hand I spent a lot of time painstakingly setting up, running and analysing experiments for my [PhD research][] on inductive transfer in order to evaluate the methods I was proposing. I taught myself how to correctly control for confounding factors, use the [Bonferroni correction][] to adjust significance levels and other esoterica of statistical testing. Applying all these procedures carefully to my work felt very scientific and I was able to create many pretty graphs and tables replete with confidence intervals, p-values and the like. On the other hand -- and with sufficient hindsight -- it's not clear how much value this type of analysis added to the thesis overall (apart from demonstrating to my reviewers that I could do it).
\n \n[phd research]: http://www.library.unsw.edu.au/~thesis/adt-NUN/public/adt-NUN20070512.173744/index.html \n[bonferroni correction]: http://en.wikipedia.org/wiki/Bonferroni_correction \n \nThe dilemma is this: when one algorithm or approach clearly dominates another, details such as p-values, t-tests and the like only obscure the results; and when two algorithms are essentially indistinguishable, using "significance" levels to pry them apart seems to be grasping at straws. \n \nThat's not to say that we should get rid of empirical evaluation altogether. Rather, we should carefully choose (or create) our data sets and empirical questions so as to gain as much insight as possible and go beyond "my algorithm is better than yours". Statistical tests should not mark the end of an experimental evaluation but rather act as a starting point for further questions and carefully constructed experiments that resolve those questions. \n -46 COLT 2008 Highlights colt-2008-highlights 2008-07-27 23:19:39 2008-07-27 11:40:46 publish I'm a little late to the party since several machine learning bloggers have already noted their favourite papers at the recent joint [ICML][]/[UAI][]/[COLT][] conferences in Helsinki. \n \n[John][] listed a few COLT papers he liked, [Hal][] has covered some tutorials as well as several ICML and a few UAI and COLT papers, while [Jurgen][] has given an overview of the [non-parametric Bayes][npbayes] workshop. \n \n[icml]: http://icml2008.cs.helsinki.fi/ \n[uai]: http://uai2008.cs.helsinki.fi/ \n[colt]: http://colt2008.cs.helsinki.fi/ \n \n[john]: http://hunch.net/?p=341 \n[hal]: http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html \n[jurgen]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html \n[npbayes]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html \n \nI didn't make it to the main ICML sessions but I did catch the workshop on evaluation in machine learning. Since I've already [written about][evaluation] that and didn't attend any of the UAI sessions, I'll focus on the COLT stuff I found interesting. \n \n[evaluation]: http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/ \n \nThe joint COLT/UAI invited talks were all excellent and covered a diverse range of topics. [Robin Hanson][] gave a great introduction to prediction markets. It was clear he'd given this type of talk before as he handled the many subsequent questions directly and decisively. I'm really interested in the work being done here so I'll write more about prediction markets in a later post. \n \n[robin hanson]: http://hanson.gmu.edu/home.html \n \n[Gábor Lugosi][]'s talk covered a number of important concentration inequalities, focusing on the logarithmic Sobolev inequalities. It was a perfect example of a maths talk where details were eschewed without compromising accuracy in order to give insight into the main results. \n \n[gábor lugosi]: http://www.econ.upf.es/~lugosi/ \n \n[Dan Klein][] presented some impressive results pertaining to unsupervised learning in three natural language problems: grammar refinement (inventing new annotations to improve parsing), coreference resolution (determining which nouns refer to the same thing) and lexicon translation (matching words across languages). By setting up simple Bayesian models and throwing a ton of unlabelled examples at them he was able to get results competitive with the best supervised learning approaches on some problems.
\n \nThe lexicon translation was particularly impressive. Given a set of English documents and a set of Chinese documents his system was able to do a passable job of translating single words between the languages. What was impressive is that there was no information saying that, for example, this English document is a translation of that Chinese document. They could have all been pulled randomly from .co.uk and .cn sites. \n \nIf I understand it correctly, Klein's system simply looked for common patterns of words within documents of one language and then tried to match those patterns to similar patterns in the documents of the other. When the languages were "closer" - such as with Spanish and English - the system also made use of patterns of letters within words (e.g., "direction" and "dirección") to find possible cognates. \n \n[Dan Klein]: http://www.cs.berkeley.edu/~klein/ \n \nThere was a variety of good papers at COLT this year. Of the talks I saw, two stood out for me. \n \n[The True Sample Complexity of Active Learning][balcan hanneke] by Balcan, Hanneke and Wortman showed the importance of choosing the right theoretical model for analysis. In active learning the learner is able to choose which unlabelled examples have their labels revealed. \nIntuitively, one would think that this should make learning easier than normal supervised learning, where the learner has no say in the matter. \n \nPrevious results showed that this was basically not the case. Subtly, those results asked that the active learner achieve a certain error rate but also _verify_ that that rate was achieved. What Nina and her co-authors showed was that if you remove this extra requirement then active learning does indeed make learning much easier, often with exponential improvements in sample complexity over "passive" learning. \n \n[balcan hanneke]: http://colt2008.cs.helsinki.fi/papers/108-Balcan.pdf \n \n[An Efficient Reduction of Ranking to Classification][ailon] by Ailon and Mohri was also impressive. They build on earlier work that showed how a ranking problem can be reduced to learning a binary preference relation between the items to be ranked. One crucial part of this reduction is turning a learnt preference relation into a ranking. That is, taking pair-wise assessments of relative item quality and laying out all those items along a line so as to best preserve those pair-wise relationships. \n \nWhat Ailon and Mohri show is that simply applying a randomised quicksort to the pair-wise comparisons for n items will give a good reduction to a ranking in O(n log n) time. "Good" here means that the regret of the reduction-based ranking over the best possible is bounded by the regret of the classifier that learns the preference relation over the best possible classification. Furthermore, if you are only interested in the top k of n items you can get a good ranking in O(k log k + n) time. What's particularly nice about this work is that the tools they use to analyse randomised quicksort are very general and will probably find other applications. \n \n[ailon]: http://colt2008.cs.helsinki.fi/papers/32-Ailon.pdf \n \nFinally, while I didn't attend the talk at COLT, a couple of people have told me that Abernethy et al.'s paper [Competing in the Dark: An Efficient Algorithm for Bandit Linear Optimization][abernethy] was very good. I've since skimmed through it and it is a very nice paper, well-written and replete with interesting connections.
I'm not that familiar with bandit learning work but this paper has a good summary of recent results and is intriguing enough to make me want to investigate further. \n \n[sridharan]: http://colt2008.cs.helsinki.fi/papers/94-Sridharan.pdf \n[abernethy]: http://colt2008.cs.helsinki.fi/papers/123-Abernethy.pdf \n \n -48 Prediction Markets 47-revision 2008-07-25 07:18:14 2008-07-25 07:18:14 inherit The invited talks were all really interesting. [Robin Hanson][] gave a great introduction to prediction markets, describing how they can be used to extract information through the use of market scoring rules. Essentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\n -49 COLT 2008 Highlights 46-revision 2008-07-25 07:24:46 2008-07-25 07:24:46 inherit I'm a little late to the party since several machine learning bloggers have already noted their favourite papers at the recent joint [ICML][]/[UAI][]/[COLT][] conferences in Helsinki. \n\n[John][] listed a few COLT papers he liked, [Hal][] has covered some tutorials as well as several ICML and a few UAI and COLT papers, while [Jurgen][] has given an overview of the [non-parametric Bayes][npbayes] workshop.\n\n[icml]: http://icml2008.cs.helsinki.fi/\n[uai]: http://uai2008.cs.helsinki.fi/\n[colt]: http://colt2008.cs.helsinki.fi/\n\n[john]: http://hunch.net/?p=341\n[hal]: http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html\n[jurgen]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\n[npbayes]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\n\nI didn't make it to the main ICML sessions but I did catch the workshop on evaluation in machine learning.
Since I've already [written about][evaluation] that and didn't attend any of the UAI sessions, I'll focus on the COLT stuff I found interesting.\n\n[evaluation]: http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/\n\nThe joint COLT/UAI invited talks were all excellent and covered a diverse range of topics. [Robin Hanson][] gave a great introduction to prediction markets. It was clear he'd given this type of talk before as he handled the many subsequent questions directly and decisively. I'm really interested in the work being done here so I'll write more about prediction markets in a later post. \n\n[Gábor Lugosi][]'s talk covered a number of important concentration inequalities, focusing mainly on the logarithmic Sobolev inequalities. \n\nConcentration Inequalities\n-----------------------------\nGábor Lugosi's talk \non concentration inequalities was very well done, covering a number of \nimportant theorems in this area in a very clear manner. It was a perfect\nexample of a maths talk where details were eschewed without compromising\naccuracy in order to give insight into the inequalities.\n\nPapers\n-------\n* [The True Sample Complexity of Active Learning][balcan hanneke] Balcan, Hanneke and Wortman.\n\n* [An Efficient Reduction of Ranking to Classification][ailon] Ailon and Mohri.\n\n* [Improved Guarantees for Learning with Similarity Functions][balcan blum] Balcan, Blum and\nSrebro\n\n* [An Information Theoretic Framework for Multi-view Learning][sridharan] by Sridharan and Kakade\n\nWhile I didn't attend the talk at COLT, a couple of people have told me that Abernethy et al.'s paper [Competing in the Dark: An Efficient Algorithm for Bandit Linear Optimization][abernethy] was very good. I've since skimmed through it and it is a very nice paper, well-written and replete with interesting connections.\n\n[ailon]: http://colt2008.cs.helsinki.fi/papers/32-Ailon.pdf\n[balcan hanneke]: http://colt2008.cs.helsinki.fi/papers/108-Balcan.pdf\n[balcan blum]: http://colt2008.cs.helsinki.fi/papers/86-Balcan.pdf\n[sridharan]: http://colt2008.cs.helsinki.fi/papers/94-Sridharan.pdf \n[abernethy]: http://colt2008.cs.helsinki.fi/papers/123-Abernethy.pdf\n\n -50 COLT 2008 Highlights 46-revision-2 2008-07-26 08:20:39 2008-07-26 08:20:39 inherit I'm a little late to the party since several machine learning bloggers have already noted their favourite papers at the recent joint [ICML][]/[UAI][]/[COLT][] conferences in Helsinki. \n\n[John][] listed a few COLT papers he liked, [Hal][] has covered some tutorials as well as several ICML and a few UAI and COLT papers, while [Jurgen][] has given an overview of the [non-parametric Bayes workshop][npbayes].\n\n[icml]: http://icml2008.cs.helsinki.fi/\n[uai]: http://uai2008.cs.helsinki.fi/\n[colt]: http://colt2008.cs.helsinki.fi/\n\n[john]: http://hunch.net/?p=341\n[hal]: http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html\n[jurgen]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\n[npbayes]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\n\nI didn't make it to the main ICML sessions but I did catch the workshop on evaluation in machine learning.
Since I've already [written about][evaluation] that and didn't attend any of the UAI sessions, I'll focus on the COLT stuff I found interesting.\n\n[evaluation]: http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/\n\nThe joint COLT/UAI invited talks were all excellent and covered a diverse range of topics. [Robin Hanson][] gave a great introduction to prediction markets. It was clear he'd given this type of talk before as he handled the many subsequent questions directly and decisively. I'm really interested in the work being done here so I'll write more about prediction markets in a later post. \n\n[Gábor Lugosi][]'s talk covered a number of important concentration inequalities, focusing on the logarithmic Sobolev inequalities. It was a perfect example of a maths talk where details were eschewed without compromising accuracy in order to give insight into the main results. \n\n[gábor lugosi]: http://www.econ.upf.es/~lugosi/\n\n[Dan Klein][] presented some impressive results pertaining to unsupervised learning in three natural language problems: grammar refinement (inventing new annotations to improve parsing), coreference resolution (determining which nouns refer to the same thing) and lexicon translation (matching words across languages). By setting up simple Bayesian models and throwing a ton of unlabelled examples at them he was able to get results competitive with the best supervised learning approaches on some problems. \n\nThe lexicon translation was particularly impressive. Given a set of English documents and a set of Chinese documents his system was able to do a passable job of translating single words between the languages. What was impressive is that there was no information saying that, for example, this English document is a translation of that Chinese document. They could have all been pulled randomly from .co.uk and .cn sites. \n\nIf I understand it correctly, Klein's system simply looked for common patterns of words within documents of one language and then tried to match those patterns to similar patterns in the documents of the other. When the languages were "closer" - such as with Spanish and English - the system also made use of patterns of letters within words (e.g., "_direc_tion" and "_direc_ción") to find possible cognates. \n\n[Dan Klein]: http://www.cs.berkeley.edu/~klein/\n\nPeter Grünwald.\n\nPapers\n-------\n* [The True Sample Complexity of Active Learning][balcan hanneke] Balcan, Hanneke and Wortman.\n\n* [An Efficient Reduction of Ranking to Classification][ailon] Ailon and Mohri.\n\n* [Improved Guarantees for Learning with Similarity Functions][balcan blum] Balcan, Blum and\nSrebro\n\n* [An Information Theoretic Framework for Multi-view Learning][sridharan] by Sridharan and Kakade\n\nWhile I didn't attend the talk at COLT, a couple of people have told me that Abernethy et al.'s paper [Competing in the Dark: An Efficient Algorithm for Bandit Linear Optimization][abernethy] was very good.
I've since skimmed through it and it is a very nice paper, well-written and replete with interesting connections.\n\n[ailon]: http://colt2008.cs.helsinki.fi/papers/32-Ailon.pdf\n[balcan hanneke]: http://colt2008.cs.helsinki.fi/papers/108-Balcan.pdf\n[balcan blum]: http://colt2008.cs.helsinki.fi/papers/86-Balcan.pdf\n[sridharan]: http://colt2008.cs.helsinki.fi/papers/94-Sridharan.pdf \n[abernethy]: http://colt2008.cs.helsinki.fi/papers/123-Abernethy.pdf\n\n -51 COLT 2008 Highlights 46-revision-3 2008-07-26 08:21:28 2008-07-26 08:21:28 inherit I'm a little late to the party since several machine learning bloggers have already noted their favourite papers at the recent joint [ICML][]/[UAI][]/[COLT][] conferences in Helsinki. \n \n[John][] listed a few COLT papers he liked, [Hal][] has covered some tutorials as well as several ICML and a few UAI and COLT papers, while [Jurgen][] has given an overview of the [non-parametric Bayes workshop][npbayes]. \n \n[icml]: http://icml2008.cs.helsinki.fi/ \n[uai]: http://uai2008.cs.helsinki.fi/ \n[colt]: http://colt2008.cs.helsinki.fi/ \n \n[john]: http://hunch.net/?p=341 \n[hal]: http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html \n[jurgen]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html \n[npbayes]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html \n \nI didn't make it to the main ICML sessions but I did catch the workshop on evaluation in machine learning. Since I've already [written about][evaluation] that and didn't attend any of the UAI sessions, I'll focus on the COLT stuff I found interesting. \n \n[evaluation]: http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/ \n \nThe joint COLT/UAI invited talks were all excellent and covered a diverse range of topics. [Robin Hanson][] gave a great introduction to prediction markets. It was clear he'd given this type of talk before as he handled the many subsequent questions directly and decisively. I'm really interested in the work being done here so I'll write more about prediction markets in a later post. \n \n[Gábor Lugosi][]'s talk covered a number of important concentration inequalities, focusing on the logarithmic Sobolev inequalities. It was a perfect example of a maths talk where details were eschewed without compromising accuracy in order to give insight into the main results. \n \n[gábor lugosi]: http://www.econ.upf.es/~lugosi/ \n \n[Dan Klein][] presented some impressive results pertaining to unsupervised learning in three natural language problems: grammar refinement (inventing new annotations to improve parsing), coreference resolution (determining which nouns refer to the same thing) and lexicon translation (matching words across languages). By setting up simple Bayesian models and throwing a ton of unlabelled examples at them he was able to get results competitive with the best supervised learning approaches on some problems. \n \nThe lexicon translation was particularly impressive. Given a set of English documents and a set of Chinese documents his system was able to do a passable job of translating single words between the languages. What was impressive is that there was no information saying that, for example, this English document is a translation of that Chinese document. They could have all been pulled randomly from .co.uk and .cn sites. \n \nIf I understand it correctly, Klein's system simply looked for common patterns of words within documents of one language and then tried to match those patterns to similar patterns in the documents of the other. When the languages were "closer" - such as with Spanish and English - the system also made use of patterns of letters within words (e.g., "_direc_tion" and "_direc_ción") to find possible cognates.
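As a toy illustration of that letter-pattern ingredient (my own sketch, assuming plain edit distance rather than whatever string model Klein actually used), one could flag a word pair as a candidate cognate whenever its edit distance is small relative to word length:

```ruby
# Levenshtein edit distance via the standard dynamic programme.
def edit_distance(a, b)
  prev = (0..b.size).to_a
  a.each_char.with_index(1) do |ca, i|
    curr = [i]
    b.each_char.with_index(1) do |cb, j|
      cost = ca == cb ? 0 : 1
      curr << [prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + cost].min
    end
    prev = curr
  end
  prev.last
end

# Candidate cognates: edit distance small relative to the longer word.
def possible_cognates?(u, v, threshold = 0.35)
  edit_distance(u, v).to_f / [u.size, v.size].max <= threshold
end

p possible_cognates?("direction", "direccion")  #=> true
p possible_cognates?("direction", "semana")     #=> false
```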
\n \n[Dan Klein]: http://www.cs.berkeley.edu/~klein/ \n \nPeter Grünwald. \n \nPapers \n------- \n* [The True Sample Complexity of Active Learning][balcan hanneke] Balcan, Hanneke and Wortman. \n \n* [An Efficient Reduction of Ranking to Classification][ailon] Ailon and Mohri. \n \n* [Improved Guarantees for Learning with Similarity Functions][balcan blum] Balcan, Blum and \nSrebro \n \n* [An Information Theoretic Framework for Multi-view Learning][sridharan] by Sridharan and Kakade \n \nWhile I didn't attend the talk at COLT, a couple of people have told me that Abernethy et al.'s paper [Competing in the Dark: An Efficient Algorithm for Bandit Linear Optimization][abernethy] was very good. I've since skimmed through it and it is a very nice paper, well-written and replete with interesting connections. \n \n[ailon]: http://colt2008.cs.helsinki.fi/papers/32-Ailon.pdf \n[balcan hanneke]: http://colt2008.cs.helsinki.fi/papers/108-Balcan.pdf \n[balcan blum]: http://colt2008.cs.helsinki.fi/papers/86-Balcan.pdf \n[sridharan]: http://colt2008.cs.helsinki.fi/papers/94-Sridharan.pdf \n[abernethy]: http://colt2008.cs.helsinki.fi/papers/123-Abernethy.pdf \n \n -52 COLT 2008 Highlights 46-revision-4 2008-07-26 08:42:21 2008-07-26 08:42:21 inherit I'm a little late to the party since several machine learning bloggers have already noted their favourite papers at the recent joint [ICML][]/[UAI][]/[COLT][] conferences in Helsinki. \n\n[John][] listed a few COLT papers he liked, [Hal][] has covered some tutorials as well as several ICML and a few UAI and COLT papers, while [Jurgen][] has given an overview of the [non-parametric Bayes workshop][npbayes].\n\n[icml]: http://icml2008.cs.helsinki.fi/\n[uai]: http://uai2008.cs.helsinki.fi/\n[colt]: http://colt2008.cs.helsinki.fi/\n\n[john]: http://hunch.net/?p=341\n[hal]: http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html\n[jurgen]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\n[npbayes]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\n\nI didn't make it to the main ICML sessions but I did catch the workshop on evaluation in machine learning. Since I've already [written about][evaluation] that and didn't attend any of the UAI sessions, I'll focus on the COLT stuff I found interesting.\n\n[evaluation]: http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/\n\nThe joint COLT/UAI invited talks were all excellent and covered a diverse range of topics. [Robin Hanson][] gave a great introduction to prediction markets. It was clear he'd given this type of talk before as he handled the many subsequent questions directly and decisively. I'm really interested in the work being done here so I'll write more about prediction markets in a later post. \n\n[robin hanson]: \n\n[Gábor Lugosi][]'s talk covered a number of important concentration inequalities, focusing on the logarithmic Sobolev inequalities. It was a perfect example of a maths talk where details were eschewed without compromising accuracy in order to give insight into the main results.
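For readers who haven't met concentration inequalities before, a canonical example of the kind of result surveyed (my choice of illustration, not necessarily one from the talk) is McDiarmid's bounded differences inequality: if $X_1,\dots,X_n$ are independent and changing the $i$-th argument of $f$ changes its value by at most $c_i$, then

$$
\Pr\left[ f(X_1,\dots,X_n) - \mathbb{E}\, f(X_1,\dots,X_n) \geq t \right] \;\leq\; \exp\!\left( -\frac{2t^2}{\sum_{i=1}^n c_i^2} \right).
$$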
\n\n[gábor lugosi]: http://www.econ.upf.es/~lugosi/\n\n[Dan Klein][] presented some impressive results pertaining to unsupervised learning in three natural language problems: grammar refinement (inventing new annotations to improve parsing), coreference resolution (determining which nouns refer to the same thing) and lexicon translation (matching words across languages). By setting up simple Bayesian models and throwing a ton of unlabelled examples at them he was able to get results competitive with the best supervised learning approaches on some problems. \n\nThe lexicon translation was particularly impressive. Given a set of English documents and a set of Chinese documents his system was able to do a passable job of translating single words between the languages. What was impressive is that there was no information saying that, for example, this English document is a translation of that Chinese document. They could have all been pulled randomly from .co.uk and .cn sites. \n\nIf I understand it correctly, Klein's system simply looked for common patterns of words within documents of one language and then tried to match those patterns to similar patterns in the documents of the other. When the languages were "closer" - such as with Spanish and English - the system also made use of patterns of letters within words (e.g., "direction" and "dirección") to find possible cognates. \n\n[Dan Klein]: http://www.cs.berkeley.edu/~klein/\n\nPeter Grünwald.\n\nPapers\n-------\n* [The True Sample Complexity of Active Learning][balcan hanneke] Balcan, Hanneke and Wortman.\n\n* [An Efficient Reduction of Ranking to Classification][ailon] Ailon and Mohri.\n\n* [Improved Guarantees for Learning with Similarity Functions][balcan blum] Balcan, Blum and\nSrebro\n\n* [An Information Theoretic Framework for Multi-view Learning][sridharan] by Sridharan and Kakade\n\nWhile I didn't attend the talk at COLT, a couple of people have told me that Abernethy et al.'s paper [Competing in the Dark: An Efficient Algorithm for Bandit Linear Optimization][abernethy] was very good. I've since skimmed through it and it is a very nice paper, well-written and replete with interesting connections.\n\n[ailon]: http://colt2008.cs.helsinki.fi/papers/32-Ailon.pdf\n[balcan hanneke]: http://colt2008.cs.helsinki.fi/papers/108-Balcan.pdf\n[balcan blum]: http://colt2008.cs.helsinki.fi/papers/86-Balcan.pdf\n[sridharan]: http://colt2008.cs.helsinki.fi/papers/94-Sridharan.pdf \n[abernethy]: http://colt2008.cs.helsinki.fi/papers/123-Abernethy.pdf\n\n -53 COLT 2008 Highlights 46-revision-5 2008-07-27 11:39:02 2008-07-27 11:39:02 inherit I'm a little late to the party since several machine learning bloggers have already noted their favourite papers at the recent joint [ICML][]/[UAI][]/[COLT][] conferences in Helsinki.
\n\n[John][] listed a few COLT papers he liked, [Hal][] has covered some tutorials as well as several ICML and a few UAI and COLT papers, while [Jurgen][] has given an overview of the [non-parametric Bayes workshop][npbayes].\n\n[icml]: http://icml2008.cs.helsinki.fi/\n[uai]: http://uai2008.cs.helsinki.fi/\n[colt]: http://colt2008.cs.helsinki.fi/\n\n[john]: http://hunch.net/?p=341\n[hal]: http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html\n[jurgen]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\n[npbayes]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\n\nI didn't make it to the main ICML sessions but I did catch the workshop on evaluation in machine learning. Since I've already [written about][evaluation] that and didn't attend any of the UAI sessions, I'll focus on the COLT stuff I found interesting.\n\n[evaluation]: http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/\n\nThe joint COLT/UAI invited talks were all excellent and covered a diverse range of topics. [Robin Hanson][] gave a great introduction to prediction markets. It was clear he'd given this type of talk before as he handled the many subsequent questions directly and decisively. I'm really interested in the work being done here so I'll write more about prediction markets in a later post. \n\n[robin hanson]: http://hanson.gmu.edu/home.html\n\n[Gábor Lugosi][]'s talk covered a number of important concentration inequalities, focusing on the logarithmic Sobolev inequalities. It was a perfect example of a maths talk where details were eschewed without compromising accuracy in order to give insight into the main results. \n\n[gábor lugosi]: http://www.econ.upf.es/~lugosi/\n\n[Dan Klein][] presented some impressive results pertaining to unsupervised learning in three natural language problems: grammar refinement (inventing new annotations to improve parsing), coreference resolution (determining which nouns refer to the same thing) and lexicon translation (matching words across languages). By setting up simple Bayesian models and throwing a ton of unlabelled examples at them he was able to get results competitive with the best supervised learning approaches on some problems. \n\nThe lexicon translation was particularly impressive. Given a set of English documents and a set of Chinese documents his system was able to do a passable job of translating single words between the languages. What was impressive is that there was no information saying that, for example, this English document is a translation of that Chinese document. They could have all been pulled randomly from .co.uk and .cn sites. \n\nIf I understand it correctly, Klein's system simply looked for common patterns of words within documents of one language and then tried to match those patterns to similar patterns in the documents of the other. When the languages were "closer" - such as with Spanish and English - the system also made use of patterns of letters within words (e.g., "direction" and "dirección") to find possible cognates. \n\n[Dan Klein]: http://www.cs.berkeley.edu/~klein/\n\nThere were a variety of good papers at COLT this year. Of the talks I saw, two stood out for me.\n\n[The True Sample Complexity of Active Learning][balcan hanneke] by Balcan, Hanneke and Wortman showed the importance of choosing the right theoretical model for analysis. In active learning the learner is able to choose which unlabelled examples have their labels revealed.
\nIntuitively, one would think that this should make learning easier than normal supervised learning where the learner has no say in the matter. \n\nPrevious results showed that this was basically not the case. Subtly, those results asked that the active learner achieve a certain error rate but also _verify_ that that rate was achieved. What Nina and her co-authors showed was that if you remove this extra requirement then active learning does indeed make learning much easier, often with exponential improvements in sample complexity over "passive" learning.\n\n[balcan hanneke]: http://colt2008.cs.helsinki.fi/papers/108-Balcan.pdf\n\n[An Efficient Reduction of Ranking to Classification][ailon] by Ailon and Mohri was also impressive. They build on earlier work that showed how a ranking problem can be reduced to learning a binary preference relation between the items to be ranked. One crucial part of this reduction is turning a learnt preference relation into a ranking. That is, taking pair-wise assessments of relative item quality and laying out all those items along a line so as to best preserve those pair-wise relationships. \n\nWhat Ailon and Mohri show is that simply applying a randomised quicksort to the pair-wise comparisons for n items will give a good reduction to a ranking in O(n log n) time. "Good" here means that the regret of the reduction-based ranking over the best possible is bounded by the regret of the classifier that learns the preference relation over the best possible classification. Furthermore, if you are only interested in the top k of n items you can get a good ranking in O(k log k + n) time. What's particularly nice about this work is that the tools they use to analyse randomised quicksort are very general and will probably find other applications.\n\n[ailon]: http://colt2008.cs.helsinki.fi/papers/32-Ailon.pdf\n\nFinally, while I didn't attend the talk at COLT, a couple of people have told me that Abernethy et al.'s paper [Competing in the Dark: An Efficient Algorithm for Bandit Linear Optimization][abernethy] was very good. I've since skimmed through it and it is a very nice paper, well-written and replete with interesting connections. I'm not that familiar with bandit learning work but this paper has a good summary of recent results and is intriguing enough to make me want to investigate further.\n\n[sridharan]: http://colt2008.cs.helsinki.fi/papers/94-Sridharan.pdf \n[abernethy]: http://colt2008.cs.helsinki.fi/papers/123-Abernethy.pdf\n\n -54 COLT 2008 Highlights 46-revision-6 2008-07-27 11:40:46 2008-07-27 11:40:46 inherit I'm a little late to the party since several machine learning bloggers have already noted their favourite papers at the recent joint [ICML][]/[UAI][]/[COLT][] conferences in Helsinki. \n \n[John][] listed a few COLT papers he liked, [Hal][] has covered some tutorials as well as several ICML and a few UAI and COLT papers, while [Jurgen][] has given an overview of the [non-parametric Bayes workshop][npbayes]. \n \n[icml]: http://icml2008.cs.helsinki.fi/ \n[uai]: http://uai2008.cs.helsinki.fi/ \n[colt]: http://colt2008.cs.helsinki.fi/ \n \n[john]: http://hunch.net/?p=341 \n[hal]: http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html \n[jurgen]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html \n[npbayes]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html \n \nI didn't make it to the main ICML sessions but I did catch the workshop on evaluation in machine learning.
Since I've already [written about][evaluation] that and didn't attend any of the UAI sessions, I'll focus on the COLT stuff I found interesting. \n \n[evaluation]: http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/ \n \nThe joint COLT/UAI invited talks were all excellent and covered a diverse range of topics. [Robin Hanson][] gave a great introduction to prediction markets. It was clear he'd given this type of talk before as he handled the many subsequent questions directly and decisively. I'm really interested in the work being done here so I'll write more about prediction markets in a later post. \n \n[robin hanson]: http://hanson.gmu.edu/home.html \n \n[Gábor Lugosi][]'s talk covered a number of important concentration inequalities, focusing on the logarithmic Sobolev inequalities. It was a perfect example of a maths talk where details were eschewed without compromising accuracy in order to give insight into the main results. \n \n[gábor lugosi]: http://www.econ.upf.es/~lugosi/ \n \n[Dan Klein][] presented some impressive results pertaining to unsupervised learning in three natural language problems: grammar refinement (inventing new annotations to improve parsing), coreference resolution (determining which nouns refer to the same thing) and lexicon translation (matching words across languages). By setting up simple Bayesian models and throwing a ton of unlabelled examples at them he was able to get results competitive with the best supervised learning approaches on some problems. \n \nThe lexicon translation was particularly impressive. Given a set of English documents and a set of Chinese documents his system was able to do a passable job of translating single words between the languages. What was impressive is that there was no information saying that, for example, this English document is a translation of that Chinese document. They could have all been pulled randomly from .co.uk and .cn sites. \n \nIf I understand it correctly, Klein's system simply looked for common patterns of words within documents of one language and then tried to match those patterns to similar patterns in the documents of the other. When the languages were "closer" - such as with Spanish and English - the system also made use of patterns of letters within words (e.g., "direction" and "dirección") to find possible cognates. \n \n[Dan Klein]: http://www.cs.berkeley.edu/~klein/ \n \nThere were a variety of good papers at COLT this year. Of the talks I saw, two stood out for me. \n \n[The True Sample Complexity of Active Learning][balcan hanneke] by Balcan, Hanneke and Wortman showed the importance of choosing the right theoretical model for analysis. In active learning the learner is able to choose which unlabelled examples have their labels revealed. \nIntuitively, one would think that this should make learning easier than normal supervised learning where the learner has no say in the matter. \n \nPrevious results showed that this was basically not the case. Subtly, those results asked that the active learner achieve a certain error rate but also _verify_ that that rate was achieved. What Nina and her co-authors showed was that if you remove this extra requirement then active learning does indeed make learning much easier, often with exponential improvements in sample complexity over "passive" learning. \n \n[balcan hanneke]: http://colt2008.cs.helsinki.fi/papers/108-Balcan.pdf \n \n[An Efficient Reduction of Ranking to Classification][ailon] by Ailon and Mohri was also impressive.
They build on earlier work that showed how a ranking problem can be reduced to learning a binary preference relation between the items to be ranked. One crucial part of this reduction is turning a learnt preference relation into a ranking. That is, taking pair-wise assessments of relative item quality and laying out all those items along a line so as to best preserve those pair-wise relationships. \n \nWhat Ailon and Mohri show is that simply applying a randomised quicksort to the pair-wise comparisons for n items will give a good reduction to a ranking in O(n log n) time. "Good" here means that the regret of the reduction-based ranking over the best possible is bounded by the regret of the classifier that learns the preference relation over the best possible classification. Furthermore, if you are only interested in the top k of n items you can get a good ranking in O(k log k + n) time. What's particularly nice about this work is that the tools they use to analyse randomised quicksort are very general and will probably find other applications. \n \n[ailon]: http://colt2008.cs.helsinki.fi/papers/32-Ailon.pdf \n \nFinally, while I didn't attend the talk at COLT, a couple of people have told me that Abernethy et al.'s paper [Competing in the Dark: An Efficient Algorithm for Bandit Linear Optimization][abernethy] was very good. I've since skimmed through it and it is a very nice paper, well-written and replete with interesting connections. I'm not that familiar with bandit learning work but this paper has a good summary of recent results and is intriguing enough to make me want to investigate further. \n \n[sridharan]: http://colt2008.cs.helsinki.fi/papers/94-Sridharan.pdf \n[abernethy]: http://colt2008.cs.helsinki.fi/papers/123-Abernethy.pdf \n \n -55 Prediction Markets 47-revision-2 2008-07-27 23:23:13 2008-07-27 23:23:13 inherit [Robin Hanson][] gave a great introduction to prediction markets, describing how they can be used to extract information through the use of market scoring rules. Essentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][].
He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making). -56 Prediction Markets 47-revision-3 2008-08-04 11:34:53 2008-08-04 11:34:53 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nMarket Scoring Rules\n------------------------\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information".
Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\n[book and market maker]: http://blog.commerce.net/?p=251 -57 Prediction Markets 47-revision-4 2008-08-04 12:12:29 2008-08-04 12:12:29 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\nExample: tossing a coin with unknown bias. A square scoring rule pays you 1-(1-r)^2 if the coin lands heads and 1-r^2 if it lands tails. To maximise your expected return you will report an r as close as possible to what you think the true probability is.
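A quick numerical check of that claim (a sketch, assuming the quadratic rule just described): the expected payoff under a true heads-probability p is maximised by reporting r = p.

```ruby
# Expected payoff of reporting r under the quadratic (Brier) scoring
# rule when the coin's true probability of heads is p.
def expected_payoff(p, r)
  p * (1 - (1 - r)**2) + (1 - p) * (1 - r**2)
end

p_true = 0.7
best_report = (0..100).map { |i| i / 100.0 }.max_by { |r| expected_payoff(p_true, r) }
puts best_report  #=> 0.7 -- honesty is the best policy
```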
\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\n[book and market maker]: http://blog.commerce.net/?p=251 -58 Prediction Markets 47-revision-5 2008-08-05 04:27:22 2008-08-05 04:27:22 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment.
Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\nExample: tossing a coin with unknown bias. A square scoring rule pays you 1-(1-r)^2 if the coin lands heads and 1-r^2 if it lands tails. To maximise your expected return you will report an r as close as possible to what you think the true probability is.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][].
He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism's effect on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251 -59 Prediction Markets 47-revision-6 2008-08-05 06:56:12 2008-08-05 06:56:12 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.
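In code, the arithmetic behind that trading decision is just the following (a toy sketch of my own, not anything from Hanson's talk):

```ruby
# A binary contract's worth to you is your probability of the event
# times its payout; you buy whenever the market's asking price is lower.
def contract_value(prob_event, payout = 1.0)
  prob_event * payout
end

my_chance_of_rain = 0.3
asking_price      = 0.22
puts contract_value(my_chance_of_rain)  #=> 0.3
puts asking_price < contract_value(my_chance_of_rain) ? "buy" : "pass"  #=> buy
```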
\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\nExample: tossing a coin with unknown bias. A square scoring rule pays you 1-(1-r)^2 if the coin lands heads and 1-r^2 if it lands tails. To maximise your expected return you will report an r as close as possible to what you think the true probability is.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought.
The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism's effect on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), -60 Prediction Markets 47-revision-7 2008-08-05 06:58:36 2008-08-05 06:58:36 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n \nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view. \n \n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/ \n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule \n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/ \n \nPrediction Markets \n--------------------- \nSuppose you really wanted to know whether or not \n \nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n \nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem. \n \nScoring Rules \n--------------- \nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n \nScoring rules are a class of reward schemes that encourage truthful reporting. \n \nExample: tossing a coin with unknown bias. A square scoring rule pays you 1-(1-r)^2 if the coin lands heads and 1-r^2 if it lands tails. To maximise your expected return you will report an r as close as possible to what you think the true probability is. \n \nMarket Scoring Rules \n------------------------ \n \nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.
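A minimal sketch of the bookkeeping behind that back-and-forth (my own illustration, reusing the quadratic rule above): each trader who changes the market's report is paid, once the outcome is known, the difference between the scores of the new and the old report, so the market maker's total payout telescopes.

```ruby
# Score of report r under the quadratic rule, given the outcome.
def score(r, heads)
  heads ? 1 - (1 - r)**2 : 1 - r**2
end

reports = [0.5, 0.3, 0.6, 0.72]  # market maker's opening report, then three traders
heads   = true

# Trader t is paid score(new report) - score(previous report).
payments = reports.each_cons(2).map { |prev, curr| score(curr, heads) - score(prev, heads) }

# The sum telescopes to score(last) - score(first), bounding the market maker's loss.
puts payments.sum.round(9) == (score(reports.last, heads) - score(reports.first, heads)).round(9)
#=> true
```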
\n \n[This leads to telescoping rule for MSRs] \n \nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc. \n \nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out. \n \n[robin hanson]: http://hanson.gmu.edu/ \n \nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago. \n \n[david pennock]: http://dpennock.com/ \n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/ \n[ec08]: http://www.sigecom.org/ec08/ \n \nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making). \n \n \nResearch shows that, in the areas where they have been used, prediction markets are [powerful][]. \n \n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election. \n \n[powerful]: http://artificialmarkets.com/ \n[electoralmarkets]: http://www.electoralmarkets.com/ \n[john]: http://hunch.net/?p=396 \n[intrade]: http://www.intrade.com/ \n \nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism's effect on airline and oil companies; we bet against bad things happening to us when we take out insurance. \n \n[pam]: http://dpennock.com/pam.html \n \n \n[book and market maker]: http://blog.commerce.net/?p=251 \n \nReferences \n------------ \n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401) -61 Prediction Markets 47-revision-8 2008-08-05 23:55:25 2008-08-05 23:55:25 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets.
\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\nExample: tossing a coin with unknown bias. A square scoring rule pays you 1-(1-r)^2 if the coin lands heads and 1-r^2 if it lands tails. To maximise your expected return you will report an r as close as possible to what you think the true probability is.\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information".
Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -62 Prediction Markets 47-revision-9 2008-08-06 00:25:49 2008-08-06 00:25:49 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. 
Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say r -- and then I toss the coin. If it comes up heads then I pay you 1-(1-r)^2 \n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible.\n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". 
Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -63 Prediction Markets 47-revision-10 2008-08-06 00:27:06 2008-08-06 00:27:06 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. 
Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex> otherwise I pay you 1 - r^2.\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible.\n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". 
Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -64 Prediction Markets 47-revision-11 2008-08-06 00:39:22 2008-08-06 00:39:22 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. 
Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as\n[tex]\ndisplaymath \ns(r) = (1-(1-r)^2, 1-r^2).\n[/tex]\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as\n[tex]\ns(r)(w) = left<(1-(1-r)^2, 1-r^2), (1-w, w)right> = (1-(1-r)(1-w)\n[/tex]\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. 
People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -65 Prediction Markets 47-revision-12 2008-08-06 00:40:43 2008-08-06 00:40:43 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. 
\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector\n[tex]\ns(r) = (1-(1-r)^2, 1-r^2).\n[/tex]\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as\n[tex]\ns(r)(w) = left< (1-(1-r)^2, 1-r^2), (1-w, w) right> = (1-(1-r)^2)(1-w) + (1-r^2)w.\n[/tex]\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? 
This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -66 Prediction Markets 47-revision-13 2008-08-06 00:45:28 2008-08-06 00:45:28 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. 
I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = [1-(1-r)^2, 1-r^2].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\ndisplaystyle\ns(r)(w) = < [1-(1-r)^2, 1-r^2], [w, 1-w] > = (1-(1-r)^2)(1-w) + (1-r^2)w.\n[/tex]\n
\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -69 Prediction Markets 47-revision-16 2008-08-06 00:48:21 2008-08-06 00:48:21 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? 
\n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = [1-(1-r)^2, 1-r^2].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] displaystyle s(r)(w) = 1-(1-r)^2 , 1-r^2 ] [w, 1-w] = (1-(1-r)^2)w + (1-r^2)(1-w). [/tex]\n
\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -67 Prediction Markets 47-revision-14 2008-08-06 00:45:57 2008-08-06 00:45:57 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n \nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view. \n \n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/ \n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule \n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/ \n \nPrediction Markets \n--------------------- \nSuppose you really wanted to know whether or not \n \nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n \nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem. \n \nScoring Rules \n--------------- \nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n \n(See Jeffrey[^1] for a discussion of betting arguments for probability). \n \nScoring rules are a class of reward schemes that encourage truthful reporting. \n \n \nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? 
\n \nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function \n[tex] \ns(r) = [1-(1-r)^2, 1-r^2]. \n[/tex] \n \nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails. \n
\n[tex] \ndisplaystyle \ns(r)(w) = < [1-(1-r)^2, 1-r^2], [w, 1-w] > = (1-(1-r)^2)w + (1-r^2)(1-w). \n[/tex] \n
\n \nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n \n \n \nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post. \n \nMarket Scoring Rules \n------------------------ \n \nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's. \n \n[This leads to telescoping rule for MSRs] \n \nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc. \n \nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out. \n \n[robin hanson]: http://hanson.gmu.edu/ \n \nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago. \n \n[david pennock]: http://dpennock.com/ \n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/ \n[ec08]: http://www.sigecom.org/ec08/ \n \nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making). \n \n \nResearch shows that in the areas they have been used prediction markets are [powerful][]. \n \n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election. \n \n[powerful]: http://artificialmarkets.com/ \n[electoralmarkets]: http://www.electoralmarkets.com/ \n[john]: http://hunch.net/?p=396 \n[intrade]: http://www.intrade.com/ \n \nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance. 
\n \n[pam]: http://dpennock.com/pam.html \n \n \n[book and market maker]: http://blog.commerce.net/?p=251 \n \nReferences \n------------ \n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401) \n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -68 Prediction Markets 47-revision-15 2008-08-06 00:46:35 2008-08-06 00:46:35 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n \nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view. \n \n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/ \n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule \n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/ \n \nPrediction Markets \n--------------------- \nSuppose you really wanted to know whether or not \n \nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n \nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem. \n \nScoring Rules \n--------------- \nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n \n(See Jeffrey[^1] for a discussion of betting arguments for probability). \n \nScoring rules are a class of reward schemes that encourage truthful reporting. \n \n \nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n \nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function \n[tex] \ns(r) = [1-(1-r)^2, 1-r^2]. 
\n[/tex] \n \nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails. \n
\n[tex] \ndisplaystyle \ns(r)(w) = [1-(1-r)^2, 1-r^2] cdot [w, 1-w] = (1-(1-r)^2)w + (1-r^2)(1-w). \n[/tex] \n
\n \nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n \n \n \nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post. \n \nMarket Scoring Rules \n------------------------ \n \nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's. \n \n[This leads to telescoping rule for MSRs] \n \nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc. \n \nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out. \n \n[robin hanson]: http://hanson.gmu.edu/ \n \nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago. \n \n[david pennock]: http://dpennock.com/ \n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/ \n[ec08]: http://www.sigecom.org/ec08/ \n \nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making). \n \n \nResearch shows that in the areas they have been used prediction markets are [powerful][]. \n \n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election. \n \n[powerful]: http://artificialmarkets.com/ \n[electoralmarkets]: http://www.electoralmarkets.com/ \n[john]: http://hunch.net/?p=396 \n[intrade]: http://www.intrade.com/ \n \nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance. 
The enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.

I asked Robin a pretty naïve question while speaking to him after his talk: how do these markets get started, since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events, and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.

[robin hanson]: http://hanson.gmu.edu/

These markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT, but in Chicago.

[david pennock]: http://dpennock.com/
[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/
[ec08]: http://www.sigecom.org/ec08/
In these markets, information becomes a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).

Research shows that, in the areas where they have been used, prediction markets are [powerful][].

[John][] recently pointed out the [electoralmarkets][] site, which takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.

[powerful]: http://artificialmarkets.com/
[electoralmarkets]: http://www.electoralmarkets.com/
[john]: http://hunch.net/?p=396
[intrade]: http://www.intrade.com/

David Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. The main points of David's argument are: the terrorist activities made up a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism via airline and oil companies; and we already bet against bad things happening to us whenever we take out insurance.
[pam]: http://dpennock.com/pam.html

[book and market maker]: http://blog.commerce.net/?p=251

References
------------
[^1]: R. Jeffrey, [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), Cambridge University Press (2004). [Review](http://ndpr.nd.edu/review.cfm?id=4401)
[^2]: N. Lambert, D. Pennock, Y. Shoham, [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), Proceedings of the ACM Conference on Electronic Commerce (2008).
\n[tex] \ndisplaystyle \nbegin{eqnarray*} \ns(r)(w) & = & langle left[ 1-(1-r)^2 , 1-r^2 right], left[ w , 1-w right] rangle \\ \n & = & x + y. \nend{eqnarray*} \n[/tex] \n
\n \nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n \n \n \nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post. \n \nMarket Scoring Rules \n------------------------ \n \nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's. \n \n[This leads to telescoping rule for MSRs] \n \nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc. \n \nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out. \n \n[robin hanson]: http://hanson.gmu.edu/ \n \nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago. \n \n[david pennock]: http://dpennock.com/ \n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/ \n[ec08]: http://www.sigecom.org/ec08/ \n \nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making). \n \n \nResearch shows that in the areas they have been used prediction markets are [powerful][]. \n \n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election. \n \n[powerful]: http://artificialmarkets.com/ \n[electoralmarkets]: http://www.electoralmarkets.com/ \n[john]: http://hunch.net/?p=396 \n[intrade]: http://www.intrade.com/ \n \nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance. 
\n \n[pam]: http://dpennock.com/pam.html \n \n \n[book and market maker]: http://blog.commerce.net/?p=251 \n \nReferences \n------------ \n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401) \n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -79 Prediction Markets 47-revision-26 2008-08-06 00:55:45 2008-08-06 00:55:45 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n \nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view. \n \n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/ \n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule \n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/ \n \nPrediction Markets \n--------------------- \nSuppose you really wanted to know whether or not \n \nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n \nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem. \n \nScoring Rules \n--------------- \nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n \n(See Jeffrey[^1] for a discussion of betting arguments for probability). \n \nScoring rules are a class of reward schemes that encourage truthful reporting. \n \n \nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n \nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function \n[tex] \ns(r) = left[ 1-(1-r)^2 , 1-r^2 right]. 
\n[/tex] \n \nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails. \n
\n[tex] \ndisplaystyle \nbegin{array}{rcl} \ns(r)(w) & = & langle left[ 1-(1-r)^2 , 1-r^2 right], left[ w , 1-w right] rangle \\ \n & = & x + y. \nend{array} \n[/tex] \n
\n \nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n \n \n \nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post. \n \nMarket Scoring Rules \n------------------------ \n \nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's. \n \n[This leads to telescoping rule for MSRs] \n \nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc. \n \nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out. \n \n[robin hanson]: http://hanson.gmu.edu/ \n \nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago. \n \n[david pennock]: http://dpennock.com/ \n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/ \n[ec08]: http://www.sigecom.org/ec08/ \n \nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making). \n \n \nResearch shows that in the areas they have been used prediction markets are [powerful][]. \n \n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election. \n \n[powerful]: http://artificialmarkets.com/ \n[electoralmarkets]: http://www.electoralmarkets.com/ \n[john]: http://hunch.net/?p=396 \n[intrade]: http://www.intrade.com/ \n \nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance. 
\n \n[pam]: http://dpennock.com/pam.html \n \n \n[book and market maker]: http://blog.commerce.net/?p=251 \n \nReferences \n------------ \n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401) \n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -80 Prediction Markets 47-revision-27 2008-08-06 01:19:09 2008-08-06 01:19:09 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. 
When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \ndisplaystyle\nbegin{array}{rcl}\ns(r)(w) & = & langle left[ 1-(1-r)^2 , 1-r^2 right], left[ w , 1-w right] rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\nend{array}\n[/tex]\n
\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads you will see that\n
\n[tex]\nmathbb{E}_p[ s(r)\n[/tex]\n
\n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I'll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. 
Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -81 Prediction Markets 47-revision-28 2008-08-06 01:40:02 2008-08-06 01:40:02 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^1] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \ndisplaystyle\nbegin{array}{rcl}\ns(r)(w) & = & langle left[ 1-(1-r)^2 , 1-r^2 right], left[ w , 1-w right] rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\nend{array}\n[/tex]\n
\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., w = 1), with a bit of algebra you will see that\n
\n[tex]\ndisplaystyle\nmathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^2] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\ndisplaystyle\nmathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely but not the true probability [tex]p[/tex]\n\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -82 Prediction Markets 47-revision-29 2008-08-06 01:40:16 2008-08-06 01:40:16 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n \nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view. \n \n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/ \n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule \n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/ \n \nPrediction Markets \n--------------------- \nSuppose you really wanted to know whether or not \n \nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n \nEssentially, people trade in contracts such as "Pays $1 if it rains next Monday". If you're 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract's expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you'll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem. \n \nScoring Rules \n--------------- \nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^1] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. 
\nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n \nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n \nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function \n[tex] \ns(r) = left[ 1-(1-r)^2 , 1-r^2 right]. \n[/tex] \n \nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails. \n
[tex]
\displaystyle
\begin{array}{rcl}
s(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\
 & = & (1-(1-r)^2)w + (1-r^2)(1-w).
\end{array}
[/tex]
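To make the inner product concrete, here is a minimal Ruby sketch of the payout (the helper names are just illustrative):

    # Brier payout vector for a report r, and the payment received once
    # the outcome w (1 for heads, 0 for tails) is known.
    s = lambda { |r| [1 - (1 - r)**2, 1 - r**2] }
    payout = lambda do |r, w|
      vec = s.call(r)
      vec[0] * w + vec[1] * (1 - w)
    end

    puts payout.call(0.8, 1)   # heads: 1 - (1 - 0.8)**2, ~0.96
    puts payout.call(0.8, 0)   # tails: 1 - 0.8**2,       ~0.36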
To maximise your expected return you should report an [tex]r[/tex] that is as close as possible to what you believe the true probability of heads to be. Why is this? If you write out your expected payment when [tex]p[/tex] is the true probability of heads (that is, of [tex]w = 1[/tex]), a bit of algebra shows that
[tex]
\displaystyle
\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2.
[/tex]
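Spelled out, that algebra is just an expansion of the expectation:

[tex]
\displaystyle
\begin{array}{rcl}
\mathbb{E}_p[ s(r)(w) ] & = & p\left(1-(1-r)^2\right) + (1-p)\left(1-r^2\right) \\
 & = & 1 - p + 2pr - r^2 \\
 & = & p^2 - p + 1 - (p - r)^2.
\end{array}
[/tex]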
This is clearly maximised only when [tex]p = r[/tex]. That is, you maximise your expected payment by reporting the true probability of heads.

The rule used here is known as the _Brier score_ and it is one of a whole class of _proper_ scoring rules, defined by the property that they are maximised by reporting the true probability of an event. The study of these rules goes back at least to [Savage][savage] in 1971, and it turns out the class has quite a lot of structure. Recently, Lambert et al.[^2] characterised which scoring rules are proper and went further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.

[savage]: http://www.citeulike.org/user/mdreid/article/2309030

As a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not proper. This is easy to see since its expectation is
[tex]
\displaystyle
\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).
[/tex]

If [tex]p > 0.5[/tex] then [tex]2p - 1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]; alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means the linear rule elicits a correct _classification_ of whether heads is more likely than tails, but not the true probability [tex]p[/tex].
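To see the difference between the two rules numerically, here is a short Ruby sketch that scans a grid of reports and finds the one with the highest expected payment when the true probability of heads is 0.7:

    # Expected payment for a report r given a true probability p of heads,
    # where rule.call(r) = [payout if heads, payout if tails].
    def expected_payment(p, rule, r)
      payout = rule.call(r)
      p * payout[0] + (1 - p) * payout[1]
    end

    brier  = lambda { |r| [1 - (1 - r)**2, 1 - r**2] }
    linear = lambda { |r| [r, 1 - r] }

    p_true  = 0.7
    reports = (0..100).map { |i| i / 100.0 }

    { 'Brier' => brier, 'Linear' => linear }.each do |name, rule|
      best = reports.max_by { |r| expected_payment(p_true, rule, r) }
      puts "#{name} rule is maximised at r = #{best}"
    end
    # Brier rule is maximised at r = 0.7  -- the true probability
    # Linear rule is maximised at r = 1.0 -- only the classification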
Market Scoring Rules
------------------------
Now suppose someone else thinks your reported guess is wrong. What would she be willing to pay to take over the report and its expected return? This can go on for as long as there is a perceived discrepancy between the current report and someone's belief, and chaining the payments together in this way leads to the telescoping rule behind market scoring rules. [David Pennock has a nice analysis][pennock] of Hanson's logarithmic market scoring rule along these lines.

[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/
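As a rough sketch of that telescoping (assuming, as in Hanson's scheme, that each trader is paid the difference between the score of their report and the score of the previous report once the outcome is known):

    # Each trader updates the public report; at maturity they receive
    # brier(their report, w) - brier(previous report, w).
    def brier(r, w)
      w == 1 ? 1 - (1 - r)**2 : 1 - r**2
    end

    initial = 0.5                  # set by whoever wants the information
    reports = [0.6, 0.72, 0.7]     # successive traders' updated reports
    w       = 1                    # suppose the coin lands heads

    payments = ([initial] + reports).each_cons(2).map do |prev, curr|
      brier(curr, w) - brier(prev, w)
    end

    puts "Individual payments: #{payments.inspect}"
    puts "Market maker pays:   #{payments.inject(:+)}"
    puts "Telescoped:          #{brier(reports.last, w) - brier(initial, w)}"
    # The sum telescopes, so the market maker's total liability depends
    # only on the first and last reports.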
The enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one being thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, and so on.

I asked Robin a pretty naïve question while speaking to him after his talk: how do these markets get started, since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events, and any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.

[robin hanson]: http://hanson.gmu.edu/

These markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT, but in Chicago.

[david pennock]: http://dpennock.com/
[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/
[ec08]: http://www.sigecom.org/ec08/

In markets like these, information becomes a commodity. Machine learning may make certain simple kinds of decision making a commodity too (by analogy: habit and instinct leave the human mind free for higher-order planning and decision-making).

Research suggests that, in the areas where they have been used, prediction markets are [powerful][].

[John][] recently pointed out the [electoralmarkets][] site, which takes data from [Intrade][] to track, state by state, the predicted results of the upcoming US federal election.

[powerful]: http://artificialmarkets.com/
[electoralmarkets]: http://www.electoralmarkets.com/
[john]: http://hunch.net/?p=396
[intrade]: http://www.intrade.com/

David Pennock also puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. The main points of his argument are: terrorism-related activities made up only a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism via airline and oil stocks; and we already bet against bad things happening to us whenever we take out insurance.

[pam]: http://dpennock.com/pam.html

[book and market maker]: http://blog.commerce.net/?p=251

References
------------
[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), R. Jeffrey ([Review](http://ndpr.nd.edu/review.cfm?id=4401)).
[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).
\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \ndisplaystyle\nbegin{array}{rcl}\ns(r)(w) & = & langle left[ 1-(1-r)^2 , 1-r^2 right], left[ w , 1-w right] rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\nend{array}\n[/tex]\n
\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., w = 1), with a bit of algebra you will see that\n
\n[tex]\ndisplaystyle\nmathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^2] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\ndisplaystyle\nmathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -84 Prediction Markets and Scoring Rules 47-revision-31 2008-08-06 07:08:52 2008-08-06 07:08:52 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nMarkets and Prediction\n--------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n\nInstead of stocks that pay dividends, participants in predication markets trade in contracts about future events. For example we can consider the contract "Pays $1 to bearer if it rains next Monday". If I'm 50% sure it will rain that day then the expected value of that contract to me $0.50. If you think there is a 30% chance of rain then the contract's expected value for you is $0.30. \n\nSuppose you own the contract and I offer you $0.50 for it. 
You would happily trade it since you would then bank the $0.50 which is 20 cents more than you expected to receive from keeping the contract until maturity.\n\nThe key idea of prediction markets is that if you allow many people to trade contracts on future events the market price of the contract will reflect the true chance of the event as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^1] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \ndisplaystyle\nbegin{array}{rcl}\ns(r)(w) & = & langle left[ 1-(1-r)^2 , 1-r^2 right], left[ w , 1-w right] rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\nend{array}\n[/tex]\n
\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., w = 1), with a bit of algebra you will see that\n
\n[tex]\ndisplaystyle\nmathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^2] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\ndisplaystyle\nmathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a similar analysis of Hanson's logarithmic market scoring rule that helped me understand the example I pre\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument are: terrorist activities made up only a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism via airline and oil company stocks; and we already bet against bad things happening to us whenever we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -85 Prediction Markets and Scoring Rules 47-revision-32 2008-08-06 07:10:26 2008-08-06 07:10:26 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through [market scoring rules][]. I've been investigating certain aspects of "vanilla" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hansons-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nMarkets and Prediction\n--------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n\nInstead of stocks that pay dividends, participants in prediction markets trade in contracts about future events. For example, we can consider the contract "Pays $1 to bearer if it rains next Monday". If I'm 50% sure it will rain that day then the expected value of that contract to me is $0.50. If you think there is a 30% chance of rain then the contract's expected value for you is $0.30. \n\nSuppose you own the contract and I offer you $0.50 for it. 
You would happily trade it since you would then bank the $0.50, which is 20 cents more than you expected to receive from keeping the contract until maturity.\n\nThe key idea of prediction markets is that if you allow many people to trade contracts on future events the market price of the contract will reflect the true chance of the event as more and more information is brought to bear on the prediction problem.\n\nThe trick as a "market maker" is to be able to update the prices you set after each trade in order to converge to the true probability of an event.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^1] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is to set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars; otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
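To make the payment rule concrete, here is a minimal Ruby sketch of the wager (the helper names `brier_payment` and `expected_payment` are mine, not from the post):

    # Brier-score payment for a report r once the coin lands (w = 1 for heads).
    def brier_payment(r, w)
      w == 1 ? 1 - (1 - r)**2 : 1 - r**2
    end

    # Expected payment when the true probability of heads is p and you report r.
    def expected_payment(p, r)
      p * brier_payment(r, 1) + (1 - p) * brier_payment(r, 0)
    end

    printf("%.2f\n", expected_payment(0.7, 0.7))  # truthful report   => 0.79
    printf("%.2f\n", expected_payment(0.7, 1.0))  # overstated report => 0.70

Reporting the truth beats overstating here, which is exactly what the algebra below establishes in general.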
\n\nTo maximise your expected return you should report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (that is, the probability that [tex]w = 1[/tex]):\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p(1-(1-r)^2) + (1-p)(1-r^2),\n[/tex]\nwhich, with a bit of algebra, rearranges to\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^2] have characterised which scoring rules are proper and gone further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -86 Prediction Markets and Scoring Rules 47-revision-33 2008-08-07 00:09:16 2008-08-07 00:09:16 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nMarkets and Prediction\n--------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n\nInstead of stocks that pay dividends, participants in predication markets trade in contracts about future events. For example we can consider the contract "Pays $1 to bearer if it rains next Monday". If I'm 50% sure it will rain that day then the expected value of that contract to me $0.50. If you think there is a 30% chance of rain then the contract's expected value for you is $0.30. \n\nSuppose you own the contract and I offer you $0.50 for it. You would happily trade it since you would then bank the $0.50 which is 20 cents more than you expected to receive from keeping the contract until maturity.\n\nThe key idea of prediction markets is that if you allow many people to trade contracts on future events the market price of the contract will reflect the true chance of the event as more and more information is brought to bear on the prediction problem.\n\n[TODO: The trick as a "market maker" is to be able to update the prices you set after each trade in order to converge to the true probability of an event.]\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. 
How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^2] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is to set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars; otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
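As a concrete instance (the numbers are mine, not from the post): reporting [tex]r = 0.7[/tex] commits me to paying you [tex]1-(0.3)^2 = 0.91[/tex] dollars on heads and [tex]1-(0.7)^2 = 0.51[/tex] dollars on tails, i.e. the payment vector [tex]s(0.7) = [0.91, 0.51][/tex].\n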
\n\nTo maximise your expected return you should report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (that is, the probability that [tex]w = 1[/tex]), a bit of algebra shows that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^3] have characterised which scoring rules are proper and gone further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument are: terrorist activities made up only a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism via airline and oil company stocks; and we already bet against bad things happening to us whenever we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^3]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -87 Prediction Markets and Scoring Rules 47-revision-34 2008-08-07 06:52:12 2008-08-07 06:52:12 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nMarkets and Prediction\n--------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n\nInstead of stocks that pay dividends, participants in prediction markets trade in contracts about future events. For example, we can consider contracts for whether or not it rains next Monday. For a binary event like this the contracts come in the pair: A) "Pays $1 to bearer if it rains next Monday" and B) "Pays $1 to bearer if it does not rain next Monday". If I'm 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n\nIf I'm selling these contracts I would set an initial price for both at $0.50. If you buy one of B) it suggests that you think the chance of rain is less than 0.5. In this case I should update my prices to reflect what this trade has told me about what you think the chances are. If you buy another, I should raise my price slightly again. 
Continuing this, eventually I'll reach a price at which you no longer expect to gain any benefit from buying more contracts.\n\nSuppose you own contract A and I offer you $0.50 for it. You would happily trade it since you would then bank the $0.50, which is 20 cents more than you expected to receive from keeping the contract until maturity.\n\nThe key idea of prediction markets is that if you allow many people to trade contracts on future events the market price of the contract will reflect the true chance of the event as more and more information is brought to bear on the prediction problem.\n\n[TODO: The trick as a "market maker" is to be able to update the prices you set after each trade in order to converge to the true probability of an event.]\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^2] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is to set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars; otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
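A quick numerical check of the claims in this section -- that the Brier rule is maximised by the truthful report, while the linear rule in the aside further below is maximised at a boundary -- can be sketched in a few lines of Ruby (the function names are mine):

    # Expected payment of the Brier rule for true probability p and report r.
    def expected_brier(p, r)
      p * (1 - (1 - r)**2) + (1 - p) * (1 - r**2)
    end

    # Expected payment of the (improper) linear rule s(r) = [r, 1-r].
    def expected_linear(p, r)
      p * r + (1 - p) * (1 - r)
    end

    p_true  = 0.7
    reports = (0..100).map { |i| i / 100.0 }

    puts reports.max_by { |r| expected_brier(p_true, r) }   # => 0.7 (the truth)
    puts reports.max_by { |r| expected_linear(p_true, r) }  # => 1.0 (a boundary)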
\n\nTo maximise your expected return you should report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (that is, the probability that [tex]w = 1[/tex]), a bit of algebra shows that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^3] have characterised which scoring rules are proper and gone further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^3]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -88 Prediction Markets and Scoring Rules 47-revision-35 2008-08-07 12:23:06 2008-08-07 12:23:06 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in predication markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n\nIf I'm 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n\nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. 
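The arithmetic behind a trade like the one above can be checked with a tiny Ruby sketch (all names are mine):

    # Contract B pays $1 if it does NOT rain next Monday.
    my_rain_belief   = 0.5  # so I price B at 1 - 0.5 = $0.50
    your_rain_belief = 0.3  # so B is worth 1 - 0.3 = $0.70 to you

    price_b = 1.0 - my_rain_belief
    value_b = 1.0 - your_rain_belief

    # Your expected profit from buying one copy of B at my price.
    printf("$%.2f\n", value_b - price_b)  # => $0.20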
\n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n\nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 it will rain and you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain and your 20 copies of contract B will be worth $20, so you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement was by John McCarthy[^2] in 1956 and a more in-depth study was given by Savage[^3] in 1971.\n\nThe central concept here is that of _elicitation_: how do you ensure that people report probabilities that reflect what they really believe? Proper scoring rules are reward schemes that encourage exactly this kind of truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is to set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars; otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
\n\nTo maximise your expected return you should report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (that is, the probability that [tex]w = 1[/tex]), a bit of algebra shows that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and gone further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument are: terrorist activities made up only a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism via airline and oil company stocks; and we already bet against bad things happening to us whenever we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -89 Prediction Markets and Scoring Rules 47-revision-36 2008-08-07 12:23:06 2008-08-07 12:23:06 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n\nIf I'm 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. 
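The price-updating process described next can also be checked numerically. A minimal Ruby sketch (prices kept in integer cents to avoid floating-point drift; all names are mine):

    # The market maker starts contract B at 50 cents and raises it 1 cent per
    # sale. The buyer believes P(no rain) = 0.7, so B is worth 70 cents to them
    # and they keep buying while the quoted price is below that.
    belief_cents = 70
    price_cents  = 50
    paid_cents   = 0
    contracts    = 0

    while price_cents < belief_cents
      paid_cents  += price_cents
      contracts   += 1
      price_cents += 1
    end

    payout_cents = contracts * 100  # each contract pays $1 if it does not rain

    puts contracts                                         # => 20
    printf("Total paid:      $%.2f\n", paid_cents / 100.0) # => $11.90
    printf("Gain if no rain: $%.2f\n", (payout_cents - paid_cents) / 100.0)        # => $8.10
    printf("Expected gain:   $%.2f\n", (0.7 * payout_cents - paid_cents) / 100.0)  # => $2.10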
\n\nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n\nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 it will rain and you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain and your 20 copies of contract B will be worth $20, so you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971.\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is to set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars; otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
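As an aside (my own compact rewriting, not a step in the original derivation): since [tex]w[/tex] only takes the values 0 and 1, the two cases of this payment collapse into the single expression [tex]s(r)(w) = 1 - (w - r)^2[/tex], which makes it plain that the Brier payment penalises the squared distance between your report and the outcome.\n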
\n\nTo maximise your expected return you should report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (that is, the probability that [tex]w = 1[/tex]), a bit of algebra shows that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and gone further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument are: terrorist activities made up only a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism via airline and oil company stocks; and we already bet against bad things happening to us whenever we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -90 Prediction Markets and Scoring Rules 47-revision-37 2008-08-08 01:00:49 2008-08-08 01:00:49 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n\nIf I'm 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. 
\n\nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n\nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 it will rain and you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain and your 20 copies of contract B will be worth $20, so you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971.\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is to set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars; otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\nend{array}\n[/tex]\n
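A minimal Ruby sketch of this payment (my own, assuming the quadratic rule above; the function names are mine):

    # Quadratic scoring rule: s(r) = [1-(1-r)^2, 1-r^2].
    def s(r)
      [1 - (1 - r)**2, 1 - r**2]
    end

    # Payment is the inner product of s(r) with [w, 1-w],
    # where w = 1 for heads and w = 0 for tails.
    def payment(r, w)
      s(r)[0] * w + s(r)[1] * (1 - w)
    end

    puts payment(0.7, 1)  # ~0.91 dollars if heads
    puts payment(0.7, 0)  # ~0.51 dollars if tails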
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]p = r[/tex]. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nThe quadratic rule above is known as the _Brier score_ and is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to a telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. 
The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -92 Prediction Markets and Scoring Rules 47-revision-39 2008-08-10 23:28:15 2008-08-10 23:28:15 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now, so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n\nIf I'm 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n\nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. 
If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n\nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956, and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\n\nA scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function that gives the payment for a correct prediction --- [tex]s_1(r)[/tex] --- and an incorrect prediction --- [tex]s_0(r)[/tex] --- from a reported probability [tex]r[/tex] of an event. This reduces the game of gradually increasing the cost of the contracts as more are bought to a simple offer of a payoff for a reported probability. The key feature of a _proper_ scoring rule is that its expected value is maximised when the true probability of an event is reported. That is, if [tex]p\in[0,1][/tex] is the true probability of an event then\n
\n[tex]\n\displaystyle\n\max_{r\in[0,1]} \mathbb{E}_p[ s(r) ] = \mathbb{E}_p[ s(p) ].\n[/tex]\n
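As a quick numerical sanity check of this condition (a sketch of my own, not from the post), sweeping reports over a grid shows the expected payment of the quadratic rule used in the displays below peaking at the true probability:

    # Expected payment E_p[s(r)] for the quadratic rule
    # s(r) = [1-(1-r)^2, 1-r^2] when the event has probability p.
    def expected_score(r, p)
      p * (1 - (1 - r)**2) + (1 - p) * (1 - r**2)
    end

    p_true = 0.7
    grid = (0..100).map { |i| i / 100.0 }
    best = grid.max_by { |r| expected_score(r, p_true) }
    puts best  # => 0.7 -- the truthful report maximises the expected payment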
\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]p = r[/tex]. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nThe quadratic rule above is known as the _Brier score_ and is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to a telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. 
The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -91 Prediction Markets and Scoring Rules 47-revision-38 2008-08-10 23:25:39 2008-08-10 23:25:39 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now, so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n\nIf I'm 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n\nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. 
If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n\nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956, and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\n\nA scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function that gives the payment for a correct ([tex]s_1(r)[/tex]) and an incorrect ([tex]s_0(r)[/tex]) prediction from a reported probability [tex]r[/tex] of an event. This reduces the game of gradually increasing the cost of the contracts as more are bought to a simple offer of a payoff for a reported probability. The key feature of a _proper_ scoring rule is that its expected value is maximised when the true probability of an event is reported. That is, if [tex]p\in[0,1][/tex] is the true probability of an event then\n
\n[tex]\n\displaystyle\n\max_{r\in[0,1]} \mathbb{E}_p[ s(r) ] = \mathbb{E}_p[ s(p) ].\n[/tex]\n
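For contrast, here is a quick numeric illustration of my own (anticipating the aside on the linear rule further below) of a rule that fails this condition: under [tex]s(r) = [r, 1-r][/tex] the best report is pushed to 0 or 1 rather than to the true probability:

    # Linear rule: paid r if the event occurs, 1-r otherwise.
    def linear_expected(r, p)
      p * r + (1 - p) * (1 - r)
    end

    grid = (0..100).map { |i| i / 100.0 }
    [0.3, 0.7].each do |p|
      best = grid.max_by { |r| linear_expected(r, p) }
      puts "p=#{p}: best report #{best}"  # 0.0 when p < 0.5, 1.0 when p > 0.5
    end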
\n\nOne way to elicit your probability that a coin will land heads is to set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars, otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this, it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]p = r[/tex]. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nThe quadratic rule above is known as the _Brier score_ and is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to a telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. 
The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -93 Prediction Markets and Scoring Rules 47-revision-40 2008-08-10 23:28:41 2008-08-10 23:28:41 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now, so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n \n[robin hanson]: http://hanson.gmu.edu/ \n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/ \n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule \n \nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view. \n \nTrading Cash for Probability \n------------------------------- \nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n \nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair: \n \n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n \nIf I'm 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n \nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. 
If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n \nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n \nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either. \n \nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10. \n \nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain. \n \nProper Scoring Rules \n------------------------ \nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956, and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4] \n \nA scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function that gives the payment for a correct prediction --- [tex]s_1(r)[/tex] --- and an incorrect prediction --- [tex]s_0(r)[/tex] --- from a reported probability [tex]r[/tex] of an event. This reduces the game of gradually increasing the cost of the contracts as more are bought to a simple offer of a payoff for a reported probability. The key feature of a _proper_ scoring rule is that its expected value is maximised when the true probability of an event is reported. That is, if [tex]p\in[0,1][/tex] is the true probability of an event then \n
\n[tex] \n\displaystyle \n\max_{r\in [0,1]} \mathbb{E}_p [ s(r) ] = \mathbb{E}_p [ s(p) ]. \n[/tex] \n
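The arithmetic of the rain-contract walkthrough above can also be checked mechanically. A short Ruby sketch of my own (the prices and counts are those of the example):

    # Twenty copies of contract B bought at prices 0.50, 0.51, ..., 0.69.
    prices = (0...20).map { |i| 0.50 + 0.01 * i }
    cost = prices.inject(0.0) { |sum, price| sum + price }  # $11.90
    payoff = 20.0           # each contract pays $1 if it does not rain
    p_no_rain = 0.7

    expected_gain = p_no_rain * (payoff - cost) - (1 - p_no_rain) * cost
    puts cost.round(2)            # => 11.9
    puts (payoff - cost).round(2) # => 8.1
    puts expected_gain.round(2)   # => 2.1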
\n \nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails. \n
\n[tex] \n\displaystyle \n\begin{array}{rcl} \ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\ \n & = & (1-(1-r)^2)w + (1-r^2)(1-w). \n\end{array} \n[/tex] \n
\n \nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), with a bit of algebra you will see that \n
\n[tex] \n\displaystyle \n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2 \n[/tex] \n
\nwhich is clearly maximised only when [tex]p = r[/tex]. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads. \n \nThe quadratic rule above is known as the _Brier score_ and is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event. \nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post. \n \nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is \n
\n[tex] \n\displaystyle \n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1). \n[/tex] \n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex]. \n \nMarket Scoring Rules \n------------------------ \n \nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's. \n \nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here. \n \n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/ \n \n[This leads to a telescoping rule for MSRs] \n \nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc. \n \nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out. \n \nPrediction Markets in the Wild \n---------------------------------- \n \nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago. \n \nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage. \n \n[hubdub]: http://www.hubdub.com/ \n[david pennock]: http://dpennock.com/ \n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/ \n[ec08]: http://www.sigecom.org/ec08/ \n \nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making). \n \nResearch shows that, in the areas where they have been used, prediction markets are [powerful][]. \n \n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election. \n \n[powerful]: http://artificialmarkets.com/ \n[electoralmarkets]: http://www.electoralmarkets.com/ \n[john]: http://hunch.net/?p=396 \n[intrade]: http://www.intrade.com/ \n \nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. 
The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance. \n \n[pam]: http://dpennock.com/pam.html \n \n \n[book and market maker]: http://blog.commerce.net/?p=251 \n \nReferences \n------------ \n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003). \n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956). \n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971). \n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -94 Scoring Rules and Prediction Markets 47-revision-41 2008-08-11 10:44:25 2008-08-11 10:44:25 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now, so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n\nIf I'm 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n\nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. 
If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n\nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956, and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\n\nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a _report_ [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. A convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product\n[tex]\langle s(r), w \rangle[/tex] where [tex]w = [1-w_1, w_1][/tex].\n\nIf you know the scoring rule I use in advance then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \n\nIn order to ensure you report what you really believe to be the true probabilities, I need to construct the scoring rule in such a way that its expected payoff is maximised when you report truthfully. That is, if [tex]p[/tex] is the true probability of the event occurring (i.e., [tex]w_1 = 1[/tex]) then\n
\n[tex]\n\displaystyle\n\max_{r} \mathbb{E}_p [ s(r) ] = \mathbb{E}_p [ s(p) ].\n[/tex]\n
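This maximisation property can also be tested numerically. A rough sketch of my own (not from Lambert et al.): check, for every belief on a grid, that no report beats the truthful one:

    # True if, for every belief p on the grid, no report r has a
    # higher expected payment than the truthful report r = p.
    # rule.call(r) returns [s_0(r), s_1(r)].
    def proper_on_grid?(rule, step = 0.05)
      grid = (0..(1.0 / step).round).map { |i| i * step }
      grid.all? do |p|
        expect = lambda { |r| (1 - p) * rule.call(r)[0] + p * rule.call(r)[1] }
        truthful = expect.call(p)
        grid.all? { |r| expect.call(r) <= truthful + 1e-9 }
      end
    end

    brier  = lambda { |r| [1 - r**2, 1 - (1 - r)**2] }
    linear = lambda { |r| [1 - r, r] }
    puts proper_on_grid?(brier)   # => true
    puts proper_on_grid?(linear)  # => false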
\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
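This payment expands to a closed form (derived in the displays that follow) which is easy to verify numerically. A quick sketch of my own, with arbitrary values for [tex]r[/tex] and [tex]p[/tex]:

    # Payment of the quadratic rule: s(r)(w) = (1-(1-r)^2)w + (1-r^2)(1-w).
    def payment(r, w)
      (1 - (1 - r)**2) * w + (1 - r**2) * (1 - w)
    end

    r, p = 0.4, 0.7
    direct = p * payment(r, 1) + (1 - p) * payment(r, 0)
    closed_form = p**2 - p + 1 - (p - r)**2
    puts (direct - closed_form).abs < 1e-12  # => true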
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]p = r[/tex]. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nThe quadratic rule above is known as the _Brier score_ and is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It's a very nice piece of work which I'll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to a telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought. 
The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -95 Scoring Rules and Prediction Markets 47-revision-42 2008-08-11 11:07:24 2008-08-11 11:07:24 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now, so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n\nIf I'm 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n\nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. 
If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n\nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as "proper scoring rules" was by John McCarthy[^2] in 1956, and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\n\nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a "report" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. \n\nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]\langle s(r), w \rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]\langle s(r), w \rangle = s_1(r)[/tex] and similarly the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex].\n\nIf you know the scoring rule I use in advance then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \n\nIn order to ensure you report what you really believe to be the true probabilities, I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then\n
\n[tex]\ndisplaystyle\nmax_{r} mathbb{E}_p langle s(r), w rangle = mathbb{E}_p langle s(p), w rangle .\n[/tex]\n
\nScoring rules that meet this criteria are described as "proper" or "Fisher consistent".\n\nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \n
\n[tex]\ndisplaystyle\nmathbb{E}_p langle s(r), w rangle \n= langle s(r), mathbb{E}_p w rangle\n= langle s(r), p rangle\n[/tex]\n
\nsince [tex]mathbb{E}_p w = p[/tex]. If everything is suitably differentiable the Fisher consistency (or "properness") condition can be restated as requiring that the gradient of the scoring rule disappear when\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. Mccarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -96 Scoring Rules and Prediction Markets 47-revision-43 2008-08-11 11:08:21 2008-08-11 11:08:21 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n \n[robin hanson]: http://hanson.gmu.edu/ \n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/ \n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule \n \nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view. \n \nTrading Cash for Probability \n------------------------------- \nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in predication markets trade in contracts that pay out should a well-defined future event take place. \n \nFor example we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair: \n \n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n \nIf I'm 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n \nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. 
If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n \nIn the case of such a trade I should update my prices to, say $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n \nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either. \n \nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10 \n \nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain. \n \nProper Scoring Rules \n------------------------ \nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as "proper scoring rules" was by John McCarthy[^2] in 1956 and a more in depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced a very recent paper by Lambert et al.[^4] \n \nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a "report" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. \n \nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]langle s(r), w rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]langle s(r), w rangle = s_1(r)[/tex] and similarly the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex]. \n \nIf you know the scoring rule I use in advance then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \n \nIn order to ensure you report what you really believe to be the true probabilities I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then \n
\n[tex] \ndisplaystyle \nmax_{r} mathbb{E}_p langle s(r), w rangle = mathbb{E}_p langle s(p), w rangle . \n[/tex] \n
\nScoring rules that meet this criteria are described as "proper" or "Fisher consistent". \n \nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \n
\n[tex] \ndisplaystyle \nmathbb{E}_p langle s(r), w rangle \n= langle s(r), mathbb{E}_p w rangle \n= langle s(r), p rangle \n[/tex] \n
\nsince [tex]mathbb{E}_p w = p[/tex]. If everything is suitably differentiable the Fisher consistency (or "properness") condition can be restated as requiring that the gradient of the scoring rule disappear when [tex]r = p[/tex]. That is, [tex](nabla_r s)(p) = 0[/tex]. \n \nMarket Scoring Rules \n------------------------ \n \nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's. \n \nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here. \n \n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/ \n \n[This leads to telescoping rule for MSRs] \n \nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc. \n \nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out. \n \nPrediction Markets in the Wild \n---------------------------------- \n \nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago. \n \nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage. \n \n[hubdub]: http://www.hubdub.com/ \n[david pennock]: http://dpennock.com/ \n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/ \n[ec08]: http://www.sigecom.org/ec08/ \n \nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making). \n \nResearch shows that in the areas they have been used prediction markets are [powerful][]. \n \n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election. \n \n[powerful]: http://artificialmarkets.com/ \n[electoralmarkets]: http://www.electoralmarkets.com/ \n[john]: http://hunch.net/?p=396 \n[intrade]: http://www.intrade.com/ \n \nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
The main points of David's argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance. \n \n[pam]: http://dpennock.com/pam.html \n \n \n[book and market maker]: http://blog.commerce.net/?p=251 \n \nReferences \n------------ \n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003). \n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. Mccarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956). \n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971). \n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -97 Scoring Rules and Prediction Markets 47-revision-44 2008-08-11 11:09:38 2008-08-11 11:09:38 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n \n[robin hanson]: http://hanson.gmu.edu/ \n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/ \n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule \n \nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view. \n \nTrading Cash for Probability \n------------------------------- \nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in predication markets trade in contracts that pay out should a well-defined future event take place. \n \nFor example we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair: \n \n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n \nIf I'm 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n \nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. 
If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n \nIn the case of such a trade I should update my prices to, say $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n \nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either. \n \nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10 \n \nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain. \n \nProper Scoring Rules \n------------------------ \nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as "proper scoring rules" was by John McCarthy[^2] in 1956 and a more in depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced a very recent paper by Lambert et al.[^4] \n \nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a "report" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. \n \nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]langle s(r), w rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]langle s(r), w rangle = s_1(r)[/tex] and similarly the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex]. \n \nIf you know the scoring rule I use in advance then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \n \nIn order to ensure you report what you really believe to be the true probabilities I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then \n
\n[tex] \ndisplaystyle \nmax_{r} mathbb{E}_p langle s(r), w rangle = mathbb{E}_p langle s(p), w rangle . \n[/tex] \n
\nScoring rules that meet this criteria are described as "proper" or "Fisher consistent". \n \nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \n
\n[tex] \ndisplaystyle \nmathbb{E}_p langle s(r), w rangle = langle s(r), mathbb{E}_p w rangle = langle s(r), p rangle \n[/tex] \n
\nsince [tex]mathbb{E}_p w = p[/tex]. If everything is suitably differentiable the Fisher consistency (or "properness") condition can be restated as requiring that the gradient of the scoring rule disappear when [tex]r = p[/tex]. That is, [tex](nabla_r s)(p) = 0[/tex]. \n \nMarket Scoring Rules \n------------------------ \n \nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's. \n \nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here. \n \n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/ \n \n[This leads to telescoping rule for MSRs] \n \nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc. \n \nI asked Robin a pretty nave question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out. \n \nPrediction Markets in the Wild \n---------------------------------- \n \nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago. \n \nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage. \n \n[hubdub]: http://www.hubdub.com/ \n[david pennock]: http://dpennock.com/ \n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/ \n[ec08]: http://www.sigecom.org/ec08/ \n \nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making). \n \nResearch shows that in the areas they have been used prediction markets are [powerful][]. \n \n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election. \n \n[powerful]: http://artificialmarkets.com/ \n[electoralmarkets]: http://www.electoralmarkets.com/ \n[john]: http://hunch.net/?p=396 \n[intrade]: http://www.intrade.com/ \n \nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first though. 
-98 Scoring Rules and Prediction Markets 47-revision-45 2008-08-11 11:53:40 2008-08-11 11:53:40 inherit [Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues, including how prediction markets can be used to aggregate information through "market scoring rules"[^1]. I've been investigating certain aspects of plain [scoring rules][] for a while now, so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I've uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) "Pays $1 to bearer if it rains next Monday", and \n* B) "Pays $1 to bearer if it does not rain next Monday". \n\nIf I'm 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A's expected value for you is $0.30 and contract B's value is $0.70. \n\nIf I'm selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain.
If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20 in expectation.\n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I've gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let's say I modify the price by $0.01 each time. \n\nContinuing this, I'll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I'll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 it will rain and you will lose money --- specifically, $0.50 + $0.51 + ... + $0.69 = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain, your 20 copies of contract B will be worth $20, and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I expect to pay $2.10 for eliciting your correct belief in the probability of rain.
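For the curious, here is a small Ruby script (my own sanity check, not part of the original post) that replays the walkthrough above and confirms the arithmetic:

    # Contract B pays $1 if it does NOT rain; the buyer believes P(rain) = 0.3.
    # The market maker starts B at $0.50 and raises its price $0.01 per sale.
    prices = (0...20).map { |i| 0.50 + 0.01 * i }     # $0.50, $0.51, ..., $0.69
    cost   = prices.inject(0.0) { |sum, p| sum + p }  # total paid for 20 copies

    gain_if_dry   = 20 * 1.0 - cost                   # each copy pays $1 if dry
    expected_gain = 0.7 * gain_if_dry - 0.3 * cost    # under the buyer's beliefs

    printf("cost = $%.2f, gain if dry = $%.2f, expected gain = $%.2f\n",
           cost, gain_if_dry, expected_gain)
    # => cost = $11.90, gain if dry = $8.10, expected gain = $2.10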
\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives, as in the above example, has a long history. The first general statement of what are now known as "proper scoring rules" was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\n\nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)][/tex] is a function of a "report" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. \n\nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]\langle s(r), w \rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]\langle s(r), w \rangle = s_1(r)[/tex]; similarly, the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex].\n\nIf you know the scoring rule I use in advance then the game of gradually increasing the cost of the contracts as you buy more can be simplified: you just report the probabilities you believe will maximise what I will pay you under the scoring rule. \n\nIn order to ensure you report what you really believe to be the true probabilities I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then\n\n[tex]\n\displaystyle\n\max_{r} \mathbb{E}_p \langle s(r), w \rangle = \mathbb{E}_p \langle s(p), w \rangle .\n[/tex]
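To see a rule that satisfies this, consider the quadratic (Brier) scoring rule [tex]s_1(r) = 1 - (1 - r_1)^2[/tex] and [tex]s_0(r) = 1 - r_1^2[/tex] --- a standard example, though this particular check is mine rather than something from Hanson's talk. Its expected payment under [tex]p[/tex] is

[tex]
\displaystyle
\mathbb{E}_p \langle s(r), w \rangle = (1 - p_1)(1 - r_1^2) + p_1 \left(1 - (1 - r_1)^2\right),
[/tex]

whose derivative with respect to [tex]r_1[/tex] is [tex]2(p_1 - r_1)[/tex]. This vanishes exactly at [tex]r_1 = p_1[/tex], and the second derivative is [tex]-2 < 0[/tex], so truthful reporting is the unique maximiser of your expected payment.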
\nScoring rules that meet this criterion are described as "proper" or "Fisher consistent".\n\nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \n
\n[tex]\n\displaystyle\n\mathbb{E}_p \langle s(r), w \rangle = \langle s(r), \mathbb{E}_p w \rangle = \langle s(r), p \rangle\n[/tex]\n
\nsince [tex]\mathbb{E}_p w = p[/tex]. If everything is suitably differentiable, the Fisher consistency (or "properness") condition requires that the derivatives of the scoring rule satisfy, for all [tex]p[/tex],\n
\n[tex]\n\displaystyle\n\left\langle \frac{\partial}{\partial r_i} s(p), p \right\rangle = 0.\n[/tex]\n
\nThat means the derivatives of the scoring rule must be orthogonal to [tex]p[/tex]. \n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else's.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson's logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to a telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: how do these markets get started, since someone has to pay the contracts out when they mature? The answer is "the person who wants the information". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy of the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet "play money" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state by state, the predicted results of the upcoming US presidential election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called "terrorism market" was not as bad an idea as I first thought.
The main points of David's argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets' reaction to terrorism through airline and oil stocks; and we bet against bad things happening to us when we take out insurance. \n\n[pam]: http://dpennock.com/pam.html\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008). -99 Prediction and the Axiom of Choice prediction-and-the-axiom-of-choice 2008-09-22 21:55:08 2008-08-29 01:32:51 Some thoughts on Hardin and Taylor's paper "A Peculiar Connection Between the Axiom of Choice and Predicting the Future". publish A curious paper entitled ["A Peculiar Connection Between the Axiom of Choice and Predicting the Future"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR's Hammer][xor]. \n\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\n[hardin]: http://maven.smith.edu/~chardin/\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\n\nIts main claim is that there exists an almost infallible prediction strategy. That is, one that will almost always predict the correct present value of some unknown function given all its past values. More specifically, they describe the μ-strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ≤ s < t + ε). \n\n"Well", you think, "that's induction solved then. I'm going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market." Unfortunately for your bank balance, the authors note that \n> We should emphasize that these results do not give a practical means of predicting \n> the future, just as the time dilation one would experience standing near the event \n> horizon of a black hole does not give a practical time machine. \n\nIn other words, the result is purely theoretical. Worse than that, the definition of the μ-strategy requires the [well-ordering theorem][], which is equivalent to the [Axiom of Choice][]. \n\n[well-ordering theorem]: http://en.wikipedia.org/wiki/Well-ordering_theorem\n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice\n\nAside from being completely non-constructive, the μ-strategy is relatively straightforward.
First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -∞ < s < t. When presented with the values of some unknown v up to time t, the μ-strategy simply chooses the "smallest" member, say u, of [v]t with respect to the ordering and outputs u(t). \n\nSo, apart from the bit where you have to invoke the well-ordering theorem to order the set of all functions over the reals, it's a very simple strategy. \n\nThe authors suggest that the chosen well-ordering can be thought of as a measure of simplicity. In this case the μ-strategy just chooses the "simplest" function consistent with the observations to date. More generally, the well-ordering can be thought of as a bias for the strategy. Different choices of bias will lead to different predictions based on the same past observations. What's odd about the result is that *no matter what bias is chosen, the μ-strategy will only ever make countably many mistakes*.
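To make the mechanics concrete, here is a toy, finite analogue in Ruby (my own sketch, not from the paper; the names `HYPOTHESES` and `mu_predict` are made up). The hard-coded list of hypotheses stands in for the well-ordering, which in the real construction covers *all* functions over the reals and cannot actually be written down:

    # Toy analogue of the mu-strategy over a finite hypothesis class.
    # The order of HYPOTHESES plays the role of the well-ordering:
    # earlier entries are "smaller", i.e., simpler.
    HYPOTHESES = [
      lambda { |t| 0 },      # the constant zero function
      lambda { |t| t },      # the identity
      lambda { |t| t * t },  # a quadratic
    ]

    # Given past observations {time => value}, predict v(t) using the
    # first ("smallest") hypothesis consistent with everything seen so far.
    def mu_predict(past, t)
      u = HYPOTHESES.find { |h| past.all? { |s, v| h.call(s) == v } }
      u && u.call(t)
    end

    past = { 1 => 1, 2 => 4, 3 => 9 }  # rules out the first two hypotheses
    puts mu_predict(past, 4)           # => 16

Reordering the hypotheses changes the predictions made from the same past observations, which is exactly the sense in which the ordering is a bias.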
Ultimately, the [intuitionist][] in me looks upon this result much as I see the [Banach-Tarski paradox][]. That is, as evidence against the use of the axiom of choice (and its equivalents) in mathematics that's even vaguely practical. Still, the result is an interesting one that analyses the problem of induction in a very abstract setting. \n\n[banach-tarski paradox]: http://en.wikipedia.org/wiki/Banach-Tarski_paradox\n[intuitionist]: http://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/
More specifically, they describe the -strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ? s < t + ?). \n \n"Well", you think, "that's induction solved then. I'm going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market." Unfortunately for your bank balance, the authors note that \n> We should emphasize that these results do not give a practical means of predicting \n> the future, just as the time dilation one would experience standing near the event \n> horizon of a black hole does not give a practical time machine. \n \nIn other words, the result is purely theoretical. Worse than that, the definition of the -strategy requires the [well-ordering theorem][] --- otherwise known as the [Axiom of Choice][]. \n \n[well-ordering theorem]: http://en.wikipedia.org/wiki/Well-ordering_theorem \n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice \n \nAside from being completely non-constructive the -strategy is relatively straight-forward. First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -? < s < t. When presented with the values for some unknown v up to time t the -strategy simply chooses the "smallest" member, say u, of [v]t with respect to the ordering and outputs u(t). \n \nSo, apart for the bit where you have to invoke the well-ordering theorem to order the set of all functions over the reals, it's a very simple strategy. \n \nThe author's suggest that the chosen well-ordering can be thought of as a measure of simplicity. In this case the -strategy just chooses the "simplest" function consistent with the observations to date. More generally, the well-ordering can be thought of as a bias for the strategy. Different choices of bias will lead to different predictions based on the same past observations. What's odd about the result is that *no matter what bias is chosen the -strategy will only ever make countably many mistakes*. \n \nUltimately, the [intuitionist][] in me looks upon this result much as I see the [Banach-Tarski paradox][]. That is, as evidence against the use of the axiom of choice (and its equivalents) in mathematics that's even vaguely practical. Still, the result is an interesting one that analyses the problem of induction in a very abstract setting. \n \n[banach-tarski paradox]: http://en.wikipedia.org/wiki/Banach-Tarski_paradox \n[intuitionist]: http://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/ -109 Structured Machine Learning: The Next Ten Years 2008-09-03 04:35:36 0000-00-00 00:00:00 draft \n \n[paper]: http://dx.doi.org/10.1007/s10994-008-5079-1 -110 Structured Machine Learning: The Next Ten Years 109-revision 2008-09-03 04:35:20 2008-09-03 04:35:20 inherit -111 A Year of Research Blogging a-year-of-research-blogging 2008-09-22 06:59:46 2008-09-22 07:00:09 Looking back on a year of research blogging about machine learning. publish Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). 
\n \nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics shows that I've had almost 6,000 visits and over 10,000 page views. \n \n
\n[image: Visitors over the last year] \n
\n \nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly's [post on the same topic](http://kk.org/cooltools/archives/002879.php). It's my most-read post with over 2,500 views. \n \nThe second and third most popular posts were on [prediction and the axiom of choice](http://conflate.net/inductio/2008/08/prediction-and-the-axiom-of-choice/) with about 1,300 views and [Visualising ROC and cost curve duality](http://conflate.net/inductio/2008/04/visualising-roc-and-cost-curve-duality/) with just over 400 views. \n \nOf course, it is natural to want to increase those figures but overall I've been fairly happy with the frequency of posts I've written and the number of readers I've attracted to what is a fairly narrow subject area. \n \nSo, one year down and one year to go on my current post-doc. In the year ahead, I'll continue writing about research by others in machine learning but I'll also try to include more expositions of some of my own work. -112 year-stats picture-1 2008-09-11 23:06:22 2008-09-11 23:06:22 Visitors over the last year inherit -117 Cheap Supervised Training Instances 2008-09-16 01:02:14 0000-00-00 00:00:00 A brief discussion of a paper describing the use of the Amazon Mechanical Turk to buy cheap, human annotations for the creation of supervised training sets. draft \n \n[paper]: http://blog.doloreslabs.com/wp-content/uploads/2008/09/amt_emnlp08_accepted.pdf \n[dolores]: http://blog.doloreslabs.com/2008/09/amt-fast-cheap-good-machine-learning/ \n[lingpipe]: http://lingpipe-blog.com/2008/09/15/dolores-labs-text-entailment-data-from-amazon-mechanical-turk/ -124 Super Crunchers super-crunchers 2008-09-27 06:49:14 2008-09-27 06:49:14 A review of the book "Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart" by Ian Ayers. publish [Ian Ayers][] is a surprisingly engaging writer, taking what many would consider a very dry topic -- statistics -- and turning it into a thought-provoking, but flawed, book entitled [Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart][sc]. \n \n[Ian Ayers]: http://islandia.law.yale.edu/ayers/indexhome.htm \n[sc]: http://www.randomhouse.com/bantamdell/supercrunchers/ \n \nFrom the opening pages, Ayers pits the "super crunchers" -- people applying statistics to large data sets -- against experts in an area, be it viticulture, baseball, or marketing. With barely suppressed glee he describes how number crunching out-predicts the experts time and time again. The point is that as collecting, storing and analysing large amounts of data becomes cheaper and cheaper, more and more decision-making will take the results of "super crunching" into account, with experts either having to step aside or learn some statistical chops. To back his arguments for the rise of "super crunching" Ayers draws on a large number of examples from a variety of areas and even experiments with the technique himself, describing how he used it to help choose the title of his book. \n \nAlthough I am more or less convinced by Ayers' arguments I found myself questioning his credibility in several places during the book. I think the main reason for this was the tone of the book occasionally crossing the fine line separating "enthusiastic, popular account" and "overly simplistic, gushing rave". The constant use of "super crunching" throughout the book got on my nerves after a while. It began to overemphasise the newness of what could as easily be called "statistical analysis". After a while I mentally replaced "super crunching" with the less sensational "statistical analysis" wherever I encountered it. \n \nConversely, Ayers constantly refers to "regression" when talking about the techniques analysts use to make predictions.
At first, I thought this was a convenient short-hand for a range of techniques that he didn't want to spend time distinguishing between. It was only when neural networks were described as "a newfangled competitor to the tried-and-true regression formula" and "an important contributor to the Super Crunching revolution" that I realised that Ayers may not know as much about the nuts and bolts of computational statistics as I first thought. This impression was confirmed when Ayers later confuses "summary statistics" with "sufficient statistics" and talks tautologically of "binary bytes". \n \nStylistically, there is too much foreshadowing and repetition of topics throughout the book for my liking. This feels a little condescending at times, as does his directly asking the reader to stop and think about a concept or problem at various points. \n \nOverall, I wanted to like this book more than I did. It was a light, enjoyable read and I wholeheartedly agree with Ayers' belief in the continuing importance of statistics in decision-making and his call to improve the average person's intuition for statistics. Unfortunately, I found much of "Super Crunchers" substituting enthusiasm for coherence, as well as impressions and anecdote for any kind of meaningful argument. \n \n -132 Big Data 2008-09-27 06:49:44 0000-00-00 00:00:00 draft [Chris Anderson](http://www.edge.org/3rd_culture/anderson08/anderson08_index.html) \n \n[Shalizi on Chris Anderson](http://cscs.umich.edu/~crshalizi/weblog/581.html) points to Pereira's criticisms (which can equally be applied to Super Crunchers). In a nutshell, if you don't have constraints (a.k.a. inductive biases) you will just memorise the training examples. Shalizi also points to [Danny Hillis's response](http://www.edge.org/3rd_culture/bios/hillis.html) to Anderson's article. -133 Big Data 132-revision 2008-09-27 06:49:20 2008-09-27 06:49:20 inherit -134 Snuck, flied and wedded snuck-flied-and-wedded 2008-10-19 21:04:41 2008-10-19 10:33:10 A quick summary of a paper in Nature last year that analyses the rate at which words shift from irregular to regular. publish Ben Allen over at [PLEKTIX][] highlighted (highlit?) a paper in Nature last year that compiled and analysed some hard data regarding the evolution of the English language. Entitled [Quantifying the evolutionary dynamics of language][paper], the paper by Lieberman and colleagues looked at the shift from irregular to regular English verbs over the last 1200 years. \n \n[PLEKTIX]: http://plektix.blogspot.com/2008/10/evolution-of-irregular-verbs.html \n[paper]: http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2460562 \n \nThe question the authors of the paper ask is, "At what rate do words shift from irregular (_go_/_went_) to regular (_talk_/_talked_)?" They find a very simple rule to describe this rate: the rate of "regularization" of a word is inversely proportional to the square root of its usage frequency. That is, if an irregular verb is used 100 times more than another it takes 10 times longer before it becomes regular. \n \nExtrapolating from this rule, the authors note that they can predict which currently irregular verbs will soonest become regular. They suggest _wed_/_wed_ is one such precarious irregular verb, soon to become _wed_/_wedded_. \n \nPinker discusses this type of transition in his book [Words and Rules][] and suggests that only commonly used words can stay irregular. He argues that keeping a big list of exceptions like irregular verbs around requires their constant repetition. The Nature study nicely complements this account by collecting the empirical evidence and quantifying the change. \n \n[words and rules]: http://pinker.wjh.harvard.edu/books/wr/index.html
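As a rough illustration of that square-root rule, here is a minimal Ruby sketch; the rule is from the paper, but the function name and the example numbers are mine:

    # Toy illustration of the square-root rule from Lieberman et al.:
    # a verb's regularization rate is inversely proportional to the
    # square root of its usage frequency, so a verb used k times more
    # often than another stays irregular sqrt(k) times longer.
    def relative_persistence(freq_a, freq_b)
      # How many times longer verb A stays irregular compared to verb B.
      Math.sqrt(freq_a.to_f / freq_b)
    end

    # The paper's example ratio: 100 times the usage, 10 times the lifespan.
    puts relative_persistence(100, 1)  # => 10.0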
-140 Artificial AI artificial-ai 2008-11-07 01:52:41 2008-11-07 01:52:41 publish Anyone who has worked in the area for long enough knows how difficult creating any type of artificial intelligence can be. Like many before me, I've decided to cheat a little and create an artificial AI. I take partial credit for the initial idea but it is my wife, Julieanne, who has been responsible for most of the development over the last nine months. \n \nWe had our official release on the 26th of October and even though she's been here for less than two weeks she is already exceeding all our expectations. \n \nWe refer to her as "Ada Molly Reid". \n \n
\n[image: An Artificial AI, completed on the 26th of October, 2008.]
\n \n -141 Ada Molly Reid ada 2008-11-07 01:48:53 2008-11-07 01:48:53 An Artificial AI, completed on the 26th of October, 2008. inherit -143 Behold! Jensen's Inequality behold-jensens-inequality 2008-11-17 06:26:15 2008-11-17 06:26:15 Unsatisfied with the very algebraic and formal proofs of Jensen's inequality, I present a diagram that gives a graphical intuition for the result. publish I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over some finite dimensional convex set X and x is an X-valued random variable then we can define the _Jensen gap_ \n
\n[tex] \n\\displaystyle J_f(x) := \\mathbb{E}\\left[ f\\left(x\\right) \\right] - f\\left(\\mathbb{E}\\left[ x \\right]\\right) \n[/tex] \n
\nwhere [tex]\\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \\geq 0[/tex] or equivalently, \n
\n[tex] \n\\displaystyle \\mathbb{E}\\left[ f\\left(x\\right) \\right] \\geq f\\left(\\mathbb{E}\\left[ x \\right]\\right). \n[/tex] \n
\n \nThis is a fairly simple but important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other. \n \nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][], feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams. \n \n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC \n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC \n \nI was quite happy then to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n \n[caption id="attachment_154" align="aligncenter" width="485" caption="Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi"]Jensen\\'s Inequality[/caption] \n \nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen's inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with probability [tex]p_i[/tex]. \n \nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of f, must also be convex and lie within the epigraph of f (the blue shaded area above f). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]\\sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by \n
\n[tex]\\displaystyle \n \\mathbb{E}[(x, f(x))] = \\sum_{i=1}^n p_i \\left(x_i, f(x_i)\\right) \n[/tex] \n
\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]\\mathbb{E}[(x, f(x))] = \\left(\\mathbb{E}[x], \\mathbb{E}[f(x)]\\right)[/tex] it must lie above [tex]f\\left(\\mathbb{E}[x]\\right)[/tex], thus giving the result. \n \nAlthough the diagram in Figure 1 assumes a 1-dimensional space X, the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleaned from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram, the dashed polygon inside the shaded area will approximate the graph of f better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above f. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result. \n \nA somewhat surprising fact about Jensen's inequality is that its converse is also true. By this I mean that if f is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then f is necessarily convex. The contrapositive of this statement is: a non-convex f implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex]. \n \nConsidering Figure 1 again gives some intuition as to why this must be the case. If f was non-convex then its epigraph must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]\\mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of f. \n \nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen's inequality. There are too many edge cases and subtleties (especially in the continuous case) that I've ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen's inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable too, but if I need that level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory. \n \n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence \n[uise]: http://projecteuclid.org/euclid.aoms/1177704567 \n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/ \n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality \n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki
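To see the gap numerically, here is a minimal Ruby sketch of the discrete case just described; the convex function f(x) = x² and the distribution are my own made-up example, not taken from the post:

    # Numerically check Jensen's inequality, J_f(x) = E[f(x)] - f(E[x]) >= 0,
    # for the convex function f(x) = x^2 and a small discrete distribution.
    f  = ->(x) { x * x }
    xs = [-1.0, 0.5, 2.0]   # support points x_i (arbitrary example)
    ps = [0.2, 0.5, 0.3]    # probabilities p_i, summing to 1

    e_x  = xs.zip(ps).inject(0.0) { |sum, (x, p)| sum + p * x }         # E[x]
    e_fx = xs.zip(ps).inject(0.0) { |sum, (x, p)| sum + p * f.call(x) } # E[f(x)]

    gap = e_fx - f.call(e_x)  # the Jensen gap, never negative for convex f
    puts "E[f(x)] = #{e_fx}, f(E[x]) = #{f.call(e_x)}, gap = #{gap}"

For this particular choice of f the gap is exactly the variance of x, which is one more way to see why it can never be negative.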
\n[tex]\ndisplaystyle fleft(mathbb{E}left[ X right]right) leq mathbb{E}left[\n[/tex]\n
\n\nIt turns out that DeGroot's notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the "gap" between the two side of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567 -145 Behold! Jensen's Inequality 143-revision-2 2008-11-17 02:11:36 2008-11-17 02:11:36 inherit I have been making quite a bit of use of Jensen's inequality recently. It is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I've had a bit of trouble intuitively grasping why its true \n\n
\n[tex]\n\displaystyle f\left(\mathbb{E}\left[ X \right]\right) \leq \mathbb{E}\left[ f\left(X\right) \right]\n[/tex]\n
\n\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n \lambda_i x_i \right) \leq \sum_{i=1}^n \lambda_i f\left(x_i\right)\n[/tex]\n
\n\nIt turns out that DeGroot's notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the "gap" between the two sides of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567 -146 Behold! Jensen's Inequality 143-revision-3 2008-11-17 02:29:00 2008-11-17 02:29:00 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if [tex]f : X \to \mathbb{R}[/tex] is a real-valued convex function over [tex]X[/tex] and [tex]x \in X[/tex] is a random variable then \n
\n[tex]\n\displaystyle f\left(\mathbb{E}\left[ x \right]\right) \leq \mathbb{E}\left[ f\left(x\right) \right]\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] represents the expectation. \n\nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I've had a bit of trouble intuitively grasping why it's true. I was therefore quite \nhappy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it.\n\nFirst of all here's a statement of Jensen's inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex]\n
\n\nIt turns out that DeGroot's notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the "gap" between the two sides of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/ -147 Behold! Jensen's Inequality 143-revision-4 2008-11-17 02:30:19 2008-11-17 02:30:19 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if \n[tex]f : X \to \mathbb{R}[/tex] is a real-valued convex function over [tex]X[/tex] and [tex]x \in X[/tex] is a random variable then \n
\n[tex]\displaystyle f\left(\mathbb{E}\left[ x \right]\right) \leq \mathbb{E}\left[ f\left(x\right) \right][/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation.\n\nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I've had a bit of trouble intuitively grasping why it's true. I was therefore quite \nhappy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it.\n\nFirst of all here's a statement of Jensen's inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex]\n
\n\nIt turns out that DeGroot's notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the "gap" between the two sides of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/ -148 Behold! Jensen's Inequality 143-revision-5 2008-11-17 02:30:21 2008-11-17 02:30:21 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if \n[tex]f : X \to \mathbb{R}[/tex] \nis a real-valued convex function over [tex]X[/tex] and [tex]x \in X[/tex] is a random variable then \n
\n[tex]\displaystyle f\left(\mathbb{E}\left[ x \right]\right) \leq \mathbb{E}\left[ f\left(x\right) \right][/tex] \n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. \n \nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I've had a bit of trouble intuitively grasping why it's true. I was therefore quite \nhappy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n \nFirst of all here's a statement of Jensen's inequality for discrete distributions. \n
\n[tex] \n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex] \n
\n \n \nIt turns out that DeGroot's notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the "gap" between the two sides of the inequality. \n \n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise] \n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419. \n \n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence \n[uise]: http://projecteuclid.org/euclid.aoms/1177704567 \n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/ -149 Behold! Jensen's Inequality 143-revision-6 2008-11-17 02:31:15 2008-11-17 02:31:15 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if [tex]f[/tex] is a real-valued convex function over [tex]X[/tex] and [tex]x \in X[/tex] is a random variable then \n
\n[tex] \n\displaystyle f\left(\mathbb{E}\left[ x \right]\right) \leq \mathbb{E}\left[ f\left(x\right) \right] \n[/tex] \n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. \n \nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I've had a bit of trouble intuitively grasping why it's true. I was therefore quite \nhappy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n \nFirst of all here's a statement of Jensen's inequality for discrete distributions. \n
\n[tex] \n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex] \n
\n \n \nIt turns out that DeGroot's notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the "gap" between the two sides of the inequality. \n \n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise] \n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419. \n \n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence \n[uise]: http://projecteuclid.org/euclid.aoms/1177704567 \n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/ -151 Behold! Jensen's Inequality 143-revision-8 2008-11-17 02:56:23 2008-11-17 02:56:23 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. It can also be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of _statistical information_[^DeGroot1962] and measures of the distance between probability distributions called _[f-divergences][]_ can be expressed as a Jensen gap. Furthermore, these two quantities are related to each other. \n\n\nI have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I've had a bit of trouble intuitively grasping why it's true. I was therefore quite \nhappy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it.\n\nFirst of all here's a statement of Jensen's inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex]\n
\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality -150 Behold! Jensen's Inequality 143-revision-7 2008-11-17 02:48:28 2008-11-17 02:48:28 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is always non-negative.\n\nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I've had a bit of trouble intuitively grasping why it's true. I was therefore quite \nhappy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it.\n\nFirst of all here's a statement of Jensen's inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex]\n
\n\n\nIt turns out that DeGroot's notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the "gap" between the two sides of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/ -152 Behold! Jensen's Inequality 143-revision-9 2008-11-17 02:57:21 2008-11-17 02:57:21 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_ \n
\n[tex] \n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right) \n[/tex] \n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently, \n
\n[tex] \n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right). \n[/tex] \n
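To make the gap concrete, here is a minimal Ruby sketch that computes [tex]J_f(x)[/tex] for a discrete distribution; the function and the distribution below are arbitrary choices, purely for illustration:

    # A minimal sketch: the Jensen gap J_f(x) = E[f(x)] - f(E[x]) for a
    # discrete distribution (values xs with probabilities ps).
    def jensen_gap(f, xs, ps)
      mean      = xs.zip(ps).inject(0.0) { |s, (x, p)| s + p * x }
      mean_of_f = xs.zip(ps).inject(0.0) { |s, (x, p)| s + p * f.call(x) }
      mean_of_f - f.call(mean)
    end

    xs = [1.0, 2.0, 5.0]
    ps = [0.2, 0.5, 0.3]

    square = lambda { |x| x * x }      # convex
    puts jensen_gap(square, xs, ps)    # => 2.41 (never negative)

    cave = lambda { |x| -(x * x) }     # concave, i.e. not convex
    puts jensen_gap(cave, xs, ps)      # => -2.41 (the gap can go negative)

The negative gap for the concave choice is no accident: a function whose gap is non-negative for every distribution must be convex.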
\n \nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of _statistical information_[^DeGroot1962] and measures of the distance between probability distributions called _[f-divergences][]_ can be expressed as a Jensen gap. \n \nI have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I've had a bit of trouble intuitively grasping why it's true. I was therefore quite happy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n \nFirst of all here's a statement of Jensen's inequality for discrete distributions. \n
\n[tex] \n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex] \n
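For instance (a standard worked example along the lines of the [amgm] link above, not part of the statement itself), choosing the convex function [tex]f(x) = -\ln x[/tex] for positive [tex]x_i[/tex] turns the discrete form into the weighted AM-GM inequality:

[tex]
\displaystyle -\ln\left(\sum_{i=1}^n p_i x_i\right) \leq -\sum_{i=1}^n p_i \ln x_i
\quad\Longleftrightarrow\quad
\sum_{i=1}^n p_i x_i \geq \prod_{i=1}^n x_i^{p_i}.
[/tex]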
\n \n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise] \n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419. \n \n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence \n[uise]: http://projecteuclid.org/euclid.aoms/1177704567 \n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/ \n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality -153 Behold! Jensen's Inequality 143-revision-10 2008-11-17 03:06:00 2008-11-17 03:06:00 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
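For reference (spelling out a step the text takes for granted), the definition being invoked is the two-point convexity inequality; the [tex]n[/tex]-point version used below follows from it by induction:

[tex]
\displaystyle f\left(\lambda a + (1 - \lambda) b\right) \leq \lambda f(a) + (1 - \lambda) f(b)
\quad \text{for all } a, b \in X, \ \lambda \in [0,1].
[/tex]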
\n\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nHowever, I've had a bit of trouble intuitively grasping why it's true. I was therefore quite happy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it.\n\nFirst of all here's a statement of Jensen's inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex]\n
\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki -154 Jensen's Inequality jensen 2008-11-17 03:43:57 2008-11-17 03:43:57 Jensen's Inequality inherit A graphical proof of Jensen's Inequality -155 Behold! Jensen's Inequality 143-revision-11 2008-11-17 03:44:37 2008-11-17 03:44:37 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nI was therefore quite happy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it.\n[caption id="attachment_154" align="aligncenter" width="485" caption="Jensen's Inequality"]Jensen's Inequality[/caption]\n\n\nFirst of all here's a statement of Jensen's inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex]\n
\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki -156 Behold! Jensen's Inequality 143-revision-12 2008-11-17 04:56:45 2008-11-17 04:56:45 inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nI was therefore quite happy to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it.\n\n[caption id="attachment_154" align="aligncenter" width="485" caption="Figure 1. A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi"]Jensen's Inequality[/caption]\n\n\nFirst of all here's a statement of Jensen's inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right) \n[/tex]\n
\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki -157 Visualising 19th Century Reading in Australia 40-revision 2008-06-17 05:09:52 2008-06-17 05:09:52 A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project. inherit I've recently spent a bit of time collaborating with my wife on a research project. Research collaboration by couples is not new but given that Julieanne is a [lecturer in the English program][j] and I'm part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual. \n \nThe rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. The tool itself is based on a simple application of linear Principal Component Analysis (PCA). I'll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool. \n \n[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php \n[csl]: http://csl.cecs.anu.edu.au/ \n \nThe Australian Common Reader Project \n-------------------------------------------- \nOne of Julieanne's research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers' relationship with books and periodicals. \n \n[acrp]: http://www.api-network.com/hosted/acrp/ \n \nEver on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions. \n \n[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/ \n[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin \n[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor \n \nBooks and Borrowers \n------------------------ \nAfter an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. \nThe full database contains 99,692 loans of 7,078 different books from 11 libraries by one of the 2,642 people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed one of these books. \nThis distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people. \n \n \n \n \n \n \n \n \n \n \n
Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.

| Book ID | Borrower 1 | Borrower 2 | ... | Borrower 2,473 |
|---------|------------|------------|-----|----------------|
| 1       | 1          | 0          | ... | 1              |
| 2       | 1          | 1          | ... | 0              |
| 3       | 0          | 0          | ... | 1              |
| ...     | ...        | ...        | ... | ...            |
| 1,616   | 1          | 1          | ... | 1              |
\n \nConceptually, each cell in the table contains a 1 if the person associated with the cell's column borrowed the book associated with the cell's row. If there was no such loan between a given book and borrower the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473. \n \nBook Similarity \n----------------- \nThe table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this a notion of what "similar books" means is required. \n \nMathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\mathbf{b}_2 = (1,1,\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex]. \n \nA crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common. That is, the number of columns that have a `1` in the row for book 1 and the row for book 2. \n \nIn terms of the vector representation, this similarity measure is simply the "[inner product][]" between [tex]\mathbf{b}_1[/tex] and [tex]\mathbf{b}_2[/tex] and is written [tex]\left\langle \mathbf{b}_1, \mathbf{b}_2 \right\rangle = b_{1,1}b_{2,1} + \cdots + b_{1,N}b_{2,N}[/tex] where N = 2,473 is the total number of borrowers. \n \n[inner product]: http://en.wikipedia.org/wiki/Inner_product_space \n \nIt turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to "normalise" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the "size" of each of the vectors for those books. \n \nMathematically, we will denote the size of a book vector [tex]\mathbf{b}_i[/tex] as [tex]|\mathbf{b}_i| = \sqrt{\left\langle \mathbf{b}_i, \mathbf{b}_i \right\rangle}[/tex]. The similarity between two books then becomes: \n
\n[tex]\displaystyle \n \text{sim}(\mathbf{b}_i,\mathbf{b}_j) \n = \frac{\left\langle \mathbf{b}_i, \mathbf{b}_j \right\rangle}{|\mathbf{b}_i||\mathbf{b}_j|} \n[/tex] \n
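To see why the normalisation matters, here is a small Ruby sketch on made-up borrower vectors that mirrors the popular-versus-niche example above (none of this is actual ACRP data):

    # Sketch: raw and normalised similarity for 0/1 borrower vectors.
    def dot(a, b)
      a.zip(b).inject(0) { |s, (x, y)| s + x * y }
    end

    def sim(a, b)
      dot(a, b) / (Math.sqrt(dot(a, a)) * Math.sqrt(dot(b, b)))
    end

    n = 200
    ones = lambda { |range| (0...n).map { |i| range.include?(i) ? 1 : 0 } }

    pop1   = ones.call(0...100)   # popular book, 100 borrowers
    pop2   = ones.call(90...190)  # popular book sharing only 10 of them
    niche1 = ones.call(0...10)    # niche book, 10 borrowers
    niche2 = ones.call(0...10)    # niche book with the same 10 borrowers

    puts dot(pop1, pop2)          # => 10 -- same raw count...
    puts dot(niche1, niche2)      # => 10
    puts sim(pop1, pop2)          # => 0.1 -- ...but very different similarity
    puts sim(niche1, niche2)      # => 1.0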
\n \nPrincipal Component Analysis \n--------------------------------- \nNow that we have a similarity measure between books the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart. \n \nA standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique aims to find a way of reducing the number of coordinates in each book vector in such a way that when the similarity between two books is computed using these smaller vectors it is as close as possible to the original similarity. That is, PCA creates a new table that represents books in terms of only two columns. \n \n[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis \n \n
Table 2: A portion of the book table after PCA. The values in the two new columns (PCA IDs) can be used to plot the books.

| Book ID | PCA 1 | PCA 2 |
|---------|-------|-------|
| 1       | -8.2  | 2.3   |
| 2       | 0.4   | -4.3  |
| 3       | -1.3  | -3.7  |
| ...     | ...   | ...   |
| 1,616   | 2.2   | -5.6  |
\n \nTable 2 gives an example of the book table after PCA that reduces the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be as easily interpreted as the borrower columns in Table 1 but the values in the columns are such that similarities computed from Table 2 are roughly the same as those computed from Table 1. That is, if [tex]\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\mathbf{c}_2=(0.4,-4.3)[/tex] are the vectors \nfor the first two rows of Table 2 then [tex]\text{sim}(\mathbf{c}_1,\mathbf{c}_2)[/tex] \nwould be close to [tex]\text{sim}(\mathbf{b}_1,\mathbf{b}_2)[/tex], the similarity of the \nfirst two rows in Table 1.[^1] \n \n[^1]: Technically, the guarantee of the "closeness" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair's \nsimilarity is estimated well. \n \nVisualising the Data \n---------------------- \nFigure 1 shows a plot of the PCA reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2] \n \n[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle's colour. \n \n
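The reduction itself can be sketched in plain Ruby with the standard library's Matrix class. This toy version on made-up data is only meant to show the shape of the computation -- the actual analysis used R, as described below:

    require 'matrix'

    # Toy PCA on a made-up 4-book x 5-borrower 0/1 table. Rows are scaled
    # to unit length so their inner products match the similarity above.
    rows = [[1, 0, 1, 1, 0],
            [1, 1, 1, 0, 0],
            [0, 1, 0, 1, 1],
            [0, 0, 1, 1, 1]].map do |r|
      norm = Math.sqrt(r.inject(0) { |s, v| s + v * v })
      r.map { |v| v / norm }
    end

    x = Matrix.rows(rows)
    n = x.row_count

    # Centre each column (borrower) on its mean.
    means   = (0...x.column_count).map { |j| x.column(j).inject(:+) / n }
    centred = Matrix.build(n, x.column_count) { |i, j| x[i, j] - means[j] }

    # Keep the two eigenvector directions of the covariance matrix with
    # the largest eigenvalues and project every book onto them.
    cov   = (centred.transpose * centred) / n.to_f
    eig   = cov.eigensystem
    top2  = eig.eigenvalues.zip(eig.eigenvectors).sort_by { |val, _| -val }.first(2)
    basis = Matrix.columns(top2.map { |_, vec| vec.to_a })

    coords = centred * basis    # one (PCA 1, PCA 2) pair per book
    coords.row_vectors.each_with_index do |c, i|
      puts "book #{i + 1}: #{c.to_a.map { |v| v.round(2) }.inspect}"
    end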
[Image: plot of the books across all libraries in the ACRP database]
Figure 1: A PCA plot of all the books in the ACRP database coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.
\n \nOne immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. This means it is likely that two voracious readers that frequent the same library will read the same books. This, in turn, will mean the similarity of two books from a library will be higher than books from different libraries as there are very few borrowers that use more than one library. \n \nDrilling Down and Interacting \n--------------------------------- \nTo get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners' and Mechanics' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data. \n \n[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales \n \nThere are a total of 789 books in the Lambton institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books. \n \nTo make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers. \n \nClicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet]. \n \n[applet]: /inductio/wp-content/public/acrp/ \n \n
[Image: click to open the visualisation applet]
Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.
\n \nInstructions describing how to use the tool can be found below it. \nIn a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the "Borrowers" bar will only show books with at least that many borrowers; and altering the "Similarity" bar will only draw lines to books with at least that proportion of borrowers in common. \n \nFuture Work and Distant Reading \n------------------------------------- \nJulieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls "distant reading" -- looking at books as objects and how they are read rather than the "close reading" of the text of individual books. \n \n[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti \n \nNow that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to questions we might ask. \n \nInstead we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers, their text, as well as other historical and cultural factors about them. \n \nData and Code \n---------------- \nFor the technically minded, I have made the code I used to do the visualisation available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location. \n \n[github]: http://github.com/mreid/acrp/tree/master \n[SQL]: http://en.wikipedia.org/wiki/SQL \n[R]: http://www.r-project.org/ \n[Processing]: http://processing.org/ \n \nAccess to the data that the code acts upon is not mine to give, so the code is primarily to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here. \n \n -158 Behold! Jensen's Inequality 143-revision-13 2008-11-17 05:49:27 2008-11-17 05:49:27 Unsatisfied with the very algebraic and formal proofs of Jensen's inequality, I present a diagram that gives a graphical intuition for the result. inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nI was quite happy then to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n\n[caption id="attachment_154" align="aligncenter" width="485" caption="Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi"]Jensen's Inequality[/caption]\n\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen's inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with probability [tex]p_i[/tex].\n\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of [tex]f[/tex], must also be convex and lie within its epigraph (the blue shaded area above [tex]f[/tex]). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]\sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\n
\n[tex]\displaystyle \n \mathbb{E}[(x, f(x))] = \sum_{i=1}^n p_i \left(x_i, f(x_i)\right)\n[/tex]\n
\nis a convex combination and so must also lie within the dashed polygon. Thus, it must lie above [tex]f\left(\mathbb{E}[x]\right)[/tex], giving the result. \n\nThe general result for non-discrete distributions can also be gleaned from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram, the dashed polygon within the shaded area will approximate the graph of [tex]f[/tex] better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above [tex]f\left(\mathbb{E}[x]\right)[/tex]. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\n\nA somewhat surprising fact about Jensen's inequality is that its converse is also true. By this I mean that if [tex]f[/tex] is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then [tex]f[/tex] is necessarily convex. The contrapositive of this statement is: a non-convex [tex]f[/tex] implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\n\nConsidering Figure 1 again gives some intuition as to why this must be the case. If [tex]f[/tex] were non-convex then its epigraph (the blue shaded area) must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]\mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of [tex]f[/tex].\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki -159 Behold! Jensen's Inequality 143-revision-14 2008-11-17 06:21:18 2008-11-17 06:21:18 Unsatisfied with the very algebraic and formal proofs of Jensen's inequality, I present a diagram that gives a graphical intuition for the result. inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nI was quite happy then to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n\n[caption id="attachment_154" align="aligncenter" width="485" caption="Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi"]Jensen's Inequality[/caption]\n\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen's inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with probability [tex]p_i[/tex].\n\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of [tex]f[/tex], must also be convex and lie within the epigraph of [tex]f[/tex] (the blue shaded area above [tex]f[/tex]). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]\sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\n
\n[tex]\displaystyle \n \mathbb{E}[(x, f(x))] = \sum_{i=1}^n p_i \left(x_i, f(x_i)\right) \n = \left(\mathbb{E}[x], \mathbb{E}[f(x)]\right)\n[/tex]\n
\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]\mathbb{E}[(x, f(x))] = \left(\mathbb{E}[x], \mathbb{E}[f(x)]\right)[/tex] it must lie above [tex]f\left(\mathbb{E}[x]\right)[/tex], thus giving the result.\n\nAlthough the diagram in Figure 1 assumes a 1-dimensional space X, the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleaned from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram, the dashed polygon within the shaded area will approximate the graph of [tex]f[/tex] better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above [tex]f\left(\mathbb{E}[x]\right)[/tex]. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\n\nA somewhat surprising fact about Jensen's inequality is that its converse is also true. By this I mean that if [tex]f[/tex] is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then [tex]f[/tex] is necessarily convex. The contrapositive of this statement is: a non-convex [tex]f[/tex] implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\n\nConsidering Figure 1 again gives some intuition as to why this must be the case. If [tex]f[/tex] were non-convex then its epigraph (the blue shaded area) must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]\mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of [tex]f[/tex].\n\nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen's inequality. There are too many edge cases and subtleties (especially in the continuous case) that I've ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen's inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable too, but if I need this level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki -160 Behold! Jensen's Inequality 143-revision-15 2008-11-17 06:21:56 2008-11-17 06:21:56 Unsatisfied with the very algebraic and formal proofs of Jensen's inequality, I present a diagram that gives a graphical intuition for the result. inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_ \n
\n[tex] \n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right) \n[/tex] \n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently, \n
\n[tex] \n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right). \n[/tex] \n
\n \nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other. \n \nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams. \n \n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC \n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC \n \nI was quite happy then to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n \n[caption id="attachment_154" align="aligncenter" width="485" caption="Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi"]Jensen's Inequality[/caption] \n \nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen's inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with probability [tex]p_i[/tex]. \n \nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of [tex]f[/tex], must also be convex and lie within the epigraph of [tex]f[/tex] (the blue shaded area above [tex]f[/tex]). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]\sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by \n
\n[tex]\displaystyle \n \mathbb{E}[(x, f(x))] = \sum_{i=1}^n p_i \left(x_i, f(x_i)\right) \n = \left(\mathbb{E}[x], \mathbb{E}[f(x)]\right) \n[/tex] \n
\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]\mathbb{E}[(x, f(x))] = \left(\mathbb{E}[x], \mathbb{E}[f(x)]\right)[/tex] it must lie above [tex]f\left(\mathbb{E}[x]\right)[/tex], thus giving the result. \n \nAlthough the diagram in Figure 1 assumes a 1-dimensional space X, the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleaned from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram, the dashed polygon within the shaded area will approximate the graph of [tex]f[/tex] better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above [tex]f\left(\mathbb{E}[x]\right)[/tex]. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result. \n \nA somewhat surprising fact about Jensen's inequality is that its converse is also true. By this I mean that if [tex]f[/tex] is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then [tex]f[/tex] is necessarily convex. The contrapositive of this statement is: a non-convex [tex]f[/tex] implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex]. \n \nConsidering Figure 1 again gives some intuition as to why this must be the case. If [tex]f[/tex] were non-convex then its epigraph (the blue shaded area) must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]\mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of [tex]f[/tex]. \n \nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen's inequality. There are too many edge cases and subtleties (especially in the continuous case) that I've ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen's inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable too, but if I need this level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory. \n \n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence \n[uise]: http://projecteuclid.org/euclid.aoms/1177704567 \n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/ \n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality \n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki -161 Behold! Jensen's Inequality 143-revision-16 2008-11-17 06:24:32 2008-11-17 06:24:32 Unsatisfied with the very algebraic and formal proofs of Jensen's inequality, I present a diagram that gives a graphical intuition for the result. inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if f is a real-valued convex function over some finite dimensional convex set X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nI was quite happy then to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n\n[caption id="attachment_154" align="aligncenter" width="485" caption="Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi"]Jensen's Inequality[/caption]\n\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen's inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with probability [tex]p_i[/tex].\n\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of [tex]f[/tex], must also be convex and lie within the epigraph of [tex]f[/tex] (the blue shaded area above [tex]f[/tex]). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]\sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\n
\n[tex]displaystyle \n mathbb{E}[(x, f(x))] = sum_{i=1}^n p_i left(x_i, f(x_i)right) \n[/tex]\n
\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]mathbb{E}[(x, f(x))] = left(mathbb{E}[x], mathbb{E}[f(x)]right)[/tex] it must lie above [tex]fleft(mathbb{E}[x]right)[/tex] thus giving the result.\n\nAlthough the diagram in Figure 1 assumes a 1-dimension space X the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleamed from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram the dashed polygon the shaded area will approximate the graph of better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above . Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\n\nA somewhat surprising fact about Jensen's inequality is that its converse is also true. By this I mean that if is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then is necessarily convex. The contrapositive of this statement is: non-convex implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\n\nConsidering Figure 1 again gives some intuition as to why this must be the case. If was non-convex then its epigraph (the blue shaded area) must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of .\n\nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen's inequality. There are too many edge cases and subtleties (especially in the continuous case) that I've ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen's inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable to but if I need this level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki -162 Behold! Jensen's Inequality 143-revision-17 2008-11-17 06:24:51 2008-11-17 06:24:51 Unsatisfied with the very algebraic and formal proofs of Jensen's inequality, I present a diagram that gives a graphical intuition for the result. inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if is a real-valued convex function over some finite dimensional convex set X and x is an X-valued random variable then we can define the _Jensen gap_ \n
\n[tex] \ndisplaystyle J_f(x) := mathbb{E}left[ fleft(xright) right] - fleft(mathbb{E}left[ x right]right) \n[/tex] \n
\nwhere [tex]mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) geq 0[/tex] or equivalently, \n
\n[tex] \ndisplaystyle mathbb{E}left[ fleft(xright) right] geq fleft(mathbb{E}left[ x right]right). \n[/tex] \n
\n \nThis is a fairly simple but important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other. \n \nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams. \n \n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC \n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC \n \nI was quite happy then to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n \n[caption id="attachment_154" align="aligncenter" width="485" caption="Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi"]Jensen's Inequality[/caption] \n \nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen's inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with with probability [tex]p_i[/tex]. \n \nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of , must also be convex and lie within the epigraph of (the blue shaded area above ). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by \n
\n[tex]displaystyle \n mathbb{E}[(x, f(x))] = sum_{i=1}^n p_i left(x_i, f(x_i)right) \n[/tex] \n
\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]mathbb{E}[(x, f(x))] = left(mathbb{E}[x], mathbb{E}[f(x)]right)[/tex] it must lie above [tex]fleft(mathbb{E}[x]right)[/tex] thus giving the result. \n \nAlthough the diagram in Figure 1 assumes a 1-dimension space X the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleamed from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram the dashed polygon the shaded area will approximate the graph of better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above . Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result. \n \nA somewhat surprising fact about Jensen's inequality is that its converse is also true. By this I mean that if is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then is necessarily convex. The contrapositive of this statement is: non-convex implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex]. \n \nConsidering Figure 1 again gives some intuition as to why this must be the case. If was non-convex then its epigraph must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of . \n \nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen's inequality. There are too many edge cases and subtleties (especially in the continuous case) that I've ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen's inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable to but if I need this level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory. \n \n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence \n[uise]: http://projecteuclid.org/euclid.aoms/1177704567 \n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/ \n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality \n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki -163 Behold! Jensen's Inequality 143-revision-18 2008-11-17 06:25:38 2008-11-17 06:25:38 Unsatisfied with the very algebraic and formal proofs of Jensen's inequality, I present a diagram that gives a graphical intuition for the result. inherit I have been making quite a bit of use of Jensen's inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if is a real-valued convex function over some finite dimensional convex set X and x is an X-valued random variable then we can define the _Jensen gap_ \n
\n[tex] \ndisplaystyle J_f(x) := mathbb{E}left[ fleft(xright) right] - fleft(mathbb{E}left[ x right]right) \n[/tex] \n
\nwhere [tex]mathbb{E}[/tex] denotes expectation. Jensen's inequality states that this gap is never negative, that is, [tex]J_f(x) geq 0[/tex] or equivalently, \n
\n[tex] \ndisplaystyle mathbb{E}left[ fleft(xright) right] geq fleft(mathbb{E}left[ x right]right). \n[/tex] \n
\n \nThis is a fairly simple but important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I've been interested in it because DeGroot's notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other. \n \nJensen's inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I've read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams. \n \n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC \n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC \n \nI was quite happy then to have found a graphical "proof" of Jensen's inequality. By this I mean a proof in the style of the [proof of Pythagoras' theorem][pythagoras] that is simply a diagram with the word "Behold!" above it. \n \n[caption id="attachment_154" align="aligncenter" width="485" caption="Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi"]Jensen's Inequality[/caption] \n \nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen's inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with with probability [tex]p_i[/tex]. \n \nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of , must also be convex and lie within the epigraph of (the blue shaded area above ). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by \n
\n[tex]displaystyle \n mathbb{E}[(x, f(x))] = sum_{i=1}^n p_i left(x_i, f(x_i)right) \n[/tex] \n
-164 Machine Learning Summer School 2009 machine-learning-summer-school-2009 2008-11-18 11:18:27 2008-11-18 11:18:27 A plug for the 2009 Machine Learning Summer School in Canberra, Australia. I will be giving a presentation there. publish The annual [Machine Learning Summer School][mlss] is being held in Canberra at the [Australian National University][anu] in January next year. It will be part of the joint [Summer Schools in Logic and Learning][ssll].

From the 2009 [MLSS website][mlss2009]:
> This school is suitable for all levels, both for people without previous knowledge in
> Machine Learning, and those wishing to broaden their expertise in this area. It will
> allow the participants to get in touch with international experts in this field.
> Exchange of students, joint publications and joint projects will result because
> of this collaboration.

The summer schools will run from the 26th to the 30th of January 2009 and [registration][] is open. Note that there is a 20% surcharge for registrations after the 19th of December 2008, so get registering.

I've been fortunate enough to have been given a spot on the [program][]. I'll be talking about some of the work I've been doing with [Bob Williamson][bob] this year on analysing relationships between various notions of risk, divergence and information in binary-valued prediction problems.

Leave a comment if you're planning to attend and I'll make sure I say hi.

Hope to see you there!

[mlss]: http://mlss.cc/
[mlss2009]: http://ssll.cecs.anu.edu.au/about/mlss
[anu]: http://anu.edu.au/
[ssll]: http://ssll.cecs.anu.edu.au/
[program]: http://ssll.cecs.anu.edu.au/program
[bob]: http://axiom.anu.edu.au/~williams/
[registration]: http://ssll.cecs.anu.edu.au/registration
-167 Visualising 19th Century Reading in Australia 40-autosave 2008-12-09 12:04:10 2008-12-09 12:04:10 A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project. inherit _Update - 9 Dec 2008_: Julieanne and I presented a much improved version of this visualisation at the [Resourceful Reading][] conference held at the University of Sydney on the 5th of December. I will post the updated application with notes shortly.

[Resourceful Reading]: http://conferences.arts.usyd.edu.au/index.php?cf=20

I've recently spent a bit of time collaborating with my wife on a research project. Research collaboration by couples is not new but, given that Julieanne is a [lecturer in the English program][j] and I'm part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual.

The rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. The tool itself is based on a simple application of linear Principal Component Analysis (PCA). I'll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool.

[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php
[csl]: http://csl.cecs.anu.edu.au/

The Australian Common Reader Project
--------------------------------------------
One of Julieanne's research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries, along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers' relationship with books and periodicals.

[acrp]: http://www.api-network.com/hosted/acrp/

Ever on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions.
[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/
[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin
[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor

Books and Borrowers
------------------------
After an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. The full database contains 99,692 loans of 7,078 different books from 11 libraries by 2,642 different people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed at least one of these books. This distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people.
Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.

| Book ID | Borrower 1 | Borrower 2 | ... | Borrower 2,473 |
|---------|------------|------------|-----|----------------|
| 1       | 1          | 0          | ... | 1              |
| 2       | 1          | 1          | ... | 0              |
| 3       | 0          | 0          | ... | 1              |
| ...     | ...        | ...        | ... | ...            |
| 1,616   | 1          | 1          | ... | 1              |
Conceptually, each cell in the table contains a 1 if the person associated with the cell's column borrowed the book associated with the cell's row. If there was no such loan between a given book and borrower, the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473.

Book Similarity
-----------------
The table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this, a notion of what "similar books" means is required.

Mathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\mathbf{b}_2 = (1,1,\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex].

A crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common. That is, the number of columns that have a `1` in the row for book 1 and the row for book 2.

In terms of the vector representation, this similarity measure is simply the "[inner product][]" between [tex]\mathbf{b}_1[/tex] and [tex]\mathbf{b}_2[/tex] and is written [tex]\left<\mathbf{b}_1,\mathbf{b}_2\right> = b_{1,1}b_{2,1} + \cdots + b_{1,N}b_{2,N}[/tex] where N = 2,473 is the total number of borrowers.

[inner product]: http://en.wikipedia.org/wiki/Inner_product_space

It turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to "normalise" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the "size" of each of the vectors for those books.

Mathematically, we will denote the size of a book vector [tex]\mathbf{b}_i[/tex] as [tex]\|\mathbf{b}_i\| = \sqrt{\left<\mathbf{b}_i,\mathbf{b}_i\right>}[/tex]. The similarity between two books then becomes:
[tex]\displaystyle
  \text{sim}(\mathbf{b}_i,\mathbf{b}_j)
  = \frac{\left<\mathbf{b}_i,\mathbf{b}_j\right>}{\|\mathbf{b}_i\|\,\|\mathbf{b}_j\|}
[/tex]
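As a concrete sketch in R (the language used later for the PCA reduction; the vectors here are made-up stand-ins rather than actual rows of Table 1), this measure is a one-liner:

```r
# Normalised similarity between two 0/1 borrower vectors (rows of Table 1).
# A book is always perfectly similar to itself: sim(b, b) = 1.
sim <- function(b_i, b_j) {
  sum(b_i * b_j) / (sqrt(sum(b_i * b_i)) * sqrt(sum(b_j * b_j)))
}

b1 <- c(1, 0, 1, 1)   # hypothetical borrower indicators for one book
b2 <- c(1, 1, 0, 1)   # ... and another
sim(b1, b2)           # 2 shared borrowers out of 3 each: 2/3
```

Note how this fixes the popularity problem above: two 100-borrower books sharing 10 borrowers score 10/100 = 0.1, while two 10-borrower books sharing all 10 score 1.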
Principal Component Analysis
---------------------------------
Now that we have a similarity measure between books, the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart.

A standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique aims to find a way of reducing the number of coordinates in each book vector in such a way that when the similarity between two books is computed using these smaller vectors it is as close as possible to the original similarity. That is, PCA creates a new table that represents books in terms of only two columns.

[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis
Table 2: A portion of the book table after PCA. The values in the two new columns (PCA 1 and PCA 2) can be used to plot the books.

| Book ID | PCA 1 | PCA 2 |
|---------|-------|-------|
| 1       | -8.2  | 2.3   |
| 2       | 0.4   | -4.3  |
| 3       | -1.3  | -3.7  |
| ...     | ...   | ...   |
| 1,616   | 2.2   | -5.6  |
Table 2 gives an example of the book table after PCA, which reduces the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be as easily interpreted as the borrower columns in Table 1, but their values are such that similarities computed from Table 2 are roughly the same as those computed from Table 1. That is, if [tex]\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\mathbf{c}_2=(0.4,-4.3)[/tex] are the vectors for the first two rows of Table 2 then [tex]\text{sim}(\mathbf{c}_1,\mathbf{c}_2)[/tex] would be close to [tex]\text{sim}(\mathbf{b}_1,\mathbf{b}_2)[/tex], the similarity of the first two rows in Table 1.[^1]

[^1]: Technically, the guarantee of the "closeness" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair's similarity is estimated well.

Visualising the Data
----------------------
Figure 1 shows a plot of the PCA reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2]

[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle's colour.
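The reduction itself is only a few lines in R. The sketch below uses the built-in `prcomp` on random stand-in data rather than the ACRP table, and the reduction I actually used may differ in details such as centring and scaling:

```r
# Sketch: reduce a 0/1 book-by-borrower matrix to two coordinates per book.
set.seed(1)
B <- matrix(rbinom(20 * 50, 1, 0.3), nrow = 20)  # stand-in data: 20 books, 50 borrowers
B <- B[rowSums(B) > 0, , drop = FALSE]           # drop any book with no borrowers
B_norm <- B / sqrt(rowSums(B^2))                 # normalise rows so inner products match sim()
coords <- prcomp(B_norm)$x[, 1:2]                # first two principal coordinates (cf. Table 2)
plot(coords, cex = sqrt(rowSums(B)))             # circle size grows with borrower count
```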
[Figure 1: A PCA plot of all the books in the ACRP database coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.]
One immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. This means it is likely that two voracious readers who frequent the same library will read the same books. This, in turn, means the similarity of two books from the same library will be higher than that of books from different libraries, as there are very few borrowers that use more than one library.

Drilling Down and Interacting
---------------------------------
To get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners' and Mechanics' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data.

[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales

There are a total of 789 books in the Lambton institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books.

To make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers.

Clicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet].

[applet]: /inductio/wp-content/public/acrp/
[Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.]
Instructions describing how to use the tool can be found below it. In a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the "Borrowers" bar will only show books with at least that many borrowers; and altering the "Similarity" bar will only draw lines to books with at least that proportion of borrowers in common.

Future Work and Distant Reading
-------------------------------------
Julieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls "distant reading" -- looking at books as objects and how they are read rather than the "close reading" of the text of individual books.

[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti

Now that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on the habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to questions we might have.

Instead, we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers and their text, as well as other historical and cultural factors about them.

Data and Code
----------------
For the technically minded, I have made the code I used to do the visualisation available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location.

[github]: http://github.com/mreid/acrp/tree/master
[SQL]: http://en.wikipedia.org/wiki/SQL
[R]: http://www.r-project.org/
[Processing]: http://processing.org/

Access to the data that the code acts upon is not mine to give, so the code is primarily to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here.
-170 Visualising 19th Century Reading in Australia 40-revision-4 2008-12-09 12:05:28 2008-12-09 12:05:28 A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project. inherit ----

_Update - 9 Dec 2008_: Julieanne and I presented a much improved version of this visualisation at the [Resourceful Reading][] conference held at the University of Sydney on the 5th of December. Those looking for the application I presented there: stay tuned, I will post the updated version here shortly.

----

[Resourceful Reading]: http://conferences.arts.usyd.edu.au/index.php?cf=20

I've recently spent a bit of time collaborating with my wife on a research project. Research collaboration by couples is not new but, given that Julieanne is a [lecturer in the English program][j] and I'm part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual.

The rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. The tool itself is based on a simple application of linear Principal Component Analysis (PCA). I'll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool.

[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php
[csl]: http://csl.cecs.anu.edu.au/

The Australian Common Reader Project
--------------------------------------------
One of Julieanne's research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries, along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers' relationship with books and periodicals.

[acrp]: http://www.api-network.com/hosted/acrp/

Ever on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions.

[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/
[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin
[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor

Books and Borrowers
------------------------
After an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. The full database contains 99,692 loans of 7,078 different books from 11 libraries by 2,642 people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed at least one of these books. This distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people.

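The actual preprocessing was done in SQL (see the Data and Code section below), but the idea is just a cross-tabulation of loans into a 0/1 matrix. Here is a minimal sketch in R, where the `loans` data frame and its column names are invented stand-ins for the real loan records:

    # Invented stand-in for the real loan records: one row per loan.
    loans <- data.frame(book     = c(1, 1, 2, 2, 3),
                        borrower = c(1, 3, 1, 2, 3))

    # B[i, j] is 1 if borrower j borrowed book i at least once, 0 otherwise.
    B <- 1 * (table(loans$book, loans$borrower) > 0)
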
Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.

| Book ID | Borrower 1 | Borrower 2 | ... | Borrower 2,473 |
|---------|------------|------------|-----|----------------|
| 1       | 1          | 0          | ... | 1              |
| 2       | 1          | 1          | ... | 0              |
| 3       | 0          | 0          | ... | 1              |
| ...     | ...        | ...        | ... | ...            |
| 1,616   | 1          | 1          | ... | 1              |

Conceptually, each cell in the table contains a 1 if the person associated with the cell's column borrowed the book associated with the cell's row. If there was no such loan between a given book and borrower, the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473.

Book Similarity
-----------------
The table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this, a notion of what "similar books" means is required.

Mathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\mathbf{b}_2 = (1,1,\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex].

A crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common, that is, the number of columns that have a `1` in both the row for book 1 and the row for book 2.

In terms of the vector representation, this similarity measure is simply the "[inner product][]" between [tex]\mathbf{b}_1[/tex] and [tex]\mathbf{b}_2[/tex] and is written [tex]\left\langle \mathbf{b}_1, \mathbf{b}_2 \right\rangle = b_{1,1}b_{2,1} + \cdots + b_{1,N}b_{2,N}[/tex], where N = 2,473 is the total number of borrowers.

[inner product]: http://en.wikipedia.org/wiki/Inner_product_space

It turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to "normalise" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the "size" of each of the vectors for those books.

Mathematically, we will denote the size of a book vector [tex]\mathbf{b}_i[/tex] as [tex]|\mathbf{b}_i| = \sqrt{\left\langle \mathbf{b}_i, \mathbf{b}_i \right\rangle}[/tex]. The similarity between two books then becomes:

[tex]\displaystyle
  \text{sim}(\mathbf{b}_i,\mathbf{b}_j)
    = \frac{\left\langle \mathbf{b}_i, \mathbf{b}_j \right\rangle}{|\mathbf{b}_i| |\mathbf{b}_j|}
[/tex]

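Continuing the toy example from the sketch above, this normalised similarity can be computed for every pair of books at once. Again, this is only an illustration, not the analysis code itself:

    # All pairwise inner products between book vectors (rows of B).
    inner <- B %*% t(B)

    # |b_i| for each book; dividing by the outer product of the sizes
    # ensures that sim(b_i, b_i) = 1.
    sizes <- sqrt(diag(inner))
    sim   <- inner / (sizes %o% sizes)

    sim[1, 2]  # the similarity between books 1 and 2
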
Principal Component Analysis
---------------------------------
Now that we have a similarity measure between books, the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart.

A standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique reduces the number of coordinates in each book vector in such a way that the similarity between two books, computed from these smaller vectors, is as close as possible to the original similarity. That is, PCA creates a new table that represents books in terms of only two columns.

[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis

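As a minimal sketch of this reduction step (the actual analysis code, linked at the end of the post, was also written in R), the built-in `prcomp` function can project the toy matrix `B` from above onto two coordinates:

    # Project each book vector onto the first two principal components.
    # The two columns of coords play the role of Table 2's PCA ID columns.
    pca    <- prcomp(B)
    coords <- pca$x[, 1:2]
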
Table 2: A portion of the book table after PCA. The values in the two new columns (PCA IDs) can be used to plot the books.

| Book ID | PCA ID 1 | PCA ID 2 |
|---------|----------|----------|
| 1       | -8.2     | 2.3      |
| 2       | 0.4      | -4.3     |
| 3       | -1.3     | -3.7     |
| ...     | ...      | ...      |
| 1,616   | 2.2      | -5.6     |

Table 2 gives an example of the book table after PCA has reduced the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be as easily interpreted as the borrower columns in Table 1, but their values are such that similarities computed from Table 2 are roughly the same as those computed from Table 1. That is, if [tex]\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\mathbf{c}_2 = (0.4,-4.3)[/tex] are the vectors for the first two rows of Table 2 then [tex]\text{sim}(\mathbf{c}_1,\mathbf{c}_2)[/tex] would be close to [tex]\text{sim}(\mathbf{b}_1,\mathbf{b}_2)[/tex], the similarity of the first two rows in Table 1.[^1]

[^1]: Technically, the guarantee of the "closeness" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair's similarity is estimated well.

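In the toy example, that on-average closeness can be checked directly by comparing the similarity matrix computed from the full vectors with the one computed from the two PCA coordinates. This sketch simply reuses `B` and `coords` from the earlier sketches:

    # Normalise the rows of a matrix to unit length.
    norm_rows <- function(M) M / sqrt(rowSums(M^2))

    # Similarities from the full vectors versus the 2-D PCA coordinates.
    sim_full <- norm_rows(B)      %*% t(norm_rows(B))
    sim_pca  <- norm_rows(coords) %*% t(norm_rows(coords))

    mean(abs(sim_full - sim_pca))  # average disagreement over all pairs
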
Visualising the Data
----------------------
Figure 1 shows a plot of the PCA-reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2]

[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle's colour.

[Figure: Plot of the books across all libraries in the ACRP database]

Figure 1: A PCA plot of all the books in the ACRP database, coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.

One immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. It is therefore likely that two voracious readers who frequent the same library will read many of the same books. This, in turn, means the similarity of two books from the same library will tend to be higher than that of books from different libraries, as very few borrowers use more than one library.

Drilling Down and Interacting
---------------------------------
To get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners' and Mechanics' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data.

[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales

There are a total of 789 books in the Lambton institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books.

To make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers.

Clicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet].

[applet]: /inductio/wp-content/public/acrp/

[Figure: Click to open the visualisation applet]

Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.

Instructions describing how to use the tool can be found below it. In a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the "Borrowers" bar will only show books with at least that many borrowers; and altering the "Similarity" bar will only draw lines to books with at least that proportion of borrowers in common.

Future Work and Distant Reading
-------------------------------------
Julieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls "distant reading" -- looking at books as objects and how they are read, rather than the "close reading" of the text of individual books.

[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti

Now that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on the habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to the questions we might ask.

Instead, we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers and their text, as well as other historical and cultural factors about them.

Data and Code
----------------
For the technically minded, I have made the code I used to do the visualisation available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location.

[github]: http://github.com/mreid/acrp/tree/master
[SQL]: http://en.wikipedia.org/wiki/SQL
[R]: http://www.r-project.org/
[Processing]: http://processing.org/

Access to the data that the code acts upon is not mine to give, so the code is primarily there to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here.

-171 ML and Stats People on Twitter 2008-12-19 04:11:45 0000-00-00 00:00:00 draft I started using the social, "micro-blogging" service [Twitter][] in February this year simply because I had been seeing so much commentary about it, both good and bad. Since then, I've posted [800+ updates], amassed over 100 [followers][] and [follow][] nearly that many myself.

[twitter]: http://twitter.com/
[follow]: http://twitter.com/mdreid/friends
[followers]: http://twitter.com/mdreid/followers

What has surprised me about Twitter is how many people I have found on there who are active, or at least interested, in machine learning and statistics.
A collection of people researching in or around statistics and machine learning on Twitter:

@arthegall
@mja
@nealrichter
@dwf
@brendan642
@dtunkelang
@ealdent
@mikiobraun
@lemire
@SoloGen
@markusweimer
@pongba
@moorejh
@peteskomoroch
@smolix
@DataJunkie
@filterfish
@ansate

diff --git a/inductio.sql b/inductio.sql deleted file mode 100644 index c499c8f..0000000 --- a/inductio.sql +++ /dev/null @@ -1,392 +0,0 @@ --- MySQL dump 10.10 --- --- Host: localhost Database: confla_wordpress --- ------------------------------------------------------ --- Server version 5.0.27-standard-log - -/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; -/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; -/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; -/*!40101 SET NAMES utf8 */; -/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; -/*!40103 SET TIME_ZONE='+00:00' */; -/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; -/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; -/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; -/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; - --- --- Table structure for table `wp_WP_HASHCASH` --- - -DROP TABLE IF EXISTS `wp_WP_HASHCASH`; -CREATE TABLE `wp_WP_HASHCASH` ( - `hash` varchar(32) NOT NULL, - `day` datetime NOT NULL, - KEY `hash` (`hash`,`day`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1; - --- --- Dumping data for table `wp_WP_HASHCASH` --- - -LOCK TABLES `wp_WP_HASHCASH`
WRITE; -/*!40000 ALTER TABLE `wp_WP_HASHCASH` DISABLE KEYS */; -/*!40000 ALTER TABLE `wp_WP_HASHCASH` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp__wp_hashcash` --- - -DROP TABLE IF EXISTS `wp__wp_hashcash`; -CREATE TABLE `wp__wp_hashcash` ( - `hash` varchar(32) NOT NULL, - `day` datetime NOT NULL, - KEY `wphc_hashday_indx` (`hash`,`day`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1; - --- --- Dumping data for table `wp__wp_hashcash` --- - -LOCK TABLES `wp__wp_hashcash` WRITE; -/*!40000 ALTER TABLE `wp__wp_hashcash` DISABLE KEYS */; -INSERT INTO `wp__wp_hashcash` VALUES ('3b58b93343ff00c8bb404cd07bc76efc','2008-01-16 02:56:12'); -/*!40000 ALTER TABLE `wp__wp_hashcash` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_comments` --- - -DROP TABLE IF EXISTS `wp_comments`; -CREATE TABLE `wp_comments` ( - `comment_ID` bigint(20) unsigned NOT NULL auto_increment, - `comment_post_ID` int(11) NOT NULL default '0', - `comment_author` tinytext NOT NULL, - `comment_author_email` varchar(100) NOT NULL default '', - `comment_author_url` varchar(200) NOT NULL default '', - `comment_author_IP` varchar(100) NOT NULL default '', - `comment_date` datetime NOT NULL default '0000-00-00 00:00:00', - `comment_date_gmt` datetime NOT NULL default '0000-00-00 00:00:00', - `comment_content` text NOT NULL, - `comment_karma` int(11) NOT NULL default '0', - `comment_approved` varchar(20) NOT NULL default '1', - `comment_agent` varchar(255) NOT NULL default '', - `comment_type` varchar(20) NOT NULL default '', - `comment_parent` bigint(20) NOT NULL default '0', - `user_id` bigint(20) NOT NULL default '0', - `openid` tinyint(1) NOT NULL default '0', - PRIMARY KEY (`comment_ID`), - KEY `comment_approved` (`comment_approved`), - KEY `comment_post_ID` (`comment_post_ID`), - KEY `comment_approved_date_gmt` (`comment_approved`,`comment_date_gmt`), - KEY `comment_date_gmt` (`comment_date_gmt`) -) ENGINE=MyISAM AUTO_INCREMENT=275 DEFAULT CHARSET=utf8; - --- --- Dumping data for table `wp_comments` --- - -LOCK TABLES `wp_comments` WRITE; -/*!40000 ALTER TABLE `wp_comments` DISABLE KEYS */; -INSERT INTO `wp_comments` VALUES (4,18,'Chalk is a “Feelie” « The Unapologetic Mathematician','','http://unapologetic.wordpress.com/2007/10/19/chalk-is-a-feelie/','66.135.48.143','2007-10-20 04:27:12','2007-10-19 18:27:12','

[...] the more specific connection to interactive fiction. Then Mark at Inductio Ex Machina contributed this sample transcript of such a [...]

\n',0,'1','Incutio XML-RPC -- WordPress/MU','pingback',0,0,0),(3,12,'Mark','mark@conflate.net','http://conflate.net','150.203.214.92','2007-09-24 14:32:36','2007-09-24 04:32:36','

Nothing to see here, I\'m just testing out whether the comments are working.

\n\n

I\'m assuming markdown is enabled for these?

\n',0,'1','Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/522.11.1 (KHTML, like Gecko) Version/3.0.3 Safari/522.12.1','',0,2,0),(5,18,'CliftonSnyder.net » From Inductio Ex Machina: The Mathematical Grue','','http://cliftonsnyder.net/?p=94','65.60.186.20','2007-10-27 03:50:44','2007-10-26 17:50:44','

[...] Cave meets nerd humor; this is fantastic stuff: The Mathematical Grue. Non-nerds needn’t apply. Just to give you a [...]

\n',0,'1','Incutio XML-RPC -- WordPress/2.0.10','pingback',0,0,0),(37,25,'Mark Reid','mark@threewordslong.com','http://conflate.net/inductio','203.143.165.108','2008-02-25 11:37:30','2008-02-25 00:37:30','

My undergrad degree was in pure maths so I\'m quite susceptible to bouts of Math++. I would have thought lots of exposure when I was young would have helped boost my immunity.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-au) AppleWebKit/523.15.1 (KHTML, like Gecko) Version/3.0.4 Safari/523.15','',0,0,0),(36,23,'Daniel Lemire','lemire@acm.org','http://www.daniel-lemire.com/','24.37.15.142','2008-02-25 09:58:22','2008-02-24 22:58:22','

Quite cute indeed.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en; rv:1.8.1.8) Gecko/20071010 Camino/1.5.2 (MultiLang)','',0,0,0),(35,25,'Daniel Lemire','lemire@acm.org','http://www.daniel-lemire.com/','24.37.15.142','2008-02-25 09:55:43','2008-02-24 22:55:43','

I doubt Math++ helps your career on the long run.

\n\n

I have been guilty of Math++ myself, but given that I was trained as a mathematician, this is understandable.

\n\n

Philosophers suffer from a similar syndrome: they tend to write very long sentences with big words.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en; rv:1.8.1.8) Gecko/20071010 Camino/1.5.2 (MultiLang)','',0,0,0),(32,23,'Vishal','vishal.lama@gmail.com','http://topologicalmusings.wordpress.com/','209.33.229.20','2008-02-20 18:22:42','2008-02-20 07:22:42','

Wow! Really nice result.

\n',0,'1','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080201 Firefox/2.0.0.12','',0,0,0),(34,26,'Daniel Lemire','lemire@acm.org','http://www.daniel-lemire.com/','24.37.15.142','2008-02-24 17:59:16','2008-02-24 06:59:16','

You forgot swivel. It has some pretty good data sets.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en; rv:1.8.1.8) Gecko/20071010 Camino/1.5.2 (MultiLang)','',0,0,0),(51,0,'','','','75.126.57.55','2008-03-13 16:33:20','2008-03-13 05:33:20','\n',0,'1','Incutio XML-RPC -- WordPress/2.3.3','',0,0,0),(52,26,'Brendan O\'Connor','brenocon@gmail.com','http://socialscienceplusplus.blogspot.com/','76.191.205.197','2008-03-13 20:59:27','2008-03-13 09:59:27','

An addition: Many Eyes -- a site where anyone can upload data sets. It also has visualizations people have done.\nhttp://www.many-eyes.com/\nhttp://services.alphaworks.ibm.com/manyeyes/home

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.4; en-US; rv:1.9b4) Gecko/2008030317 Firefox/3.0b4','',0,0,0),(39,26,'Vishal','vishal.lama@gmail.com','http://topologicalmusings.wordpress.com/','209.33.229.20','2008-02-26 17:17:14','2008-02-26 06:17:14','

This is really precious. Thanks for posting this. I might actually need this.

\n',0,'1','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080201 Firefox/2.0.0.12','',0,0,0),(40,25,'Kieran G. Larkin','spiralphase@yahoo.com.au','http://www.physics.usyd.edu.au/~larkin/','203.12.172.254','2008-02-27 08:59:30','2008-02-26 21:59:30','

I can recommend Terry Tao’s tips on writing readable maths papers, even if I don’t always follow them:

\n\n

http://terrytao.wordpress.com/advice-on-writing-papers/

\n\n

Recently I spent (wasted more like) an afternoon reading a long and densely mathematical paper on Euclidean Motion Group Representations. After 32 pages I finally realised the point of the paper. To paraphrase (the equations) it proved that if one takes a pizza and cuts it into N separate sectors, then reassembling all the N sectors in the original configuration gives the original pizza. I wish the authors had given a similar summary in their work, interspersing the equations with some motivation and insight into their long, arduous and yet noble derivations. Then I’d have more time to contribute inciteful (sic) comments to blogs like this.

\n\n

Postscript: I do not wish to undermine the importance of finding rigorous proofs for what appear to be deceptively simple and obvious geometrical “facts”, but I would like to be informed by clearer exposition. I guess that means I’ll never be pure, mathematically speaking!

\n',0,'1','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0','',0,0,0),(54,23,'Delip Rao','delip.rao@gmail.com','http://resnotebook.blogspot.com','128.220.117.40','2008-03-15 08:08:30','2008-03-14 21:08:30','

I am tempted to post Alex\'s classic paper \"down with determinants!\".

\n\n

http://www.axler.net/DwD.pdf

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1.12) Gecko/20080201 Firefox/2.0.0.12','',0,0,0),(108,25,'The Seductive Power of Mathematics « Apperceptual','','http://apperceptual.wordpress.com/2008/05/24/the-seductive-power-of-mathematics/','72.233.2.65','2008-05-25 02:07:15','2008-05-24 16:07:15','

[...] In machine learning and artificial intelligence, I sometimes worry that there is an excess of math. Others have complained about “the use of unnecessary and obfuscatory mathematics to improve the [...]

\n',0,'1','Incutio XML-RPC -- WordPress/MU','pingback',0,0,0),(226,42,'Prediction and the Axiom of Choice < Inductio Ex Machina','','http://conflate.net/inductio/2008/08/prediction-and-the-axiom-of-choice/','75.126.57.55','2008-08-29 11:33:02','2008-08-29 01:33:02','

[...] the intuitionist in me looks upon this result much as I see the Banach-Tarski paradox. That is, as evidence against [...]

\n',0,'1','Incutio XML-RPC -- WordPress/2.6','pingback',0,0,0),(107,26,'Mark Reid','mark@conflate.net','http://conflate.net/inductio','203.143.165.108','2008-05-14 09:42:26','2008-05-13 23:42:26','

Thanks everyone for the extra links.

\n\n

I\'ve also recently discovered DataMob which appears to be a collection of datasets and interfaces.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_2; en-au) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.1 Safari/525.18','',0,2,0),(261,143,'Mark Reid','mark@conflate.net','http://mark.reid.name','59.167.49.189','2008-11-19 07:19:30','2008-11-18 21:19:30','

Thanks for spotting the typo Ron. It\'s now fixed.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; en-au) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1','',0,2,0),(210,45,'COLT 2008 Highlights < Inductio Ex Machina','','http://conflate.net/inductio/2008/07/colt-2008-highlights/','75.126.57.55','2008-07-27 21:41:46','2008-07-27 11:41:46','

[...] sessions but I did catch the workshop on evaluation in machine learning. Since I’ve already written about that and didn’t attend any of the UAI sessions, I’ll focus on the COLT stuff I found [...]

\n',0,'1','Incutio XML-RPC -- WordPress/2.6','pingback',0,0,0),(211,46,'Robin Hanson','rhanson@gmu.edu','http://hanson.gmu.edu','70.17.103.150','2008-07-27 23:22:43','2008-07-27 13:22:43','

Thanks for the praise!

\n',0,'1','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.1) Gecko/2008070208 Firefox/3.0.1','',0,0,0),(80,33,'Vishal Lama','vishal.lama@gmail.com','http://topologicalmusings.wordpress.com/','209.33.225.110','2008-04-09 16:50:05','2008-04-09 06:50:05','

Fantastic post! This reminds of a very famous large (Indian) religious group/sect that argues man could not have landed on the moon because the probability of doing so was/is extremely small!

\n',0,'1','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5','',0,0,1),(269,164,'Ahmad Yamin','abyamin000@yahoo.com','','202.56.7.21','2008-12-06 03:30:48','2008-12-05 17:30:48','

hi, I am from Bangladesh and I like to join there.\nRight now I am working Marketing dept of Rajshahi University....let me know about the issues

\n',0,'1','Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)','',0,0,0),(270,164,'Mark Reid','mark@conflate.net','http://mark.reid.name','59.167.62.92','2008-12-06 11:42:16','2008-12-06 01:42:16','

Hi Ahmad,

\n\n

All the relevant details can be found in the links I provided. If you have any particular questions you need answers to you should ask the summer school organisers - I am just a speaker.

\n\n

Their addresses can be found by following the first link.

\n',0,'1','Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_4_11; en) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.22','',0,2,0),(271,143,'Justin Domke','domke@cs.umd.edu','http://justindomke.wordpress.com','128.8.118.5','2008-12-09 11:15:45','2008-12-09 01:15:45','

David MacKay\'s book contains a sort-of similar figure. (Less of a \'proof\', but more intuitive.) Check out p. 35 of these notes:

\n\n

http://www.inference.phy.cam.ac.uk/mackay/itprnn/ps/22.40.pdf

\n',0,'1','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.18) Gecko/20081029 Firefox/2.0.0.18','',0,0,0),(193,40,'Laurens vd Maaten','l.vandermaaten@micc.unimaas.nl','','137.120.254.119','2008-07-18 20:55:09','2008-07-18 10:55:09','

I think you should try using t-SNE instead of PCA, as it typically gives much better visualizations.

\n\n

Example plots, Matlab code, and a paper on t-SNE are available from: http://www.cs.unimaas.nl/l.vandermaaten/tsne

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_4; nl-nl) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1','',0,0,0),(267,134,'what is \"irregular\"','wray.buntine@nicta.com.au','','203.143.165.81','2008-11-26 13:10:58','2008-11-26 03:10:58','

Reading more into this paper, the question arises, why is a verb irregular. It seems these are archaic verbs ... reflecting times past. Older languages related to English did vowel changes to conjugate verbs (e.g., Old Norse, see modern Icelandic as an example), not unlike many non-Indo-European languages like Finnish or Arabic.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.0.4) Gecko/2008102920 Firefox/3.0.4','',0,0,0),(258,143,'Daniel Lemire','lemire@gmail.com','http://www.daniel-lemire.com/','96.20.156.153','2008-11-17 23:56:06','2008-11-17 13:56:06','

Thanks! I am sad to say that I did not know this inequality. (Or, more likely, I forgot about it.)

\n\n

(Of course, I knew of the various basic special cases, such as f(x) = x^2.)

\n\n

You are doing a good job formatting math. in your blog too!

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; fr; rv:1.9.0.1) Gecko/2008070206 Firefox/3.0.1','',0,0,0),(196,33,'Evaluation Methods for Machine Learning < Inductio Ex Machina','','http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/','75.126.57.55','2008-07-21 21:18:54','2008-07-21 11:18:54','

[...] talk touched on a number of criticisms that I had found in Jacob Cohen’s paper “The Earth is Round (p < 0.05)” making the case that people often incorrectly report and incorrectly interpret p-values for [...]

\n',0,'1','Incutio XML-RPC -- WordPress/2.5.1','pingback',0,0,0),(274,40,'leacrzbp','abxnnd@yisvyu.com','http://ppjfpejmdksa.com/','84.16.235.39','2008-12-23 11:20:01','2008-12-23 01:20:01','

AcDzlU msvsqowfsmpu, [url=http://kbpjygdvszbb.com/]kbpjygdvszbb[/url], [link=http://heyxmflsubey.com/]heyxmflsubey[/link], http://wtqkopfyirmr.com/

\n\n

[WORDPRESS HASHCASH] The poster sent us \'0 which is not a hashcash value.

\n',0,'spam','Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)','',0,0,0),(273,124,'Bob Carpenter','carp@alias-i.com','http://lingpipe-blog.com/','76.190.199.32','2008-12-18 05:18:46','2008-12-17 19:18:46','

I also thought about getting this book, so thanks for saving me some time. I was so turned off by the breathless style of The Numerati (another pop book about data mining) that I think I\'ll wait a while before delving into another pop quant book.

\n\n

I believe the right question to ask is whether we need domain experts at all, or just need a whole lot of data.

\n\n

I think the answer\'s pretty obvious. Even the basic structure of a statistical model entails a large degree of design in everything from setting up dependencies to selecting predictors.

\n\n

The most accurate natural language systems bring in all kinds of human-generated knowledge sources from labeled data for classifiers or part-of-speech taggers to domain-specific dictionaries to full-blown ontologies.

\n\n

I\'d cut just about anybody slack for not sorting out all of our redundant terminology. I only just recently realized that so-called max entropy classifiers, logistic regression, and one-layer neural nets with sigmoid/softmax activation were the same thing, and that L1 norms, Laplace priors, double-exponential priors, and the \"lasso\" are the same thing.

\n',0,'1','Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.4) Gecko/2008102920 Firefox/3.0.4','',0,0,0),(222,42,'Mark Reid','mark@conflate.net','http://conflate.net/inductio','203.143.165.16','2008-08-14 10:14:00','2008-08-14 00:14:00','

Andrej Bauer has an excellent post on the use of intuitionistic mathematics in physics with links to its implications for infinitesimals and physics.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_4; en-au) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1','',0,2,0),(223,38,'Books that Change Lives « digitalclass','','http://digitalclass.wordpress.com/2008/08/17/books-that-change-lives/','72.232.131.30','2008-08-18 06:57:06','2008-08-17 20:57:06','

[...] Turney, AI researcher Tim O’Reilly, Publisher Steven Leckart, Editor Mark Reid, AI researcher Trent Hamm, financial blogger Eric Rawlins, data architect Larry Winget, [...]

\n\n

[WORDPRESS HASHCASH] The comment\'s server IP (72.232.131.30) doesn\'t match the comment\'s URL host IP (72.232.101.42) and so is spam.

\n',0,'1','The Incutio XML-RPC PHP Library -- WordPress/MU','pingback',0,0,0),(79,26,'Dolores Labs 日本 » Blog Archive » The Manifesto マニフェスト','','http://jp.doloreslabs.com/?p=3','67.207.137.234','2008-04-09 07:45:50','2008-04-08 20:45:50','

[...] 以前、マシンラーニング(機械学習)についてどんな論文を書こうかスタンフォードで考えていたとき、会話はいつもどんなデータセットが利用可能かということに左右されていました。現存する使用可能データを把握し、そこから何をしたいかを見極めていたのです。ある目的のためにデザインされたデータを転用するための議論に膨大な時間を費やしていました。データを使用する多くの分野で同じことが起きていると思います。 [...]

\n',0,'1','Incutio XML-RPC -- WordPress/2.3.3','pingback',0,0,0),(74,23,'Mark Reid','mark@threewordslong.com','http://conflate.net/inductio','203.143.165.108','2008-04-03 15:16:42','2008-04-03 04:16:42','

Delip: Obviously, you succumbed to the temptation to post it. :)

\n\n

I hadn\'t seen \"Down with Determinants\" before. He makes some good points but, like any tool, they can be used appropriately or inappropriately.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_2; en-au) AppleWebKit/525.13 (KHTML, like Gecko) Version/3.1 Safari/525.13','',0,0,0),(76,28,'Mark Reid','mark@conflate.net','http://conflate.net/inductio','203.143.165.108','2008-04-03 15:37:01','2008-04-03 04:37:01','

I\'ve set up a git repository for Feed Bag over at GitHub

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_2; en-au) AppleWebKit/525.13 (KHTML, like Gecko) Version/3.1 Safari/525.13','',0,2,1),(82,26,'Rufus Pollock','rufus.pollock@okfn.org','http://www.ckan.net','81.101.137.128','2008-05-01 01:55:33','2008-04-30 15:55:33','

Have you seen http://www.ckan.net?

\n\n

CKAN is the Comprehensive Knowledge Archive Network, a registry of open knowledge packages and projects (and a few closed ones). CKAN is the place to search for open knowledge resources as well as register your own – be that a set of Shakespeare\'s works, a global population density database, the voting records of MPs, or 30 years of US patents.

\n\n

Those familiar with freshmeat or CPAN can think of CKAN as providing an analogous service for open knowledge.

\n',0,'1','Mozilla/5.0 (X11; U; Linux i686; en-GB; rv:1.9b5) Gecko/2008041514 Firefox/3.0b5','',0,0,0),(250,140,'Manu','manuelaraoz@hotmail.com','http://www.ochentacentavos.com.ar','190.30.5.101','2008-11-07 12:01:18','2008-11-07 02:01:18','

Congratulations!

\n\n

This project seems promising :D

\n',0,'1','Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.17) Gecko/20061201 Firefox/2.0.0.17 (Ubuntu-feisty)','',0,0,0),(251,140,'Jason','jason.brownlee05@gmail.com','http://neverreadpassively.com','118.208.250.41','2008-11-07 14:45:55','2008-11-07 04:45:55','

Congratulations!

\n',0,'1','Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.4; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3','',0,0,0),(252,140,'Vishal','vishal.lama@gmail.com','http://topologicalmusings.wordpress.com/','64.255.84.194','2008-11-07 15:47:10','2008-11-07 05:47:10','

My heartiest congratulations!

\n',0,'1','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/0.3.154.9 Safari/525.19','',0,0,0),(133,38,'links for 2008-06-15 « Simply… A User','','http://simplyauser.wordpress.com/2008/06/15/links-for-2008-06-15/','66.135.48.142','2008-06-15 10:31:51','2008-06-15 00:31:51','

[...] Research-Changing Books < Inductio Ex Machina (tags: books inspiration list literature marketing recommendations toread **) [...]

\n',0,'1','Incutio XML-RPC -- WordPress/MU','pingback',0,0,0),(146,40,'Bart','test@fastmail.fm','','59.97.56.63','2008-06-18 16:46:03','2008-06-18 06:46:03','

Thank you. Thank you. Thank you so much for this article.

\n',0,'1','Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9b5) Gecko/2008050509 Firefox/3.0b5','',0,0,0),(147,38,'Lone Gunman :: Book Lists','','http://www.lonegunman.co.uk/2008/06/20/book-lists/','208.97.175.18','2008-06-20 16:50:47','2008-06-20 06:50:47','

[...] Mark Reid, AI researcher [...]

\n',0,'1','Incutio XML-RPC -- WordPress/2.3.3','pingback',0,0,0),(173,40,'Peter Skomoroch','peter.skomoroch@gmail.com','http://www.datawrangling.com','69.109.79.65','2008-06-28 14:45:35','2008-06-28 04:45:35','

Great post, I wish more people would post the associated code behind visualizations the way you have here...

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0','',0,0,0),(138,26,'Visualising 19th Century Reading in Australia < Inductio Ex Machina','','http://conflate.net/inductio/2008/06/visualising-reading/','75.126.57.55','2008-06-17 13:10:39','2008-06-17 03:10:39','

[...] on the lookout for interesting data sets, I suggested that we apply some basic data analysis tools to the database to see what kind of [...]

\n',0,'1','Incutio XML-RPC -- WordPress/2.5.1','pingback',0,0,0),(139,40,'Daniel Lemire','lemire@gmail.com','http://www.daniel-lemire.com','96.20.156.153','2008-06-17 13:46:26','2008-06-17 03:46:26','

Here is a vaguely related reference:

\n\n

http://www.daniel-lemire.com/fr/abstracts/CASTA2006.html

\n\n

Owen Kaser, Daniel Lemire, Steven Keith, The LitOLAP Project: Data Warehousing with Literature, CaSTA 2006, Fredericton, 2006.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.4; en-US; rv:1.9) Gecko/2008061004 Firefox/3.0','',0,0,0),(253,140,'Rajeev','rajs2010@gmail.com','','61.95.189.180','2008-11-08 10:49:11','2008-11-08 00:49:11','

CONGRATULATIONS.\ni guess it will take considerable amount of time before humans can better such \'projects\' without \'cheating\'.

\n',0,'1','Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.12) Gecko/20080129 Iceweasel/2.0.0.12 (Debian-2.0.0.12-0etch1)','',0,0,0),(242,134,'anon','anon@aol.com','','216.178.67.113','2008-10-20 02:22:04','2008-10-19 16:22:04','

It seems that you meant to write \"inversely proportional\" instead of \"proportional\".

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; en-us) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1','',0,0,0),(243,134,'Mark Reid','mark@conflate.net','http://mark.reid.name','59.167.37.57','2008-10-20 07:05:52','2008-10-19 21:05:52','

Anon, thanks for catching that. It\'s now fixed.

\n',0,'1','Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_4_11; en) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.22','',0,2,0),(254,140,'Reader','reader@world.www','','128.220.117.40','2008-11-09 12:34:58','2008-11-09 02:34:58','

Congratulations!

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3','',0,0,0),(260,143,'Ron Yang','ron.yang@oracle.com','','69.250.173.205','2008-11-19 05:37:08','2008-11-18 19:37:08','

I think there\'s a typo in your picture.. instead of f(E[(x]), how about f(E[x])

\n',0,'1','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/0.4.154.18 Safari/525.19','',0,0,0),(236,12,'A Year of Research Blogging < Inductio Ex Machina','','http://conflate.net/inductio/2008/09/a-year-of-research-blogging/','75.126.57.55','2008-09-22 17:00:14','2008-09-22 07:00:14','

[...] Just a short post to reflect on the year that has passed since I started this blog. [...]

\n',0,'1','Incutio XML-RPC -- WordPress/2.6','pingback',0,0,0),(237,124,'ansate','postulant@gmail.com','http://www.xanga.com/ansate','67.40.34.203','2008-09-28 00:31:58','2008-09-27 14:31:58','

thanks for the review! I\'d been thinking about reading this but hadn\'t gotten to it. Sounds like he\'s enthusiastic about the same things I am, but doesn\'t add enough to the discussion to be worth it to us geeks who used these arguments in our grad school entrance essays.

\n',0,'1','Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.4; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3','',0,0,0),(238,124,'Ricardo Niederberger Cabral','ricardo@isnotworking.com','http://isnotworking.com/','189.25.194.175','2008-09-30 10:21:41','2008-09-30 00:21:41','

Great review!

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3','',0,0,0),(126,42,'Daniel Lemire','lemire@gmail.com','http://www.daniel-lemire.com','96.20.156.153','2008-06-12 23:10:34','2008-06-12 13:10:34','

Assuming we live in a Turing Machine, which is entirely possible, then who cares about anything that is not computable?

\n\n

That is the value of experimental work and receiving feedback. We assume there is such a thing as a discontinuous function. That\'s a nice model. But does it match anything we can experience?

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.4; en-US; rv:1.9) Gecko/2008061004 Firefox/3.0','',0,0,0),(124,24,'Fr.','briatte+markreid@gmail.com','http://phnk.com/','78.151.67.123','2008-06-01 20:37:57','2008-06-01 10:37:57','

Interesting trick, thanks for mentioning it. My own post has aged, I\'ll add an update to redirect readers.

\n',0,'1','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_3; en-us) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.1 Safari/525.20','',0,0,0),(125,24,'Boîte noire » Archive du blog » A bibliographic workflow using CiteULike and BibDesk','','http://phnk.com/blog/tech/citeulike-and-bibdesk/','88.191.250.14','2008-06-01 20:41:25','2008-06-01 10:41:25','

[...] Update, 1 June 2008: the Web import function is now perfectly functional, and will import many references from Google Scholar. Mark Reid also has a powerful way to connect CiteULike and BibDesk through external file groups. [...]

\n',0,'1','Incutio XML-RPC -- WordPress/2.5.1','pingback',0,0,0); -/*!40000 ALTER TABLE `wp_comments` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_links` --- - -DROP TABLE IF EXISTS `wp_links`; -CREATE TABLE `wp_links` ( - `link_id` bigint(20) NOT NULL auto_increment, - `link_url` varchar(255) NOT NULL default '', - `link_name` varchar(255) NOT NULL default '', - `link_image` varchar(255) NOT NULL default '', - `link_target` varchar(25) NOT NULL default '', - `link_category` bigint(20) NOT NULL default '0', - `link_description` varchar(255) NOT NULL default '', - `link_visible` varchar(20) NOT NULL default 'Y', - `link_owner` int(11) NOT NULL default '1', - `link_rating` int(11) NOT NULL default '0', - `link_updated` datetime NOT NULL default '0000-00-00 00:00:00', - `link_rel` varchar(255) NOT NULL default '', - `link_notes` mediumtext NOT NULL, - `link_rss` varchar(255) NOT NULL default '', - PRIMARY KEY (`link_id`), - KEY `link_category` (`link_category`), - KEY `link_visible` (`link_visible`) -) ENGINE=MyISAM AUTO_INCREMENT=30 DEFAULT CHARSET=utf8; - --- --- Dumping data for table `wp_links` --- - -LOCK TABLES `wp_links` WRITE; -/*!40000 ALTER TABLE `wp_links` DISABLE KEYS */; -INSERT INTO `wp_links` VALUES (8,'http://hunch.net','Machine Learning (Theory)','','',0,'John Langford\'s Blog','Y',2,7,'0000-00-00 00:00:00','contact met','',''),(12,'http://ml.typepad.com/machine_learning_thoughts/','Machine Learning Thoughts','','',0,'Olivier Bousquet\'s Blog','Y',2,0,'0000-00-00 00:00:00','','','http://ml.typepad.com/machine_learning_thoughts/index.rdf'),(10,'http://emotion.inrialpes.fr/~dangauthier/blog/','Yet Another Machine Learning Blog','','',0,'Pierre Dangauthier\'s Blog','Y',2,0,'0000-00-00 00:00:00','','',''),(11,'http://yaroslavvb.blogspot.com/','Machine Learning, etc','','',0,'Yaroslav Bulatov\'s Blog','Y',2,0,'0000-00-00 00:00:00','contact met','',''),(13,'http://www.stat.columbia.edu/~cook/movabletype/mlm/','Statistical Modeling, Causal Inference, and Social Science','','',0,'','Y',2,0,'0000-00-00 00:00:00','','',''),(14,'http://www.dataminingblog.com/','Data Mining Research','','',0,'Sandro Saitta\'s Blog','N',2,0,'0000-00-00 00:00:00','','',''),(15,'http://geomblog.blogspot.com/','The Geomblog','','',0,'Ruminations on computational geometry, algorithms, theoretical computer science and life','N',2,0,'0000-00-00 00:00:00','','',''),(16,'http://mehve.org/ywml/','yw\'s machine learning blog','','',0,'A Blog by Yee Whye Teh','N',2,0,'0000-00-00 00:00:00','','',''),(17,'http://machine-learning.blogspot.com/','Business Intelligence, Data Mining & Machine Learning','','',0,'José CarlosCortizo Pérez\'s Blog','Y',2,0,'0000-00-00 00:00:00','','',''),(18,'http://apperceptual.wordpress.com/','Apperceptual','','',0,'Peter Turney\'s Blog','Y',2,0,'0000-00-00 00:00:00','','',''),(19,'http://wcohen.blogspot.com/','Cranial Darwinism','','',0,'William Cohen\'s Blog','Y',2,0,'0000-00-00 00:00:00','contact met','',''),(20,'http://undirectedgrad.blogspot.com/','Undirected Grad','','',0,'Jurgen Van Gael\'s Blog','Y',2,0,'0000-00-00 00:00:00','','',''),(21,'http://www.datawrangling.com/','Data Wrangling','','',0,'Pete Skomoroch\'s Blog','Y',2,0,'0000-00-00 00:00:00','','',''),(22,'http://lingpipe-blog.com/','LingPipe Blog','','',0,'Alias-i\'s Blog on NLP and Text Analytics','Y',2,0,'0000-00-00 00:00:00','','',''),(23,'http://www.vetta.org/','Vetta','','',0,'Shane Legg\'s Machine Learning Blog','Y',2,0,'0000-00-00 
00:00:00','','',''),(24,'http://thesilog.sologen.net/','Thesilog','','',0,'Amir massoud Farahmand\'s Blog','Y',2,0,'0000-00-00 00:00:00','','',''),(25,'http://gimmereward.wordpress.com/','Gimme Reward','','',0,'István Szita\'s blog on reinforcement learning','Y',2,0,'0000-00-00 00:00:00','','',''),(26,'http://readingsml.blogspot.com/','Readings in Machine Learning','','',0,'Csaba Szepesvári\'s blog','Y',2,0,'0000-00-00 00:00:00','','',''),(27,'http://machinelearner.blogspot.com/','Machine Learner','','',0,'Lei Tang\'s Blog','Y',2,0,'0000-00-00 00:00:00','','',''),(28,'http://radfordneal.wordpress.com/','Radford Neal\'s Blog','','',0,'','Y',2,0,'0000-00-00 00:00:00','','',''),(29,'http://mikiobraun.blogspot.com/','Marginally Interesting','','',0,'Mikio Braun\'s Blog','Y',2,0,'0000-00-00 00:00:00','','',''); -/*!40000 ALTER TABLE `wp_links` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_openid_identities` --- - -DROP TABLE IF EXISTS `wp_openid_identities`; -CREATE TABLE `wp_openid_identities` ( - `uurl_id` bigint(20) NOT NULL auto_increment, - `user_id` bigint(20) NOT NULL default '0', - `url` text, - `hash` char(32) default NULL, - PRIMARY KEY (`uurl_id`), - UNIQUE KEY `uurl` (`hash`), - KEY `url` (`url`(30)), - KEY `user_id` (`user_id`) -) ENGINE=MyISAM AUTO_INCREMENT=2 DEFAULT CHARSET=latin1; - --- --- Dumping data for table `wp_openid_identities` --- - -LOCK TABLES `wp_openid_identities` WRITE; -/*!40000 ALTER TABLE `wp_openid_identities` DISABLE KEYS */; -INSERT INTO `wp_openid_identities` VALUES (1,2,'http://mark.reid.name/','a3ed658401c3933bfa57153fc4052db5'); -/*!40000 ALTER TABLE `wp_openid_identities` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_options` --- - -DROP TABLE IF EXISTS `wp_options`; -CREATE TABLE `wp_options` ( - `option_id` bigint(20) NOT NULL auto_increment, - `blog_id` int(11) NOT NULL default '0', - `option_name` varchar(64) NOT NULL default '', - `option_value` longtext NOT NULL, - `autoload` varchar(20) NOT NULL default 'yes', - PRIMARY KEY (`option_id`,`blog_id`,`option_name`), - KEY `option_name` (`option_name`) -) ENGINE=MyISAM AUTO_INCREMENT=690 DEFAULT CHARSET=utf8; - --- --- Dumping data for table `wp_options` --- - -LOCK TABLES `wp_options` WRITE; -/*!40000 ALTER TABLE `wp_options` DISABLE KEYS */; -INSERT INTO `wp_options` VALUES (1,0,'siteurl','http://conflate.net/inductio/','yes'),(2,0,'blogname','Inductio Ex Machina','yes'),(3,0,'blogdescription','Thoughts on Machine Learning and 
Inference','yes'),(66,0,'wp_user_roles','a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:51:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}','yes'),(5,0,'users_can_register','','yes'),(6,0,'admin_email','mark@conflate.net','yes'),(7,0,'start_of_week','1','yes'),(8,0,'use_balanceTags','','yes'),(9,0,'use_smilies','','yes'),(10,0,'require_name_email','1','yes'),(11,0,'comments_notify','1','yes'),(12,0,'posts_per_rss','5','yes'),(13,0,'rss_excerpt_length','50','yes'),(14,0,'rss_use_excerpt','0','yes'),(15,0,'mailserve
r_url','mail.example.com','yes'),(16,0,'mailserver_login','login@example.com','yes'),(17,0,'mailserver_pass','password','yes'),(18,0,'mailserver_port','110','yes'),(19,0,'default_category','1','yes'),(20,0,'default_comment_status','open','yes'),(21,0,'default_ping_status','open','yes'),(22,0,'default_pingback_flag','1','yes'),(23,0,'default_post_edit_rows','10','yes'),(24,0,'posts_per_page','1','yes'),(25,0,'what_to_show','posts','yes'),(26,0,'date_format','F j, Y','yes'),(27,0,'time_format','g:i a','yes'),(28,0,'links_updated_date_format','F j, Y g:i a','yes'),(29,0,'links_recently_updated_prepend','','yes'),(30,0,'links_recently_updated_append','','yes'),(31,0,'links_recently_updated_time','120','yes'),(32,0,'comment_moderation','','yes'),(33,0,'moderation_notify','1','yes'),(34,0,'permalink_structure','/%year%/%monthnum%/%postname%/','yes'),(35,0,'gzipcompression','','yes'),(36,0,'hack_file','','yes'),(37,0,'blog_charset','UTF-8','yes'),(38,0,'moderation_keys','','no'),(39,0,'active_plugins','a:6:{i:0;s:19:\"akismet/akismet.php\";i:1;s:19:\"googleanalytics.php\";i:2;s:34:\"latexrender/latexrender-plugin.php\";i:3;s:12:\"markdown.php\";i:4;s:9:\"stats.php\";i:5;s:15:\"wp-hashcash.php\";}','yes'),(40,0,'home','','yes'),(41,0,'category_base','','yes'),(42,0,'ping_sites','http://rpc.pingomatic.com/\r\nhttp://api.feedster.com/ping\r\nhttp://api.my.yahoo.com/rss/ping\r\nhttp://blogsearch.google.com/ping/RPC2\r\nhttp://ping.blo.gs/\r\nhttp://ping.feedburner.com\r\nhttp://rpc.technorati.com/rpc/ping\r\nhttp://rpc.weblogs.com/RPC2\r\nhttp://www.newsisfree.com/RPCCloud','yes'),(43,0,'advanced_edit','0','yes'),(44,0,'comment_max_links','4','yes'),(45,0,'gmt_offset','10','yes'),(46,0,'default_email_category','1','yes'),(47,0,'recently_edited','a:5:{i:0;s:25:\"/themes/simplr/footer.php\";i:2;s:24:\"/themes/simplr/print.css\";i:3;s:25:\"/themes/simplr/single.php\";i:4;s:26:\"/themes/simplr/sidebar.php\";i:5;s:27:\"/themes/simplr/comments.php\";}','no'),(48,0,'use_linksupdate','1','yes'),(49,0,'template','simplr','yes'),(50,0,'stylesheet','simplr','yes'),(51,0,'comment_whitelist','','yes'),(305,0,'page_uris','a:1:{s:5:\"about\";i:2;}','yes'),(53,0,'blacklist_keys','','no'),(54,0,'comment_registration','','yes'),(55,0,'rss_language','en','yes'),(56,0,'html_type','text/html','yes'),(57,0,'use_trackback','0','yes'),(58,0,'default_role','subscriber','yes'),(59,0,'db_version','8201','yes'),(60,0,'uploads_use_yearmonth_folders','1','yes'),(61,0,'upload_path','wp-content/uploads','yes'),(62,0,'secret','CNfhnWz7^FWz5o$5i1oTu%nTzNLt9KR$(pp(aT8gJIbJ4&ySCqXMK4D05DPKsi8a','yes'),(63,0,'blog_public','1','yes'),(64,0,'default_link_category','2','yes'),(65,0,'show_on_front','posts','yes'),(68,0,'rss_0ff4b43bd116a9d8720d689c80e7dfd4','O:9:\"MagpieRSS\":19:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:10:{i:0;a:12:{s:5:\"title\";s:28:\"WordPress 2.7 “Coltrane”\";s:4:\"link\";s:50:\"http://wordpress.org/development/2008/12/coltrane/\";s:8:\"comments\";s:59:\"http://wordpress.org/development/2008/12/coltrane/#comments\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 02:28:57 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:8:\"category\";s:11:\"Development\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=477\";s:11:\"description\";s:318:\"The first thing you’ll notice about 2.7 is its new interface. From the top down, we’ve listened to your feedback and thought deeply about the design and the result is a WordPress that’s just plain faster. 
Nearly every task you do on your blog will take fewer clicks and be faster in 2.7 than it [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:11933:\"

The first thing you’ll notice about 2.7 is its new interface. From the top down, we’ve listened to your feedback and thought deeply about the design and the result is a WordPress that’s just plain faster. Nearly every task you do on your blog will take fewer clicks and be faster in 2.7 than it did in a previous version. (Download it now, or read on for more.)

\n

Next you’ll begin to notice the new features subtly sprinkled through the new interface: the new dashboard that you can arrange with drag and drop to put the things most important to you on top, QuickPress, comment threading, paging, and the ability to reply to comments from your dashboard, the ability to install any plugin directly from WordPress.org with a single click, and sticky posts.

\n

Digging in further you might notice that every screen is customizable. Let’s say you never care about author on your post listings — just click “Screen Options” and uncheck it and it’s instantly gone from the page. The same for any module on the dashboard or write screen. If your screen is narrow and the menu is taking up too much horizontal room, click the arrow to minimize it to be icon-only, and then go to the write page and drag and drop everything from the right column into the main one, so your posting area is full-screen. (For example I like hiding everything except categories, tags, and publish. I put categories and tags on the right, and publish under the post box.)

\n

For a visual introduction to what 2.7 is, check out this video (available in HD, and full screen):

\n

\n

It’s all about you. It’s the next generation of WordPress, which is why we’ve bestowed it with the honor of being named for John Coltrane. And you can download it today.

\n

Last, but certainly not least, this may be the last time you ever have to manually upgrade WordPress again. We heard how tired you were of doing upgrades for yourself and your friends, so now WordPress includes a built-in upgrade that will automatically notify you of new releases, and when you’re ready it will download them, install them, and upgrade your blog with a single click.

\n

(As with any interface change, it may take a little bit of time to acclimate yourself, but soon you’ll find yourself whizzing through the screens. Even people who hated it at first tell us that after a few days they wonder how they got by before.)

\n

The Story Behind 2.7

\n

The real reason Coltrane is such a huge leap forward is that the community was so involved with every step of the process. Over 150 people contributed code directly to the release, our highest ever, with many tens of thousands more participating in the polls, surveys, tests, mailing lists, and other feedback mechanisms the WordPress dev team used in putting this release together.

\n

For some of the back story in the development of 2.7, check out these blog posts (thanks to WeblogToolsCollection for the list):

\n\n

This was interesting to us, a blogging software release we actually blogged about, but the process was hugely informative. Prior to its release today Crazyhorse and 2.7 had been tested by tens of thousands of people on their blogs, hundreds of thousands if you count WordPress.com. The volume of feedback was so high that we decided to push back the release date a month to take time to incorporate it all and do more revisions based on what you guys said.

\n

For those of you wondering why we didn’t call this release 3.0, it’s because we abhor version number inflation. 3.0 will just be the next release after 2.9. The major-features-in-new-point-releases approach also works well for products like OS X, with huge changes between a 10.3 and 10.4.

\n

The Future

\n

Those of you following along at home might have noticed this was our second major redesign of WordPress this year. Whoa nelly! While that wasn’t ideal, and I especially sympathize with those of you creating books or tutorials around WordPress, there’s good news. The changes to WordPress in 2.5 and 2.7 were necessary for us to break free of much of the legacy cruft and interface bloat that had built up over the years (gradually) and more importantly provide us with a UI framework and interface language we can use at the foundation to build tomorrow’s WordPress on, to express ideas we haven’t been able to before. So at the end of 2009 I expect, interface-wise, WordPress to look largely the same as it does now.

\n

That said, we couldn’t be more excited about the future with regards to features. Now that we’ve cleared out more basic things, we are looking forward in the coming year to really tackling media handling including audio and video, better tools for plugin and theme developers, widgets, theme updates, more integrated and contextual help, and easier integration with projects like BuddyPress and bbPress.

\n

Thank Yous

\n

We would like to take a moment to thank the following WordPress.org users for being a part of 2.7: Verena Segert, Ben Dunkle, 082net, _ck_, Aaron Brazell, Aaron Campbell, Aaron Harp, aaron_guitar, abackstrom, Alex Rabe, Alex Shiels, anderswc, andr, Andrew Ozz, andy, Andy Peatling, Austin Matzko, axelseaa, bendalton, Benedict Eastaugh, Betsy Kimak, Björn Wijers, bobrik, brianwhite, bubel, Byrne Reese, caesarsgrunt, capripot, Casey Bisson, Charles E. Frees-Melvin, Chris Johnston, codestyling, corischlegel, count_0, Daniel Jalkut, Daniel Torreblanca, David McFarlane, dbuser123, Demetris Kikizas, Dion Hulse, docwhat, Donncha O Caoimh, Doug Stewart, Dougal Campbell, dsader, dtsn, dwc, g30rg3x, guillep2k, Hailin Wu, Hans Engel, Jacob Santos, Jamie Rumbelow, Jan Brasna, Jane Wells, Jean-LucfromBrussels, Jennifer Hodgdon, Jeremy Clarke, Jérémie Bresson, jick, Joe Taiabjee, John Blackbourn, John Conners, John Lamansky, johnhennmacc, Joost de Valk, Joseph Scott, kashani, Kim Parsell, Lloyd Budd, Lutz Schröer, Malaiac, Mark Jaquith, Mark Steel, Matt Freedman, Matt Mullenweg, Matt Thomas, matthewh84, mattyrob, mcs_trekkie, Michael Adams, Michael Hampton, MichaelH, mictasm, Mike Schinkel, msi08, msw0418, mtekk, Nick Momrik, Nikolay Bachiyski, Noel Jackson, Otto, Ozh, paddya, paul, pedrop, pishmishy, Po0ky, RanYanivHartstein, raychampagne, rdworth, reinkim, rickoman, rm53, rnt, Robert Accettura, roganty, Ryan Boren, Ryan McCue, Sam Bauers, Sam_a, schiller, Scott Houst, sekundek, Shane, Simek, Simon Wheatley, sivel, st_falcon, stefano, strider72, tai, takayukister, techcookies, Terragg, thinlight, tott, Trevor Fitzgerald, tschai, Txanny, Valiallah (Mani) Monajjemi, Viper007Bond, Vladimir Kolesnikov, wasp, wet, wfrantz, x11tech, xknown, xorax, ydekproductions, yoavf, yonosoytu, yoshi, zedlander

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:55:\"http://wordpress.org/development/2008/12/coltrane/feed/\";}s:7:\"summary\";s:318:\"The first thing you’ll notice about 2.7 is its new interface. From the top down, we’ve listened to your feedback and thought deeply about the design and the result is a WordPress that’s just plain faster. Nearly every task you do on your blog will take fewer clicks and be faster in 2.7 than it [...]\";s:12:\"atom_content\";s:11933:\"


\n\";}i:1;a:12:{s:5:\"title\";s:25:\"2.7 Release Candidate Two\";s:4:\"link\";s:66:\"http://wordpress.org/development/2008/12/27-release-candidate-two/\";s:8:\"comments\";s:75:\"http://wordpress.org/development/2008/12/27-release-candidate-two/#comments\";s:7:\"pubdate\";s:31:\"Wed, 10 Dec 2008 00:55:33 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:8:\"category\";s:11:\"Development\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=474\";s:11:\"description\";s:315:\"There comes a time in every WordPress release when it’s ready for the world , to come out of its cocoon and feel the light of the world on its wings for the first time.\nIt’s not quite that time yet, but we’re as close as we’ve ever been, hence the immediate availability of 2.7 Release [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:836:\"

There comes a time in every WordPress release when it’s ready for the world, to come out of its cocoon and feel the light of the world on its wings for the first time.

\n

It’s not quite that time yet, but we’re as close as we’ve ever been, hence the immediate availability of 2.7 Release Candidate 2, or RC2 for short.

\n

Of course if you were already testing 2.7, you can just use the built-in core updater (Tools > Upgrade) to download and install RC2 for you (and later upgrade you to the final release when it’s available) but if not you can use the download link above.

\n

We feel this release is pretty much exactly what we’re going to ship as 2.7, barring any final bugs or polish tweaks that you report or we find.

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:71:\"http://wordpress.org/development/2008/12/27-release-candidate-two/feed/\";}s:7:\"summary\";s:315:\"There comes a time in every WordPress release when it’s ready for the world , to come out of its cocoon and feel the light of the world on its wings for the first time.\nIt’s not quite that time yet, but we’re as close as we’ve ever been, hence the immediate availability of 2.7 Release [...]\";s:12:\"atom_content\";s:836:\"


\n\";}i:2;a:12:{s:5:\"title\";s:33:\"WordPress 2.7 Release Candidate 1\";s:4:\"link\";s:74:\"http://wordpress.org/development/2008/12/wordpress-27-release-candidate-1/\";s:8:\"comments\";s:83:\"http://wordpress.org/development/2008/12/wordpress-27-release-candidate-1/#comments\";s:7:\"pubdate\";s:31:\"Mon, 01 Dec 2008 22:26:14 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Ryan Boren\";}s:8:\"category\";s:8:\"Releases\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=467\";s:11:\"description\";s:312:\"With the release of RC1, we’re in the final leg of development before the release of 2.7.  280 commits since beta 3 have polished the new admin UI (including new menu icons created by the winners of our icon design contest) and fixed all known blocker bugs.\nWe think RC1 is ready for everyone to try [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:843:\"

With the release of RC1, we’re in the final leg of development before the release of 2.7.  280 commits since beta 3 have polished the new admin UI (including new menu icons created by the winners of our icon design contest) and fixed all known blocker bugs.

\n

We think RC1 is ready for everyone to try out.  Please download RC1 and help us make the final release the best it can be.  As always, back up your blog before upgrading.

\n

Get RC1.

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:79:\"http://wordpress.org/development/2008/12/wordpress-27-release-candidate-1/feed/\";}s:7:\"summary\";s:312:\"With the release of RC1, we’re in the final leg of development before the release of 2.7.  280 commits since beta 3 have polished the new admin UI (including new menu icons created by the winners of our icon design contest) and fixed all known blocker bugs.\nWe think RC1 is ready for everyone to try [...]\";s:12:\"atom_content\";s:843:\"


\n\";}i:3;a:12:{s:5:\"title\";s:15:\"WordPress 2.6.5\";s:4:\"link\";s:55:\"http://wordpress.org/development/2008/11/wordpress-265/\";s:8:\"comments\";s:64:\"http://wordpress.org/development/2008/11/wordpress-265/#comments\";s:7:\"pubdate\";s:31:\"Tue, 25 Nov 2008 17:33:56 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Ryan Boren\";}s:8:\"category\";s:11:\"Development\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=462\";s:11:\"description\";s:391:\"WordPress 2.6.5 is immediately available and fixes one security problem and three bugs. We recommend everyone upgrade to this release.\nThe security issue is an XSS exploit discovered by Jeremias Reith that fortunately only affects IP-based virtual servers running on Apache 2.x. If you are interested only in the security fix, copy wp-includes/feed.php and wp-includes/version.php from [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:1251:\"

WordPress 2.6.5 is immediately available and fixes one security problem and three bugs. We recommend everyone upgrade to this release.

\n

The security issue is an XSS exploit discovered by Jeremias Reith that fortunately only affects IP-based virtual servers running on Apache 2.x. If you are interested only in the security fix, copy wp-includes/feed.php and wp-includes/version.php from the 2.6.5 release package.

\n

2.6.5 contains three other small fixes in addition to the XSS fix. The first prevents accidentally saving post meta information to a revision. The second prevents XML-RPC from fetching incorrect post types. The third adds some user ID sanitization during bulk delete requests. For a list of changed files, consult the full changeset between 2.6.3 and 2.6.5.

\n

Note that we are skipping version 2.6.4 and jumping from 2.6.3 to 2.6.5 to avoid confusion with a fake 2.6.4 release that made the rounds. There is not and never will be a version 2.6.4.

\n

Get WordPress 2.6.5.

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:60:\"http://wordpress.org/development/2008/11/wordpress-265/feed/\";}s:7:\"summary\";s:391:\"WordPress 2.6.5 is immediately available and fixes one security problem and three bugs. We recommend everyone upgrade to this release.\nThe security issue is an XSS exploit discovered by Jeremias Reith that fortunately only affects IP-based virtual servers running on Apache 2.x. If you are interested only in the security fix, copy wp-includes/feed.php and wp-includes/version.php from [...]\";s:12:\"atom_content\";s:1251:\"


\n\";}i:4;a:12:{s:5:\"title\";s:27:\"The Results of Project Icon\";s:4:\"link\";s:69:\"http://wordpress.org/development/2008/11/the-results-of-project-icon/\";s:8:\"comments\";s:78:\"http://wordpress.org/development/2008/11/the-results-of-project-icon/#comments\";s:7:\"pubdate\";s:31:\"Mon, 17 Nov 2008 17:02:34 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Jane Wells\";}s:8:\"category\";s:29:\"User Interface2.7contesticons\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=441\";s:11:\"description\";s:345:\"The community has voted, and the votes have been tallied. The winner of Project Icon, with 35% of the votes, is Entry ID “BD,” otherwise known as Ben Dunkle. Congratulations, Ben! The runner-up was VS, otherwise known as Verena Segert, so we’ll be attaching that set to the alternate color palette that is selectable from [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:8013:\"

\"\"The community has voted, and the votes have been tallied. The winner of Project Icon, with 35% of the votes, is Entry ID “BD,” otherwise known as Ben Dunkle. Congratulations, Ben! The runner-up was VS, otherwise known as Verena Segert, so we’ll be attaching that set to the alternate color palette that is selectable from the profile screen. As we prepare for RC1, Ben and Verena will be revising a couple of their icons so that both sets will use the same metaphors, creating the colored “on” states, and creating the larger size of each icon for use in the h2 screen headers. We are very grateful to have had the opportunity to select from so many great options, and would like to express again our appreciation for all the designers who participated in the contest. Thanks also to the more than 3700 people who completed the voting survey and took the time to weigh on on the individual icon sets.

Q.18 Which one of the sets do you think we should use as a basis for the 2.7 icons?

Icon Set   # of votes   % of votes
BD            1285          35%
VS            1080          29%
GB2            424          11%
OSD            376          10%
LS             300           8%
GB1            235           6%
\n

The wide lead of BD and VS showed that voters had a clear preference for these sets.

Q.20 If you could choose a runner-up, which would you choose?

Icon Set   # of votes   % of votes
VS             916          27%
BD             647          19%
LS             522          16%
OSD            488          14%
GB2            462          14%
GB1            331          10%
\n

Question 20 was not mandatory, so a few hundred people skipped it, but the responses we did get (3366 of them) reinforced the fact that the two most popular sets were also the most popular 2nd choices, which made the decision of the judges to go with the popular vote an easy one (take that, electoral college!).

\n

A few of the individual icon metaphors also had a significant lead over the other choices.
Dashboard: 1333 voters (40%) chose a house as the best metaphor. We agree, so both Ben and Verena will be replacing their Dashboard icons.

\n

Media: 2097 voters (65%) chose the combination camera + musical note icon, which was part of Ben’s set. We also really loved it, and Verena will amend her media icon to incorporate this idea.

\n

Plugins: 1682 voters (53%) selected the outlet plug metaphor, which both Ben and Verena used in their sets.

\n

Tools: 1581 voters (49%) liked the combination of two tools better than anything else, so Ben and Verena will try this approach.

\n

So those are the results, and soon you’ll see the new icons coming to a 2.7 installation near you.

\n

Need another look at the entries to remember which one you liked best? Here are some reminder images, as well as the identity of each set’s creator.

BD was Ben Dunkle, a designer, professor and artist from upstate/western New York State. In case you’ve already forgotten, Ben’s icon set is the winner of Project Icon and will become the default icon set after a few minor changes.
VS was Verena Segert, our runner-up, a designer from Germany who presented sets in both grayscale and blue. Her blue icons received more specific voter comments than the gray ones, so we’re planning the second color palette to be in shades of blue so that we can use the blue icon set.
GB was Guillaume Berry, a designer from France who submitted two sets in the same style in order to propose a couple of different metaphors. One of his sets came in third while the other came in last, but whether you only look at the higher scoring set or you combine their votes, Guillaume had the next highest percentage of votes, and many people liked the metaphors he used for various icons. In fact, given the enthusiasm of the community for Guillaume’s icons, we think a great plugin would be one that would allow the user to upload the icon set of their choice. Any volunteers?
OSD was the Open Source Design class at Parsons in New York City, taught by Mushon Zer-Aviv and consisting of students Alexandra Zsigmond, Ed Nacional, Karen Messing, Khurram Bajwa, and Leonie Leibenfrost. Teacher and students worked together to determine their metaphors and visual style.
LS was Luke Smith, a designer from Iowa who specializes in icons among his other design pursuits.
\n

If you need to hire an icon designer any time soon, we highly recommend our Project Icon contestants, who all delivered great work in a very short timeframe. It was great to work with all of them, even for such a short assignment.

\n

So, to sum up:

\n
  1. The winning icon sets by Ben Dunkle and Verena Segert will be incorporated into WordPress 2.7 RC1.
  2. Someone should write a plugin that would allow anyone to upload a custom icon set (I bet the other contestants could be convinced to release their icon sets for such a purpose).
  3. 2.7 is still trucking away, but we can always use help with patches, especially for IE6! (I know, that wasn’t in the main post, but it’s true, so hmph)
\n

Thanks again to everyone who participated in this experiment, and we hope you enjoyed it as much as we did. And congratulations again to Ben and Verena!

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:74:\"http://wordpress.org/development/2008/11/the-results-of-project-icon/feed/\";}s:7:\"summary\";s:345:\"The community has voted, and the votes have been tallied. The winner of Project Icon, with 35% of the votes, is Entry ID “BD,” otherwise known as Ben Dunkle. Congratulations, Ben! The runner-up was VS, otherwise known as Verena Segert, so we’ll be attaching that set to the alternate color palette that is selectable from [...]\";s:12:\"atom_content\";s:8013:\"

\"\"The community has voted, and the votes have been tallied. The winner of Project Icon, with 35% of the votes, is Entry ID “BD,” otherwise known as Ben Dunkle. Congratulations, Ben! The runner-up was VS, otherwise known as Verena Segert, so we’ll be attaching that set to the alternate color palette that is selectable from the profile screen. As we prepare for RC1, Ben and Verena will be revising a couple of their icons so that both sets will use the same metaphors, creating the colored “on” states, and creating the larger size of each icon for use in the h2 screen headers. We are very grateful to have had the opportunity to select from so many great options, and would like to express again our appreciation for all the designers who participated in the contest. Thanks also to the more than 3700 people who completed the voting survey and took the time to weigh on on the individual icon sets.

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Q.18 Which one of the sets do you think we should use as a basis for the 2.7 icons?
Icon Set# of votes% of votes
BD128535%
VS108029%
GB242411%
OSD37610%
LS3008%
GB12356%
\n

The wide lead of BD and VS made it clear that voters had a clear preference for these sets.

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Q.20 If you could choose a runner-up, which would you choose?
Icon Set# of votes% of votes
VS91627%
BD64719%
LS52216%
OSD48814%
GB246214%
GB133110%
\n

Question 20 was not mandatory, so a few hundred people skipped it, but the responses we did get (3366 of them) reinforced the fact that the two most popular sets were also the most popular 2nd choices, which made the decision of the judges to go with the popular vote an easy one (take that, electoral college!).

\n

A few of the individual icon metaphors also had a significant lead over the other choices.
\nDashboard: 1333 voters (40%) chose a house as the best metaphor. We agree, so both Ben and Verena will be replacing their Dashboard icons.

\n

Media: 2097 voters (65%) chose the combination camera + musical note icon, which was part of Ben’s set. We also really loved it, and Verena will amend her media icon to incorporate this idea.

\n

Plugins: 1682 voters (53%) selected the outlet plug metaphor, which both Ben and Verena used in their sets.

\n

Tools: 1581 voters (49%) liked the combination of two tools better than anything else, so Ben and Verena will try this approach.

\n

So those are the results, and soon you’ll see the new icons coming to a 2.7 installation near you.

\n

Need another look at the entries to remember which one you liked best? Here are some reminder images, as well as the identity of each set’s creator.

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\"WinningBD was Ben Dunkle, a designer, professor and artist from upstate/western New York State. In case you’ve already forgotten, Ben’s icon set is the winner of Project Icon and will become the default icon set after a few minor changes.\"VerenaVS was Verena Segert, our runner-up, a designer from Germany who presented sets in both grayscale and blue. Her blue icons received more specific voter comments than the gray ones, so we’re planning the second color palette to be in shades of blue so that we can use the blue icon set.
\"Guillaume\"GuillaumeGB was Guillaume Berry, a designer from France who submitted two sets in the same style in order to propose a couple of different metaphors. One of his sets came in third while the other came in last, but whether you only look at the higher scoring set or you combine their votes, Guillaume had the next highest percentage of votes, and many people liked the metaphors he used for various icons. In fact, given the enthusiasm of the community for Guillaume’s icons, we think a great plugin would be one that would allow the user to upload the icon set of their choice. Any volunteers?
\"MenuOSD was the Open Source Design class at Parson’s in New york City, taught by Mushon Zer-Aviv and consisting of students Alexandra Zsigmond, Ed Nacional, Karen Messing, Khurram Bajwa, Leonie Leibenfrost. Teacher and students worked together to determine their metaphors and visual style.\"LukeLS was Luke Smith, a designer from Iowa who specializes in icons among his other design pursuits.
\n

If you need to hire an icon designer any time soon, we highly recommend our Project Icon contestants, who all delivered great work in a very short timeframe. It was great to work with all of them, even for such a short assignment.

\n

So, to sum up:

\n
    \n
  1. The winning icon sets by Ben Dunkle and Verena Segert will be incorporated into WordPress 2.7 RC1.
  2. \n
  3. Someone should write a plugin that would allow anyone to upload a custom icon set (I bet the other contestants could be convinced to release their icon sets for such a purpose).
  4. \n
  5. 2.7 is still trucking away, but we can always use help with patches, especially for IE6! (I know, that wasn’t in the main post, but it’s true, so hmph)
  6. \n
\n

Thanks again to everyone who participated in this experiment, and we hope you enjoyed it as much as we did. And congratulations again to Ben and Verena!

\n\";}i:5;a:12:{s:5:\"title\";s:20:\"WordPress 2.7 Beta 3\";s:4:\"link\";s:61:\"http://wordpress.org/development/2008/11/wordpress-27-beta-3/\";s:8:\"comments\";s:70:\"http://wordpress.org/development/2008/11/wordpress-27-beta-3/#comments\";s:7:\"pubdate\";s:31:\"Sat, 15 Nov 2008 20:43:47 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:12:\"Mark Jaquith\";}s:8:\"category\";s:11:\"Releases2.7\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=437\";s:11:\"description\";s:387:\"WordPress 2.7 Beta 3 has been released for your testing pleasure. Here are some of the changes since Beta 2 (over 160 changes in total):\n\nNumerous style improvements and refinements.\nAll admin notices now go under the page title.\nPHP Notice fixes.\nDashboard widget options now properly save.\nMenu fixes.\nNew design for Quick Edit.\nCanonical feed URL fixes.\nWalker fixes.\nAn update [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:1720:\"

WordPress 2.7 Beta 3 has been released for your testing pleasure. Here are some of the changes since Beta 2 (over 160 changes in total):

\n
  • Numerous style improvements and refinements.
  • All admin notices now go under the page title.
  • PHP Notice fixes.
  • Dashboard widget options now properly save.
  • Menu fixes.
  • New design for Quick Edit.
  • Canonical feed URL fixes.
  • Walker fixes.
  • An update for Hello Dolly.
  • Plugin installer updates.
  • Numerous font updates.
  • Updated login logo.
  • Switch position of “Save Draft” and “Preview” buttons in publish module.
  • File upload support for MS Office 2007+ file formats.
  • Media upload buttons won’t show if the user doesn’t have the upload capability.
  • Canonical redirects only do yes-www or no-www redirection for domains.
  • Shift-click checkbox range selection improvement.
  • Add New User page now separate.
  • Tag suggest only suggests tags (not other taxonomy terms).
  • QuickPress shows “Submit for Review” if user cannot publish.
  • Private posts/pages, and password-protected posts/pages are rolled into new “Visibility” section of publish module.
\n

If you have already installed Beta 1 or Beta 2, you can update to Beta 3 via the Tools -> Update menu. If you have problems, or if this is your first time in the 2.7 beta ring, you can download and upgrade the old fashioned way.

\n

Get 2.7 Beta 3.

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:66:\"http://wordpress.org/development/2008/11/wordpress-27-beta-3/feed/\";}s:7:\"summary\";s:387:\"WordPress 2.7 Beta 3 has been released for your testing pleasure. Here are some of the changes since Beta 2 (over 160 changes in total):\n\nNumerous style improvements and refinements.\nAll admin notices now go under the page title.\nPHP Notice fixes.\nDashboard widget options now properly save.\nMenu fixes.\nNew design for Quick Edit.\nCanonical feed URL fixes.\nWalker fixes.\nAn update [...]\";s:12:\"atom_content\";s:1720:\"


\n\";}i:6;a:12:{s:5:\"title\";s:27:\"WordPress 2.7: Project Icon\";s:4:\"link\";s:67:\"http://wordpress.org/development/2008/11/wordpress-27-project-icon/\";s:8:\"comments\";s:76:\"http://wordpress.org/development/2008/11/wordpress-27-project-icon/#comments\";s:7:\"pubdate\";s:31:\"Thu, 13 Nov 2008 19:13:10 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Jane Wells\";}s:8:\"category\";s:22:\"User Interface2.7icons\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=419\";s:11:\"description\";s:316:\"Earlier in the beta period, we put out a call here on the development blog for designers in the WordPress community who might be interested in designing custom icons for the 2.7 admin interface. Over a dozen icon designers from around the world responded, so rather than choose just one, we decided to turn the [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:3398:\"

Earlier in the beta period, we put out a call here on the development blog for designers in the WordPress community who might be interested in designing custom icons for the 2.7 admin interface. Over a dozen icon designers from around the world responded, so rather than choose just one, we decided to turn the icon design assignment into a contest so that more people could participate and the community could have a vote in what the new icons should look like.

\n

Once we decided to go with a contest format instead of a single-designer gig, about half the original volunteers changed their minds. The remaining designers each submitted two icons (Posts, Links) in their proposed style. At this stage a couple of designers were thanked for their submissions but eliminated from the competition because their icons were considered too far afield from the WordPress visual style. The remaining designers were given feedback on the icons they had submitted and given about a week to complete the icon set for the menu as well as the list/excerpt icons that are shown on the Edit Posts screen. All but one of these designers finished a complete set, giving us five sets in total.

\n

So now we need to choose a direction. For each of the icon sets, we’ll show you the set itself, the designer’s introduction, and some feedback from the lead developers. After you’ve reviewed all five, place your vote for the set you think has the visual style that is the most suitable for WordPress 2.7. This will be followed by additional votes on specific icons, so if you like the specific image used in one set but like the style of another, you can vote to change the metaphor for a given icon. You’ll also be able to leave general feedback throughout the voting process. When voting has concluded, we’ll review the comments and the votes, and will declare a winner.

\n

Things to bear in mind when making your selections:
A week is not a long time to create 13 icons. The winning set will undergo a revision to be refined, and some icons may be substituted. We asked for all icons in grayscale for the contest. An “on” state and a larger size for screen headers will be designed by the winner. It seemed like too much work to have everyone do multiple states for so many icons.

\n

Ready? Go and take the icon survey. (The survey has now been closed.) Voting will remain open for 48 hours from the time of this post to allow people from all time zones a chance to participate before we close the survey and make a decision (since we’d like to include the new icons in Beta 3).

\n

A Note Regarding the 2.7 Release Date:
As we approach Beta 3, bug tickets continue to be added to Trac, the pain of making things look good in IE6 continues to be felt, and the need to improve accessibility looms. If you love WordPress, are a decent coder, and want to contribute like these icon designers contributed, please consider contributing a patch to help with one of these efforts. Jump right in on current Trac tickets, or pop into the #wordpress-dev IRC channel to ask what to do.

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:72:\"http://wordpress.org/development/2008/11/wordpress-27-project-icon/feed/\";}s:7:\"summary\";s:316:\"Earlier in the beta period, we put out a call here on the development blog for designers in the WordPress community who might be interested in designing custom icons for the 2.7 admin interface. Over a dozen icon designers from around the world responded, so rather than choose just one, we decided to turn the [...]\";s:12:\"atom_content\";s:3398:\"


\n\";}i:7;a:12:{s:5:\"title\";s:20:\"WordPress 2.7 Beta 2\";s:4:\"link\";s:61:\"http://wordpress.org/development/2008/11/wordpress-27-beta-2/\";s:8:\"comments\";s:70:\"http://wordpress.org/development/2008/11/wordpress-27-beta-2/#comments\";s:7:\"pubdate\";s:31:\"Thu, 06 Nov 2008 09:03:57 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Ryan Boren\";}s:8:\"category\";s:8:\"Releases\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=415\";s:11:\"description\";s:395:\"WordPress 2.7 Beta 2 is ready.  Here is a quick rundown of changes since beta 1.\n\nThe Upload button didn’t always show. Fixed.\nJS on the Dashboard broke for blogs with no comments, causing several UI elements to “freeze”. Fixed.\nRecent Drafts Dashboard module didn’t show correct times. Fixed.\nVarious Autosave fixes.\nRedirect after deleting a page from the editor [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:1726:\"

WordPress 2.7 Beta 2 is ready.  Here is a quick rundown of changes since beta 1.

\n
  • The Upload button didn’t always show. Fixed.
  • JS on the Dashboard broke for blogs with no comments, causing several UI elements to “freeze”. Fixed.
  • Recent Drafts Dashboard module didn’t show correct times. Fixed.
  • Various Autosave fixes.
  • Redirect after deleting a page from the editor went back to the deleted page. Fixed.
  • Fixed loading of translations for default TinyMCE plugins.
  • Added avatars to the edit users list.
  • Added some missing translations.
  • Fixed some validation errors.
  • Fixed some PHP warnings and notices.
  • Handle inconsistent file permissions during auto upgrade
  • Change Publish box layout to better accommodate internationalized text
  • Fix quick editing of the last page in the Edit Pages list
  • Fix Screen Options for IE
  • Fixes for choose tag from tag cloud
  • Rewrite rules fixes for certain hosts
  • Don’t check for updates on every page load
  • Easier post box dropping
  • Preview fixes
  • RTL fixes
  • Fixed broken wp-mail
  • Plugin update and install fixes
  • First draft of contextual help tab
\n

If you have already installed beta 1, you can update to beta 2 via the Tools -> Update menu.  Beta 1 does have a bug in the automatic upgrade that breaks certain setups, so be prepared to download and install Beta 2 manually if you experience problems.

\n

Get 2.7 Beta 2.

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:66:\"http://wordpress.org/development/2008/11/wordpress-27-beta-2/feed/\";}s:7:\"summary\";s:395:\"WordPress 2.7 Beta 2 is ready.  Here is a quick rundown of changes since beta 1.\n\nThe Upload button didn’t always show. Fixed.\nJS on the Dashboard broke for blogs with no comments, causing several UI elements to “freeze”. Fixed.\nRecent Drafts Dashboard module didn’t show correct times. Fixed.\nVarious Autosave fixes.\nRedirect after deleting a page from the editor [...]\";s:12:\"atom_content\";s:1726:\"


\n\";}i:8;a:12:{s:5:\"title\";s:48:\"What’s your favorite thing about the 2.7 Beta?\";s:4:\"link\";s:85:\"http://wordpress.org/development/2008/11/whats-your-favorite-thing-about-the-27-beta/\";s:8:\"comments\";s:94:\"http://wordpress.org/development/2008/11/whats-your-favorite-thing-about-the-27-beta/#comments\";s:7:\"pubdate\";s:31:\"Sun, 02 Nov 2008 20:09:19 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Jane Wells\";}s:8:\"category\";s:8:\"Features\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=396\";s:11:\"description\";s:317:\"There have been a lot of posts and twitter announcements by people checking out the WordPress 2.7 Beta since it was announced yesterday. What’s your favorite thing about 2.7 so far? Or if you haven’t made the leap yet, to which feature are you most looking forward? Tell us in the poll below. \n What [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:1051:\"

There have been a lot of posts and twitter announcements by people checking out the WordPress 2.7 Beta since it was announced yesterday. What’s your favorite thing about 2.7 so far? Or if you haven’t made the leap yet, to which feature are you most looking forward? Tell us in the poll below.

\n

\n

If you have an extra minute or two, we’ve also put together a survey that lists all the new features and allows you to rate them, as well as give additional feedback if you’re so inclined. If you want to participate, take the 2.7 Beta Favorite Features survey.

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:90:\"http://wordpress.org/development/2008/11/whats-your-favorite-thing-about-the-27-beta/feed/\";}s:7:\"summary\";s:317:\"There have been a lot of posts and twitter announcements by people checking out the WordPress 2.7 Beta since it was announced yesterday. What’s your favorite thing about 2.7 so far? Or if you haven’t made the leap yet, to which feature are you most looking forward? Tell us in the poll below. \n What [...]\";s:12:\"atom_content\";s:1051:\"


\n\";}i:9;a:12:{s:5:\"title\";s:20:\"WordPress 2.7 Beta 1\";s:4:\"link\";s:61:\"http://wordpress.org/development/2008/11/wordpress-27-beta-1/\";s:8:\"comments\";s:70:\"http://wordpress.org/development/2008/11/wordpress-27-beta-1/#comments\";s:7:\"pubdate\";s:31:\"Sat, 01 Nov 2008 08:00:11 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Ryan Boren\";}s:8:\"category\";s:8:\"Releases\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=387\";s:11:\"description\";s:332:\"The first public beta of WordPress 2.7 is here at last.  Join the thousands of people already testing 2.7 by downloading 2.7 Beta 1.  As previously mentioned on this blog, 2.7 is bringing a new visual design.  This design is almost completely implemented, but there are still a few areas that aren’t quite finished in [...]\";s:7:\"content\";a:1:{s:7:\"encoded\";s:1662:\"

The first public beta of WordPress 2.7 is here at last.  Join the thousands of people already testing 2.7 by downloading 2.7 Beta 1.  As previously mentioned on this blog, 2.7 is bringing a new visual design.  This design is almost completely implemented, but there are still a few areas that aren’t quite finished in Beta 1.  There are also several glitches in certain browsers.  Beta 1 provides the best experience in Firefox and Safari. Don’t worry, we are working on IE and Opera and will have those looking good in time for the final release.

\n

Speaking of the final release, it will not be available on November 10th as originally scheduled.  We are two weeks behind schedule at the moment.  We need a little more time to finish the visual design, do a round of user testing against that finished design, and do a proper round of public beta testing. Our plan is to keep working as if Nov. 10 is still the release date.  However, instead of releasing the final 2.7 on the 10th, we will make a release candidate available instead.  The release candidate is intended to be a high-quality, almost-finished release that we are comfortable recommending for broad use.  After Nov. 10, the focus will be on fixing high impact bugs turned up by those of you testing the release candidate. I suspect 2.7 will be ready for final release by the end of November.  A specific date will be set as we progress through the public beta cycle and get a feel for how solid the release is.

\n

Get WordPress 2.7 Beta 1.

\n\";}s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:66:\"http://wordpress.org/development/2008/11/wordpress-27-beta-1/feed/\";}s:7:\"summary\";s:332:\"The first public beta of WordPress 2.7 is here at last.  Join the thousands of people already testing 2.7 by downloading 2.7 Beta 1.  As previously mentioned on this blog, 2.7 is bringing a new visual design.  This design is almost completely implemented, but there are still a few areas that aren’t quite finished in [...]\";s:12:\"atom_content\";s:1662:\"


\n\";}}s:7:\"channel\";a:8:{s:5:\"title\";s:26:\"WordPress Development Blog\";s:4:\"link\";s:32:\"http://wordpress.org/development\";s:11:\"description\";s:33:\"WordPress development and updates\";s:7:\"pubdate\";s:31:\"Fri, 12 Dec 2008 17:53:29 +0000\";s:9:\"generator\";s:42:\"http://wordpress.org/?v=2.8-bleeding-10187\";s:8:\"language\";s:2:\"en\";s:2:\"sy\";a:2:{s:12:\"updateperiod\";s:6:\"hourly\";s:15:\"updatefrequency\";s:1:\"1\";}s:7:\"tagline\";s:33:\"WordPress development and updates\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:0:{}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"2.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}s:13:\"last_modified\";s:31:\"Fri, 12 Dec 2008 17:53:29 GMT\r\n\";s:4:\"etag\";s:36:\"\"ea271e349b3c985b52c9a010e693421b\"\r\n\";}','no'),(69,0,'rss_0ff4b43bd116a9d8720d689c80e7dfd4_ts','1229945566','no'),(70,0,'rss_867bd5c64f85878d03a060509cd2f92c','O:9:\"MagpieRSS\":19:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:50:{i:0;a:7:{s:5:\"title\";s:60:\"Weblog Tools Collection: WordPress Plugin Releases for 12/21\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4828\";s:4:\"link\";s:90:\"http://weblogtoolscollection.com/archives/2008/12/21/wordpress-plugin-releases-for-1221-2/\";s:11:\"description\";s:3785:\"

New Plugins

  • Blog Copyright: injects a copyright notice into the blog footer.
  • Art Direction Plugin: allows you to have global, archive, and single-page per-post styles.
  • Admin Menu Editor: manually edit the Dashboard menu; you can reorder the menus, show/hide specific items, change access rights, and more.
  • Vimeo Quicktags: enables administrators to embed Vimeo video in the blog. The options available are the same as those provided by Vimeo.
  • Picapp: makes it easier to insert free, quality photos into a blog post by integrating them into your WordPress interface.

Updated Plugins

  • Author Avatars: provides a widget and a shortcode which allow you to show avatars of blog users.
  • Open Picture Window: opens a new browser window containing the specified image using JavaScript; you can choose the window features as well as whether it is centered.
  • Add to Feed: a simple feed enhancement plugin that lets you add custom text or HTML before and/or after the content of posts in your blog feed, as well as a copyright message. Now it can include a link to the current post as well.
  • External Links: lets you process outgoing links differently from internal links.
  • Auto-Close Comments, Pingbacks and Trackbacks: automatically closes comments, pingbacks, and trackbacks on your posts; you can keep them open on certain posts, and you can now delete post revisions as well.
  • Serial Posts: lets you assign a Serial name to posts using custom fields, then automatically displays a list of other posts with the same Serial name when viewing a post. You can create as many Serials as you need, allowing multiple groupings of posts.
  • Excerpt Re-reloaded: creates an excerpt with a chosen number of words, link text for the rest of the post, and a filter for allowed tags with autoclose. It works with WP 2.6+.
  • Old Post Promoter: the main feature is the ability to promote an old post to either the 1st or 2nd position.
  • pMetrics: lets you check blog stats directly from your WordPress Dashboard.
  • WordPress 2.7 Comments API: not a plugin, but a PHP5 class for using the new WordPress 2.7 Comments API (page in Spanish).

\";s:7:\"pubdate\";s:31:\"Sun, 21 Dec 2008 13:21:35 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ajay\";}s:7:\"summary\";s:3785:\"

New Plugins

\n

Blog Copyright

\n

Blog Copyright injects a copyright notice into the blog footer.

\n

Art Direction Plugin

\n

This plugin allows you to have global archive and single page per-post styles.

\n

Admin Menu Editor

\n

Manually edit the Dashboard menu. You can reorder the menus, show/hide specific items, change access rights, and more.

\n

Vimeo Quicktags

\n

Enable administrator to embed vimeo video into blog. Options available are the same as provided by vimeo.

\n

Picapp

\n

The plugin makes it easier to insert free, quality photos into a blog post by integrating all these into your WordPress interface.

\n

Updated Plugins

\n

Author Avatars

\n

This plugin provides a widget and a shortcode which allow you to show avatars of blog users.

\n

Open Picture Window

\n

Opens a new browser window containing the image specified using JavaScript. You have the option to choose the features as well as choose if you want it to be centered.

\n

Add to Feed

\n

A simple feed enhancement plugin that allows you to add custom text or HTML to posts in your WordPress blog feed. You can add text before the content and/or after the content as well as a copyright message. Now, include a link to the current post as well.

\n

External Links

\n

The external links plugin for WordPress lets you process outgoing links differently from internal links.

\n

Auto-Close Comments, Pingbacks and Trackbacks

\n

Automatically close comments, pingbacks and trackbacks on your posts. You can choose to keep comments / pingbacks / trackbacks open on certain posts. You can now delete post revisions as well.

\n

Serial Posts

\n

This plugin allows you to assign a Serial name, using custom fields, to your posts and then automatically displays a list of other posts which have the same Serial name when viewing this post. You can create as many Serials as you need, therefore allowing you to create multiple groupings of posts.

\n

Excerpt Re-reloaded

\n

Create an excerpt choosing number of words, link text for the rest of the post, filter for allowed tags with autoclose. It works with WP 2.6+

\n

Old Post Promoter

\n

The main feature is the ability to promote an old post to either the 1st or 2nd position.

\n

pMetrics

\n

The pMetrics WordPress Plugin allows you to check blog stats directly from your WordPress Dashboard

\n

WordPress 2.7 Comments API

\n

This isn’t a plugin, but a PHP5 Class for use new WordPress 2.7 Comments API. Page in Spanish.

\";}i:1;a:7:{s:5:\"title\";s:25:\"Matt: Friendster Switches\";s:4:\"guid\";s:20:\"http://ma.tt/?p=9779\";s:4:\"link\";s:41:\"http://ma.tt/2008/12/friendster-switches/\";s:11:\"description\";s:464:\"

Friendster Relaunches Blogs, Switches to WordPress MU. Basically Friendster has switched millions of blogs from Typepad to WordPress, presumably at least partly because Six Apart abandoned their Typepad platform for third parties starting in late 2006 with Le Monde.

\";s:7:\"pubdate\";s:31:\"Sun, 21 Dec 2008 01:02:11 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:464:\"

Friendster Relaunches Blogs, Switches to WordPress MU. Basically Friendster has switched millions of blogs from Typepad to WordPress, presumably at least partly because Six Apart abandoned their Typepad platform for third parties starting in late 2006 with Le Monde.

\";}i:2;a:7:{s:5:\"title\";s:26:\"Matt: Art Direction Plugin\";s:4:\"guid\";s:20:\"http://ma.tt/?p=9776\";s:4:\"link\";s:42:\"http://ma.tt/2008/12/art-direction-plugin/\";s:11:\"description\";s:349:\"

WordPress “Art Direction” Plugin, from Automattician Noel Jackson. Basically allows you to do what Jason Santa Maria does for his blog without all the custom code and template hacking, just a simple plugin.

\";s:7:\"pubdate\";s:31:\"Sun, 21 Dec 2008 00:58:48 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:349:\"

WordPress “Art Direction” Plugin, from Automattician Noel Jackson. Basically allows you to do what Jason Santa Maria does for his blog without all the custom code and template hacking, just a simple plugin.

\";}i:3;a:7:{s:5:\"title\";s:59:\"Weblog Tools Collection: WordPress Theme Releases for 12/20\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4821\";s:4:\"link\";s:89:\"http://weblogtoolscollection.com/archives/2008/12/20/wordpress-theme-releases-for-1220-2/\";s:11:\"description\";s:2376:\"

  • Leviathan: a child theme available under the Hybrid theme framework. Two columns, widget ready, gravatar ready, with threaded comments support.
  • deCoder: fixed width, two columns, right sidebar, widget ready, with threaded comments and avatars.
  • WP-Christmas: three columns, widget ready, Adsense ready, left and right sidebars, fixed width.
  • WP-Dodson: two columns, widget ready, Adsense ready, right sidebar, fixed width.
  • Emplode: a clean and simple two-column, fixed-width theme supporting widgets.

Are you a theme author? Read how you can submit your themes to us.

\";s:7:\"pubdate\";s:31:\"Sat, 20 Dec 2008 15:33:09 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ajay\";}s:7:\"summary\";s:2376:\"

Leviathan

\n

\"leviathan-theme\"

\n

Leviathan is a child theme available under the Hybrid theme framework. Two column, widget ready, gravatar ready with threaded comments support.

\n

deCoder

\n

\"wordpress-theme-decoder\"

\n

Fixed width, 2 columns, right sidebar, widget ready with threaded comments and avatars.

\n

WP-Christmas

\n

\"wp-christmas\"

\n

Three columns, Widget ready, Adsense ready, Left and Right Sidebar, Fixed width theme

\n

WP-Dodson

\n

\"wp-dodson\"

\n

Two columns, Widget ready, Adsense ready, Right Sidebar, Fixed width theme

\n

Emplode

\n

\"emplode\"

\n

A clean and simple two-column, fixed width theme supporting widgets.

\n

 

\n

Are you a theme author? Read how you can submit your themes to us.

\";}i:4;a:7:{s:5:\"title\";s:59:\"Weblog Tools Collection: WordPress Gone Social - BuddyPress\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4809\";s:4:\"link\";s:86:\"http://weblogtoolscollection.com/archives/2008/12/20/wordpress-gone-social-buddypress/\";s:11:\"description\";s:2882:\"

First off, BuddyPress is looking real good these days. Secondly, I’d like to thank Andy Peatling, head honcho of the BuddyPress project, for stopping by and lending us an hour of his time to explain exactly what BuddyPress is and does. On December 15th, 2008, Andy released the first beta version of the project. So what exactly is BuddyPress?

“BuddyPress is essentially a set of WordPress MU specific plugins. Each plugin component adds a distinct feature to BuddyPress and only handles functionality for that specific component (for example, private messaging). BuddyPress also has a core plugin that all other plugins require; it contains shared functions and performs the basic modifications to the WordPress MU interface.”

Tune into the show to hear Andy delve into the feature set of the project as well as take questions from the audience. By the way, if you don’t have the ability to install WordPress MU and then BuddyPress, you can view and participate in a live demo by registering an account on http://www.testbp.org.

Announcements: This was our last show for 2008. Keith and I look forward to creating more great podcasts in 2009, and we both thank each and every one of you who continue to support the show. Have a happy holiday and a merry new year.

Mark E. will be on the show for January 2nd to discuss his comprehensive security plugin.

WordPress Weekly Forums: Please join the forum for WordPress Weekly to discuss things you heard on the show, share tips and tricks, give feedback, or let us know something you think would be great for the audience to know about.

WPWeekly Meta:

  • Next Episode: Friday, January 2nd, 2009, 8 P.M. EST
  • Subscribe To WPWeekly Via iTunes: Click here to subscribe
  • Length Of Episode: 55 Minutes
  • Download The Show: WordPressWeeklyEpisode34.mp3
  • Listen To Episode #34:

\";s:7:\"pubdate\";s:31:\"Sat, 20 Dec 2008 08:16:11 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:13:\"Jeff Chandler\";}s:7:\"summary\";s:2882:\"

First off, BuddyPress is looking real good these days. Secondly, I’d like to thank Andy Peatling, head honcho for the BuddyPress project for stopping by and lending us an hour of his time to explain exactly what BuddyPress is and does. On December 15th, 2008 Andy released the first beta version of the project. So what exactly is BuddyPress?

\n

\nBuddyPress is essentially a set of WordPress MU specific plugins. Each plugin component adds a distinct feature to BuddyPress and only handles functionality for that specific component (for example, private messaging). BuddyPress also has a core plugin that all other plugins require, it contains shared functions and performs the basic modifications to the WordPress MU interface.\n

\n

Tune into the show to hear Andy delve into the feature set of the project as well as taking questions from the audience. By the way, if you don’t have the ability to install WordPress MU and then BuddyPress, you can view and participate in a live demo by registering an account on http://www.testbp.org

\n

\"\"

\n

Announcements: This was our last show for 2008. Keith and I look forward to creating more great podcasts in 2009 and we both thank each and everyone of you who continue to support the show. Have a happy holiday and a merry new year.

\n

Mark E. Will Be On The Show For January 2nd To Discuss His Comprehensive Security Plugin

\n

WordPress Weekly Forums:
\nPlease join the forum for WordPress Weekly to discuss things you heard on the show, share tips and tricks, give feedback, or to let us know something you think would be great for the audience to know about.

\n

WPWeekly Meta:

\n

Next Episode: Friday January 2nd, 2008 8P.M. EST

\n

Subscribe To WPWeekly Via Itunes: Click here to subscribe

\n

Length Of Episode: 55 Minutes

\n

Download The Show: WordPressWeeklyEpisode34.mp3

\n

Listen To Episode #34:
\n

\";}i:5;a:7:{s:5:\"title\";s:46:\"Lorelle on WP: 500,000 WordPress 2.7 Downloads\";s:4:\"guid\";s:36:\"http://lorelle.wordpress.com/?p=3463\";s:4:\"link\";s:70:\"http://lorelle.wordpress.com/2008/12/19/500000-wordpress-27-downloads/\";s:11:\"description\";s:7782:\"

\"WordPressDecember 11, 2008, WordPress 2.7 was announced and released to the public, one of the hottest versions of WordPress ever.

\n

Just 20 hours later, Ryan Boren announced that there have been 100,000 downloads in those first few hours, spinning the WordPress Counter.

\n

In the next few moments or so, The WordPress Counter rolled over 500,000 downloads of the new WordPress 2.7.

\n

\"WordPress

\n

The WordPress Counter is restarted with every version release.

\n

So let’s do a little math, with a reminder that I’m not good at math.

\n

Using the handy date calculation tool from Timeanddate.com, from and including Thursday, December 11, 2008, to and including Friday, December 19, 2008, nine days have passed. The tool told me this equals:

\n
    \n
  • 777,600 seconds (0.643 downloads a second)
  • \n
  • 12,960 minutes (38.58 downloads per minute)
  • \n
  • 216 hours (2,314.81 downloads per hour)
  • \n
\n

That’s some serious bandwidth action. Go download WordPress 2.7 now and let’s see how fast we can reach one million. By Monday? Wednesday? Anyone taking bets? ;-)

If downloads continue at roughly 0.64 a second, how long before the WordPress 2.7 download counter reads one billion? Want to guess? Place more bets?
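As a quick sanity check of those figures (assuming the 500,000-download total and the nine-day window quoted above; this snippet is illustrative and not from the original post):

# Back-of-the-envelope check of the download-rate figures quoted above.
downloads = 500_000
seconds   = 9 * 24 * 60 * 60              # nine days = 777,600 seconds

per_second = downloads / seconds.to_f     # ~0.643
per_minute = per_second * 60              # ~38.58
per_hour   = per_second * 3600            # ~2,314.81

# At that pace, how long until the counter reads one billion?
years = (1_000_000_000 / per_second) / (365.25 * 24 * 60 * 60)
puts format('%.3f/sec, %.2f/min, %.2f/hour; about %.0f years to one billion',
            per_second, per_minute, per_hour, years)

At that rate the counter would need roughly half a century to reach one billion, so that second bet is a safe one.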

I announced this on my Twitter, and Matt Harzewski (redwall_hp) admitted that while he had upgraded many WordPress blogs to 2.7 since its release, he had only downloaded one copy.

Have you been recycling your WordPress downloads? :D

\";s:7:\"pubdate\";s:31:\"Sat, 20 Dec 2008 03:57:11 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:17:\"Lorelle VanFossen\";}s:7:\"summary\";s:7782:\"

\"WordPressDecember 11, 2008, WordPress 2.7 was announced and released to the public, one of the hottest versions of WordPress ever.

\n

Just 20 hours later, Ryan Boren announced that there have been 100,000 downloads in those first few hours, spinning the WordPress Counter.

\n

In the next few moments or so, The WordPress Counter rolled over 500,000 downloads of the new WordPress 2.7.

\n

\"WordPress

\n

The WordPress Counter is restarted with every version release.

\n

So let’s do a little math, with a reminder that I’m not good at math.

\n

Using the handy date calculation tool from Timeanddate.com, from and including Thursday, December 11, 2008, to and including Friday, December 19, 2008, nine days have passed. The tool told me this equals:

\n
    \n
  • 777,600 seconds (0.643 downloads a second)
  • \n
  • 12,960 minutes (38.58 downloads per minute)
  • \n
  • 216 hours (2,314.81 downloads per hour)
  • \n
\n

That’s some serious bandwidth action. Go download WordPress 2.7 now and let’s see how fast we can reach one million. By Monday? Wednesday? Anyone taking bets? \";-)\"

\n

If it takes 0.64 seconds per download, how long before the WordPress 2.7 download counter will read one billion? Want to guess? Place more bets?

\n

I announced this on my Twitter and Matt Harzewski (redwall_hp) admitted that while he had upgraded many WordPress blogs to 2.7 since its release, he had only downloaded one copy.

\n

Have you been recycling your WordPress downloads? \":D\"

\n

Related Articles

\n\n

\"\"
\n


\n

Site Search Tags: wordpress, wordpress news, downloads, wordpress 2.7, download wordpress, wordpress counter, time and date

\n

\"Feed Subscribe \"FeedburnerVia Feedburner \"\"Subscribe by Email \"\"Visit
Copyright Lorelle VanFossen, the author of Blogging Tips, What Bloggers Won\'t Tell You About Blogging.

\nPosted in WordPress News      \"\" \"\" \"\" \"\" \"\" \"\"
\";}i:6;a:7:{s:5:\"title\";s:58:\"Weblog Tools Collection: 2hr Interview With Matt Mullenweg\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4800\";s:4:\"link\";s:87:\"http://weblogtoolscollection.com/archives/2008/12/19/2hr-interview-with-matt-mullenweg/\";s:11:\"description\";s:7115:\"

On Thursday, December 18th, I had the honor of having a fireside chat, so to speak, with Matt Mullenweg. The chat lasted a little over two hours, and then Matt stayed around after the show for an additional two hours to field questions from anybody who asked them. There are a number of things I have taken away from this chat with Matt, and I’ll be listing those in an article in the following days, but without a shadow of a doubt, Matt is a stand-up guy. He answered all of my questions, even the tough ones which were submitted by the community. While there is room left to debate the GPL and what is or isn’t compliant, Matt answered the GPL questions to the best of his ability, and in most cases his answers are nothing more than his personal opinion, since certain aspects of the GPL would be much clearer if there were a court case to stand by.

I really feel as though this two-hour recording is the most important recording I’ve made yet and the biggest contribution I have made to the community so far. If there was one podcast you should listen to as it relates to WordPress, the GPL, and Matt’s involvement with Automattic and the project, this would be it. Special thanks to Matt Mullenweg for agreeing to come on the show to address all of the issues that were presented to him by me.

To get a sample of the information discussed in this episode, here is the list of questions that I asked Matt. After this list, he took questions from anyone who asked, whether they called in or sent them in the chat.

  • Why were those themes removed from the repository, and if you look back at the situation now, do you think you made a mistake by not making a public post about the removals?
  • Can you explain why the new guideline was added to the theme repository?
  • Why is it that so many people within the inner circle of the WordPress community believe you and Automattic don’t want anyone else profiting through or around WordPress?
  • In your opinion, do you think that premium themes have actually benefited the community by furthering the overall development of WordPress themes?
  • In a recent conversation, I saw you describe premium themes as proprietary and say you felt that was a better word than premium. Why is that?
  • How many of these debates, and the way things are done, are a result of there not being a court case to go by?
  • Does it bother you at all to see countless debates on various WordPress theme author sites about the GPL and what is and is not compliant with it?
  • Drupal and Joomla have decided the commercial stuff is okay, so why not WordPress?
  • In November of 2007, hot off the heels of WordCamp Argentina, news came out about a possible theme marketplace where people sold themes through the marketplace and the theme author as well as Automattic each received a cut of the profits. Was that your way of trying to help premium theme authors, and has there been any progress on the idea?
  • The Drupal community has debated this GPL/premium/theme issue for a while, and a solid understanding has come from it: “A theme is made up of several files - template files (ending in .php), CSS, images and JavaScript. The template files are considered a part of Drupal, which is licensed under the GPL, which means they are not restricted in their redistribution. You are free to share the .php files so others can benefit from them. However, the rest of the theme - images, CSS and JavaScript - is independent from Drupal and owned by us and licensed by you for one website per purchase. You may not publish or share these parts of the themes with anyone else. Please review our EULA for full details.” (Taken from a Drupal theme developers page.)
  • When the notion of making money by selling themes pops up at WordCamps, you are quick to explain the WordPress.com business model of selling services and building support/value around the product, but this model will not work for everyone. What is a premium theme author to do?
  • I’ve spoken to a few premium theme authors, and they tell me that because of the GPL, nothing stops someone from picking up Brian Gardner’s themes, changing the footer link, and then undercutting his business by selling support at a cheaper price. Is that a valid argument?
  • Redistributing paid themes for free is OK under the GPL, thus rendering the business model of selling themes useless, as I understand it. Yet that hasn’t happened, and I wonder if that is because most end users are not aware of the GPL; all they see is the single-use and multi-use licenses attached to themes.
  • Is there a way where premium theme companies such as iThemes and you or Automattic can come to a compromise?
  • Let’s say I have a template generator that outputs GPL themes but has premium features. It could be used to create free themes that would be eligible for the repository, but the generator outputs themes with a link back to my site, which promotes the premium services; those themes, in turn, may be suitable for the repository, but again they link back to my site.
  • Is it true that the notion of child themes, which appears to be gaining momentum, can be viewed as a loophole as far as the GPL is concerned, considering these are themes which are purely CSS and image based?
  • At what point do you stop accepting good themes that comply with the GPL because of a connection an author has with commercial themes? How far does it go?
  • If WP.org is about the community, why are decisions made unilaterally rather than by the community?
  • Just out of curiosity, do you get annoyed sometimes by people blaming or mentioning Automattic for the decisions or things that take place for WordPress.org? I mean, Automattic and the WordPress project are two separate things.
  • What is your role with Automattic, what is your role with the wordpress.org project, and is there ever a conflict of interest between the two?
  • In your opinion, how far does the GPL go? CSS, images, PHP files?
  • Why have you not used the WordPress development blog to bring forth the issues of the GPL and various other aspects of the project?
  • This whole show has pretty much been dedicated to themes, but how does all of this affect plugins, the plugin repository, and such?

WPWeekly Meta:

  • Subscribe To WPWeekly Via iTunes: Click here to subscribe
  • Length Of Episode: 2 Hours 11 Minutes
  • Download The Show: InterviewWithMatt.mp3
  • Listen To The Special Interview With Matt Mullenweg:

Chat log from those who participated in the chatroom: Special Interview Chat Log

\";s:7:\"pubdate\";s:31:\"Fri, 19 Dec 2008 18:52:49 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:13:\"Jeff Chandler\";}s:7:\"summary\";s:7115:\"

On Thursday, December 18th, I had the honor of having a fireside chat so to speak with Matt Mullenweg. The chat lasted a little over two hours and then, Matt stayed around after the show for an additional two hours to field questions from anybody that asked them. There are a number of things that I have taken away with this chat with Matt and I’ll be listing those in an article in the following days but without a shadow of a doubt, Matt is a stand up guy. He answered all of my questions, even the tough ones which were submitted by the community. While there is room left to debate the GPL and what is or isn’t compliant, Matt answered the GPL questions to the best of his ability and in most cases, his answers are nothing more than his personal opinion since certain aspects of the GPL would be much clearer if there was a court case to stand by.

\n

I really feel as though this two hour recording is the most important recording I’ve made yet and is the biggest contribution I have made so far, back to the community. If there was one podcast that you should listen to as it relates to WordPress, the GPL, Matt’s involvement with Automattic and the Project, this would be it. Special thanks to Matt Mullenweg for agreeing to come on the show to address all of the issues that were presented to him by me.

\n

To get a sample of the information discussed in this episode, here are the list of questions that I asked Matt. After this list, he took questions from anyone that asked them either by those who called in or sent them in the chat.

\n

Why were those themes removed from the repository and if you look back at the situation now, do you think you made a mistake by not making a public post about the removals?

\n

Can you explain why the new guideline was added to the theme repository?

\n

Why is it that so many people within the inner circle of the WordPress community believe you and Automattic don’t want anyone else profiting through or around WordPress?

\n

In your opinion, do you think that premium themes have actually benefited the community by way of furthering the overall development of WordPress themes?

\n

In a recent conversation, I saw you describe premium themes as propietary and how you felt that was a better word than premium. Why is that?

\n

How many of these debates and the way things are done are a result of their not being a court case to go by?

\n

Does it bother you at all to see countless debates on various WordPress theme author sites about the GPL and what is and not compliant with it?

\n

Drupal and Joomla have decided the commercial stuff is okay but why not WordPress?

\n

In November of 2007 hot off the heels of WordCamp Argentina, news came out about a possible theme marketplace where people sold themes through the marketplace and the theme author as well as Automattic each recieved a cut of the profits. Was that your way of trying to help premium theme authors and has their been any progress on the idea?

\n

The Drupal community has debated this GPL/Premium/Theme issue for a while. And a solid understanding has come from it:

\n

A theme is made up of several files - template files (ending in .php), CSS, images and JavaScript. The template files are considered a part of Drupal, which is licensed under the GPL, which means they are not restricted in their redistribution. You are free to share the .php files so others can benefit from them. However, the rest of the theme - images, CSS and JavaScript - is independent from Drupal and owned by us and licensed by you for one website per purchase. You may not publish or share these parts of the themes with anyone else. Please review our EULA for full details. (Taken from a Drupal Theme Developers page)

\n

When the notion of making money by selling themes pops up at WordCamps, you are quick to explain the WordPress.com business model of selling services and building support/value around the prodcut but this model will not work for everyone. What is a premium theme author to do?

\n

I’ve spoken to a few premium theme authors and they tell me that because of the GPL, nothing stops someone from picking up Brian Gardners themes, changing the footer link and then undercutting his business by selling support at a cheaper price. Is that a valid argument?

\n

Redistributing paid themes for free, which is ok under the GPL thus, rendering the business model of selling themes useless, as I understand it. Yet, that hasn’t happened and I wonder if that is because most end users are not aware of the GPL, all they see is the single-use multi-use licenses attached to themes

\n

Is there a way where premium theme companies such as iThemes and you or Automattic can come to a compromise?

\n

Lets say I have a template generator that outputs GPL themes, but has premium features. It could be used to create freebie themes which would be eligible to be in the repository, but since the generator outputs themes with a link back to my site which promotes the premium services, which in turn may be used for creating themes suitable for the repository, but again those themes have a link back to my site.

\n

Is it true that the notion of Child themes which appears to be gaining momentum can be viewed as a loophole as far as the GPL is concerned considering these are themes which are purele CSS and Image based?

\n

At what point do you stop accepting good themes that comply with the GPL because of a connection an author has with commercial themes. How far does it go.

\n

If WP.org is about the community, why are decisions made unilaterally, rather than by the community?

\n

Just out of curiosity, do you get annoyed sometimes by people blaming or mentioning Automattic for the decisions or things that take place for WordPress.org? I mean, Automattic and the WordPress project are two separate things.

\n

What is your role with automattic and what is your role with the wordpress.org project and is their ever a conflict of interest between the two?

\n

In your opinion, how far does the GPL go? CSS, images, phpfiles,

\n

Why have you not used the WordPress development blog to bring forth the issues of GPL and various other aspects of the project?

\n

This whole show has pretty much been dedicated to themes but how does all of this effect plugins, the plugin repository and such?\n

WPWeekly Meta:

\n

Subscribe To WPWeekly Via Itunes: Click here to subscribe

\n

Length Of Episode: 2 Hours 11 Minutes

\n

Download The Show: InterviewWithMatt.mp3

\n

Listen To The Special Interview With Matt Mullenweg:
\n

\n

Chat log from those who participated in the chatroom: Special Interview Chat Log

\";}i:7;a:7:{s:5:\"title\";s:72:\"Weblog Tools Collection: Mastering Your WordPress 2.7 Theme & Admin Area\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4777\";s:4:\"link\";s:98:\"http://weblogtoolscollection.com/archives/2008/12/19/mastering-your-wordpress-27-theme-admin-area/\";s:11:\"description\";s:1177:\"

Mastering Your WordPress 2.7 Theme & Admin Area: Tips and Tricks: Noupe has a nice writeup on mastering the WordPress 2.7 admin and understanding how to use the new features of WordPress 2.7 in your theme. These include:

  • 1.1. Enhancing Comment Display - Threading, Paging, etc.
  • 1.2. Make this post sticky
  • 1.3. Post Classes
  • 1.4. wp_page_menu
  • 1.5. Logout Link
  • 2.1. Keyboard Shortcuts for browsing and moderating comments
  • 2.2. Reply to and Edit comments from Admin Area
  • 2.3. Edit comments from Admin Area
  • 2.4. Allow plugin installations via web interface from Admin Area
  • 2.5. Admin’s new Navigation Menu
  • 2.6. Interesting Dashboard Modules: QuickPress & Recent Drafts
  • 2.7. Quick Edit Option in Admin area
  • 2.8. Auto Close Comments And Trackbacks
  • 2.9. Bulk Edit Posts

I wish there were anchors or separate pages to link directly to each piece, but alas they are all lumped together. This is a nice read nonetheless.

\";s:7:\"pubdate\";s:31:\"Fri, 19 Dec 2008 15:12:57 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Mark Ghosh\";}s:7:\"summary\";s:1177:\"

Mastering Your WordPress 2.7 Theme & Admin Area: Tips and Tricks.: Noupe has a nice writeup on mastering WordPress 2.7 Admin and understanding how to use the new features of WordPress 2.7 in your theme. These include

\n
    \n
  • 1.1. Enhancing Comment Display - Threading, Paging, etc.
  • \n
  • 1.2. Make this post sticky
  • \n
  • 1.3. Post Classes
  • \n
  • 1.4. wp page menu
  • \n
  • 1.5. Logout Link
  • \n
  • 2.1. Keyboard Shortcuts for browsing and moderating comments
  • \n
  • 2.2. Reply to and Edit comments from Admin Area
  • \n
  • 2.3. Edit comments from Admin Area
  • \n
  • 2.4. Allow plugin installations via web interface from Admin Area
  • \n
  • 2.5. Admin’s new Navigation Menu
  • \n
  • 2.6. Interesting Dashboard Modules: QuickPress & Recent Drafts
  • \n
  • 2.7. Quick Edit Option in Admin area
  • \n
  • 2.8. Auto Close Comments And Trackbacks
  • \n
  • 2.9. Bulk Edit Posts
  • \n
\n

I wish there were anchors or separate pages to link directly to each piece, but alas they are all lumped together. This is a nice read nonetheless.

\";}i:8;a:7:{s:5:\"title\";s:48:\"Lorelle on WP: WordPress News on the Blog Herald\";s:4:\"guid\";s:36:\"http://lorelle.wordpress.com/?p=3450\";s:4:\"link\";s:74:\"http://lorelle.wordpress.com/2008/12/18/wordpress-news-on-the-blog-herald/\";s:11:\"description\";s:27262:\"

\"WordPressThe past few weeks of the Blog Herald’s WordPress News reports that I do have been huge. Each one now takes many hours to produce, rounding up all the news from WordPress developers, Plugin and Theme developers, WordPress.com, WordPress fan podcasts and blogs, and the WordPress Community.

\n

WordPress 2.7 is the biggest WordPress version ever, and Ryan Boren announced that there have been 100,000 downloads in the first 20 hours of release, with the WordPress counter reporting 469,208 a little over a week since the version was released. With those numbers, you can tell that there has been a lot to report on for WordPress recently.

\n

To honor the end of 2008 and all the great WordPress news over the past year, and to celebrate another year of bringing you the WordPress news on the Blog Herald every week, here is a listing of this year’s WordPress News Wednesday Reports:

\n\n

I’m not alone reporting on WordPress news and developments. The core sources and resources I depend upon that help me serve the WordPress Community are:

\n\n

While it is tradition to thank all the “little people” who helped me get where I am today, I honestly owe it all to the entire WordPress Community.

\n

I want to thank again the tons of people around the web whose blog content fills my feed reader every week with WordPress news, tips, guides, help, and general information. I read through hundreds of feeds and more hundreds of blog posts every week, learning what others are saying about WordPress and their WordPress blogging experience. Their perspective on WordPress, from every angle and opinion, helps me better understand how it works, and how I can better serve WordPress fans.

\n

You are my heroes and heroines that make my life much easier. You are appreciated more than you could ever know. I’ve reserved a virtual hug for every one of you until we can meet in person. For those I’ve already met, you know that hug well. \":D\"

\n

WordPress Tips on Twitter

\n

A few months ago, I started sharing all the articles and news information I’ve collected over the years or writing about and reporting on WordPress with my followers on Twitter. Each day I publish a random WordPress tip covering various versions, techniques, Theme design and development, Plugin writing and tips, and a variety of cool stuff about using WordPress.

\n

What to join the fun, follow lorelleonwp on Twitter for your daily dose of WordPress.

\n

To celebrate the end of a very successful year, I will be releasing ten WordPress tips on December 25 and January 1 throughout each of those days.

\n

Thanks again to all and to all, hope your 2008 was as good or better than mine (not possible \":D\" ), and that 2009 will be even more exciting with plenty of WordPress news, events, meetups, WordCamps, and in person hugs.

\n

See you soon at a WordCamp Event!

\";s:7:\"pubdate\";s:31:\"Fri, 19 Dec 2008 04:49:46 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:17:\"Lorelle VanFossen\";}s:7:\"summary\";s:27262:\"

\"WordPressThe past few weeks of the Blog Herald’s WordPress News reports that I do have been huge. Each one now takes many hours to produce, rounding up all the news from WordPress developers, Plugin and Theme developers, WordPress.com, WordPress fan podcasts and blogs, and the WordPress Community.

\n

WordPress 2.7 is the biggest WordPress version ever, and Ryan Boren announced that there have been 100,000 downloads in the first 20 hours of release, with the WordPress counter reporting 469,208 a little over a week since the version was released. With those numbers, you can tell that there has been a lot to report on for WordPress recently.

\n

To honor the end of 2008 and all the great WordPress news over the past year, and to celebrate another year of bringing you the WordPress news on the Blog Herald every week, here is a listing of this year’s WordPress News Wednesday Reports:

\n\n

I’m not alone reporting on WordPress news and developments. The core sources and resources I depend upon that help me serve the WordPress Community are:

\n\n

While it is tradition to thank all the “little people” who helped me get where I am today, I honestly owe it all to the entire WordPress Community.

\n

I want to thank again the tons of people around the web whose blog content fills my feed reader every week with WordPress news, tips, guides, help, and general information. I read through hundreds of feeds and more hundreds of blog posts every week, learning what others are saying about WordPress and their WordPress blogging experience. Their perspective on WordPress, from every angle and opinion, helps me better understand how it works, and how I can better serve WordPress fans.

\n

You are my heroes and heroines that make my life much easier. You are appreciated more than you could ever know. I’ve reserved a virtual hug for every one of you until we can meet in person. For those I’ve already met, you know that hug well. \":D\"

\n

WordPress Tips on Twitter

\n

A few months ago, I started sharing all the articles and news information I’ve collected over the years or writing about and reporting on WordPress with my followers on Twitter. Each day I publish a random WordPress tip covering various versions, techniques, Theme design and development, Plugin writing and tips, and a variety of cool stuff about using WordPress.

\n

What to join the fun, follow lorelleonwp on Twitter for your daily dose of WordPress.

\n

To celebrate the end of a very successful year, I will be releasing ten WordPress tips on December 25 and January 1 throughout each of those days.

\n

Thanks again to all and to all, hope your 2008 was as good or better than mine (not possible \":D\" ), and that 2009 will be even more exciting with plenty of WordPress news, events, meetups, WordCamps, and in person hugs.

\n

See you soon at a WordCamp Event!

\n

\"\"
\n


\n

Site Search Tags: WordPress News, wordpress information, wordpress tips, wordpress themes, wordpress plugins, wordpress help, wordpress tutorials, wordpress versions, wordpress guides

\n

\"Feed Subscribe \"FeedburnerVia Feedburner \"\"Subscribe by Email \"\"Visit
Copyright Lorelle VanFossen, the author of Blogging Tips, What Bloggers Won\'t Tell You About Blogging.

\nPosted in WordPress News      \"\" \"\" \"\" \"\" \"\" \"\"
\";}i:9;a:7:{s:5:\"title\";s:29:\"Gravatar: TinyPic & Gravatars\";s:4:\"guid\";s:31:\"http://blog.gravatar.com/?p=120\";s:4:\"link\";s:54:\"http://blog.gravatar.com/2008/12/18/tinypic-gravatars/\";s:11:\"description\";s:2182:\"

Michael C. writes in to let us know that TinyPic users now get their Gravatar for their profile image. Thanks, TinyPic, we think you’re pretty grrrrreat yourselves!

As a side note, I think it’s really telling that a service which hosts images and videos for people to easily link to decided to use Gravatar for their profiles. TinyPic has the infrastructure in place to do this. They already have uploading, caching servers, and a CDN figured out. They might even have croppers and image effects floating around. But two things are important here. The first is that profile images still aren’t at the core of what they want to provide to people. Second, and more importantly, enabling Gravatar support is good for their users: it’s easy, it’s simple, and it gives them value far beyond that individual profile page.
\";s:7:\"pubdate\";s:31:\"Thu, 18 Dec 2008 19:30:03 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:11:\"apokalyptik\";}s:7:\"summary\";s:2182:\"

Michael C. writes in to let us know that TinyPic users now get their Gravatar for their profile image.  Thanks TinyPic, we think you’re pretty grrrrreat yourselves!

\n

As a side note I think it’s really telling that a service which hosts images and videos for people to easily link to decided to use Gravatar for their profiles.  TinyPic has the infrastructure in place to do this.  They already have uploading, caching servers, and a CDN figured out.  They might even have croppers and image effects floating around.  But 2 things are important here.  The first is that profile images still aren’t the at core of what they want to provide to people.  Second, and more importantly, is that enabling Gravatar support is good for their users — It’s easy, its simple, and it gives them value far beyond that individual profile page.

\n      \"\" \"\" \"\" \"\" \"\" \"\"
\";}i:10;a:7:{s:5:\"title\";s:60:\"Weblog Tools Collection: WordPress Plugin Releases for 12/18\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4794\";s:4:\"link\";s:88:\"http://weblogtoolscollection.com/archives/2008/12/18/wordpress-plugin-releases-for-1218/\";s:11:\"description\";s:5100:\"

New Plugins

  • WP AutoSuggest: search the blog by typing in a keyword rather than waiting for the results page to load.
  • PostRank: adds the PostRank Top Posts Widget & Analytics to your blog. PostRank measures engagement with each story by analyzing the types and frequency of social media interactions; the more interesting or relevant the story, the more active your readers will be in sharing it.
  • WP Lifestream: displays your social feeds and photos much like you would see on many of the social networking sites.
  • WordPress Video Solution Framework: a video solutions framework, including player, transcoder, and administration interface utilities, packaged as a WPMU plugin. It powers the WordPress.com video solution.
  • wp2blosxom: exports all your posts to a zip file containing a blosxom-style directory hierarchy of posts.
  • Sliding Post: shows the latest 10 posts with an SEO-friendly carousel sliding effect; users can set the height, width, and duration of the slide show.
  • Paginator: adds the “paginator3000” paging navigation to your WordPress blog.
  • wp-mailfrom: allows you to configure the default email address and name used on email sent by WordPress.
  • Quick Admin Links: a widget that adds useful admin links on every page, letting you add new posts/pages, edit existing posts/pages, go to the admin, or log out.
  • Hide Update Reminder: hides the update reminder in the admin for all non-admin users.
  • Google Custom Search Plugin: replaces the default search engine with Google’s Custom Search for websites.

Updated Plugins

  • Kimili Flash Embed: a plugin for popular open-source blogging systems that lets you easily place Flash movies on your site.
  • TDO Mini Forms: adds highly customizable forms that allow non-registered users and/or subscribers (or any flavor between) to submit posts; posts can be kept in “draft” until an admin publishes them (also configurable), and Akismet can optionally check submissions for spam.
  • HeadMeta: a simple plugin to help with SEO and other purposes that require customized ‘meta’ and ‘link’ tags in the header of your theme.
  • Advanced Category Excluder: ACE can override your search results, RSS feed listing, category listing, recent-post widgets, and entry page, or even hide whole categories of posts from web crawlers if you want it to.
  • Blibahblubah: lets the blog owner customize the tag cloud so that any reader mousing over it sees the words flicker with a customizable effect; just a fancy bell and whistle to add to your blog.
  • Facebook Posted Items: fetches posted items from Facebook and displays them in an unordered list with proper links and comments.
  • Simple Tags: a tool to manage your WP 2.3, 2.5, 2.6, and 2.7 tags.
  • StatPress Reloaded: shows real-time statistics about your blog, collecting information about visitors, spiders, search keywords, feeds, browsers, OS, etc.
  • Popupper: enables a blogger to add popups of images and text to their posts.

\";s:7:\"pubdate\";s:31:\"Thu, 18 Dec 2008 12:01:04 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ajay\";}s:7:\"summary\";s:5100:\"

New Plugins

\n

WP AutoSuggest

\n

Search on my blog by typing in a keyword rather than waiting for the results page to load.

\n

PostRank

\n

This plugin adds the PostRank Top Posts Widget & Analytics to your blog. PostRank measures the engagement with each story by analyzing the types and frequency of social media interactions. The more interesting or relevant the story is, the more active your readers will be in sharing it.

\n

WP Lifestream

\n

Lifestream displays your social feeds and photos much like you would see it on many of the social networking sites.

\n

WordPress Video Solution Framework

\n

Video solutions framework, including player, transcoder and administration interface utilities as wpmu plugin. It powers wordpress.com video solution.

\n

wp2blosxom

\n

Exports all your posts to a zip file containing a blosxom style directory hierarchy of posts.

\n

Sliding Post - Slides latest posts with Carousel effect

\n

A plugin for WordPress which shows latest 10 posts with carousel sliding effect which is SEO friendly. User can set the height, width, and the time duration of the slide show.

\n

Paginator

\n

Adds the “paginator3000″ paging navigation to your WordPress blog.

\n

wp-mailfrom

\n

Allows you to configure the default email address and name used on email sent by WordPress.

\n

Quick Admin Links

\n

Widget to add useful admin links on every page, allowing you to add new posts/pages, edit existing posts/pages, go to the admin, or log out.

\n

Hide Update Reminder

\n

Hides the Update Reminder in the Admin for all non Admin users.

\n

Google Custom Search Plugin

\n

Google Custom Search plugin for WordPress replaces the default search engine with Google’s Custom Search for websites.

\n

Updated Plugins

\n

Kimili Flash Embed

\n

The Kimili Flash Embed is a plugin for popular open source blogging systems that allows you to easily place Flash movies on your site.

\n

TDO Mini Forms

\n

This plugin allows you to add highly customizable forms to your website that allows non-registered users and/or subscribers (or any flavor between) to submit posts. The posts can be kept in “draft” until an admin can publish them (also configurable). It can optionally use Akismet to check if submissions are spam.

\n

HeadMeta

\n

HeadMeta is a simple plugin to help with SEO and other purposes that require customized ‘meta’ and ‘link’ tags in the header of your theme.

\n

Advanced Category Excluder

\n

ACE can override your search results, your RSS feed listing, your category listing, your recent post, and recent post widgets and also your entry page, or even hide whole categories of posts from web crawlers if you want to.

\n

Blibahblubah

\n

This plugin allows the blog owner to customize the tag cloud so that any reader mousing over the tag cloud will see the words in the tag cloud flicker with a customizable effect. This is just a fancy bell and whistle to add to your blog.

\n

Facebook Posted Items

\n

This plugin fetches posted items from Facebook and displays them in an unordered list with proper links and comments.

\n

Simple Tags

\n

This is the perfect tool to manage perfectly your WP 2.3, 2.5, 2.6 and 2.7 tags.

\n

StatPress Reloaded

\n

This plugin shows you real time statistics about your blog. It collects information about visitors, spiders, search keywords, feeds, browsers, OS etc.

\n

Popupper

\n

Popupper is a plugin that enables a blogger to add popups of images and text into their posts.

\";}i:11;a:7:{s:5:\"title\";s:47:\"Weblog Tools Collection: Matt, The GPL And More\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4791\";s:4:\"link\";s:75:\"http://weblogtoolscollection.com/archives/2008/12/17/matt-the-gpl-and-more/\";s:11:\"description\";s:991:\"

If you are a premium theme developer or have an interest in WordPress themes, you may have heard that over 200 themes were removed from the WordPress.org theme repository. The reasons behind the removals have yet to be made public, and there is more afoot than just the removal of themes. Over the course of the past few days, debates and discussions have been taking place on numerous blogs regarding WordPress, the GPL, themes, and much more. Matt has agreed to appear on WordPress Weekly on Thursday at 1 P.M. EST to set the record straight, so to speak. My goal for this special episode of the show is to get an explanation of what happened with the theme repository, the new guidelines, Matt’s stance on the GPL, the entire issue of premium themes, what will happen with plugins, and a whole lot more. This episode is really important to me as I try to clear up as much of the muddy water as I can.

\";s:7:\"pubdate\";s:31:\"Wed, 17 Dec 2008 23:38:17 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:13:\"Jeff Chandler\";}s:7:\"summary\";s:991:\"

If you are a premium theme developer or have an interest in WordPress themes, you may have heard that over 200 themes were removed from the WordPress.org theme repository. The reasons behind the removals have yet to be made public and there is more afoot than just the removal of themes. Over the course of the past few days, debates and discussions have been taking place on numerous blogs regarding WordPress, the GPL, themes and much more. Matt has agreed to appear on WordPress Weekly on Thursday at 1 P.M. EST to set the record straight so to speak. My goal for this special episode of the show is to get an explanation as to what happened with the theme repository, the new guidelines, Matt’s stance on the GPL, the entire issue of premium themes, what will happen with plugins and a whole lot more. This episode is really important to me as I try to clear up as much of the muddy waters as I can.

\";}i:12;a:7:{s:5:\"title\";s:59:\"Weblog Tools Collection: WordPress Theme Releases for 12/17\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4787\";s:4:\"link\";s:87:\"http://weblogtoolscollection.com/archives/2008/12/17/wordpress-theme-releases-for-1217/\";s:11:\"description\";s:2331:\"

  • WP Full Site: a two-column theme built for small businesses that want a website with a Content Management System (CMS), the option of a blog, and a focus on the necessities that all small-business websites need.
  • TimeCafe Premium Free: a blue magazine-style template with a theme options menu, front-page featured content slider, built-in newsletter, custom typography, dropdown menu, advertisement readiness, and much more. The theme is ready for WordPress 2.7: enable threaded (nested) comments from your WordPress – Settings – Discussion menu and you’ll have different colors for comments up to 5 levels deep of replies.
  • WP-Meditation: a two-column, fixed-width, widget-ready WordPress theme.
  • Blue Christmas: a fun and bluish, fixed-width, two-column, gravatar-ready, widget-ready Christmas theme.

Also, check out the other themes by Blogtuine.com, viz. Prolog, Ext JS and JS03.

\";s:7:\"pubdate\";s:31:\"Wed, 17 Dec 2008 14:52:00 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ajay\";}s:7:\"summary\";s:2331:\"

Matt: WordPress Interface History
http://ma.tt/2008/12/wordpress-interface-history/

A Journey Through Five Years of WordPress Interface. Take a look at what the WordPress interface looked like at the beginning of 2008. How far we’ve come!

Published Wed, 17 Dec 2008 07:23:02 +0000 by Matt.

Gravatar: Mindtouch Deki Wiki supports Gravatar
http://blog.gravatar.com/2008/12/17/mindtouch-deki-wiki-supports-gravatar/

Sarah C. writes in to let us know that Mindtouch Deki supports Gravatars via an extension. Gravatars help give a sense of community, as well as a sense of ownership, to all kinds of sites. A wiki is a great place for both, as shown in their examples. Awesome work, guys!
Published Wed, 17 Dec 2008 01:24:06 +0000 by apokalyptik.

Weblog Tools Collection: easyComment Firefox Extension
http://weblogtoolscollection.com/archives/2008/12/16/easycomment-firefox-extension/

easyComment Firefox Extension: “easyComment enables you to one-click comment on WordPress blogs, which makes it a perfect tool for people who comment on a lot of different weblogs and who don’t want to sign up on every single one. You’ll have to define your personal data once in the Preferences dialog, and every time you click the easyComment button on the bottom status bar in your Firefox window, it searches for the comment form on the current page and fills it with your data, saving you a huge chunk of type-in work.” From the author’s post. Hat tip to Keith, found via the Weblog Tools Collection Crew LifeStream.

Published Tue, 16 Dec 2008 23:15:32 +0000 by Mark Ghosh.

Weblog Tools Collection: WordPress Plugin Releases for 12/16
http://weblogtoolscollection.com/archives/2008/12/16/wordpress-plugin-releases-for-1216/

New Plugins

WP-Twitip-ID

This plugin takes the pain out of adding another field to your comment form where users can enter their Twitter username, which is then displayed as a link to follow them via their Twitter page.

Media Tags

With this plugin you can now add tags to your media attachments. You can tag Word documents, PDF documents, and basically anything you upload via the standard WordPress Media upload tool.

CrossPress

Automatically cross-posts to an associated site/blog by using the post-via-email option with a PIN code, e.g. multiply.com, livejournal.com, blogspot.com.

Hackadelic Sliding Notes

Ajax sliders for content fragments. A fancy replacement for footnotes and other notes.

GoogleMapper

This plugin allows a WordPress site admin to enter locations of stores etc. into the database. The user can then search for the closest location.

Facebook Posted Items

This plugin fetches posted items from Facebook and displays them in an unordered list with proper links and comments.

Updated Plugins

Sensitive Tag Cloud

This WordPress plugin provides a configurable tag cloud that shows only tags depending on the current context.

RelatedPosts

This WordPress plugin provides multiple options for showing the posts related to a post via its tags. It contains a sidebar widget that is only visible when viewing a single post and displays a list of posts that are related to the current post via its tags.

wpSEO

The wpSEO plugin helps you optimize your blog for SEO (Search Engine Optimization) purposes by eliminating issues with duplicate content and specifying meta tags and page titles for the different pages of your blog. You can also specify your meta tags and page titles manually.

GD Star Rating

GD Star Rating is a post, page and comment rating plugin for WordPress. The plugin supports different image sets, rating moderation, vote rules, time-restricted voting, templates and trend calculations, and has built-in widgets and shortcode support.

Smooth Scrolling Links

This plugin uses the Smooth Scroll JavaScript by Stuart Langridge, which adds a special effect to your “self” links, i.e. links that target various locations on the same page, such as top, bottom or specific in-page anchors.

Moodlight

Moodlight allows your visitors to add their mood to posts via comments.

Simple Google Sitemap

Creates an XML sitemap containing the homepage, articles and pages.

Published Tue, 16 Dec 2008 19:11:47 +0000 by Ajay.

Weblog Tools Collection: A Long Look Back
http://weblogtoolscollection.com/archives/2008/12/16/a-long-look-back/

Ozh, a WordPress community superstar in his own right, has compiled an awesome post highlighting the WordPress interface from version 0.7.1, which was released in May of 2003, all the way up to WordPress 2.7. Along with pictures, Ozh gives us a bit of background information for each version. If you’re like me and didn’t start using WordPress until around 2.2/2.3, this post should serve as an excellent reminder of how far WordPress has come. I can’t help but wonder how many of you will get that nostalgic feeling when viewing those screenshots.

Hat tip goes to The BlogHerald.

While you’re here, drop us a comment and let us know: what was the first version of WordPress you used?

Published Tue, 16 Dec 2008 11:00:52 +0000 by Jeff Chandler.

Lorelle on WP: WordPress 2.7 Upgrade Tips
http://lorelle.wordpress.com/2008/12/11/wordpress-27-upgrade-tips/

WordPress 2.7 has been released, and in addition to the information I provided in “WordPress 2.7 Available Now,” here are some tips to help you make the transition to WordPress 2.7.

Reports on the WordPress Support Forum and around the web are that WordPress 2.7 is the easiest upgrade ever. There are only a few reports of problems, which deal with Plugins that have now become integrated features in WordPress 2.7 and with Plugins and Themes that have customized comment areas. I’ve listed the most common support questions and their answers below, along with all the help you need to make this transition to WordPress 2.7 easy and pain free.

Check Your Web Host for Compatibility

Prior to upgrading, review WordPress 2.7 Core Update Host Compatibility to ensure that your WordPress blog will upgrade in compliance with the new features and demands of WordPress 2.7. Find a Web Host is a list of web hosts WordPress recommends.

Some issues reported during testing were server issues. WordPress 2.7 now supports PHP5, and it is important that web hosts upgrade to current versions of PHP and MySQL for many reasons, including feature improvements and security vulnerabilities, to protect you as well as themselves. The WordPress development team has worked closely with web host providers around the world to ensure that WordPress will work, and we all benefit from those discussions, including non-WordPress users.

If your web host is not on the compatibility or recommendation list, contact them directly to determine if they are supporting the new version of WordPress. If not, consider moving to another web host that will continue to support your blog and WordPress development.

Upgrading from an Older Version of WordPress

To upgrade from an older version of WordPress, I recommend a step-by-step process. If you are upgrading from WordPress 2.3 to WordPress 2.7, a big transition, go through the following documents on the WordPress Codex, the online manual for WordPress users, and upgrade your WordPress blog for the transitions from 2.3 to 2.5, 2.5 to 2.6, and 2.6 to 2.7 to ensure you have made all changes necessary to your WordPress Theme. In general, Migrating Plugins and Themes to 2.7 should cover everything, but those upgrading from very old versions will want to check everything to ensure a successful upgrade. Or check with your WordPress Theme author for an updated version of the Theme.

NOTE: If you are using the Auto Upgrade WordPress Plugin or something similar, a sticky post WordPress Plugin, or the Admin Drop Down Menus WordPress Plugin, deactivate them now. These features are now built into WordPress and no longer require these Plugins.

How to Upgrade to WordPress 2.7

If you are using one of the beta or release candidate versions of WordPress 2.7, run the upgrade feature. If you are using an older version of WordPress:

1. Check the WordPress 2.7 Core Update Host Compatibility guide.
2. Check for updates to your WordPress Theme and Plugins.
3. Follow the guide for Migrating Plugins and Themes for the version you are upgrading from. Check the WordPress 2.7 Plugin Compatibility and WordPress 2.7 Theme Compatibility lists to ensure your Theme and Plugins will work with WordPress 2.7.
4. Download WordPress 2.7 and follow the instructions for installing WordPress (new installations) or upgrading WordPress. If you need more specific help with your upgrade, especially using the new auto-upgrade feature, see the extended upgrade instructions.

I’m Worried About Upgrading to WordPress 2.7

Many are worried about upgrading to WordPress 2.7. Here are some of the worries, and some answers.

1. It’s Painful to Upgrade: WordPress 2.7 now features auto-upgrade, so this could be the last “painful” upgrade you do, though upgrades are not very painful, really. Now, WordPress will alert you when there is a new version or upgrade available and walk you through the process. The same applies to WordPress Themes and Plugins, making life much less painful.
2. My WordPress Theme Will Break: There is very little that will impact WordPress Themes in this new version, outside of WordPress Plugins that interact with WordPress Themes, especially with comments. WordPress Themes will be even better protected in future versions with the Parent/Child Theme feature (your changes are protected during upgrades) and easy auto-updating of Themes. If your Theme features customization to the comments area, see Migrating Plugins and Themes to 2.7 and Justin Tadlock’s “Making your theme’s comments compatible with WordPress 2.7 and earlier versions”.
3. I’ll Only Have to Upgrade Again and Again: Trust me, future upgrades are going to be easier than ever. Don’t let this old whine of “WordPress updates too often” fool you. You are smarter than that. EVERY computer program, online and off, has regular updates. My computer nags at me daily to update something. WordPress only updates when they have to due to a security vulnerability discovered or threatened, or when they have great new features that you will benefit from. With the auto-upgrade feature, this process will be simple and painless. Upgrade now.
4. I Won’t Be Able to Use My Favorite WordPress Plugins: If your favorite WordPress Plugins work with the Administration Panels or blog comments, the odds are that they won’t work or will need to be upgraded for WordPress 2.7. Check for upgrades to all your favorite WordPress Plugins and the WordPress 2.7 Plugin Compatibility list. If you are upgrading from a pre-WordPress 2.7 beta release, use the Upgrading WordPress instructions and deactivate all Plugins before installing, then activate them one by one to test them. If you can’t live without a certain WordPress Plugin, consider donating to their cause or encouraging their continued support of their Plugin. Or search for a new replacement.
5. I Want to Wait Until All the Bugs are Fixed: WordPress 2.7 has undergone extensive development and testing. It is currently in use on almost 5 million WordPress.com blogs and has been for a couple of months, though the new interface wasn’t activated. With past versions, waiting was a good recommendation. Now, with the recent WordPress 2.6.5 security update, it is highly recommended that you take that one step further and upgrade to 2.7 now to ensure you have the latest and most secure version. Having had some of my blogs, and those of my clients and fellow bloggers, hacked because we didn’t upgrade, we know the pain. Upgrade now.
6. I’m Not Technically Inclined: One of the most exciting features of WordPress 2.7 is the auto-upgrade feature. Once you get past a little initial code on this upgrade, depending upon which version you are upgrading from, no technical inclination is required. WordPress, WordPress Themes, and WordPress Plugins all now feature automatic upgrades, making your life easier and less technical.
7. I Just Don’t Want to Upgrade: Okay, so there are some people who like making their life harder and want to stay with an older version of WordPress for whatever reasons. Quick Online Tips featured WordPress Plugins for those who wish to stay with WordPress 2.6, offering much of the functionality of WordPress 2.7. Be sure to upgrade to the latest version, WordPress 2.6.5, to maintain a secure version and protect your site from hackers and vulnerabilities. You will have to upgrade soon to maintain a secure version of WordPress, but this will delay the inevitable.

How to Get Help with WordPress 2.7 Issues

Some of the most common problems reported on the WordPress Support Forums for WordPress 2.7 are related to the following:

1. Admin Drop Down Menu WordPress Plugin: The popular Admin Drop Down Menu WordPress Plugin is no longer required for WordPress 2.7. Deactivate the Plugin if you are having trouble seeing the Dashboard or other Panels on the WordPress Administration Panels.
2. Internet Explorer Makes the Interface Look Strange: WordPress developers and designers have been fighting with Internet Explorer browser design issues, and they recommend that you use the most recent browsers available, specifically Firefox 3, Google Chrome, and Safari 3.
3. WordPress Auto Upgrade Plugin: If you are using one of the automatic upgrade WordPress Plugins, deactivate it before installing the new version of WordPress.
4. Sticky Posts: If you have been using a sticky post WordPress Plugin, deactivate it. Sticky posts are now integrated into WordPress 2.7 and available on the specific post’s Edit panel. Check the box next to the “Stick post to the front page” option.
5. Inability to Access WordPress Plugins After Upgrade: WordPress Plugin access is now found on the WordPress Dashboard Panel. If you are having trouble accessing them, link to them directly with http://example.com/wp-admin/plugins.php, substituting your own domain name.
6. A Plugin is Not Working: The Plugins are now accessed via the Dashboard. If you are having trouble finding your WordPress Plugins in 2.7, enter the address directly: http://example.com/wp-admin/plugins.php. If you have installed WordPress 2.7 correctly, there is now an Uninstall Plugin API which allows users to uninstall WordPress Plugins completely, not just deactivate them. Also check the WordPress 2.7 Plugin Compatibility list to ensure the Plugin will work with WordPress 2.7, and contact the Plugin author directly if you are having problems.
7. My Theme is Borked/Broken: If you are having trouble with your WordPress Theme after following the Migrating Plugins and Themes guide and checking the WordPress 2.7 Theme Compatibility list, then contact the Theme designer or check out the WordPress 2.7 Theme News section in WordPress 2.7 Release News and Links.

If you are still having trouble, turn to the WordPress Codex first. It’s the online manual for WordPress users.

Second, search through the WordPress Support Forums and your favorite search engine to see if others are having the same problem. Try different keywords and search terms, as people often use non-standard terms to describe their problem.

Turn to the WordPress Support Forums next to leave a request for help. Follow these guidelines to get the fastest and most appropriate help:

1. BE SPECIFIC AND CLEAR: “WordPress 2.7 is broken” is not helpful. What is broken? Is it a Plugin, a Theme, the login, a specific panel? If you do not give a clear title and helpful information in your help request, you will not get a helpful answer in return.
2. WordPress Support Volunteers and Staff are not Mind Readers: Again, be clear. Be specific. Don’t expect us to understand what you are talking about. Give us all the pertinent information you can and you may get a specific and helpful answer.
3. Make Your Request in the Right Place: If you put a request for help with a WordPress Theme design issue in the Installation section of the Support Forums, the odds are that those who hang out there can’t help you. Put your request for help in the right place.
4. Direct Your Help Request to the Right People:
   • If you are having a general problem with WordPress, then use the WordPress Support Forums.
   • If you are having trouble or a question about a WordPress Plugin, contact the Plugin author.
   • If you are having trouble with a WordPress Theme, check with the Theme author for assistance.
   • If you are having trouble with your web host or server, contact your web host.
   • If you are having a general web design issue, please search the web and visit CSS and web design forums and groups for advice and help.
   • Keep the WordPress Support Forums specifically for WordPress-related topics.
5. Be Kind and Play Nice: The WordPress Support Forums are not a place for egos or rudeness. The majority of those helping you are volunteers, giving freely of their time and WordPress expertise. Treat them kindly and you will get the support you need, and the Support Forum will be a nice playground for everyone.
6. Be Patient: If your question is a general one, it might get a fast response. If it is the same question asked many times, it may get no response, so search first to ensure you are not being redundant. If it is a sophisticated coding question that requires a specialist, be patient. They might not be online in the forum at the moment. It might take a day or two for a response. Don’t bump the post (make another comment) or leave another request unless several days have passed. Consider searching the web and finding someone with the expertise you need to help you.

Here are more tips and guides to finding help with WordPress:

More Information on WordPress 2.7

Honestly, it is worth it. Don’t wait. WordPress 2.7 is the most secure and user-friendly version available. Upgrade to WordPress 2.7 now.

Published Tue, 16 Dec 2008 05:45:05 +0000 by Lorelle VanFossen.

Gravatar: EditGrid enables Gravatar support
http://blog.gravatar.com/2008/12/15/editgrid-enables-gravatar-support/

Thomas C. wrote in to let us know that EditGrid has Gravatar support.

I’m always really excited to watch where Gravatar expands beyond the blog commenting space. EditGrid seems to have seen the upsides of using Gravatar for profiles. They don’t have to worry about writing and maintaining a cropper, file storage, serving infrastructure, backups, etc. All they have to worry about is their core business: making a killer web-based spreadsheet application.
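
The integration a site needs for this is tiny: Gravatar serves an avatar from a URL built out of the MD5 hash of the user’s normalised email address. A minimal Ruby sketch of that lookup (the helper name and default size are mine; the URL scheme and the s size parameter follow Gravatar’s documented behavior):

    require 'digest/md5'

    # Gravatar identifies an avatar by the MD5 hex digest of the
    # trimmed, lower-cased email address; 's' sets the pixel size.
    def gravatar_url(email, size = 80)
      hash = Digest::MD5.hexdigest(email.strip.downcase)
      "http://www.gravatar.com/avatar/#{hash}?s=#{size}"
    end

    puts gravatar_url('someone@example.com')
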
Published Mon, 15 Dec 2008 20:51:46 +0000 by apokalyptik.

Weblog Tools Collection: WordPress Theme Releases for 12/15
http://weblogtoolscollection.com/archives/2008/12/15/wordpress-theme-releases-for-1215/

New Africa

A Web 2.0 style WordPress theme featuring six widgetized sidebars for easy control over all the visual elements. Plenty of room to add and display all your favorite widgets.

Alex Color

Two-column, fixed-width, dark theme.

Pixeled

It features a standard 2-3 column layout and sports the following goodies: dropdown top menu for categories; customizable FeedBurner field on top right (optional - requires FeedBurner ID); widgets all around (6 positions); footer navigation and a sweet, transparent look. It’s also an Adsense-ready WordPress theme - the divisions are all made to fit standard ad formats. The welcome message on top right can also be customized through the admin panel - no need to mess with the code.

Published Mon, 15 Dec 2008 18:45:12 +0000 by Ajay.

Weblog Tools Collection: WordPress Plugin Releases for 12/14
http://weblogtoolscollection.com/archives/2008/12/14/wordpress-plugin-releases-for-1214/

New Plugins

Flash Zoom

If you have a video blog with many videos on the home page, you must control their size to fit the home page’s styles, and on the single page the video will look too small. The Flash Zoom plugin zooms the size of the Flash player without changing the Flash source.

JavaScript Logic

This plugin allows you to control when JavaScript gets loaded on your blog using WordPress conditional tags. It also lets you easily pick from WP’s built-in JavaScript and a few scripts included with the plugin.

Drop-in Slideshow

Plays a full-frame slideshow of random images from a folder. Works as a WordPress drop-in and will play images from the WordPress upload folder, auto-navigating to a post or page related to the current image.

Complete hCards

This plugin turns the hCards that WordPress 2.7 now makes out of comments by default into full hCards, including email addresses, when you’re logged in as an admin or editor, so you can save those people to your address book if you use a microformat plugin in your browser.

Updated Plugins

MyTwitter

MyTwitter allows users to display their Twitter status updates (tweets) on their WordPress site and update their status through the Settings page for MyTwitter. Includes customization options including avatar display, number of tweets to display, formatting options, and stylesheets. It can be called as a widget or a function.

GoCodes

GoCodes lets you create shortcut URLs to anywhere on the internet, right from your WordPress Admin.

Facelift Image Replacement

Facelift Image Replacement (or FLIR, pronounced fleer) is an image replacement script that dynamically generates image representations of text on your web page in fonts that otherwise might not be visible to your visitors. The generated image will be automatically inserted into your web page via JavaScript and visible to all modern browsers. Any element with text can be replaced: from headers (h1, h2) to span elements and everything in between!

Fluency Admin

Featuring the familiar “fluency look”, it has been rebuilt from the ground up to be compatible with WordPress 2.7 and beyond. The main feature, besides the overall style, is the revamped menu, with fly-out menus and custom hotkeys that can be used to navigate the menus, eliminating the need to “click to expand” menus.

PHP Speedy

It significantly speeds up the loading time of your WordPress blog by combining all CSS and JavaScript into single files.
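
The core of that idea fits in a couple of lines: serving one combined file means the browser makes a single HTTP request instead of one per asset. A toy Ruby sketch of the concatenation step, not PHP Speedy’s actual implementation, with hypothetical paths:

    # Concatenate every stylesheet into a single file so the browser
    # makes one request instead of one per stylesheet
    combined = Dir.glob('css/*.css').sort.map { |f| File.read(f) }.join("\n")
    File.open('css/combined.css', 'w') { |f| f.write(combined) }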

Lester Chan has updated his WordPress plugins and added a new plugin to the list. Check out the release post.

Published Sun, 14 Dec 2008 16:25:58 +0000 by Ajay.

bbPress: bbPress 1.0-alpha-4 released
http://bbpress.org/blog/2008/12/bbpress-10-alpha-4-released/

Hot on the heels of 1.0-alpha-3 comes 1.0-alpha-4.

I’d like to say that there are significant differences between the two releases, but this is basically a bug fix release, and more specifically a release to fix one major bug!

The download is now available from the download page.

Published Sun, 14 Dec 2008 15:31:48 +0000 by Sam Bauers.

Weblog Tools Collection: WPWeekly In The Doghouse
http://weblogtoolscollection.com/archives/2008/12/13/wpweekly-in-the-doghouse/

Episode 33 features Kenn Bell of TheDogFiles.com as well as Jason Schuller, who is one half of the Revolution 2 project. This was a great combination of guests, as Kenn initially created the design for TheDogFiles site while Jason applied his code ninja skills to make the design a reality. Here are a couple of bullet points of discussion from this episode:

• Brief discussion of WordPress 2.7
• Why WordPress was chosen as the publishing platform of choice
• The design and implementation of TheDogFiles website
• The Revolution 2 project

Last but not least, since Jason is a prominent theme designer in the WordPress community, I took the opportunity to have a small discussion with him regarding the removal of 200 themes from the WordPress repository. We did talk a little bit about the GPL and we also discussed the Revolution 2 business model.

Announcements: Andy Peatling of the BuddyPress project will be our special guest on December 19th.

WPWeekly Meta:

Next Episode: Friday December 19th, 2008

Subscribe To WPWeekly Via iTunes: Click here to subscribe

Length Of Episode: 1 Hour 1 Minute

Download The Show: WordPressWeeklyEpisode33.mp3

Listen To Episode #33:

Published Sat, 13 Dec 2008 21:39:52 +0000 by Jeff Chandler.

Weblog Tools Collection: Wordpress Notifier for Mac OSX
http://weblogtoolscollection.com/archives/2008/12/13/wordpress-notifier-for-mac-osx/
Mac OS X status bar integration

WordPress Notifier for Mac OS X: This is a freeware tool that sits in your Mac OS X status bar and displays the current unapproved comment count on your WordPress blog. WordPress Notifier works with WordPress 2.7 and above, and works for blogs on WordPress.com. The screenshots look pretty cool!

Published Sat, 13 Dec 2008 15:20:10 +0000 by Mark Ghosh.

Weblog Tools Collection: WordPress Theme Releases for 12/13
http://weblogtoolscollection.com/archives/2008/12/13/wordpress-theme-releases-for-1213-2/

Freshcitrus

Two-column, fixed-width theme.

Make it Worn

Make It Worn is a template created from the materials provided by Bart-Jan Verhoef as a tutorial on how to create websites with a worn look. This was implemented as a three-column WP theme.

WP Presstige

A Web 2.0 style WordPress theme featuring 6 widgetized sidebars. Easily customizable and ad ready. Comes with a PSD to make your own custom header.

Graphii

Three-column, widget-ready, gravatar-ready theme.

FTLThemes

Fixed-width, two-column, widget-ready and gravatar-ready theme.

Published Sat, 13 Dec 2008 12:35:21 +0000 by Ajay.

bbPress: bbPress 1.0-alpha-3 released
http://bbpress.org/blog/2008/12/bbpress-10-alpha-3-released/

Today bbPress 1.0-alpha-3 was made available via the download page.

A couple of major improvements include newly re-added support for “deep” integration and a new notification system that will report fatal errors in plugins on activation. BackPress is now also up to scratch with the code it borrows from WordPress 2.7, meaning a lot of bug fixes in those files.

I need those who are interested in the “deep” integration with WordPress to do some pretty hefty testing to ensure that it is robust enough to remain a supported option. Keep in mind that some of the existing login and cookie integration issues remain.

You can view the changes in bbPress between 1.0-alpha-2 and 1.0-alpha-3, as well as the changes that have been made to BackPress between revision 161 and 178.

Hackers out there should note that a lot of filenames have changed in the bb-includes directory.

Published Sat, 13 Dec 2008 08:35:57 +0000 by Sam Bauers.

\";}i:27;a:7:{s:5:\"title\";s:27:\"Ryan Boren: Following 2.7.1\";s:4:\"guid\";s:23:\"http://boren.nu/?p=1643\";s:4:\"link\";s:50:\"http://boren.nu/archives/2008/12/12/following-271/\";s:11:\"description\";s:296:\"

A major release segues right into bug fixing for the first maintenance release.  You can follow what is being fixed for 2.7.1 here.  So far there are five small fixes.

\";s:7:\"pubdate\";s:31:\"Fri, 12 Dec 2008 21:30:39 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ryan\";}s:7:\"summary\";s:296:\"

A major release segues right into bug fixing for the first maintenance release.  You can follow what is being fixed for 2.7.1 here.  So far there are five small fixes.

\";}i:28;a:7:{s:5:\"title\";s:22:\"Matt: WordCamp SF 2009\";s:4:\"guid\";s:20:\"http://ma.tt/?p=9764\";s:4:\"link\";s:38:\"http://ma.tt/2008/12/wordcamp-sf-2009/\";s:11:\"description\";s:102:\"

WordCamp San Francisco 2009 will be on May 30, 2009.

\";s:7:\"pubdate\";s:31:\"Fri, 12 Dec 2008 09:36:44 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:102:\"

WordCamp San Francisco 2009 will be on May 30, 2009.

\";}i:29;a:7:{s:5:\"title\";s:25:\"Ryan Boren: 100,000 in 20\";s:4:\"guid\";s:23:\"http://boren.nu/?p=1641\";s:4:\"link\";s:49:\"http://boren.nu/archives/2008/12/11/100000-in-20/\";s:11:\"description\";s:430:\"

100,000 downloads of WordPress 2.7 in 20 hours, and the pace is picking up.

\n

\"2.7

\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 23:11:40 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ryan\";}s:7:\"summary\";s:430:\"

100,000 downloads of WordPress 2.7 in 20 hours, and the pace is picking up.

\n

\"2.7

\";}i:30;a:7:{s:5:\"title\";s:20:\"Gravatar: Say Cheese\";s:4:\"guid\";s:31:\"http://blog.gravatar.com/?p=105\";s:4:\"link\";s:47:\"http://blog.gravatar.com/2008/12/11/say-cheese/\";s:11:\"description\";s:2248:\"

Some of you may have noticed that, for a while now, Gravatar.com has been sporting a fancy new method for setting your Gravatar image — taking a photo with your webcam!  I just figured that I would drop a note in here and make a “formal” announcement about it.  Just choose the “A webcam attached to your computer” option from the “Add an image” page.  A lot of people ask where they can get images to use (since Gravatar.com does not provide any images to choose from), and this is a great way to get started!

\n

Note: If you’re using a Mac with an iSight you will probably have to right click on the webcam applet, choose settings, click on the last little tab (it looks like a webcam), and choose the correct camera.  This will probably be true of any computer with multiple video sources. I would set this default for you but Flash is a black box (they protect you from malicious sites trying to play with your settings — as they should!)

\n

So… go forth… and say cheese!

\n      \"\" \"\" \"\" \"\" \"\" \"\"
\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 19:18:20 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:11:\"apokalyptik\";}s:7:\"summary\";s:2248:\"

Some of you may have noticed that, for a while now, Gravatar.com has been sporting a fancy new method for setting your Gravatar image — taking a photo with your webcam!  I just figured that I would drop a note in here and make a “formal” announcement about it.  Just choose the “A webcam attached to your computer” option from the “Add an image” page.  A lot of people ask where they can get images to use (since Gravatar.com does not provide any images to choose from), and this is a great way to get started!

\n

Note: If you’re using a Mac with an iSight you will probably have to right click on the webcam applet, choose settings, click on the last little tab (it looks like a webcam), and choose the correct camera.  This will probably be true of any computer with multiple video sources. I would set this default for you but Flash is a black box (they protect you from malicious sites trying to play with your settings — as they should!)

\n

So… go forth… and say cheese!

\n      \"\" \"\" \"\" \"\" \"\" \"\"
\";}i:31;a:7:{s:5:\"title\";s:25:\"Ryan Boren: WordPress 2.7\";s:4:\"guid\";s:23:\"http://boren.nu/?p=1638\";s:4:\"link\";s:49:\"http://boren.nu/archives/2008/12/11/wordpress-27/\";s:11:\"description\";s:1834:\"

Finally, at last, it’s here.  And now, on to 2.8.  There were several areas that we didn’t have time to re-design for 2.7.  2.8 will focus on making the media and widgets UI as good as the rest of WP.  There will be a few new features as well.  Theme browsing and one-click theme install is a likely one.  After a short rest to recover from 2.7, we’ll start brainstorming new features and put the results on the 2.8 codex page.

\n

We’ll likely do the usual .1 release in a month to address any bugs that slipped through the 2.7 beta testing cycle.

\n

A lot of people contributed to 2.7, but I’d like to give a few thank yous in particular.

\n

Jane for the UX, the wire frames, and for all of the help with managing this release.  Matt for the great visual design.  DD32 for the upgrade and install work, file system abstraction, and all of the bug fixing.  Jacob for the HTTP API and all of that phpdoc.  Aaron for quick edit. Austin for the many bug fixes and for getting the flash uploader working with Flash 10. Mike for the dashboard. Peter, Mark, and Andrew for being kick ass lead devs who put in a lot of thought, love,  and hours.  And Matt for bringing all of these great people together.

\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 18:07:32 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ryan\";}s:7:\"summary\";s:1834:\"

Finally, at last, it’s here.  And now, on to 2.8.  There were several areas that we didn’t have time to re-design for 2.7.  2.8 will focus on making the media and widgets UI as good as the rest of WP.  There will be a few new features as well.  Theme browsing and one-click theme install is a likely one.  After a short rest to recover from 2.7, we’ll start brainstorming new features and put the results on the 2.8 codex page.

\n

We’ll likely do the usual .1 release in a month to address any bugs that slipped through the 2.7 beta testing cycle.

\n

A lot of people contributed to 2.7, but I’d like to give a few thank yous in particular.

\n

Jane for the UX, the wire frames, and for all of the help with managing this release.  Matt for the great visual design.  DD32 for the upgrade and install work, file system abstraction, and all of the bug fixing.  Jacob for the HTTP API and all of that phpdoc.  Aaron for quick edit. Austin for the many bug fixes and for getting the flash uploader working with Flash 10. Mike for the dashboard. Peter, Mark, and Andrew for being kick ass lead devs who put in a lot of thought, love,  and hours.  And Matt for bringing all of these great people together.

\";}i:32;a:7:{s:5:\"title\";s:60:\"Weblog Tools Collection: WordPress Plugin Releases for 12/11\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4741\";s:4:\"link\";s:90:\"http://weblogtoolscollection.com/archives/2008/12/11/wordpress-plugin-releases-for-1211-2/\";s:11:\"description\";s:4497:\"

New Plugins

\n

WordPress Touchshop

\n

The downloadable kit contains all that you need for a WordPress e-Commerce shop to be fully compatible with customers buying with their iPhone or iPod Touch.

\n

Widgets Reloaded Plugin

\n

Replaces many of the default widgets with versions that allow much more control. Widgets come with highly customizable control panels. Each widget can also be used any number of times.

\n

Compatibility Notes

\n

Compatibility Notes is a simple WordPress plugin that will attach a list of supported WordPress versions to every plugin’s description on the “Plugins” page.

\n

Post Notes

\n

On its own, this plugin adds a text area on the sidebar of the add and edit post pages so that users can add notes for themselves or others. Requires WordPress 2.7 or higher.

\n

wp-visitors

\n

wp-visitors records information about every visitor (page load). Requested URL, Referring URL, IP address, Hostname and Browser along with Date/Time are recorded and displayed in a tabular fashion.

\n

Old Post Promoter

\n

The purpose of the plugin is to promote old posts by sending them back onto the front page and into the RSS feed. It does this by randomly choosing an eligible post and updating the publication timestamp. The post then appears to be the latest post on your WordPress blog.

\n
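By way of illustration, here is a minimal Ruby sketch of that resurfacing mechanism; the Post struct and the sample posts are invented for the example and are not the plugin's actual code:

    # Illustrative stand-in for a blog post record (not the plugin's real code).
    Post = Struct.new(:title, :published_at)

    posts = [
      Post.new('An old favourite', Time.utc(2007, 3, 1)),
      Post.new('Another oldie',    Time.utc(2006, 8, 15)),
    ]

    # Randomly choose an eligible post and refresh its publication timestamp
    # so it sorts to the top of the front page and the RSS feed again.
    promoted = posts.sample
    promoted.published_at = Time.now
    puts "Promoted: #{promoted.title} (now dated #{promoted.published_at})"

\n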

Blood Alcohol Calculator

\n

Calculates blood alcohol level based on the number of drinks and your weight.

\n

WP Title 2

\n

This plugin allows you to add and edit a Heading for your Posts/Pages, different from the Title.

\n

Quick Press Widget

\n

You can write a simple post from the front end without logging in to the back end.

\n

WP Post Sorting

\n

This plugin allows sorting of posts by Post Title (ascending or descending) or Post Date (ascending or descending), customizable for each category.

\n

Updated Plugins

\n

Dynamic Content Gallery

\n

By associating your gallery images with individual posts, using Post Custom Fields, the plugin dynamically creates the gallery from your latest and/or featured posts. Additionally, default images can be assigned to categories in the event that the necessary Post Custom Fields have not been set up. An Admin Options page enables you to select which categories and posts are linked to the gallery images.

\n

Simple Tags

\n

Simple Tags is the successor of the Simple Tagging Plugin and is THE perfect tool to manage your WP 2.3 or 2.5 tags.

\n

G-Lock Double Opt-in Manager

\n

This mailing list management plugin allows the visitors of your blog to subscribe to your mailing list using a double opt-in method.

\n

htmltidy for WordPress

\n

Runs “htmltidy” over the complete output of the blog (excluding feeds), not just the post content. Do not use in conjunction with AJAX-thingies.

\n

Ozh’ Admin Drop Down Menu

\n

All admin links available in a neat drop down menu. Go to any admin page from any admin page in 1 click.

\n

WP-RelativeDate

\n

Displays relative date alongside your post/comments actual date.

\n

TinyMCE Advanced

\n

Enables most of the advanced features of TinyMCE, the WordPress WYSIWYG editor.

\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 13:12:34 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ajay\";}s:7:\"summary\";s:4497:\"

New Plugins

\n

WordPress Touchshop

\n

The downloadable kit contains all that you need for a WordPress e-Commerce shop to be fully compatible with customers buying with their iPhone or iPod Touch.

\n

Widgets Reloaded Plugin

\n

Replaces many of the default widgets with versions that allow much more control. Widgets come with highly customizable control panels. Each widget can also be used any number of times.

\n

Compatibility Notes

\n

Compatibility Notes is a simple WordPress plugin that will attach a list of supported WordPress versions to every plugin’s description on the “Plugins” page.

\n

Post Notes

\n

On its own, this plugin adds a text area on the sidebar of the add and edit post pages so that users can add notes for themselves or others. Requires WordPress 2.7 or higher.

\n

wp-visitors

\n

wp-visitors records information about every visitor (page load). Requested URL, Referring URL, IP address, Hostname and Browser along with Date/Time are recorded and displayed in a tabular fashion.

\n

Old Post Promoter

\n

The purpose of the plugin is to promote old posts by sending them back onto the front page and into the RSS feed. It does this by randomly choosing an eligible post and updating the publication timestamp. The post then appears to be the latest post on your WordPress blog.

\n

Blood Alcohol Calculator

\n

Calculates blood alcohol level based on the number of drinks and your weight.

\n

WP Title 2

\n

This plugin allows you to add and edit a Heading for your Posts/Pages, different from the Title.

\n

Quick Press Widget

\n

You can write a simple post from the front end without logging in to the back end.

\n

WP Post Sorting

\n

This plugin allows sorting of posts by Post Title (ascending or descending) or Post Date (ascending or descending), customizable for each category.

\n

Updated Plugins

\n

Dynamic Content Gallery

\n

By associating your gallery images with individual posts, using Post Custom Fields, the plugin dynamically creates the gallery from your latest and/or featured posts. Additionally, default images can be assigned to categories in the event that the necessary Post Custom Fields have not been set up. An Admin Options page enables you to select which categories and posts are linked to the gallery images.

\n

Simple Tags

\n

Simple Tags is the successor of the Simple Tagging Plugin and is THE perfect tool to manage your WP 2.3 or 2.5 tags.

\n

G-Lock Double Opt-in Manager

\n

This mailing list management plugin allows the visitors of your blog to subscribe to your mailing list using a double opt-in method.

\n

htmltidy for WordPress

\n

Runs “htmltidy” over the complete output of the blog (excluding feeds), not just the post content. Do not use in conjunction with AJAX-thingies.

\n

Ozh’ Admin Drop Down Menu

\n

All admin links available in a neat drop down menu. Go to any admin page from any admin page in 1 click.

\n

WP-RelativeDate

\n

Displays relative date alongside your post/comments actual date.

\n

TinyMCE Advanced

\n

Enables most of the advanced features of TinyMCE, the WordPress WYSIWYG editor.

\";}i:33;a:7:{s:5:\"title\";s:48:\"Andrew: Troubleshooting TinyMCE in WordPress 2.7\";s:4:\"guid\";s:33:\"http://azaozz.wordpress.com/?p=53\";s:4:\"link\";s:79:\"http://azaozz.wordpress.com/2008/12/11/troubleshooting-tinymce-in-wordpress-27/\";s:11:\"description\";s:3889:\"

One of the many improvements in WordPress 2.7 is the updated configuration of the visual editor, TinyMCE. It was optimized to support caching better and to load faster.

\n

The compression within WordPress is gone and all editor components are included as standard Javascript, CSS and HTML files. However, a lot of servers compress these files automatically. If your server doesn’t do that, the first loading of the write/edit page will be slower, but after that the editor loads a lot faster than before, as all files are in the browser’s cache on the hard disk. And if the “Turbo” is turned on in WordPress and Gears is enabled, the speed increase is even bigger, as the browser does not have to check if any file has been updated.

\n
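If you want to check whether your own server does this, a quick Ruby sketch along these lines reports whether a static editor file comes back gzip-compressed; the URL and file path are illustrative, so substitute your own blog's address:

    require 'net/http'

    # Request a static editor file while advertising gzip support, then report
    # whether the response body came back compressed.
    uri = URI.parse('http://example.com/wp-includes/js/tinymce/tiny_mce.js')
    request = Net::HTTP::Get.new(uri.request_uri)
    request['Accept-Encoding'] = 'gzip'

    response = Net::HTTP.start(uri.host, uri.port) { |http| http.request(request) }
    puts "Content-Encoding: #{response['Content-Encoding'] || 'none'}"

\n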

In my (non-scientific) tests the loading time of the Add New Post screen went from about 5 - 8 sec. to about 1 - 2 sec. depending on the Internet connection and the computer speed.

\n

The removal of the compression within WordPress also improves compatibility with some unusual server configurations and fixes some hard-to-catch errors, for example when there are PHP errors or output starts in the current theme’s functions.php file.

\n

Currently the editor’s settings together with all Javascript files are included directly in the HTML head section of the page, making it a lot easier to troubleshoot.

\n

There are a few steps that would help with the troubleshooting if the editor doesn’t start or work properly:

\n
    \n
  1. Make sure the “Disable the visual editor when writing” checkbox in your profile is not selected.
  2. Whitelist or set your blog as “trusted” in your firewall and antivirus program.
  3. Disable Gears, clear your browser’s cache, quit it, start it again, go back to the write page and force-reload it several times, while holding down Shift (Firefox) or Ctrl (IE). In Safari select Clear Cache (from the Safari menu on Mac).
  4. Try another browser and/or another computer.
  5. Disable all plugins, clear the cache, restart the browser and try again.
  6. Delete both wp-admin and wp-includes directories and upload fresh copies from the WordPress installation package.
  7. And finally install Firefox or Opera, note any Javascript errors, especially the first one, and try searching on the support forum for a solution. If no solution exists, open a new thread including the error.
\n      \"\" \"\" \"\" \"\" \"\" \"\"
\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 11:01:18 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Andrew Ozz\";}s:7:\"summary\";s:3889:\"

One of the many improvements in WordPress 2.7 is the updated configuration of the visual editor, TinyMCE. It was optimized to support caching better and to load faster.

\n

The compression within WordPress is gone and all editor components are included as standard Javascript, CSS and HTML files. However, a lot of servers compress these files automatically. If your server doesn’t do that, the first loading of the write/edit page will be slower, but after that the editor loads a lot faster than before, as all files are in the browser’s cache on the hard disk. And if the “Turbo” is turned on in WordPress and Gears is enabled, the speed increase is even bigger, as the browser does not have to check if any file has been updated.

\n

In my (non-scientific) tests the loading time of the Add New Post screen went from about 5 - 8 sec. to about 1 - 2 sec. depending on the Internet connection and the computer speed.

\n

The removal of the compression within WordPress also improves compatibility with some unusual server configurations and fixes some hard-to-catch errors, for example when there are PHP errors or output starts in the current theme’s functions.php file.

\n

Currently the editor’s settings together with all Javascript files are included directly in the HTML head section of the page, making it a lot easier to troubleshoot.

\n

There are a few steps that would help with the troubleshooting if the editor doesn’t start or work properly:

\n
    \n
  1. Make sure the “Disable the visual editor when writing” checkbox in your profile is not selected.
  2. Whitelist or set your blog as “trusted” in your firewall and antivirus program.
  3. Disable Gears, clear your browser’s cache, quit it, start it again, go back to the write page and force-reload it several times, while holding down Shift (Firefox) or Ctrl (IE). In Safari select Clear Cache (from the Safari menu on Mac).
  4. Try another browser and/or another computer.
  5. Disable all plugins, clear the cache, restart the browser and try again.
  6. Delete both wp-admin and wp-includes directories and upload fresh copies from the WordPress installation package.
  7. And finally install Firefox or Opera, note any Javascript errors, especially the first one, and try searching on the support forum for a solution. If no solution exists, open a new thread including the error.
\n      \"\" \"\" \"\" \"\" \"\" \"\"
\";}i:34;a:7:{s:5:\"title\";s:42:\"Lorelle on WP: WordPress 2.7 Available Now\";s:4:\"guid\";s:36:\"http://lorelle.wordpress.com/?p=3342\";s:4:\"link\";s:67:\"http://lorelle.wordpress.com/2008/12/10/wordpress-27-available-now/\";s:11:\"description\";s:10574:\"

\"WordPressMark Jaquith has just announced that WordPress 2.7 is now live and ready for download. The official announcement is now out and confirms the news.

\n

This version of WordPress is named “Coltrane” for famed jazz saxophonist John Coltrane, a favorite of sax-playing WordPress founder Matt Mullenweg.

\n

There are a lot of new features and improvements, including the new interface. For examples, screenshots, video, and information on the new WordPress 2.7 version, see WordPress 2.7 Release News and Links, Countdown to WordPress 2.7 and WordPress 2.7 Hits WordPress.com Thursday, December 4, 2008.

\n

WordPress.com has been running WordPress 2.7 for a week across almost 5 million blogs and the response has been incredibly positive. Old users are finding the learning curve almost non-existent and new users are embracing it.

\n

The hard work done by all of the development team, especially by Jane Wells, who led the conversion to the new interface, deserves some serious rounds of applause.

\n

For those concerned about upgrading to WordPress, and how often those updates occur, take a step down memory lane with Justin Tadlock in “Be Thankful WordPress Gets Updated.” He takes you through the various versions and why we should be so thankful for the most current versions, considering the path it took to get here.

\n

Upgrade WordPress Now!

\n

Unlike with previous versions, where it’s a good idea to wait, I recommend upgrading to WordPress 2.7 now. It is very stable and has been well tested on WordPress.com. It also includes the latest security updates. Once installed, it will notify you of automatic upgrades, making the upgrade process incredibly easy.

\n

If you are using one of the Release Candidates, the upgrade announcement should be on your blog. Follow the extended upgrade instructions. In the past, it was highly recommended that you turn off all WordPress Plugins before upgrading. WordPress 2.7 will automatically put WordPress in “maintenance mode” so this step is no longer necessary.

\n

To upgrade from older versions of WordPress, follow the instructions in Upgrading WordPress in the WordPress Codex, the online manual for WordPress Users. Depending on which version of WordPress you are upgrading from, you may or may not have to make changes to your WordPress Theme to accommodate changes in WordPress Theme Template Tags.

\n

If you are worried about whether or not WordPress 2.7 will work with your server, Themes or Plugins, see:

\n\n

Don’t wait. It is critical that you upgrade WordPress now for the following reasons:

\n\n

Upgrade to WordPress 2.7 now.

\n

\"\"
\n


\n

Site Search Tags: wordpress news, wordpress versions, wordpress 2.7, wordpress 2.7 available, upgrade wordpress, reasons to upgrade wordpress, wordpress security, upgrade now, upgrade, upgrade news, upgrade tips, wordpress codex, wordpress upgrade

\n

\"Feed Subscribe \"FeedburnerVia Feedburner \"\"Subscribe by Email \"\"Visit
Copyright Lorelle VanFossen, the author of Blogging Tips, What Bloggers Won\'t Tell You About Blogging.

\nPosted in WordPress News
\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 03:53:15 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:17:\"Lorelle VanFossen\";}s:7:\"summary\";s:10574:\"

\"WordPressMark Jaquith has just announced that WordPress 2.7 is now live and ready for download. The official announcement is now out and confirms the news.

\n

This version of WordPress is named “Coltrane” for famed jazz saxophonist John Coltrane, a favorite of sax-playing WordPress founder Matt Mullenweg.

\n

There are a lot of new features and improvements, including the new interface. For examples, screenshots, video, and information on the new WordPress 2.7 version, see WordPress 2.7 Release News and Links, Countdown to WordPress 2.7 and WordPress 2.7 Hits WordPress.com Thursday, December 4, 2008.

\n

WordPress.com has been running WordPress 2.7 for a week across almost 5 million blogs and the response has been incredibly positive. Old users are finding the learning curve almost non-existent and new users are embracing it.

\n

The hard work done by all of the development team, especially by Jane Wells, who led the conversion to the new interface, deserves some serious rounds of applause.

\n

For those concerned about upgrading to WordPress, and how often those updates occur, take a step down memory lane with Justin Tadlock in “Be Thankful WordPress Gets Updated.” He takes you through the various versions and why we should be so thankful for the most current versions, considering the path it took to get here.

\n

Upgrade WordPress Now!

\n

Unlike with previous versions, where it’s a good idea to wait, I recommend upgrading to WordPress 2.7 now. It is very stable and has been well tested on WordPress.com. It also includes the latest security updates. Once installed, it will notify you of automatic upgrades, making the upgrade process incredibly easy.

\n

If you are using one of the Release Candidates, the upgrade announcement should be on your blog. Follow the extended upgrade instructions. In the past, it was highly recommended that you turn off all WordPress Plugins before upgrading. WordPress 2.7 will automatically put WordPress in “maintenance mode” so this step is no longer necessary.

\n

To upgrade from older versions of WordPress, follow the instructions in Upgrading WordPress in the WordPress Codex, the online manual for WordPress Users. Depending on which version of WordPress you are upgrading from, you may or may not have to make changes to your WordPress Theme to accommodate changes in WordPress Theme Template Tags.

\n

If you are worried about whether or not WordPress 2.7 will work with your server, Themes or Plugins, see:

\n\n

Don’t wait. It is critical that you upgrade WordPress now for the following reasons:

\n\n

Upgrade to WordPress 2.7 now.

\n

\"\"
\n


\n

Site Search Tags: wordpress news, wordpress versions, wordpress 2.7, wordpress 2.7 available, upgrade wordpress, reasons to upgrade wordpress, wordpress security, upgrade now, upgrade, upgrade news, upgrade tips, wordpress codex, wordpress upgrade

\n

\"Feed Subscribe \"FeedburnerVia Feedburner \"\"Subscribe by Email \"\"Visit
Copyright Lorelle VanFossen, the author of Blogging Tips, What Bloggers Won\'t Tell You About Blogging.

\nPosted in WordPress News
\";}i:35;a:7:{s:5:\"title\";s:9:\"Matt: 2.7\";s:4:\"guid\";s:20:\"http://ma.tt/?p=9760\";s:4:\"link\";s:30:\"http://ma.tt/2008/12/twoseven/\";s:11:\"description\";s:261:\"

WordPress 2.7 “Coltrane” is live to the world. So many people put so much into this release, all I can really say is “thank you.”

\n

Check out the release video:

\n

\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 03:53:11 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:261:\"

WordPress 2.7 “Coltrane” is live to the world. So many people put so much into this release, all I can really say is “thank you.”

\n

Check out the release video:

\n

\";}i:36;a:7:{s:5:\"title\";s:38:\"Dev Blog: WordPress 2.7 “Coltrane”\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=477\";s:4:\"link\";s:50:\"http://wordpress.org/development/2008/12/coltrane/\";s:11:\"description\";s:11709:\"

The first thing you’ll notice about 2.7 is its new interface. From the top down, we’ve listened to your feedback and thought deeply about the design and the result is a WordPress that’s just plain faster. Nearly every task you do on your blog will take fewer clicks and be faster in 2.7 than it did in a previous version. (Download it now, or read on for more.)

\n

Next you’ll begin to notice the new features subtly sprinkled through the new interface: the new dashboard that you can arrange with drag and drop to put the things most important to you on top, QuickPress, comment threading, paging, and the ability to reply to comments from your dashboard, the ability to install any plugin directly from WordPress.org with a single click, and sticky posts.

\n

Digging in further you might notice that every screen is customizable. Let’s say you never care about author on your post listings — just click “Screen Options” and uncheck it and it’s instantly gone from the page. The same for any module on the dashboard or write screen. If your screen is narrow and the menu is taking up too much horizontal room, click the arrow to minimize it to be icon-only, and then go to the write page and drag and drop everything from the right column into the main one, so your posting area is full-screen. (For example I like hiding everything except categories, tags, and publish. I put categories and tags on the right, and publish under the post box.)

\n

For a visual introduction to what 2.7 is, check out this video (available in HD, and full screen):

\n

\n

It’s all about you. It’s the next generation of WordPress, which is why we’ve bestowed it with the honor of being named for John Coltrane. And you can download it today.

\n

Last, but certainly not least, this may be the last time you ever have to manually upgrade WordPress again. We heard how tired you were of doing upgrades for yourself and your friends, so now WordPress includes a built-in upgrade that will automatically notify you of new releases, and when you’re ready it will download them, install them, and upgrade your blog with a single click.

\n

(As with any interface change, it may take a little bit of time to acclimate yourself, but soon you’ll find yourself whizzing through the screens. Even people who hated it at first tell us that after a few days they wonder how they got by before.)

\n

The Story Behind 2.7

\n

The real reason Coltrane is such a huge leap forward is because the community was so involved with every step of the process. Over 150 people contributed code directly to the release, our highest ever, with many tens of thousands more participating in the polls, surveys, tests, mailing lists, and other feedback mechanisms the WordPress dev team used in putting this release together.

\n

For some of the back story in the development of 2.7, check out these blog posts (thanks to WeblogToolsCollection for the list):

\n\n

This was interesting to us, a blogging software release we actually blogged about, but the process was hugely informative. Prior to its release today, Crazyhorse and 2.7 had been tested by tens of thousands of people on their blogs, hundreds of thousands if you count .com. The volume of feedback was so high that we decided to push back the release date a month to take time to incorporate it all and do more revisions based on what you guys said.

\n

For those of you wondering why we didn’t call this release 3.0, it’s because we abhor version number inflation. 3.0 will just be the next release after 2.9. The approach of shipping major features in point releases also works well for products like OS X, with huge changes between 10.3 and 10.4.

\n

The Future

\n

Those of you following along at home might have noticed this was our second major redesign of WordPress this year. Whoa nelly! While that wasn’t ideal, and I especially sympathize with those of you creating books or tutorials around WordPress, there’s good news. The changes to WordPress in 2.5 and 2.7 were necessary for us to break free of much of the legacy cruft and interface bloat that had gradually built up over the years and, more importantly, to provide us with a UI framework and interface language we can use as the foundation to build tomorrow’s WordPress on, to express ideas we haven’t been able to before. So at the end of 2009 I expect, interface-wise, WordPress to look largely the same as it does now.

\n

That said, we couldn’t be more excited about the future with regards to features. Now that we’ve cleared out more basic things, we are looking forward in the coming year to really tackling media handling including audio and video, better tools for plugin and theme developers, widgets, theme updates, more integrated and contextual help, and easier integration with projects like BuddyPress and bbPress.

\n

Thank Yous

\n

We would like to take a moment to thank the following WordPress.org users for being a part of 2.7: Verena Segert, Ben Dunkle, 082net, _ck_, Aaron Brazell, Aaron Campbell, Aaron Harp, aaron_guitar, abackstrom, Alex Rabe, Alex Shiels, anderswc, andr, Andrew Ozz, andy, Andy Peatling, Austin Matzko, axelseaa, bendalton, Benedict Eastaugh, Betsy Kimak, Björn Wijers, bobrik, brianwhite, bubel, Byrne Reese, caesarsgrunt, capripot, Casey Bisson, Charles E. Frees-Melvin, Chris Johnston, codestyling, corischlegel, count_0, Daniel Jalkut, Daniel Torreblanca, David McFarlane, dbuser123, Demetris Kikizas, Dion Hulse, docwhat, Donncha O Caoimh, Doug Stewart, Dougal Campbell, dsader, dtsn, dwc, g30rg3x, guillep2k, Hailin Wu, Hans Engel, Jacob Santos, Jamie Rumbelow, Jan Brasna, Jane Wells, Jean-LucfromBrussels, Jennifer Hodgdon, Jeremy Clarke, Jérémie Bresson, jick, Joe Taiabjee, John Blackbourn, John Conners, John Lamansky, johnhennmacc, Joost de Valk, Joseph Scott, kashani, Kim Parsell, Lloyd Budd, Lutz Schröer, Malaiac, Mark Jaquith, Mark Steel, Matt Freedman, Matt Mullenweg, Matt Thomas, matthewh84, mattyrob, mcs_trekkie, Michael Adams, Michael Hampton, MichaelH, mictasm, Mike Schinkel, msi08, msw0418, mtekk, Nick Momrik, Nikolay Bachiyski, Noel Jackson, Otto, Ozh, paddya, paul, pedrop, pishmishy, Po0ky, RanYanivHartstein, raychampagne, rdworth, reinkim, rickoman, rm53, rnt, Robert Accettura, roganty, Ryan Boren, Ryan McCue, Sam Bauers, Sam_a, schiller, Scott Houst, sekundek, Shane, Simek, Simon Wheatley, sivel, st_falcon, stefano, strider72, tai, takayukister, techcookies, Terragg, thinlight, tott, Trevor Fitzgerald, tschai, Txanny, Valiallah (Mani) Monajjemi, Viper007Bond, Vladimir Kolesnikov, wasp, wet, wfrantz, x11tech, xknown, xorax, ydekproductions, yoavf, yonosoytu, yoshi, zedlander

\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 02:28:57 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:11709:\"

The first thing you’ll notice about 2.7 is its new interface. From the top down, we’ve listened to your feedback and thought deeply about the design and the result is a WordPress that’s just plain faster. Nearly every task you do on your blog will take fewer clicks and be faster in 2.7 than it did in a previous version. (Download it now, or read on for more.)

\n

Next you’ll begin to notice the new features subtly sprinkled through the new interface: the new dashboard that you can arrange with drag and drop to put the things most important to you on top, QuickPress, comment threading, paging, and the ability to reply to comments from your dashboard, the ability to install any plugin directly from WordPress.org with a single click, and sticky posts.

\n

Digging in further you might notice that every screen is customizable. Let’s say you never care about author on your post listings — just click “Screen Options” and uncheck it and it’s instantly gone from the page. The same for any module on the dashboard or write screen. If your screen is narrow and the menu is taking up too much horizontal room, click the arrow to minimize it to be icon-only, and then go to the write page and drag and drop everything from the right column into the main one, so your posting area is full-screen. (For example I like hiding everything except categories, tags, and publish. I put categories and tags on the right, and publish under the post box.)

\n

For a visual introduction to what 2.7 is, check out this video (available in HD, and full screen):

\n

\n

It’s all about you. It’s the next generation of WordPress, which is why we’ve bestowed it with the honor of being named for John Coltrane. And you can download it today.

\n

Last, but certainly not least, this may be the last time you ever have to manually upgrade WordPress again. We heard how tired you were of doing upgrades for yourself and your friends, so now WordPress includes a built-in upgrade that will automatically notify you of new releases, and when you’re ready it will download them, install them, and upgrade your blog with a single click.

\n

(As with any interface change, it may take a little bit of time to acclimate yourself, but soon you’ll find yourself whizzing through the screens. Even people who hated it at first tell us that after a few days they wonder how they got by before.)

\n

The Story Behind 2.7

\n

The real reason Coltrane is such a huge leap forward is because the community was so involved with every step of the process. Over 150 people contributed code directly to the release, our highest ever, with many tens of thousands more participating in the polls, surveys, tests, mailing lists, and other feedback mechanisms the WordPress dev team used in putting this release together.

\n

For some of the back story in the development of 2.7, check out these blog posts (thanks to WeblogToolsCollection for the list):

\n\n

This was interesting to us, a blogging software release we actually blogged about, but the process was hugely informative. Prior to its release today, Crazyhorse and 2.7 had been tested by tens of thousands of people on their blogs, hundreds of thousands if you count .com. The volume of feedback was so high that we decided to push back the release date a month to take time to incorporate it all and do more revisions based on what you guys said.

\n

For those of you wondering why we didn’t call this release 3.0, it’s because we abhor version number inflation. 3.0 will just be the next release after 2.9. The approach of shipping major features in point releases also works well for products like OS X, with huge changes between 10.3 and 10.4.

\n

The Future

\n

Those of you following along at home might have noticed this was our second major redesign of WordPress this year. Whoa nelly! While that wasn’t ideal, and I especially sympathize with those of you creating books or tutorials around WordPress, there’s good news. The changes to WordPress in 2.5 and 2.7 were necessary for us to break free of much of the legacy cruft and interface bloat that had gradually built up over the years and, more importantly, to provide us with a UI framework and interface language we can use as the foundation to build tomorrow’s WordPress on, to express ideas we haven’t been able to before. So at the end of 2009 I expect, interface-wise, WordPress to look largely the same as it does now.

\n

That said, we couldn’t be more excited about the future with regards to features. Now that we’ve cleared out more basic things, we are looking forward in the coming year to really tackling media handling including audio and video, better tools for plugin and theme developers, widgets, theme updates, more integrated and contextual help, and easier integration with projects like BuddyPress and bbPress.

\n

Thank Yous

\n

We would like to take a moment to thank the following WordPress.org users for being a part of 2.7: Verena Segert, Ben Dunkle, 082net, _ck_, Aaron Brazell, Aaron Campbell, Aaron Harp, aaron_guitar, abackstrom, Alex Rabe, Alex Shiels, anderswc, andr, Andrew Ozz, andy, Andy Peatling, Austin Matzko, axelseaa, bendalton, Benedict Eastaugh, Betsy Kimak, Björn Wijers, bobrik, brianwhite, bubel, Byrne Reese, caesarsgrunt, capripot, Casey Bisson, Charles E. Frees-Melvin, Chris Johnston, codestyling, corischlegel, count_0, Daniel Jalkut, Daniel Torreblanca, David McFarlane, dbuser123, Demetris Kikizas, Dion Hulse, docwhat, Donncha O Caoimh, Doug Stewart, Dougal Campbell, dsader, dtsn, dwc, g30rg3x, guillep2k, Hailin Wu, Hans Engel, Jacob Santos, Jamie Rumbelow, Jan Brasna, Jane Wells, Jean-LucfromBrussels, Jennifer Hodgdon, Jeremy Clarke, Jérémie Bresson, jick, Joe Taiabjee, John Blackbourn, John Conners, John Lamansky, johnhennmacc, Joost de Valk, Joseph Scott, kashani, Kim Parsell, Lloyd Budd, Lutz Schröer, Malaiac, Mark Jaquith, Mark Steel, Matt Freedman, Matt Mullenweg, Matt Thomas, matthewh84, mattyrob, mcs_trekkie, Michael Adams, Michael Hampton, MichaelH, mictasm, Mike Schinkel, msi08, msw0418, mtekk, Nick Momrik, Nikolay Bachiyski, Noel Jackson, Otto, Ozh, paddya, paul, pedrop, pishmishy, Po0ky, RanYanivHartstein, raychampagne, rdworth, reinkim, rickoman, rm53, rnt, Robert Accettura, roganty, Ryan Boren, Ryan McCue, Sam Bauers, Sam_a, schiller, Scott Houst, sekundek, Shane, Simek, Simon Wheatley, sivel, st_falcon, stefano, strider72, tai, takayukister, techcookies, Terragg, thinlight, tott, Trevor Fitzgerald, tschai, Txanny, Valiallah (Mani) Monajjemi, Viper007Bond, Vladimir Kolesnikov, wasp, wet, wfrantz, x11tech, xknown, xorax, ydekproductions, yoavf, yonosoytu, yoshi, zedlander

\";}i:37;a:7:{s:5:\"title\";s:42:\"Mark Jaquith: WordPress 2.7 Now Available!\";s:4:\"guid\";s:39:\"http://markjaquith.wordpress.com/?p=228\";s:4:\"link\";s:71:\"http://markjaquith.wordpress.com/2008/12/10/wordpress-27-now-available/\";s:11:\"description\";s:4742:\"

WordPress 2.7 is now available! Users with a 2.7 beta or RC version can just go to Tools → Upgrade to get the latest version.

\n

I’m incredibly excited to finally be able to release this to the world. For months, the entire WordPress core development team has been obsessing over this release. We’ve agonized over the smallest of details. We’ve spent long nights refining and tweaking when we should have long ago given up and gone to bed. The people-hours that have gone into this are tremendous, but more than that, we’ve poured our hearts and our souls into this release.

\n

That’s not to say that it’s perfect. Some part of us wants to hoard this release away until we can polish it to complete perfection. But that wouldn’t be fair to you. It’s time to show our work. We hope you like it. We’re incredibly proud of it.

\n

It’s certainly not a small change, especially for the WordPress admin. Change is difficult, even when for the better, so give it a fair trial before judging it. You’ll have developed “muscle memory” for certain tasks, so it will take a little while to rewire your brain. We think that once you’ve given yourself a chance to learn the new positions and functions, you’ll find that you’re a lot faster, and a lot more efficient at managing your WordPress site.

\n

Favorite New Features

\n

Without a doubt, my favorite new feature is comment moderation keyboard shortcuts. It has bothered me for a long time that comment moderation was such a tedious chore. Keyboard shortcuts (and the new inline reply) make it significantly less of a chore.

\n

The new customizable post screen is a close second. I love that I can completely hide away all of the stuff that I don’t use (by clicking the “Screen Options” tab), and keep tags and categories on the right, while increasing the size of my post content box.

\n

The new Publish module on the post screen is a personal treasure of mine. I took point on that, with some excellent design guidance from Jane Wells. It moves the “Save Draft” and “Publish” buttons far apart (a common complaint was that their proximity led to accidental premature publishing). The Preview button now shows you the most recent changes to your post… not just the last saved version. Additionally, you can preview changes on published posts without those changes being shown publicly (until you’re ready). The Visibility section is new, and contains the functionality of private posts, password-protected posts, and sticky posts. Future posting is a lot more clear now. When you edit the time stamp to point to a future date, the “Publish” button becomes “Schedule.” All these changes were made to make your Publish module function predictably, so that you’re never wondering what happens when you click something.

\n

The new menu system is great. I operate it in folded mode, so that my content can really take center stage.

\n

There are too many new features to mention, and too many enhancements too small to make the features list (such as improved canonical URL support for even better SEO). Give it a try, and let us know what you think!

\n      \"\" \"\" \"\" \"\" \"\" \"\"
\";s:7:\"pubdate\";s:31:\"Thu, 11 Dec 2008 02:04:37 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:12:\"Mark Jaquith\";}s:7:\"summary\";s:4742:\"

WordPress 2.7 is now available! Users with a 2.7 beta or RC version can just go to Tools → Upgrade to get the latest version.

\n

I’m incredibly excited to finally be able to release this to the world. For months, the entire WordPress core development team has been obsessing over this release. We’ve agonized over the smallest of details. We’ve spent long nights refining and tweaking when we should have long ago given up and gone to bed. The people-hours that have gone into this are tremendous, but more than that, we’ve poured our hearts and our souls into this release.

\n

That’s not to say that it’s perfect. Some part of us wants to hoard this release away until we can polish it to complete perfection. But that wouldn’t be fair to you. It’s time to show our work. We hope you like it. We’re incredibly proud of it.

\n

It’s certainly not a small change, especially for the WordPress admin. Change is difficult, even when for the better, so give it a fair trial before judging it. You’ll have developed “muscle memory” for certain tasks, so it will take a little while to rewire your brain. We think that once you’ve given yourself a chance to learn the new positions and functions, you’ll find that you’re a lot faster, and a lot more efficient at managing your WordPress site.

\n

Favorite New Features

\n

Without a doubt, my favorite new feature is comment moderation keyboard shortcuts. It has bothered me for a long time that comment moderation was such a tedious chore. Keyboard shortcuts (and the new inline reply) make it significantly less of a chore.

\n

The new customizable post screen is a close second. I love that I can completely hide away all of the stuff that I don’t use (by clicking the “Screen Options” tab), and keep tags and categories on the right, while increasing the size of my post content box.

\n

The new Publish module on the post screen is a personal treasure of mine. I took point on that, with some excellent design guidance from Jane Wells. It moves the “Save Draft” and “Publish” buttons far apart (a common complaint was that their proximity led to accidental premature publishing). The Preview button now shows you the most recent changes to your post… not just the last saved version. Additionally, you can preview changes on published posts without those changes being shown publicly (until you’re ready). The Visibility section is new, and contains the functionality of private posts, password-protected posts, and sticky posts. Future posting is a lot more clear now. When you edit the time stamp to point to a future date, the “Publish” button becomes “Schedule.” All these changes were made to make your Publish module function predictably, so that you’re never wondering what happens when you click something.

\n

The new menu system is great. I operate it in folded mode, so that my content can really take center stage.

\n

There are too many new features to mention, and too many enhancements too small to make the features list (such as improved canonical URL support for even better SEO). Give it a try, and let us know what you think!

\n      \"\" \"\" \"\" \"\" \"\" \"\"
\";}i:38;a:7:{s:5:\"title\";s:59:\"Weblog Tools Collection: WordPress Theme Releases for 12/10\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4737\";s:4:\"link\";s:87:\"http://weblogtoolscollection.com/archives/2008/12/10/wordpress-theme-releases-for-1210/\";s:11:\"description\";s:5125:\"

Aeros

\n

\"aeros\"

\n

Two column theme ready for WordPress 2.7 with customizable backgrounds, paged, threaded and nested comments.

\n

Sacred Blue

\n

\"sacredblue2\"

\n

Sacred Blue is a clean two-column and widget-ready theme sporting a bright color scheme. It comes with an options page where you can customize various features of the theme like the Ads, Navigation menu items etc.

\n

QuickPic

\n

\"quickpic-demo\"

\n

QuickPic is a 1-column, widget-ready, SEO-optimized, Ad-ready, compatible WordPress theme which is very useful for all types of bloggers, in particular for photobloggers.

\n

Evening Sun

\n

\"Evening-Sun\"

\n

Two column theme with a beautiful sun header, widgetized sidebar, threaded comments support. Compatible with WordPress 2.7.

\n

Miniflex

\n

\"miniflex\"

\n

Miniflex takes advantage of the footer for the widget enabled sidebar. This one column theme is clean with few colors, nice little features and the use of typography to make it more elegant.

\n

iPhone 3G

\n

\"iphone3G\"

\n

This theme is based on the ‘iPhone PSD file’ from Maniche, and the clock is the ‘Live Lite Clock’ script by Mark Plachetta.

\n

Plainscape

\n

\"plainscape-10-screenshot-640px-300x226\"

\n

Fixed width, two column theme with support for comments threading (WP 2.7), comments paging (WP 2.7), gravatars, post tags, sidebar widgets

\n

Old School

\n

\"OldSchool\"

\n

Old School is a child theme of the Hybrid theme framework. It takes advantage of WordPress 2.7’s new features such as threaded comments, but it’s also backward compatible with WP 2.6. It has an additional page template with a tabbed and widgetized feature section along with a tabbed category section. It also has two widget sections: Sidebar and Footer.

\n

TimeCafe

\n

\"TimeCafe\"

\n

Fixed width, two column, adsense and gravatar ready with custom header images

\n

IMAC

\n

\"imac\"

\n

The WordPress theme is 3 column and widgetized, and the header image can be changed to suit a lot of variations. The theme is also handy for setting up PhpBay stores.

\n

You may also want to check out Blogsessive’s 20 Free Corporate WordPress Themes.

\";s:7:\"pubdate\";s:31:\"Wed, 10 Dec 2008 18:11:16 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ajay\";}s:7:\"summary\";s:5125:\"

Aeros

\n

\"aeros\"

\n

Two column theme ready for WordPress 2.7 with customizable backgrounds, paged, threaded and nested comments.

\n

Sacred Blue

\n

\"sacredblue2\"

\n

Sacred Blue is a clean two-column and widget-ready theme sporting a bright color scheme. It comes with an options page where you can customize various features of the theme like the Ads, Navigation menu items etc.

\n

QuickPic

\n

\"quickpic-demo\"

\n

QuickPic is a 1-column, widget-ready, SEO-optimized, Ad-ready, compatible WordPress theme which is very useful for all types of bloggers, in particular for photobloggers.

\n

Evening Sun

\n

\"Evening-Sun\"

\n

Two column theme with a beautiful sun header, widgetized sidebar, threaded comments support. Compatible with WordPress 2.7.

\n

Miniflex

\n

\"miniflex\"

\n

Miniflex takes advantage of the footer for the widget enabled sidebar. This one column theme is clean with few colors, nice little features and the use of typography to make it more elegant.

\n

iPhone 3G

\n

\"iphone3G\"

\n

This theme is based on the ‘iPhone PSD file’ from Maniche, and the clock is the ‘Live Lite Clock’ script by Mark Plachetta.

\n

Plainscape

\n

\"plainscape-10-screenshot-640px-300x226\"

\n

Fixed width, two column theme with support for comments threading (WP 2.7), comments paging (WP 2.7), gravatars, post tags, sidebar widgets

\n

Old School

\n

\"OldSchool\"

\n

Old School is a child theme of the Hybrid theme framework. It takes advantage of WordPress 2.7’s new features such as threaded comments, but it’s also backward compatible with WP 2.6. It has an additional page template with a tabbed and widgetized feature section along with a tabbed category section. It also has two widget sections: Sidebar and Footer.

\n

TimeCafe

\n

\"TimeCafe\"

\n

Fixed width, two column, adsense and gravatar ready with custom header images

\n

IMAC

\n

\"imac\"

\n

The WordPress theme is 3 column and widgetized, and the header image can be changed to suit a lot of variations. The theme is also handy for setting up PhpBay stores.

\n

You may also want to check out Blogsessive’s 20 Free Corporate WordPress Themes.

\";}i:39;a:7:{s:5:\"title\";s:42:\"bbPress: bbPress 0.9.0.3 (stable) released\";s:4:\"guid\";s:25:\"http://bbpress.org/?p=114\";s:4:\"link\";s:61:\"http://bbpress.org/blog/2008/12/bbpress-0903-stable-released/\";s:11:\"description\";s:767:\"

I’ve pushed out a new stable version today, as a fairly major flaw was discovered that affected users of the previous stable release, 0.9.0.2.

\n

The flaw meant that regular users could not edit their own email address at all via their profile page. That sounds like the kind of thing that would take less than 8 months to float to the surface, but there you go.

\n

This release contains some other, less important changes as well, which you can view here.

\n

The release is recommended for all users of current bbPress 0.9.x versions.

\n

Download bbPress 0.9.0.3 here.

\";s:7:\"pubdate\";s:31:\"Wed, 10 Dec 2008 09:59:56 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Sam Bauers\";}s:7:\"summary\";s:767:\"

I’ve pushed out a new stable version today, as a fairly major flaw was discovered that affected users of the previous stable release, 0.9.0.2.

\n

The flaw meant that regular users could not edit their own email address at all via their profile page. That sounds like the kind of thing that would take less than 8 months to float to the surface, but there you go.

\n

This release contains some other, less important changes as well, which you can view here.

\n

The release is recommended for all users of current bbPress 0.9.x versions.

\n

Download bbPress 0.9.0.3 here.

\";}i:40;a:7:{s:5:\"title\";s:37:\"Gravatar: WordPress.com Now Gravified\";s:4:\"guid\";s:30:\"http://blog.gravatar.com/?p=94\";s:4:\"link\";s:63:\"http://blog.gravatar.com/2008/12/10/wordpresscom-now-gravified/\";s:11:\"description\";s:2984:\"

I still remember the post Matt made back when we announced that we’d acquired Gravatar.com… “Of course I should be able to have my avatar wherever I go! Of course email is a great way to key it! Of course there should be an open API for any platform!” And we meant it. We have always intended to make Gravatar.com THE avatar system for WordPress.com — it just made sense.

\n
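To make the “email as key” idea concrete, here is a minimal Ruby sketch of how any platform can construct a Gravatar image URL from an email address; the helper name and default size are our own choices for the example, not an official client:

    require 'digest/md5'

    # Gravatar keys an avatar to the MD5 hash of the trimmed, lowercased email.
    def gravatar_url(email, size = 80)
      hash = Digest::MD5.hexdigest(email.strip.downcase)
      "http://www.gravatar.com/avatar/#{hash}?s=#{size}"
    end

    puts gravatar_url('someone@example.com')

\n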

We flipped the switch on the evening of Friday, Dec 5, 2008.  I’m happy to report that there was only minimal headache involved all around.  I’m also happy to note that the extra traffic (to the tune of something like 900 requests per second during peak usage) had a fairly negligible impact on Gravatar’s infrastructure.  I mention this to lay to rest any doubts that you might have about Gravatar’s ability to handle the extra requests that even high-traffic web services might send our way.

\n

Just as WordPress.com will continue to focus on being the best hosted blogging platform that it can be, Gravatar will focus on being the best avatar platform that it can be. We’ll continue to grow both products, and continue to mature the integration between them. We’re also working on eventually allowing UI integration with non-Automattic-owned sites (but let’s not get ahead of ourselves).

\n

I’m looking forward to announcing some more really cool stuff for both developers and users…  Some API goodness, and some UI improvements.  I’ll also continue working, as always, under the hood to make Gravatar as fast and reliable as possible.

\n

Cheers until then!
\nDK

\n      \"\" \"\" \"\" \"\" \"\" \"\"
\";s:7:\"pubdate\";s:31:\"Wed, 10 Dec 2008 01:15:04 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:11:\"apokalyptik\";}s:7:\"summary\";s:2984:\"

I still remember the post Matt made back when we announced that we’d acquired Gravatar.com… “Of course I should be able to have my avatar wherever I go! Of course email is a great way to key it! Of course there should be an open API for any platform!” And we meant it. We have always intended to make Gravatar.com THE avatar system for WordPress.com — it just made sense.

\n

We flipped the switch on the evening of Friday, Dec 5, 2008.  I’m happy to report that there was only minimal headache involved all around.  I’m also happy to note that the extra traffic (to the tune of something like 900 requests per second during peak usage) had a fairly negligible impact on Gravatar’s infrastructure.  I mention this to lay to rest any doubts that you might have about Gravatar’s ability to handle the extra requests that even high-traffic web services might send our way.

\n

Just as WordPress.com will continue to focus on being the best hosted blogging platform that it can be, Gravatar will focus on being the best avatar platform that it can be. We’ll continue to grow both products, and continue to mature the integration between them. We’re also working on eventually allowing UI integration with non-Automattic-owned sites (but let’s not get ahead of ourselves).

\n

I’m looking forward to announcing some more really cool stuff for both developers and users…  Some API goodness, and some UI improvements.  I’ll also continue working, as always, under the hood to make Gravatar as fast and reliable as possible.

\n

Cheers until then!
\nDK

\n      \"\" \"\" \"\" \"\" \"\" \"\"
\";}i:41;a:7:{s:5:\"title\";s:35:\"Dev Blog: 2.7 Release Candidate Two\";s:4:\"guid\";s:39:\"http://wordpress.org/development/?p=474\";s:4:\"link\";s:66:\"http://wordpress.org/development/2008/12/27-release-candidate-two/\";s:11:\"description\";s:835:\"

There comes a time in every WordPress release when it’s ready for the world, to come out of its cocoon and feel the light of the world on its wings for the first time.

\n

It’s not quite that time yet, but we’re as close as we’ve ever been, hence the immediate availability of 2.7 Release Candidate 2, or RC2 for short.

\n

Of course if you were already testing 2.7, you can just use the built-in core updater (Tools > Upgrade) to download and install RC2 for you (and later upgrade you to the final release when it’s available) but if not you can use the download link above.

\n

We feel this release is pretty much exactly what we’re going to ship as 2.7, barring any final bugs or polish tweaks that you report or we find.

\";s:7:\"pubdate\";s:31:\"Wed, 10 Dec 2008 00:55:33 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:835:\"

\";}i:42;a:7:{s:5:\"title\";s:30:\"Matt: Sydney Zoo & Opera House\";s:4:\"guid\";s:20:\"http://ma.tt/?p=9549\";s:4:\"link\";s:44:\"http://ma.tt/2008/12/sydney-zoo-opera-house/\";s:11:\"description\";s:43335:\"

A day at the famous Sydney Zoo, including the open air bird show and a yawning tiger. Dinner across from the opera house.

[photo gallery omitted]

\";s:7:\"pubdate\";s:31:\"Tue, 09 Dec 2008 22:16:08 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:43335:\"

\";}i:43;a:7:{s:5:\"title\";s:69:\"Weblog Tools Collection: 10 Useful RSS Tricks and Hacks For WordPress\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4697\";s:4:\"link\";s:98:\"http://weblogtoolscollection.com/archives/2008/12/09/10-useful-rss-tricks-and-hacks-for-wordpress/\";s:11:\"description\";s:553:\"

10 Useful RSS Tricks and Hacks For WordPress: A quick link to a set of tools, tips and tricks aimed at offering a more personalized RSS feed for your WordPress blog, from Smashing Magazine. Some of these have been hashed over in the past, but a few caught my attention, including the hack that makes it very easy to display a feed from another blog with next to nothing in code, and the one to “cleanly” get rid of RSS feeds from a WordPress blog.

\";s:7:\"pubdate\";s:31:\"Tue, 09 Dec 2008 21:18:29 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:10:\"Mark Ghosh\";}s:7:\"summary\";s:553:\"

\";}i:44;a:7:{s:5:\"title\";s:35:\"Matt: Most Annoying Economic Crisis\";s:4:\"guid\";s:20:\"http://ma.tt/?p=9518\";s:4:\"link\";s:51:\"http://ma.tt/2008/12/most-annoying-economic-crisis/\";s:11:\"description\";s:445:\"

Inside the world’s most annoying economic crisis, on a shortage of coins in Argentina. “Factoring in the 50 centavos he had already handed over, this effectively reduced the fare to 13.50 pesos, which, for reasons I’ll get to in a moment, is actually more than 14.50 pesos.” Hat tip: Paul Kedrosky.

\";s:7:\"pubdate\";s:31:\"Tue, 09 Dec 2008 08:38:41 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:445:\"

\";}i:45;a:7:{s:5:\"title\";s:21:\"Matt: Houston & Dubai\";s:4:\"guid\";s:20:\"http://ma.tt/?p=9316\";s:4:\"link\";s:35:\"http://ma.tt/2008/12/houston-dubai/\";s:11:\"description\";s:418:\"

I’m going to be in Houston for Christmas and, while there, attending the December Refresh Houston Meetup. If you’re in Houston, come by and say hi. After that I’m going to Dubai for a little bit of vacation, but if there are any WordPress users there I’d love to meet up one night. I’ll be there Dec 28–Jan 4.

\";s:7:\"pubdate\";s:31:\"Tue, 09 Dec 2008 07:18:44 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:418:\"

\";}i:46;a:7:{s:5:\"title\";s:20:\"Matt: Sydney Beaches\";s:4:\"guid\";s:20:\"http://ma.tt/?p=9181\";s:4:\"link\";s:36:\"http://ma.tt/2008/12/sydney-beaches/\";s:11:\"description\";s:23628:\"

In Sydney, Australia, around a number of beaches, including Bondi, with Sam and friends.

[photo gallery omitted]

\";s:7:\"pubdate\";s:31:\"Tue, 09 Dec 2008 04:45:27 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:23628:\"

\";}i:47;a:7:{s:5:\"title\";s:18:\"Matt: Learn Faster\";s:4:\"guid\";s:34:\"http://ma.tt/2008/12/learn-faster/\";s:4:\"link\";s:34:\"http://ma.tt/2008/12/learn-faster/\";s:11:\"description\";s:141:\"

Hacking Knowledge: 77 Ways to Learn Faster, Deeper, and Better.

\";s:7:\"pubdate\";s:31:\"Mon, 08 Dec 2008 23:06:04 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:141:\"

\";}i:48;a:7:{s:5:\"title\";s:34:\"Matt: Gas Stations for Flying Cars\";s:4:\"guid\";s:50:\"http://ma.tt/2008/12/gas-stations-for-flying-cars/\";s:4:\"link\";s:50:\"http://ma.tt/2008/12/gas-stations-for-flying-cars/\";s:11:\"description\";s:201:\"

Open Thread: Social Web & Its Challenges. “building floating gas stations for the flying cars”

\";s:7:\"pubdate\";s:31:\"Mon, 08 Dec 2008 20:48:58 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Matt\";}s:7:\"summary\";s:201:\"

\";}i:49;a:7:{s:5:\"title\";s:60:\"Weblog Tools Collection: WordPress Plugin Releases for 12/08\";s:4:\"guid\";s:40:\"http://weblogtoolscollection.com/?p=4723\";s:4:\"link\";s:88:\"http://weblogtoolscollection.com/archives/2008/12/08/wordpress-plugin-releases-for-1208/\";s:11:\"description\";s:3909:\"

New Plugins

email2friend

The email2friend plugin is the easiest way ever to add send-to-friend functionality to your WordPress blog.

Image List From Custom Fields

The “Image List From Custom Fields” plugin displays a list of images from a recent number of posts and links back to them. This plugin is similar to other image list plugins with one main difference: instead of causing increased server load by searching for and pulling an image from the post, it pulls the image from the custom fields specified by the author when he/she writes the post. By doing this, a much clearer image can be displayed rather than a stretched or skewed image from the post.

Fun without Clichés

Fun without Clichés highlights clichés in your post when you preview it. It doesn’t change or remove any content, allowing you total control over whether to keep the cliché in or alter it yourself.

Fun with Theme Widgets

Fun with Theme Widgets makes it super easy to create widgets as part of a theme without using any extra PHP code. You can create widgets using nothing but standard template tags and HTML.

WP-Components

WP-Components is a new WordPress plugin that allows theme developers to embed a short template tag instead of short bits of text and code that the website owner might want to change later on.

spam-paladin

Automatically deletes comments marked as spam on login to WP-Admin.

Visual Recent Posts

This plugin gives the look of a magazine-style website with thumbnails and excerpts in a clean layout. Thumbnails are generated automatically, and there is a settings page to set an unholy amount of options.

WP PageFlip Lite

Manage all your Page Flips! Add your pages, reorganize them and start browsing your catalogs.

Updated Plugins

MyAdsense

The plugin that gives you full control of your Google AdSense ads on your WordPress blog is now available for WordPress 2.7-RC1.

GD Star Rating

GD Star Rating is a post, page and comment rating plugin for WordPress. The plugin supports different image sets, rating moderation, vote rules, time-restricted voting, templates and trend calculations, and has built-in widgets and shortcode support.

Parent Category Toggler

Automatically toggles the parent categories when a sub-category is selected.

Disable Revisions

Disables revision functions in WordPress and deletes all stored revisions from the database (see the sketch after this list).

Mail On Update

Sends an e-mail to one address (e.g. the WordPress admin) or several if new versions of plugins are available.

RefGenerator

RefGenerator helps you reference all the external links included in your post.

postTabs

postTabs allows you to easily split your post/page content into tabs that will be shown to your visitors.
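The Disable Revisions entry above compresses two separate jobs into one line, so here is what it amounts to concretely. WordPress stores every saved revision as an extra row in wp_posts with post_type set to 'revision', and the WP_POST_REVISIONS constant in wp-config.php controls whether new ones get written. The sketch below performs the delete half with Ruby and Sequel, mirroring the connection style of _scripts/import_comments.rb; the database name, user and host are assumptions copied from that script, not something the plugin itself prescribes.

require 'rubygems'
require 'sequel'

# Assumed connection details, mirroring _scripts/import_comments.rb
DB = Sequel.mysql('inductio',
  :user => 'root', :password => ENV['DB_PASS'], :host => 'localhost'
)

# Saved revisions are ordinary wp_posts rows flagged with
# post_type = 'revision'; deleting those rows purges them.
deleted = DB[:wp_posts].filter(:post_type => 'revision').delete
puts "Removed #{deleted} revision rows"

The disable half is a single line in wp-config.php, define('WP_POST_REVISIONS', false);, which stops WordPress writing new revision rows in the first place.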

\";s:7:\"pubdate\";s:31:\"Mon, 08 Dec 2008 18:22:32 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Ajay\";}s:7:\"summary\";s:3909:\"

\";}}s:7:\"channel\";a:5:{s:5:\"title\";s:16:\"WordPress Planet\";s:4:\"link\";s:28:\"http://planet.wordpress.org/\";s:8:\"language\";s:2:\"en\";s:11:\"description\";s:47:\"WordPress Planet - http://planet.wordpress.org/\";s:7:\"tagline\";s:47:\"WordPress Planet - http://planet.wordpress.org/\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:0:{}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"2.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}s:4:\"etag\";s:25:\"\"4c4f2-494f7a50-5042b6\"\r\n\";s:13:\"last_modified\";s:31:\"Mon, 22 Dec 2008 11:30:24 GMT\r\n\";}','no'),(689,0,'category_children','a:0:{}','yes'),(626,0,'widget_simplr_rsslinks','a:1:{s:5:\"title\";s:9:\"Subscribe\";}','yes'),(71,0,'rss_867bd5c64f85878d03a060509cd2f92c_ts','1229945564','no'),(130,0,'update_core','O:8:\"stdClass\":5:{s:12:\"last_checked\";i:1230028311;s:15:\"version_checked\";s:3:\"2.6\";s:8:\"response\";s:7:\"upgrade\";s:3:\"url\";s:30:\"http://wordpress.org/download/\";s:7:\"current\";s:3:\"2.7\";}','yes'),(72,0,'rss_1d7cd550a5fe38e791b9fb000f34b563','O:9:\"MagpieRSS\":17:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:0:{}s:7:\"channel\";a:11:{s:5:\"title\";s:38:\"Blog reactions to http://conflate.net/\";s:4:\"link\";s:57:\"http://technorati.com/search/http%3A%2F%2Fconflate.net%2F\";s:11:\"description\";s:57:\"Blog posts linking to http://conflate.net/ on Technorati.\";s:7:\"pubdate\";s:31:\"Thu, 13 Sep 2007 21:37:29 -0700\";s:9:\"generator\";s:15:\"Technorati v1.0\";s:9:\"webmaster\";s:43:\"support@technorati.com (Technorati Support)\";s:4:\"docs\";s:37:\"http://blogs.law.harvard.edu/tech/rss\";s:3:\"ttl\";s:2:\"60\";s:4:\"tapi\";a:3:{s:6:\"result\";s:5:\"\n \";s:10:\"result_url\";s:20:\"http://conflate.net/\";s:19:\"result_inboundlinks\";s:1:\"0\";}s:6:\"result\";s:10:\"\n \n \";s:7:\"tagline\";s:57:\"Blog posts linking to http://conflate.net/ on Technorati.\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:3:{s:3:\"url\";s:50:\"http://static.technorati.com/pix/logos/logo_sm.gif\";s:5:\"title\";s:15:\"Technorati 
logo\";s:4:\"link\";s:57:\"http://technorati.com/search/http%3A%2F%2Fconflate.net%2F\";}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"2.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}}','no'),(73,0,'rss_1d7cd550a5fe38e791b9fb000f34b563_ts','1189743858','no'),(74,0,'sidebars_widgets','a:3:{s:9:\"sidebar-1\";a:5:{i:0;s:6:\"text-1\";i:1;s:12:\"recent-posts\";i:2;s:12:\"categories-1\";i:3;s:9:\"rss_links\";i:4;s:4:\"meta\";}s:9:\"sidebar-2\";a:3:{i:0;s:5:\"links\";i:1;s:13:\"rss-220273942\";i:2;s:13:\"rss-220273941\";}s:13:\"array_version\";i:3;}','yes'),(76,0,'blogtxt_basefontsize','70%','yes'),(217,0,'blogtxt_blogtitlefontfamily','tahoma,geneva,sans-serif','yes'),(80,0,'blogtxt_miscfontfamily','helvetica,arial,sans-serif','yes'),(81,0,'blogtxt_posttextalignment','left','yes'),(82,0,'blogtxt_layoutwidth','97%','yes'),(83,0,'blogtxt_layouttype','3c-b.css','yes'),(84,0,'blogtxt_layoutalignment','center','yes'),(85,0,'blogtxt_authorlink','hidden','yes'),(95,0,'blogtxt_headingfontfamily','helvetica,arial,sans-serif','yes'),(138,0,'blogtxt_basefontfamily','helvetica,arial,sans-serif','yes'),(128,0,'rss_68951c7d1239bcdda8528c53a0458213','O:9:\"MagpieRSS\":17:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:0:{}s:7:\"channel\";a:11:{s:5:\"title\";s:47:\"Blog reactions to http://conflate.net/inductio/\";s:4:\"link\";s:68:\"http://technorati.com/search/http%3A%2F%2Fconflate.net%2Finductio%2F\";s:11:\"description\";s:66:\"Blog posts linking to http://conflate.net/inductio/ on Technorati.\";s:7:\"pubdate\";s:31:\"Thu, 20 Sep 2007 02:28:09 -0700\";s:9:\"generator\";s:15:\"Technorati v1.0\";s:9:\"webmaster\";s:43:\"support@technorati.com (Technorati Support)\";s:4:\"docs\";s:37:\"http://blogs.law.harvard.edu/tech/rss\";s:3:\"ttl\";s:2:\"60\";s:4:\"tapi\";a:3:{s:6:\"result\";s:5:\"\n \";s:10:\"result_url\";s:29:\"http://conflate.net/inductio/\";s:19:\"result_inboundlinks\";s:1:\"0\";}s:6:\"result\";s:10:\"\n \n \";s:7:\"tagline\";s:66:\"Blog posts linking to http://conflate.net/inductio/ on Technorati.\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:3:{s:3:\"url\";s:50:\"http://static.technorati.com/pix/logos/logo_sm.gif\";s:5:\"title\";s:15:\"Technorati 
logo\";s:4:\"link\";s:68:\"http://technorati.com/search/http%3A%2F%2Fconflate.net%2Finductio%2F\";}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"2.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}}','no'),(129,0,'rss_68951c7d1239bcdda8528c53a0458213_ts','1190280489','no'),(97,0,'widget_pages','a:3:{s:5:\"title\";s:5:\"Pages\";s:6:\"sortby\";s:2:\"ID\";s:7:\"exclude\";s:0:\"\";}','yes'),(98,0,'widget_calendar','a:1:{s:5:\"title\";s:0:\"\";}','yes'),(99,0,'widget_archives','a:3:{s:5:\"count\";b:0;s:8:\"dropdown\";b:0;s:5:\"title\";s:8:\"Archives\";}','yes'),(100,0,'widget_categories','a:2:{s:6:\"number\";i:1;i:1;a:4:{s:5:\"title\";s:6:\"Topics\";s:5:\"count\";b:0;s:12:\"hierarchical\";b:0;s:8:\"dropdown\";b:0;}}','yes'),(101,0,'widget_recent_entries','a:2:{s:5:\"title\";s:8:\"Recently\";s:6:\"number\";i:5;}','yes'),(102,0,'widget_text','a:1:{i:1;a:2:{s:5:\"title\";s:5:\"About\";s:4:\"text\";s:735:\"

\r\nYou\'re reading Mark Reid\'s research blog. Mark is a research fellow at the Computer Sciences Lab at the Australian National University. Naturally, the opinions here are personal ones and do not represent those of the ANU.\r\n

\r\nYou can find out more about me and this site here or browse the archive for older posts.\r\n

\";}}','yes'),(103,0,'widget_rss','a:3:{s:6:\"number\";i:2;i:220273942;a:6:{s:5:\"title\";s:9:\"Bookmarks\";s:3:\"url\";s:55:\"http://feeds.delicious.com/v2/rss/mreid/machinelearning\";s:5:\"items\";i:5;s:12:\"show_summary\";i:0;s:11:\"show_author\";i:0;s:9:\"show_date\";i:0;}i:220273941;a:6:{s:5:\"title\";s:9:\"CiteULike\";s:3:\"url\";s:40:\"http://www.citeulike.org/rss/user/mdreid\";s:5:\"items\";i:5;s:12:\"show_summary\";i:0;s:11:\"show_author\";i:0;s:9:\"show_date\";i:0;}}','yes'),(104,0,'widget_recent_comments','a:2:{s:5:\"title\";s:0:\"\";s:6:\"number\";i:5;}','yes'),(105,0,'widget_blogtxt_homelink','a:1:{s:5:\"title\";s:0:\"\";}','yes'),(106,0,'widget_blogtxt_rsslinks','a:1:{s:5:\"title\";s:9:\"Subscribe\";}','yes'),(107,0,'widget_blogtxt_recent_comments','a:2:{s:5:\"title\";s:8:\"Comments\";s:7:\"rccount\";s:1:\"5\";}','yes'),(111,0,'cron','a:1:{s:7:\"version\";i:2;}','yes'),(108,0,'wp_hashcash_db','1','yes'),(109,0,'plugin_wp-hashcash-version','a:2:{s:7:\"version\";s:5:\"4.0.5\";s:4:\"last\";i:1202165440;}','yes'),(112,0,'doing_cron','0','yes'),(116,0,'analytics_uastring','UA-1051817-2','yes'),(131,0,'tag_base','','yes'),(134,0,'rss_5523304628798024dab3245633623bef','O:9:\"MagpieRSS\":17:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:10:{i:0;a:5:{s:5:\"title\";s:40:\"19th century reading habits in australia\";s:4:\"link\";s:74:\"http://blogginman.blogspot.com/2008/06/19th-century-reading-habits-in.html\";s:11:\"description\";s:240:\"here is a blog post describing data mining of 19th century reading habits in australia. it is a fascinating application of pca and clustering. i don\'t think it will be long before commercial databases include standard data mining ...\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:14:\"Thoughts of Me\";s:7:\"creator\";s:6:\"Tanton\";s:4:\"date\";s:20:\"2008-06-17T06:37:00Z\";}s:7:\"summary\";s:240:\"here is a blog post describing data mining of 19th century reading habits in australia. it is a fascinating application of pca and clustering. i don\'t think it will be long before commercial databases include standard data mining ...\";}i:1;a:5:{s:5:\"title\";s:42:\"visualisation of 19th century reading data\";s:4:\"link\";s:107:\"http://processing.org/discourse/yabb_beta/YaBB.cgi?board=Exhibition;action=display;num=1213691685;start=0#0\";s:11:\"description\";s:246:\"hi, i\'ve recently used processing to create a visualisation of reading data from the australian common readers project: http://conflate.net/inductio/wp-content/public/acrp/ a discussion of the data, techniques and links to the code can ...\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:36:\"Processing 1.0 (BETA) : recent posts\";s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2008-06-17T01:34:45Z\";}s:7:\"summary\";s:246:\"hi, i\'ve recently used processing to create a visualisation of reading data from the australian common readers project: http://conflate.net/inductio/wp-content/public/acrp/ a discussion of the data, techniques and links to the code can ...\";}i:2;a:5:{s:5:\"title\";s:8:\"books...\";s:4:\"link\";s:52:\"http://laughing1wolf.blogspot.com/2008/06/books.html\";s:11:\"description\";s:246:\"books that changed my life... ~kk. [not my list]. books still have the power to change lives. which ones have changed yours? i don\'t mean merely great books, or memorable ones, or favorite ones. 
i mean books that altered your behavior, ...\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:19:\"paws and reflect...\";s:7:\"creator\";s:12:\"laughingwolf\";s:4:\"date\";s:20:\"2008-06-14T15:16:00Z\";}s:7:\"summary\";s:246:\"books that changed my life... ~kk. [not my list]. books still have the power to change lives. which ones have changed yours? i don\'t mean merely great books, or memorable ones, or favorite ones. i mean books that altered your behavior, ...\";}i:3;a:5:{s:5:\"title\";s:26:\"Books That Changed My Life\";s:4:\"link\";s:47:\"http://www.kk.org/cooltools/archives/002879.php\";s:11:\"description\";s:246:\"Books still have the power to change lives. Which ones have changed yours? I don\'t mean merely great books, or memorable ones, or favorite ones. I mean books that altered your behavior, changed your mind, redirected the course of your ...\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:10:\"Cool Tools\";s:7:\"creator\";s:7:\"unknown\";s:4:\"date\";s:20:\"2008-06-14T04:43:11Z\";}s:7:\"summary\";s:246:\"Books still have the power to change lives. Which ones have changed yours? I don\'t mean merely great books, or memorable ones, or favorite ones. I mean books that altered your behavior, changed your mind, redirected the course of your ...\";}i:4;a:5:{s:5:\"title\";s:34:\"The Seductive Power of Mathematics\";s:4:\"link\";s:80:\"http://apperceptual.wordpress.com/2008/05/24/the-seductive-power-of-mathematics/\";s:11:\"description\";s:244:\"I believe that math is very important: My first paper was mathematical (How many ways can an N-dimensional hypercube be unfolded into (N-1)-dimensional space?) and my most recent paper was mathematical (How can a very large tensor be ...\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:12:\"Apperceptual\";s:7:\"creator\";s:12:\"Peter Turney\";s:4:\"date\";s:20:\"2008-05-24T16:07:07Z\";}s:7:\"summary\";s:244:\"I believe that math is very important: My first paper was mathematical (How many ways can an N-dimensional hypercube be unfolded into (N-1)-dimensional space?) and my most recent paper was mathematical (How can a very large tensor be ...\";}i:5;a:5:{s:5:\"title\";s:32:\"The Manifesto マニフェスト\";s:4:\"link\";s:30:\"http://jp.doloreslabs.com/?p=3\";s:11:\"description\";s:348:\"2008年3月13日. 僕が初めてアマゾン・メカニカルタークを利用したのは検索エンジンのスタートアップPowersetにいたとき。自分たちの初期内部アルゴリズムの質をYahooやグーグルの結果と比較するために利用しました。当初、検索結果の質を比較するフル ...\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:19:\"Dolores Labs 日本\";s:7:\"creator\";s:3:\"eri\";s:4:\"date\";s:20:\"2008-04-08T20:45:47Z\";}s:7:\"summary\";s:348:\"2008年3月13日. 僕が初めてアマゾン・メカニカルタークを利用したのは検索エンジンのスタートアップPowersetにいたとき。自分たちの初期内部アルゴリズムの質をYahooやグーグルの結果と比較するために利用しました。当初、検索結果の質を比較するフル ...\";}i:6;a:5:{s:5:\"title\";s:13:\"The Manifesto\";s:4:\"link\";s:50:\"http://blog.doloreslabs.com/2008/03/the-manifesto/\";s:11:\"description\";s:248:\"The first time I used Amazon’s Mechanical Turk it was at a search engine startup, Powerset, and I used it to compare the quality of a few versions of our early internal algorithm with Yahoo and Google. We were thinking we would have to ...\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:17:\"Dolores Labs Blog\";s:7:\"creator\";s:5:\"lukas\";s:4:\"date\";s:20:\"2008-03-13T17:36:24Z\";}s:7:\"summary\";s:248:\"The first time I used Amazon’s Mechanical Turk it was at a search engine startup, Powerset, and I used it to compare the quality of a few versions of our early internal algorithm with Yahoo and Google. 
We were thinking we would have to ...\";}i:7;a:5:{s:5:\"title\";s:14:\"data data data\";s:4:\"link\";s:69:\"http://socialscienceplusplus.blogspot.com/2008/03/data-data-data.html\";s:11:\"description\";s:71:\"this is a lot of data: inductio ex machina - a meta-index of data sets.\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:16:\"Social Science++\";s:7:\"creator\";s:7:\"Brendan\";s:4:\"date\";s:20:\"2008-03-06T01:08:00Z\";}s:7:\"summary\";s:71:\"this is a lot of data: inductio ex machina - a meta-index of data sets.\";}i:8;a:5:{s:5:\"title\";s:8:\"boosting\";s:4:\"link\";s:34:\"http://www.lukasbiewald.com/?p=142\";s:11:\"description\";s:252:\"i don’t get much time to read papers these days, but this jmlr article called evidence contrary to the statistical view of boosting was fascinating (found on inductio ex machina.) there’sa format where the authors write their thesis and ...\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:13:\"Lukas Biewald\";s:7:\"creator\";s:5:\"admin\";s:4:\"date\";s:20:\"2008-03-04T15:40:17Z\";}s:7:\"summary\";s:252:\"i don’t get much time to read papers these days, but this jmlr article called evidence contrary to the statistical view of boosting was fascinating (found on inductio ex machina.) there’sa format where the authors write their thesis and ...\";}i:9;a:5:{s:5:\"title\";s:47:\"from inductio ex machina: the mathematical grue\";s:4:\"link\";s:30:\"http://cliftonsnyder.net/?p=94\";s:11:\"description\";s:248:\"colossal cave meets nerd humor; this is fantastic stuff: the mathematical grue. non-nerds needn’t apply. just to give you a taste:. you are sitting before a particularly thorny conjecture. possible proofs lead away from here in several ...\";s:2:\"dc\";a:3:{s:9:\"publisher\";s:17:\"cliftonsnyder.net\";s:7:\"creator\";s:5:\"cliff\";s:4:\"date\";s:20:\"2007-10-26T17:50:25Z\";}s:7:\"summary\";s:248:\"colossal cave meets nerd humor; this is fantastic stuff: the mathematical grue. non-nerds needn’t apply. just to give you a taste:. you are sitting before a particularly thorny conjecture. 
possible proofs lead away from here in several ...\";}}s:7:\"channel\";a:5:{s:5:\"title\";s:55:\"link:http://conflate.net/inductio/ - Google Blog Search\";s:4:\"link\";s:113:\"http://blogsearch.google.com/blogsearch?hl=en&scoring=d&ie=ISO-8859-1&num=10&q=link:http://conflate.net/inductio/\";s:11:\"description\";s:123:\"Google Blog Search Results: 11 results for http://conflate.net/inductio/ - showing 1 through 10\";s:10:\"opensearch\";a:3:{s:12:\"totalresults\";s:2:\"11\";s:10:\"startindex\";s:1:\"1\";s:12:\"itemsperpage\";s:2:\"10\";}s:7:\"tagline\";s:123:\"Google Blog Search Results: 11 results for http://conflate.net/inductio/ - showing 1 through 10\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:0:{}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"2.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}}','no'),(135,0,'rss_5523304628798024dab3245633623bef_ts','1229945564','no'),(136,0,'update_plugins','O:8:\"stdClass\":3:{s:12:\"last_checked\";i:1230023385;s:7:\"checked\";a:11:{s:19:\"akismet/akismet.php\";s:5:\"2.1.6\";s:19:\"googleanalytics.php\";s:4:\"0.68\";s:9:\"hello.php\";s:3:\"1.5\";s:17:\"js-kit/js-kit.php\";s:5:\"2.0.5\";s:34:\"latexrender/latexrender-plugin.php\";s:3:\"1.0\";s:12:\"markdown.php\";s:5:\"1.1.6\";s:30:\"latexrender/mimetex-plugin.php\";s:3:\"1.0\";s:31:\"theme-test-drive/themedrive.php\";s:4:\"1.11\";s:9:\"stats.php\";s:5:\"1.2.2\";s:15:\"wp-hashcash.php\";s:3:\"4.2\";s:15:\"openid/core.php\";s:5:\"2.1.8\";}s:8:\"response\";a:5:{s:19:\"akismet/akismet.php\";O:8:\"stdClass\":5:{s:2:\"id\";s:2:\"15\";s:4:\"slug\";s:7:\"akismet\";s:11:\"new_version\";s:5:\"2.2.3\";s:3:\"url\";s:44:\"http://wordpress.org/extend/plugins/akismet/\";s:7:\"package\";s:49:\"http://downloads.wordpress.org/plugin/akismet.zip\";}s:31:\"theme-test-drive/themedrive.php\";O:8:\"stdClass\":5:{s:2:\"id\";s:4:\"1583\";s:4:\"slug\";s:16:\"theme-test-drive\";s:11:\"new_version\";s:5:\"2.7.1\";s:3:\"url\";s:53:\"http://wordpress.org/extend/plugins/theme-test-drive/\";s:7:\"package\";s:58:\"http://downloads.wordpress.org/plugin/theme-test-drive.zip\";}s:9:\"stats.php\";O:8:\"stdClass\":5:{s:2:\"id\";s:3:\"626\";s:4:\"slug\";s:5:\"stats\";s:11:\"new_version\";s:5:\"1.3.5\";s:3:\"url\";s:42:\"http://wordpress.org/extend/plugins/stats/\";s:7:\"package\";s:53:\"http://downloads.wordpress.org/plugin/stats.1.3.5.zip\";}s:15:\"wp-hashcash.php\";O:8:\"stdClass\":5:{s:2:\"id\";s:3:\"355\";s:4:\"slug\";s:11:\"wp-hashcash\";s:11:\"new_version\";s:3:\"4.3\";s:3:\"url\";s:48:\"http://wordpress.org/extend/plugins/wp-hashcash/\";s:7:\"package\";s:57:\"http://downloads.wordpress.org/plugin/wp-hashcash.4.3.zip\";}s:15:\"openid/core.php\";O:8:\"stdClass\":5:{s:2:\"id\";s:3:\"210\";s:4:\"slug\";s:6:\"openid\";s:11:\"new_version\";s:5:\"3.1.4\";s:3:\"url\";s:43:\"http://wordpress.org/extend/plugins/openid/\";s:7:\"package\";s:54:\"http://downloads.wordpress.org/plugin/openid.3.1.4.zip\";}}}','yes'),(150,0,'rss_91023398faf07e66dad9495243bdb7cf','O:9:\"MagpieRSS\":18:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:15:{i:0;a:9:{s:5:\"about\";s:73:\"http://blog.doloreslabs.com/2008/09/amt-fast-cheap-good-machine-learning/\";s:5:\"title\";s:54:\"AMT is fast, cheap, and good for machine learning 
data\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-09-11T00:21:40Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:53:\"machinelearning data crowdsourcing AMT research paper\";}s:4:\"link\";s:73:\"http://blog.doloreslabs.com/2008/09/amt-fast-cheap-good-machine-learning/\";s:11:\"description\";s:193:\"Discussion of a recent paper that studies the accuracy and cost of using "crowdsourcing" via the Amazon Mechanical Turk to collect supervised annotations of data for machine learning.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:52:\"\n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:193:\"Discussion of a recent paper that studies the accuracy and cost of using "crowdsourcing" via the Amazon Mechanical Turk to collect supervised annotations of data for machine learning.\";}i:1;a:9:{s:5:\"about\";s:48:\"http://www.cs.cmu.edu/~guestrin/Class/10725-S08/\";s:5:\"title\";s:21:\"Optimization Lectures\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-09-04T10:01:16Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:59:\"convex_analysis lecture optimisation machinelearning course\";}s:4:\"link\";s:48:\"http://www.cs.cmu.edu/~guestrin/Class/10725-S08/\";s:11:\"description\";s:158:\"Carlos Guestrin's 2008 lectures on optimisation theory as it is applied to machine learning and other problems. The slides and references are quite nice.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:43:\"\n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:158:\"Carlos Guestrin's 2008 lectures on optimisation theory as it is applied to machine learning and other problems. The slides and references are quite nice.\";}i:2;a:9:{s:5:\"about\";s:35:\"http://www.stanford.edu/group/mmds/\";s:5:\"title\";s:43:\"Workshop on Modern Massive Data Sets (MMDS)\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-08-27T21:33:00Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:62:\"workshop conference machinelearning data algorithms 2008 stats\";}s:4:\"link\";s:35:\"http://www.stanford.edu/group/mmds/\";s:11:\"description\";s:164:\"This workshop was attended by some big names and a quick scan through the abstracts reveals that some interesting work was presented here. At Stanford in June 2008.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:61:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:164:\"This workshop was attended by some big names and a quick scan through the abstracts reveals that some interesting work was presented here. At Stanford in June 2008.\";}i:3;a:9:{s:5:\"about\";s:48:\"http://www.adaptivebox.net/CILib/CICON_stat.html\";s:5:\"title\";s:37:\"Conference Acceptance Rate Statistics\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-07-28T02:45:48Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:51:\"machinelearning conference acceptance stats summary\";}s:4:\"link\";s:48:\"http://www.adaptivebox.net/CILib/CICON_stat.html\";s:11:\"description\";s:184:\"Several tables worth of acceptance rate statistics for various conferences under the broad rubric of "computational intelligence". Includes ICML, NIPS, UAI, ECML, AAAI, COLT.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:43:\"\n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:184:\"Several tables worth of acceptance rate statistics for various conferences under the broad rubric of "computational intelligence". 
Includes ICML, NIPS, UAI, ECML, AAAI, COLT.\";}i:4;a:9:{s:5:\"about\";s:43:\"http://www.econ.upf.es/~lugosi/mlss_slt.pdf\";s:5:\"title\";s:49:\"Introduction to Statistical Learning Theory [PDF]\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-07-27T23:04:55Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:64:\"machinelearning stats theory survey introduction analysis useful\";}s:4:\"link\";s:43:\"http://www.econ.upf.es/~lugosi/mlss_slt.pdf\";s:11:\"description\";s:184:\"This well-written survey of common theoretical tools in statistical learning by Bousquet, Boucheron & Lugosi covers SRM, regularisation, VC-dimension, Rademacher averages and more.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:61:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:184:\"This well-written survey of common theoretical tools in statistical learning by Bousquet, Boucheron & Lugosi covers SRM, regularisation, VC-dimension, Rademacher averages and more.\";}i:5;a:9:{s:5:\"about\";s:70:\"http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html\";s:5:\"title\";s:32:\"ICML/UAI/COLT 2008 Retrospective\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-07-21T23:08:44Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:53:\"machinelearning conference summary ICML UAI COLT 2008\";}s:4:\"link\";s:70:\"http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html\";s:11:\"description\";s:87:\"Hal's summary of the three conference, replete with links to his favourite papers.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:61:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:87:\"Hal's summary of the three conference, replete with links to his favourite papers.\";}i:6;a:9:{s:5:\"about\";s:68:\"http://www.scholarpedia.org/article/Information_theoretic_clustering\";s:5:\"title\";s:32:\"Information theoretic clustering\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-07-21T06:24:56Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:72:\"machinelearning clustering research infotheory information theory survey\";}s:4:\"link\";s:68:\"http://www.scholarpedia.org/article/Information_theoretic_clustering\";s:11:\"description\";s:95:\"Scholarpedia article surveying techniques using information theoretic approaches to clustering.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:61:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:95:\"Scholarpedia article surveying techniques using information theoretic approaches to clustering.\";}i:7;a:9:{s:5:\"about\";s:29:\"http://www.conflate.net/icml/\";s:5:\"title\";s:20:\"ICML Discussion Site\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-07-01T07:56:13Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:82:\"(mine) ICML conference machinelearning community wiki dokuwiki discussion research\";}s:4:\"link\";s:29:\"http://www.conflate.net/icml/\";s:11:\"description\";s:71:\"The ICML 2008 discussion site I set up using DokuWiki and some plugins.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:79:\"\n \n \n \n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:71:\"The ICML 2008 discussion site I set up using DokuWiki and some plugins.\";}i:8;a:9:{s:5:\"about\";s:65:\"http://www.wired.com/science/discoveries/magazine/16-07/pb_theory\";s:5:\"title\";s:71:\"The End of Theory: The Data Deluge Makes the Scientific 
Method Obsolete\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-06-25T06:56:04Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:112:\"analytics wired data statistics machinelearning research science compsci google petabyte datamining via:cshalizi\";}s:4:\"link\";s:65:\"http://www.wired.com/science/discoveries/magazine/16-07/pb_theory\";s:11:\"description\";s:221:\"Even if this were the case, having lots of data does not avoid Hand's "Errors of the third kind" - getting the right answer to the wrong question - it just means we can do it faster and with more precision.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:106:\"\n \n \n \n \n \n \n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:221:\"Even if this were the case, having lots of data does not avoid Hand's "Errors of the third kind" - getting the right answer to the wrong question - it just means we can do it faster and with more precision.\";}i:9;a:9:{s:5:\"about\";s:45:\"ftp://ftp.sas.com/pub/neural/measurement.html\";s:5:\"title\";s:46:\"Measurement theory: Frequently asked questions\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-06-24T05:36:40Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:50:\"machinelearning measurement theory stats FAQ quote\";}s:4:\"link\";s:45:\"ftp://ftp.sas.com/pub/neural/measurement.html\";s:11:\"description\";s:174:\""Mathematical statistics is concerned with the connection between inference and data. Measurement theory is concerned with the connection between data and reality."\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:52:\"\n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:174:\""Mathematical statistics is concerned with the connection between inference and data. Measurement theory is concerned with the connection between data and reality."\";}i:10;a:9:{s:5:\"about\";s:39:\"http://wiki.dbpedia.org/NextSteps?v=keb\";s:5:\"title\";s:20:\"DBPedia : Next Steps\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-06-23T23:43:42Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:61:\"research database datamining machinelearning future inference\";}s:4:\"link\";s:39:\"http://wiki.dbpedia.org/NextSteps?v=keb\";s:11:\"description\";s:142:\"Another large, online data repository. One of their next steps is "Experiment with domain knowledge and inference over the dataset."\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:52:\"\n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:142:\"Another large, online data repository. One of their next steps is "Experiment with domain knowledge and inference over the dataset."\";}i:11;a:9:{s:5:\"about\";s:40:\"http://rlai.cs.ualberta.ca/RLBB/top.html\";s:5:\"title\";s:7:\"RL-Glue\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-06-18T06:09:35Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:76:\"reinforcementlearning machinelearning language protocol programming research\";}s:4:\"link\";s:40:\"http://rlai.cs.ualberta.ca/RLBB/top.html\";s:11:\"description\";s:144:\"An attempt to define a language and protocol for specifying reinforcement learning tasks. This has been used for several benchmark competitions.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:52:\"\n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:144:\"An attempt to define a language and protocol for specifying reinforcement learning tasks. 
This has been used for several benchmark competitions.\";}i:12;a:9:{s:5:\"about\";s:42:\"http://www.kdubiq.org/kdubiq/control/index\";s:5:\"title\";s:6:\"KDubiq\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-06-18T01:34:09Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:39:\"datamining machinelearning research web\";}s:4:\"link\";s:42:\"http://www.kdubiq.org/kdubiq/control/index\";s:11:\"description\";s:116:\"Ubiquitous knowledge discovery. A multi-institute work group to look at the future of data-mining and related areas.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:34:\"\n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:116:\"Ubiquitous knowledge discovery. A multi-institute work group to look at the future of data-mining and related areas.\";}i:13;a:9:{s:5:\"about\";s:44:\"http://videolectures.net/cmulls08_singh_rlm/\";s:5:\"title\";s:54:\"Relational Learning as Collective Matrix Factorization\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-06-03T21:44:39Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:93:\"maths machinelearning matrix factorisation svd bregman exponential_families video via:csantos\";}s:4:\"link\";s:44:\"http://videolectures.net/cmulls08_singh_rlm/\";s:11:\"description\";s:144:\"Video lecture by Ajit Singh from CMU presents a unified take on matrix factorisation with links to exponential families and Bregman divergences.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:79:\"\n \n \n \n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:144:\"Video lecture by Ajit Singh from CMU presents a unified take on matrix factorisation with links to exponential families and Bregman divergences.\";}i:14;a:9:{s:5:\"about\";s:127:\"http://anand.typepad.com/datawocky/2008/05/are-human-experts-less-prone-to-catastrophic-errors-than-machine-learned-models.html\";s:5:\"title\";s:56:\"Are Machine-Learned Models Prone to Catastrophic Errors?\";s:2:\"dc\";a:3:{s:4:\"date\";s:20:\"2008-05-26T03:57:25Z\";s:7:\"creator\";s:5:\"mreid\";s:7:\"subject\";s:79:\"google machinelearning person Norvig reliability search wisdom human_vs_machine\";}s:4:\"link\";s:127:\"http://anand.typepad.com/datawocky/2008/05/are-human-experts-less-prone-to-catastrophic-errors-than-machine-learned-models.html\";s:11:\"description\";s:119:\"Summary of discussion with Peter Norvig on why Google doesn't trust machine learnt models of search relevance yet.\";s:4:\"taxo\";a:2:{s:6:\"topics\";s:7:\"\n \";s:10:\"topics_bag\";s:9:\"\n \";}s:10:\"topics_bag\";s:70:\"\n \n \n \n \n \n \n \n \";s:6:\"topics\";s:5:\"\n \";s:7:\"summary\";s:119:\"Summary of discussion with Peter Norvig on why Google doesn't trust machine learnt models of search relevance yet.\";}}s:7:\"channel\";a:6:{s:5:\"title\";s:31:\"Delicious/mreid/machinelearning\";s:4:\"link\";s:42:\"http://delicious.com/mreid/machinelearning\";s:11:\"description\";s:41:\"bookmarks tagged machinelearning by mreid\";s:5:\"items\";s:12:\"\n \n \";s:9:\"items_seq\";s:142:\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \";s:7:\"tagline\";s:41:\"bookmarks tagged machinelearning by 
mreid\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:0:{}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"1.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}s:13:\"last_modified\";s:33:\"Thu, 11 Sep 2008 21:48:00 +0000\r\n\";}','no'),(151,0,'rss_91023398faf07e66dad9495243bdb7cf_ts','1221169680','no'),(141,0,'widget_tag_cloud','a:1:{s:5:\"title\";s:4:\"Tags\";}','yes'),(152,0,'CTcitationStyles','s:1997:\"a:7:{s:3:\"apa\";a:4:{s:4:\"name\";s:9:\"APA style\";s:5:\"style\";s:129:\"%pagename%. (%date:Y, F j%). In %publisher%. Retrieved %retdate:H:s, F j, Y%, from %permalink%\";s:8:\"styleURI\";s:38:\"http://en.wikipedia.org/wiki/APA_style\";s:4:\"show\";b:1;}s:3:\"mla\";a:4:{s:4:\"name\";s:9:\"MLA style\";s:5:\"style\";s:148:\"%author%, \\\"%pagename%.\\\" %publisher%. %date:j F Y, H:s% UTC. %institution%. %retdate:j M Y% <%permalink%>.\";s:8:\"styleURI\";s:49:\"http://en.wikipedia.org/wiki/The_MLA_style_manual\";s:4:\"show\";b:0;}s:4:\"mhra\";a:4:{s:4:\"name\";s:10:\"MHRA style\";s:5:\"style\";s:149:\"%author%, '%pagename%', %publisher%, %date:j F Y, H:s% UTC, <%permalink%> [accessed %retdate:j F Y%]\";s:8:\"styleURI\";s:45:\"http://en.wikipedia.org/wiki/MHRA_Style_Guide\";s:4:\"show\";b:0;}s:7:\"chicago\";a:4:{s:4:\"name\";s:27:\"The Chicago Manual of Style\";s:5:\"style\";s:114:\"%author%, \\\"%pagename%.\\\" %publisher%, %permalink% [accessed %retdate:F j, Y%].\";s:8:\"styleURI\";s:36:\"http://www.chicagomanualofstyle.org/\";s:4:\"show\";b:0;}s:6:\"cbecse\";a:4:{s:4:\"name\";s:13:\"CBE/CSE style\";s:5:\"style\";s:151:\"%author%, %pagename% [Internet]. %publisher%; %date: Y F j, H:s% UTC [cited %retdate: Y M j%]. Available from: %permalink%.\";s:8:\"styleURI\";s:55:\"http://en.wikipedia.org/wiki/Council_of_Science_Editors\";s:4:\"show\";b:0;}s:8:\"bluebook\";a:4:{s:4:\"name\";s:14:\"Bluebook style\";s:5:\"style\";s:85:\"%pagename%, %permalink% (last visited %retdate:M. j, Y%).\";s:8:\"styleURI\";s:37:\"http://en.wikipedia.org/wiki/Bluebook\";s:4:\"show\";b:0;}s:3:\"ama\";a:4:{s:4:\"name\";s:9:\"AMA style\";s:5:\"style\";s:140:\"%author%, %pagename%. %publisher%. %date:F j, Y, H:s% UTC. Available at: %permalink%. 
Accessed %retdate:F j, Y%.\";s:8:\"styleURI\";s:57:\"http://en.wikipedia.org/wiki/American_Medical_Association\";s:4:\"show\";b:0;}}\";','yes'),(153,0,'CTgerneralOptions','s:274:\"a:9:{s:11:\"institution\";s:0:\"\";s:10:\"singleMode\";s:6:\"manual\";s:23:\"singleModeManualDynamic\";b:1;s:15:\"singleModePopup\";b:0;s:8:\"loopMode\";s:7:\"disable\";s:21:\"loopModeManualDynamic\";b:0;s:13:\"loopModePopup\";b:0;s:23:\"widgetModeManualDynamic\";b:1;s:15:\"widgetModePopup\";b:0;}\";','yes'),(183,0,'rss_9a4f323c26275a2301a4169fef6d833f','O:9:\"MagpieRSS\":19:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:31:{i:0;a:9:{s:5:\"about\";s:69:\"http://www.cs.utexas.edu/users/EWD/transcriptions/EWD06xx/EWD637.html\";s:5:\"title\";s:67:\"Dijkstra: The Three Golden Rules for Successful Scientific Research\";s:4:\"link\";s:69:\"http://www.cs.utexas.edu/users/EWD/transcriptions/EWD06xx/EWD637.html\";s:11:\"description\";s:169:\"Your work should be: 1) of a high quality, near the edge of your ability, 2) scientifically sound and, if possible, socially relevant; 3) something no-one else is doing.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-24T08:31:20Z\";s:7:\"subject\";s:57:\"Dijkstra productivity quote research rules science wisdom\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:33:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:169:\"Your work should be: 1) of a high quality, near the edge of your ability, 2) scientifically sound and, if possible, socially relevant; 3) something no-one else is doing.\";}i:1;a:9:{s:5:\"about\";s:42:\"http://hunch.net/~learning-problem-design/\";s:5:\"title\";s:37:\"Principles of Learning Problem Design\";s:4:\"link\";s:42:\"http://hunch.net/~learning-problem-design/\";s:11:\"description\";s:168:\"John Langford and Alina Beygelzimer are organising this workshop at NIPS. The aim is to focus on defining machine learning problems which highlight their relationships.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-23T10:46:04Z\";s:7:\"subject\";s:56:\"NIPS machinelearning reductions research theory workshop\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:28:\"\n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:168:\"John Langford and Alina Beygelzimer are organising this workshop at NIPS. The aim is to focus on defining machine learning problems which highlight their relationships.\";}i:2;a:9:{s:5:\"about\";s:29:\"http://precedings.nature.com/\";s:5:\"title\";s:17:\"Nature Precedings\";s:4:\"link\";s:29:\"http://precedings.nature.com/\";s:11:\"description\";s:166:\"Nature\'s answer to arXiv.org for the biological and medical sciences. Upload pre-prints, presentations, or posters and have them curated (but not reviewed) by Nature.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-21T00:41:25Z\";s:7:\"subject\";s:63:\"archive cite:macresearch.org nature publishing research science\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:28:\"\n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:166:\"Nature\'s answer to arXiv.org for the biological and medical sciences. 
Upload pre-prints, presentations, or posters and have them curated (but not reviewed) by Nature.\";}i:3;a:9:{s:5:\"about\";s:60:\"http://faculty.washington.edu/kenrice/ConditionalMarilyn.jpg\";s:5:\"title\";s:19:\"Conditional Marilyn\";s:4:\"link\";s:60:\"http://faculty.washington.edu/kenrice/ConditionalMarilyn.jpg\";s:11:\"description\";s:112:\"A humorous poster from a paper given a Bayesian nonparametrics workshop in the style of Warhol and Lichtenstein.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-20T03:51:45Z\";s:7:\"subject\";s:63:\"art bayesian comic pop poster presentation research stats weird\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:43:\"\n \n \n \n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:112:\"A humorous poster from a paper given a Bayesian nonparametrics workshop in the style of Warhol and Lichtenstein.\";}i:4;a:9:{s:5:\"about\";s:43:\"http://focs.wordpress.com/category/replies/\";s:5:\"title\";s:29:\"figuring out computer science\";s:4:\"link\";s:43:\"http://focs.wordpress.com/category/replies/\";s:11:\"description\";s:156:\"A German computer science student asks several leading computer scientists the Richard Hamming question: What are the most important problems in your field?\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-19T03:45:44Z\";s:7:\"subject\";s:49:\"blog compsci interesting question research wisdom\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:28:\"\n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:156:\"A German computer science student asks several leading computer scientists the Richard Hamming question: What are the most important problems in your field?\";}i:5;a:9:{s:5:\"about\";s:23:\"http://hunch.net/?p=291\";s:5:\"title\";s:40:\"It’s MDL Jim, but not as we know it…\";s:4:\"link\";s:23:\"http://hunch.net/?p=291\";s:11:\"description\";s:90:\"Peter Grünwald gives a précis of his new MDL book at the Machine Learning (Theory) blog.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-18T22:45:16Z\";s:7:\"subject\";s:55:\"MDL bayesian book machinelearning research stats theory\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:33:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:90:\"Peter Grünwald gives a précis of his new MDL book at the Machine Learning (Theory) blog.\";}i:6;a:9:{s:5:\"about\";s:55:\"http://www.phdcomics.com/comics/archive.php?comicid=905\";s:5:\"title\";s:29:\"PHD Comics: Analysis of Value\";s:4:\"link\";s:55:\"http://www.phdcomics.com/comics/archive.php?comicid=905\";s:11:\"description\";s:138:\""Is your research worth anything?". "Significance is determined by comparing one\'s research with the Dull Hypothesis".\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-10T09:04:05Z\";s:7:\"subject\";s:44:\"Fisher anova comic humour phd research stats\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:33:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:138:\""Is your research worth anything?". 
"Significance is determined by comparing one\'s research with the Dull Hypothesis".\";}i:7;a:9:{s:5:\"about\";s:79:\"http://golem.ph.utexas.edu/category/2007/09/category_theory_in_machine_lea.html\";s:5:\"title\";s:35:\"Category Theory in Machine Learning\";s:4:\"link\";s:79:\"http://golem.ph.utexas.edu/category/2007/09/category_theory_in_machine_lea.html\";s:11:\"description\";s:115:\"David Corfield muses about how category theory might be used in machine learning. An interesting discussion ensues.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-07T06:24:48Z\";s:7:\"subject\";s:63:\"categorytheory interesting machinelearning maths research stats\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:28:\"\n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:115:\"David Corfield muses about how category theory might be used in machine learning. An interesting discussion ensues.\";}i:8;a:9:{s:5:\"about\";s:80:\"http://www.americanscientist.org/template/AssetDetail/assetid/48548/page/1#48703\";s:5:\"title\";s:52:\"Where\'s the Real Bottleneck in Scientific Computing?\";s:4:\"link\";s:80:\"http://www.americanscientist.org/template/AssetDetail/assetid/48548/page/1#48703\";s:11:\"description\";s:164:\"Argues that scientists would do well to learn how to use some of the tools that are indispensable to software engineering, such as version control and unit testing.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-03T23:23:56Z\";s:7:\"subject\";s:97:\"cite:michael_mccracken programming research science software tools unittest versioncontrol wisdom\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:43:\"\n \n \n \n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:164:\"Argues that scientists would do well to learn how to use some of the tools that are indispensable to software engineering, such as version control and unit testing.\";}i:9;a:9:{s:5:\"about\";s:38:\"http://www.dsi.unifi.it/~paolo/JMLR08/\";s:5:\"title\";s:51:\"JMLR: Mining and Learning with Graphs and Relations\";s:4:\"link\";s:38:\"http://www.dsi.unifi.it/~paolo/JMLR08/\";s:11:\"description\";s:93:\"Special edition on graphs and relations. Abstracts due 3rd Feb. 2008, papers on the 10th Feb.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-09-03T05:45:25Z\";s:7:\"subject\";s:72:\"(todo) ILP academia graph jmlr journal machinelearning relation research\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:43:\"\n \n \n \n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:93:\"Special edition on graphs and relations. Abstracts due 3rd Feb. 
2008, papers on the 10th Feb.\";}i:10;a:9:{s:5:\"about\";s:35:\"http://elliscave.com/APL_J/tool.pdf\";s:5:\"title\";s:35:\"Notation as a Tool of Thought [PDF]\";s:4:\"link\";s:35:\"http://elliscave.com/APL_J/tool.pdf\";s:11:\"description\";s:72:\"Iverson\'s paper on the APL language and the importance of good notation.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-08-27T10:29:33Z\";s:7:\"subject\";s:52:\"communication language notation programming research\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:23:\"\n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:72:\"Iverson\'s paper on the APL language and the importance of good notation.\";}i:11;a:9:{s:5:\"about\";s:36:\"http://predict.kyb.tuebingen.mpg.de/\";s:5:\"title\";s:43:\"Evaluating Predictive Uncertainty Challenge\";s:4:\"link\";s:36:\"http://predict.kyb.tuebingen.mpg.de/\";s:11:\"description\";s:137:\"A competition run for NIPS 2004 to evaluate systems that make probabilistic predictions. Data and evaluation scripts are still available.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-08-23T04:01:49Z\";s:7:\"subject\";s:69:\"NIPS competition data evaluation machinelearning probability research\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:33:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:137:\"A competition run for NIPS 2004 to evaluate systems that make probabilistic predictions. Data and evaluation scripts are still available.\";}i:12;a:9:{s:5:\"about\";s:32:\"http://skim-app.sourceforge.net/\";s:5:\"title\";s:4:\"Skim\";s:4:\"link\";s:32:\"http://skim-app.sourceforge.net/\";s:11:\"description\";s:176:\"This is a great free replacement for OS X\'s Preview. It\'s a PDF viewer that\'s built for researchers, making it easy to annotate papers, read in full screen and link to BibDesk.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-08-17T02:19:13Z\";s:7:\"subject\";s:29:\"PDF bibdesk osx research tool\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:23:\"\n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:176:\"This is a great free replacement for OS X\'s Preview. 
It\'s a PDF viewer that\'s built for researchers, making it easy to annotate papers, read in full screen and link to BibDesk.\";}i:13;a:9:{s:5:\"about\";s:76:\"http://ml.typepad.com/machine_learning_thoughts/2006/06/making_machine_.html\";s:5:\"title\";s:39:\"Making Machine Learning More Scientific\";s:4:\"link\";s:76:\"http://ml.typepad.com/machine_learning_thoughts/2006/06/making_machine_.html\";s:11:\"description\";s:102:\"Olivier Bousquet has some good suggestions for resolving some foundational issues in machine learning.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-08-15T10:28:13Z\";s:7:\"subject\";s:44:\"idea machinelearning research science wisdom\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:23:\"\n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:102:\"Olivier Bousquet has some good suggestions for resolving some foundational issues in machine learning.\";}i:14;a:9:{s:5:\"about\";s:36:\"http://www.citeulike.org/user/mdreid\";s:5:\"title\";s:30:\"CiteULike: Mark Reid\'s Library\";s:4:\"link\";s:36:\"http://www.citeulike.org/user/mdreid\";s:11:\"description\";s:126:\"My CiteULike library will hopefully get more of a workout shortly. At the moment I\'m just keeping track of my online accounts.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-07-14T11:26:47Z\";s:7:\"subject\";s:37:\"(mine) account list research tool web\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:28:\"\n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:126:\"My CiteULike library will hopefully get more of a workout shortly. At the moment I\'m just keeping track of my online accounts.\";}i:15;a:9:{s:5:\"about\";s:28:\"http://mekentosj.com/papers/\";s:5:\"title\";s:42:\"Papers... Your personal library of science\";s:4:\"link\";s:28:\"http://mekentosj.com/papers/\";s:11:\"description\";s:145:\"This looks like a slick, commercial Mac application for organising your research papers. I should try this out to see how it compares to BibDesk.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-06-29T05:12:40Z\";s:7:\"subject\";s:52:\"application database osx paper research science tool\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:33:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:145:\"This looks like a slick, commercial Mac application for organising your research papers. I should try this out to see how it compares to BibDesk.\";}i:16;a:9:{s:5:\"about\";s:60:\"http://www.columbia.edu/~mjs2105/zheng_salganik_gelman06.pdf\";s:5:\"title\";s:44:\"How Many People Do You Know In Prison? [PDF]\";s:4:\"link\";s:60:\"http://www.columbia.edu/~mjs2105/zheng_salganik_gelman06.pdf\";s:11:\"description\";s:187:\"Interesting new research into networks that attempts to estimate hard-to-count populations and social network parameters from collections of "How Many X Do You Know?" 
questions.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-06-07T23:50:15Z\";s:7:\"subject\";s:48:\"article estimation network research social stats\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:28:\"\n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:187:\"Interesting new research into networks that attempts to estimate hard-to-count populations and social network parameters from collections of "How Many X Do You Know?" questions.\";}i:17;a:9:{s:5:\"about\";s:66:\"http://www.ams.org/bull/2002-39-01/S0273-0979-01-00923-5/home.html\";s:5:\"title\";s:43:\"On the Mathematical Foundations of Learning\";s:4:\"link\";s:66:\"http://www.ams.org/bull/2002-39-01/S0273-0979-01-00923-5/home.html\";s:11:\"description\";s:125:\"Interesting paper with a great collection of references to some seminal pieces on mathematics in statistical learning theory.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-06-05T00:43:03Z\";s:7:\"subject\";s:52:\"machinelearning maths paper reference research stats\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:28:\"\n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:125:\"Interesting paper with a great collection of references to some seminal pieces on mathematics in statistical learning theory.\";}i:18;a:9:{s:5:\"about\";s:40:\"http://talks.cam.ac.uk/show/archive/6983\";s:5:\"title\";s:46:\"Machine Learning Reading Group @ Cambridge Uni\";s:4:\"link\";s:40:\"http://talks.cam.ac.uk/show/archive/6983\";s:11:\"description\";s:113:\"An active machine learning reading group covering topics from Gaussian Processes to Probability Monads in Haskell\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-06-03T04:08:08Z\";s:7:\"subject\";s:37:\"machinelearning readinggroup research\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:13:\"\n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:113:\"An active machine learning reading group covering topics from Gaussian Processes to Probability Monads in Haskell\";}i:19;a:9:{s:5:\"about\";s:34:\"http://www1.cs.columbia.edu/~risi/\";s:5:\"title\";s:16:\"Imre Risi Kondor\";s:4:\"link\";s:34:\"http://www1.cs.columbia.edu/~risi/\";s:11:\"description\";s:69:\"Researcher applying techniques from group theory to machine learning.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-05-21T05:18:34Z\";s:7:\"subject\";s:43:\"grouptheory machinelearning people research\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:18:\"\n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:69:\"Researcher applying techniques from group theory to machine learning.\";}i:20;a:9:{s:5:\"about\";s:61:\"http://www-stat.stanford.edu/~donoho/Reports/1995/wavelab.pdf\";s:5:\"title\";s:39:\"WaveLab and Reproducible Research [PDF]\";s:4:\"link\";s:61:\"http://www-stat.stanford.edu/~donoho/Reports/1995/wavelab.pdf\";s:11:\"description\";s:130:\"This paper follows up on the Standford group\'s work and outlines a MatLab library that makes reproducible wavelet research easier.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-05-14T00:31:04Z\";s:7:\"subject\";s:36:\"matlab reproducible research science\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:18:\"\n \n \n \n 
\";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:130:\"This paper follows up on the Standford group\'s work and outlines a MatLab library that makes reproducible wavelet research easier.\";}i:21;a:9:{s:5:\"about\";s:50:\"http://sepwww.stanford.edu/research/redoc/cip.html\";s:5:\"title\";s:24:\"Reproducing Computations\";s:4:\"link\";s:50:\"http://sepwww.stanford.edu/research/redoc/cip.html\";s:11:\"description\";s:132:\"A paper outlining a Standford group\'s approach to ensuring reproducible research. Basically, makefiles plus file naming conventions.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-05-14T00:27:40Z\";s:7:\"subject\";s:29:\"reproducible research science\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:13:\"\n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:132:\"A paper outlining a Standford group\'s approach to ensuring reproducible research. Basically, makefiles plus file naming conventions.\";}i:22;a:9:{s:5:\"about\";s:65:\"http://www.stat.washington.edu/jaw/jaw.research.reproducible.html\";s:5:\"title\";s:27:\"Reproducible Research Links\";s:4:\"link\";s:65:\"http://www.stat.washington.edu/jaw/jaw.research.reproducible.html\";s:11:\"description\";s:199:\"A collection of links to papers and website describing methods for ensuring your scientific research is reproducible. The methods are still primitive (makefiles etc) but the message needs amplifying.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-05-14T00:25:00Z\";s:7:\"subject\";s:41:\"programming reproducible research science\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:18:\"\n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:199:\"A collection of links to papers and website describing methods for ensuring your scientific research is reproducible. The methods are still primitive (makefiles etc) but the message needs amplifying.\";}i:23;a:9:{s:5:\"about\";s:38:\"http://www.eigenfactor.org/methods.htm\";s:5:\"title\";s:57:\"eigenfactor.org - ranking and mapping scientific journals\";s:4:\"link\";s:38:\"http://www.eigenfactor.org/methods.htm\";s:11:\"description\";s:160:\"Interesting use of eigen-methods similar to those used by Google to provide a ranking of academic journals. This page describes the maths behind their approach.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-05-07T23:59:23Z\";s:7:\"subject\";s:39:\"academia journal maths network research\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:23:\"\n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:160:\"Interesting use of eigen-methods similar to those used by Google to provide a ranking of academic journals. This page describes the maths behind their approach.\";}i:24;a:9:{s:5:\"about\";s:51:\"http://www.apa.org/journals/features/psp7761121.pdf\";s:5:\"title\";s:27:\"Unskilled and Unaware [PDF]\";s:4:\"link\";s:51:\"http://www.apa.org/journals/features/psp7761121.pdf\";s:11:\"description\";s:196:\"Fascinating experimental psychology paper that examines how people assess their own level of skill relative to others. 
The conclusion: unskilled people drastically over-estimate their own ability.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-05-01T00:09:24Z\";s:7:\"subject\";s:33:\"paper psychology research science\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:18:\"\n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:196:\"Fascinating experimental psychology paper that examines how people assess their own level of skill relative to others. The conclusion: unskilled people drastically over-estimate their own ability.\";}i:25;a:9:{s:5:\"about\";s:39:\"http://www.uclic.ucl.ac.uk/harold/warp/\";s:5:\"title\";s:4:\"Warp\";s:4:\"link\";s:39:\"http://www.uclic.ucl.ac.uk/harold/warp/\";s:11:\"description\";s:198:\""Warp is a system for helping write reliable explanations for algorithms, programs and code. " It aims to make it easier for computer scientists to publish reliable code and documentation.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-04-30T05:56:36Z\";s:7:\"subject\";s:40:\"aca application compsci research writing\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:23:\"\n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:198:\""Warp is a system for helping write reliable explanations for algorithms, programs and code. " It aims to make it easier for computer scientists to publish reliable code and documentation.\";}i:26;a:9:{s:5:\"about\";s:63:\"http://www.amstat.org/publications/jse/v14n3/datasets.kern.html\";s:5:\"title\";s:31:\"Pig Data and Bayesian Inference\";s:4:\"link\";s:63:\"http://www.amstat.org/publications/jse/v14n3/datasets.kern.html\";s:11:\"description\";s:175:\"Pedagogical analysis of the game "Pass the Pigs". Introduces the use of the Dirichlet prior and derives initial values from scoring information in the game\'s manual.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-04-29T05:04:14Z\";s:7:\"subject\";s:44:\"bayesian games paper research stats teaching\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:28:\"\n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:175:\"Pedagogical analysis of the game "Pass the Pigs". 
Introduces the use of the Dirichlet prior and derives initial values from scoring information in the game\'s manual.\";}i:27;a:9:{s:5:\"about\";s:85:\"http://www.bioinformaticszen.com/2007/02/the-dark-side-of-bioinformatics-data-mining/\";s:5:\"title\";s:43:\"The dark side of bioinformatics data mining\";s:4:\"link\";s:85:\"http://www.bioinformaticszen.com/2007/02/the-dark-side-of-bioinformatics-data-mining/\";s:11:\"description\";s:210:\"You could replace the "bioinformatics data mining" of this article with any application of machine learning, the moral is the same: if you don\'t have a question to answer, don\'t do the data crunching.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-02-19T02:38:48Z\";s:7:\"subject\";s:42:\"datamining machinelearning research wisdom\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:18:\"\n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:210:\"You could replace the "bioinformatics data mining" of this article with any application of machine learning, the moral is the same: if you don\'t have a question to answer, don\'t do the data crunching.\";}i:28;a:9:{s:5:\"about\";s:52:\"http://www.cs.utk.edu/~dongarra/etemplates/book.html\";s:5:\"title\";s:60:\"Templates for the Solutions of Algebraic Eigenvalue Problems\";s:4:\"link\";s:52:\"http://www.cs.utk.edu/~dongarra/etemplates/book.html\";s:11:\"description\";s:154:\"Complete textbook on the theory of and algorithms for eigenvalue decompositions. I\'ve read the section on SVDs and it\'s concise, precise and easy to read.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-02-12T02:28:26Z\";s:7:\"subject\";s:54:\"SVD algorithm book eigenvalue maths reference research\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:33:\"\n \n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:154:\"Complete textbook on the theory of and algorithms for eigenvalue decompositions. I\'ve read the section on SVDs and it\'s concise, precise and easy to read.\";}i:29;a:9:{s:5:\"about\";s:56:\"http://www.stat.columbia.edu/~jakulin/Politics/index.htm\";s:5:\"title\";s:23:\"Data Mining in Politics\";s:4:\"link\";s:56:\"http://www.stat.columbia.edu/~jakulin/Politics/index.htm\";s:11:\"description\";s:176:\"Gallery of research projects which explore data from the political sphere, such as voting patterns amongst representatives. Some work was done in conjunction with Wray Buntine.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2007-01-11T06:02:09Z\";s:7:\"subject\";s:51:\"datamining machinelearning politics research voting\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:23:\"\n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:176:\"Gallery of research projects which explore data from the political sphere, such as voting patterns amongst representatives. 
Some work was done in conjunction with Wray Buntine.\";}i:30;a:9:{s:5:\"about\";s:50:\"http://www.atmos.washington.edu/~salathe/osx_unix/\";s:5:\"title\";s:55:\"Setting up OS X as a Scientific Programming Environment\";s:4:\"link\";s:50:\"http://www.atmos.washington.edu/~salathe/osx_unix/\";s:11:\"description\";s:106:\"Collection of applications and steps for setting up your Mac for heavy scientific and numerical computing.\";s:2:\"dc\";a:3:{s:7:\"creator\";s:5:\"mreid\";s:4:\"date\";s:20:\"2006-12-15T06:23:06Z\";s:7:\"subject\";s:42:\"mac osx programming research science tools\";}s:4:\"taxo\";a:2:{s:6:\"topics\";s:3:\"\n \";s:10:\"topics_bag\";s:5:\"\n \";}s:10:\"topics_bag\";s:28:\"\n \n \n \n \n \n \";s:6:\"topics\";s:1:\"\n\";s:7:\"summary\";s:106:\"Collection of applications and steps for setting up your Mac for heavy scientific and numerical computing.\";}}s:7:\"channel\";a:5:{s:5:\"title\";s:26:\"del.icio.us/mreid/research\";s:4:\"link\";s:33:\"http://del.icio.us/mreid/research\";s:5:\"items\";s:3:\"\n \n\";s:9:\"items_seq\";s:95:\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \";s:7:\"tagline\";N;}s:9:\"textinput\";a:0:{}s:5:\"image\";a:0:{}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"1.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}s:13:\"last_modified\";s:31:\"Mon, 24 Sep 2007 08:31:21 GMT\r\n\";s:4:\"etag\";s:22:\"31313930363232363831\r\n\";}','no'),(184,0,'rss_9a4f323c26275a2301a4169fef6d833f_ts','1190622929','no'),(283,0,'oid_plugin_enabled','1','yes'),(284,0,'oid_plugin_revision','svn-31842','yes'),(285,0,'oid_db_revision','24426','yes'),(286,0,'oid_enable_commentform','1','yes'),(287,0,'oid_enable_approval','1','yes'),(230,0,'wordpress_api_key','3697b86c9d18','yes'),(231,0,'akismet_discard_month','true','yes'),(232,0,'akismet_spam_count','2783','yes'),(235,0,'widget_akismet','a:1:{s:5:\"title\";s:12:\"Spam Blocked\";}','yes'),(253,0,'plugin_wp-hashcash','a:11:{s:13:\"comments-spam\";i:59;s:12:\"comments-ham\";i:26;s:3:\"key\";a:5:{i:0;i:159060893;i:1;i:1140234300;i:2;i:874347908;i:3;i:328515038;i:4;i:1110562531;}s:8:\"key-date\";i:1229602405;s:7:\"refresh\";i:604800;s:10:\"moderation\";s:7:\"akismet\";s:12:\"signups-spam\";b:0;s:11:\"signups-ham\";b:0;s:11:\"validate-ip\";b:1;s:12:\"validate-url\";b:1;s:7:\"logging\";b:1;}','yes'),(256,0,'page_on_front','0','yes'),(257,0,'page_for_posts','','yes'),(233,0,'rss_d710ea9466a3e8d0e1e93f34f7d73336','O:9:\"MagpieRSS\":17:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:50:{i:0;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3733674\";s:5:\"title\";s:55:\"Morphological Signal Processing and the Slope Transform\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3733674\";s:11:\"description\";s:1128:\"(1993)

This paper presents the operation of tangential dilation, which describes the touching of differentiable surfaces. It generalizes the classical dilation, but is invertible. It is shown that line segments are eigenfunctions of this dilation, and are parallel-transported, and that curvature is additive. We then present the slope transform which is a re-representation of morphology onto the morphological eigenfunctions. As such, the slope transform provides for tangential morphology the same analytical power as the Fourier transform provides for linear signal processing. Under the slope transform dilation becomes addition (just as under a Fourier transform, convolution becomes multiplication). We give a discrete slope transform suited for implementation, and discuss the relationships to the Legendre transform, Young-Fenchel conjugate, and A-transform. We exhibit a logarithmic correspondence of this tangential morphology to linear systems theory, and touch on the consequences for morphological data analysis of a scanning tunnelling microscope. 1
Leo Dorst, Rein van den Boomgaard\";s:2:\"dc\";a:4:{s:5:\"title\";s:55:\"Morphological Signal Processing and the Slope Transform\";s:7:\"creator\";s:31:\"Leo DorstRein van den Boomgaard\";s:6:\"source\";s:6:\"(1993)\";s:4:\"date\";s:25:\"2008-12-01T22:54:59-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"1993\";s:8:\"category\";s:38:\"convexityfenchel_dualityrepresentation\";}s:7:\"summary\";s:1128:\"(1993)

This paper presents the operation of tangential dilation, which describes the touching of differentiable surfaces. It generalizes the classical dilation, but is invertible. It is shown that line segments are eigenfunctions of this dilation, and are parallel-transported, and that curvature is additive. We then present the slope transform which is a re-representation of morphology onto the morphological eigenfunctions. As such, the slope transform provides for tangential morphology the same analytical power as the Fourier transform provides for linear signal processing. Under the slope transform dilation becomes addition (just as under a Fourier transform, convolution becomes multiplication). We give a discrete slope transform suited for implementation, and discuss the relationships to the Legendre transform, Young-Fenchel conjugate, and A-transform. We exhibit a logarithmic correspondence of this tangential morphology to linear systems theory, and touch on the consequences for morphological data analysis of a scanning tunnelling microscope. 1
Leo Dorst, Rein van den Boomgaard\";}i:1;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3680075\";s:5:\"title\";s:70:\"Measures of Clustering Quality: A Working Set of Axioms for Clustering\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3680075\";s:11:\"description\";s:1464:\"(2008)

Aiming towards the development of a general clustering theory, we discuss abstract axiomatization for clustering. In this respect, we follow up on the work of Kleinberg, ([1]) that showed an impossibility result for such axiomatization. We argue that an impossibility result is not an inherent feature of clustering, but rather, to a large extent, it is an artifact of the specific formalism used in [1]. As opposed to previous work focusing on clustering functions, we propose to address clustering quality measures as the object to be axiomatized. We show that principles like those formulated in Kleinberg’s axioms can be readily expressed in the latter framework without leading to inconsistency. A clustering-quality measure (CQM) is a function that, given a data set and its partition into clusters, returns a non-negative real number representing how strong or conclusive the clustering is. We analyze what clustering-quality measures should look like and introduce a set of requirements (axioms) for such measures. Our axioms capture the principles expressed by Kleinberg’s axioms while retaining consistency. We propose several natural clustering quality measures, all satisfying the proposed axioms. In addition, we analyze the computational complexity of evaluating the quality of a given clustering and show that, for the proposed CQMs, it can be computed in polynomial time.
Margareta Ackerman, Shai Ben-David\";s:2:\"dc\";a:4:{s:5:\"title\";s:70:\"Measures of Clustering Quality: A Working Set of Axioms for Clustering\";s:7:\"creator\";s:32:\"Margareta AckermanShai Ben-David\";s:6:\"source\";s:6:\"(2008)\";s:4:\"date\";s:25:\"2008-11-24T00:17:35-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2008\";s:8:\"category\";s:10:\"clustering\";}s:7:\"summary\";s:1464:\"(2008)

Aiming towards the development of a general clustering theory, we discuss abstract axiomatization for clustering. In this respect, we follow up on the work of Kleinberg, ([1]) that showed an impossibility result for such axiomatization. We argue that an impossibility result is not an inherent feature of clustering, but rather, to a large extent, it is an artifact of the specific formalism used in [1]. As opposed to previous work focusing on clustering functions, we propose to address clustering quality measures as the object to be axiomatized. We show that principles like those formulated in Kleinberg’s axioms can be readily expressed in the latter framework without leading to inconsistency. A clustering-quality measure (CQM) is a function that, given a data set and its partition into clusters, returns a non-negative real number representing how strong or conclusive the clustering is. We analyze what clustering-quality measures should look like and introduce a set of requirements (axioms) for such measures. Our axioms capture the principles expressed by Kleinberg’s axioms while retaining consistency. We propose several natural clustering quality measures, all satisfying the proposed axioms. In addition, we analyze the computational complexity of evaluating the quality of a given clustering and show that, for the proposed CQMs, it can be computed in polynomial time.
Margareta Ackerman, Shai Ben-David\";}i:2;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/1747014\";s:5:\"title\";s:39:\"A Group Theoretic Model for Information\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/1747014\";s:11:\"description\";s:1485:\"(5 Oct 2007)

In this paper we formalize the notions of information elements and information lattices, first proposed by Shannon. Exploiting this formalization, we identify a comprehensive parallelism between information lattices and subgroup lattices. Qualitatively, we demonstrate isomorphisms between information lattices and subgroup lattices. Quantitatively, we establish a decisive approximation relation between the entropy structures of information lattices and the log-index structures of the corresponding subgroup lattices. This approximation extends the approximation for joint entropies carried out previously by Chan and Yeung. As a consequence of our approximation result, we show that any continuous law holds in general for the entropies of information elements if and only if the same law holds in general for the log-indices of subgroups. As an application, by constructing subgroup counterexamples we find surprisingly that common information, unlike joint information, obeys neither the submodularity nor the supermodularity law. We emphasize that the notion of information elements is conceptually significant--formalizing it helps to reveal the deep connection between information theory and group theory. The parallelism established in this paper admits an appealing group-action explanation and provides useful insights into the intrinsic structure among information elements from a group-theoretic perspective.
Hua Li, Edwin Chong\";s:2:\"dc\";a:4:{s:5:\"title\";s:39:\"A Group Theoretic Model for Information\";s:7:\"creator\";s:17:\"Hua LiEdwin Chong\";s:6:\"source\";s:12:\"(5 Oct 2007)\";s:4:\"date\";s:25:\"2008-11-17T07:13:03-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2007\";s:8:\"category\";s:32:\"groupinequalityinformationtheory\";}s:7:\"summary\";s:1485:\"(5 Oct 2007)

In this paper we formalize the notions of information elements and information lattices, first proposed by Shannon. Exploiting this formalization, we identify a comprehensive parallelism between information lattices and subgroup lattices. Qualitatively, we demonstrate isomorphisms between information lattices and subgroup lattices. Quantitatively, we establish a decisive approximation relation between the entropy structures of information lattices and the log-index structures of the corresponding subgroup lattices. This approximation extends the approximation for joint entropies carried out previously by Chan and Yeung. As a consequence of our approximation result, we show that any continuous law holds in general for the entropies of information elements if and only if the same law holds in general for the log-indices of subgroups. As an application, by constructing subgroup counterexamples we find surprisingly that common information, unlike joint information, obeys neither the submodularity nor the supermodularity law. We emphasize that the notion of information elements is conceptually significant--formalizing it helps to reveal the deep connection between information theory and group theory. The parallelism established in this paper admits an appealing group-action explanation and provides useful insights into the intrinsic structure among information elements from a group-theoretic perspective.
Hua Li, Edwin Chong\";}i:3;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3501856\";s:5:\"title\";s:48:\"Learning to rank with combinatorial Hodge theory\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3501856\";s:11:\"description\";s:1906:\"(7 Nov 2008)

We propose techniques for learning a global ranking from data that may be incomplete and imbalanced -- characteristics almost universal to modern datasets coming from e-commerce and internet applications. We are primarily interested in score or rating-based cardinal data. From raw ranking data, we construct pairwise rankings, represented as edge flows on an appropriate graph. Our rank learning method uses the graph Helmholtzian, the graph theoretic analogue of the Helmholtz operator or vector Laplacian, in much the same way the graph Laplacian is an analogue of the Laplace operator or scalar Laplacian. We study the graph Helmholtzian using combinatorial Hodge theory: we show that every edge flow representing pairwise ranking can be resolved into two orthogonal components, a gradient flow that represents the L2-optimal global ranking and a divergence-free flow (cyclic) that measures the validity of the global ranking obtained -- if this is large, then the data does not have a meaningful global ranking. This divergence-free flow can be further decomposed orthogonally into a curl flow (locally cyclic) and a harmonic flow (locally acyclic but globally cyclic); these provides information on whether inconsistency arises locally or globally. An obvious advantage over the NP-hard Kemeny optimization is that discrete Hodge decomposition may be computed via a linear least squares regression. We also investigated the L1-projection of edge flows, showing that this is dual to correlation maximization over bounded divergence-free flows, and the L1-approximate sparse cyclic ranking, showing that this is dual to correlation maximization over bounded curl-free flows. We discuss relations with Kemeny optimization, Borda count, and Kendall-Smith consistency index from social choice theory and statistics.
Xiaoye Jiang, Lek-Heng Lim, Yuan Yao, Yinyu Ye\";s:2:\"dc\";a:4:{s:5:\"title\";s:48:\"Learning to rank with combinatorial Hodge theory\";s:7:\"creator\";s:40:\"Xiaoye JiangLek-Heng LimYuan YaoYinyu Ye\";s:6:\"source\";s:12:\"(7 Nov 2008)\";s:4:\"date\";s:25:\"2008-11-10T21:42:28-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2008\";s:8:\"category\";s:32:\"combinatoricsgraph_theoryranking\";}s:7:\"summary\";s:1906:\"(7 Nov 2008)

We propose techniques for learning a global ranking from data that may be incomplete and imbalanced -- characteristics almost universal to modern datasets coming from e-commerce and internet applications. We are primarily interested in score or rating-based cardinal data. From raw ranking data, we construct pairwise rankings, represented as edge flows on an appropriate graph. Our rank learning method uses the graph Helmholtzian, the graph theoretic analogue of the Helmholtz operator or vector Laplacian, in much the same way the graph Laplacian is an analogue of the Laplace operator or scalar Laplacian. We study the graph Helmholtzian using combinatorial Hodge theory: we show that every edge flow representing pairwise ranking can be resolved into two orthogonal components, a gradient flow that represents the L2-optimal global ranking and a divergence-free flow (cyclic) that measures the validity of the global ranking obtained -- if this is large, then the data does not have a meaningful global ranking. This divergence-free flow can be further decomposed orthogonally into a curl flow (locally cyclic) and a harmonic flow (locally acyclic but globally cyclic); these provides information on whether inconsistency arises locally or globally. An obvious advantage over the NP-hard Kemeny optimization is that discrete Hodge decomposition may be computed via a linear least squares regression. We also investigated the L1-projection of edge flows, showing that this is dual to correlation maximization over bounded divergence-free flows, and the L1-approximate sparse cyclic ranking, showing that this is dual to correlation maximization over bounded curl-free flows. We discuss relations with Kemeny optimization, Borda count, and Kendall-Smith consistency index from social choice theory and statistics.
Xiaoye Jiang, Lek-Heng Lim, Yuan Yao, Yinyu Ye\";}i:4;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3438754\";s:5:\"title\";s:47:\"On Relevant Dimensions in Kernel Feature Spaces\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3438754\";s:11:\"description\";s:1210:\"Journal of Machine Learning Research, Vol. 9 (August 2008), pp. 1875-1908.

We show that the relevant information of a supervised learning problem is contained up to negligible error in a finite number of leading kernel PCA components if the kernel matches the underlying learning problem in the sense that it can asymptotically represent the function to be learned and is sufficiently smooth. Thus, kernels do not only transform data sets such that good generalization can be achieved using only linear discriminant functions, but this transformation is also performed in a manner which makes economical use of feature space dimensions. In the best case, kernels provide efficient implicit representations of the data for supervised learning problems. Practically, we propose an algorithm which enables us to recover the number of leading kernel PCA components relevant for good classification. Our algorithm can therefore be applied (1) to analyze the interplay of data set and kernel in a geometric fashion, (2) to aid in model selection, and (3) to denoise in feature space in order to yield better classification results.
Mikio Braun, Joachim Buhmann, Klaus-Robert Müller\";s:2:\"dc\";a:4:{s:5:\"title\";s:47:\"On Relevant Dimensions in Kernel Feature Spaces\";s:7:\"creator\";s:46:\"Mikio BraunJoachim BuhmannKlaus-Robert Müller\";s:6:\"source\";s:74:\"Journal of Machine Learning Research, Vol. 9 (August 2008), pp. 1875-1908.\";s:4:\"date\";s:25:\"2008-10-22T08:07:33-00:00\";}s:5:\"prism\";a:6:{s:15:\"publicationyear\";s:4:\"2008\";s:15:\"publicationname\";s:36:\"Journal of Machine Learning Research\";s:6:\"volume\";s:1:\"9\";s:12:\"startingpage\";s:4:\"1875\";s:10:\"endingpage\";s:4:\"1908\";s:8:\"category\";s:30:\"eigenvectorkernelpcasupervised\";}s:7:\"summary\";s:1210:\"Journal of Machine Learning Research, Vol. 9 (August 2008), pp. 1875-1908.

We show that the relevant information of a supervised learning problem is contained up to negligible error in a finite number of leading kernel PCA components if the kernel matches the underlying learning problem in the sense that it can asymptotically represent the function to be learned and is sufficiently smooth. Thus, kernels do not only transform data sets such that good generalization can be achieved using only linear discriminant functions, but this transformation is also performed in a manner which makes economical use of feature space dimensions. In the best case, kernels provide efficient implicit representations of the data for supervised learning problems. Practically, we propose an algorithm which enables us to recover the number of leading kernel PCA components relevant for good classification. Our algorithm can therefore be applied (1) to analyze the interplay of data set and kernel in a geometric fashion, (2) to aid in model selection, and (3) to denoise in feature space in order to yield better classification results.
Mikio Braun, Joachim Buhmann, Klaus-Robert Müller\";}i:5;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/1894142\";s:5:\"title\";s:34:\"An ontology for a Robot Scientist.\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/1894142\";s:11:\"description\";s:1522:\"Bioinformatics, Vol. 22, No. 14. (15 July 2006)

MOTIVATION: A Robot Scientist is a physically implemented robotic system that can automatically carry out cycles of scientific experimentation. We are commissioning a new Robot Scientist designed to investigate gene function in S. cerevisiae. This Robot Scientist will be capable of initiating >1,000 experiments, and making >200,000 observations a day. Robot Scientists provide a unique test bed for the development of methodologies for the curation and annotation of scientific experiments: because the experiments are conceived and executed automatically by computer, it is possible to completely capture and digitally curate all aspects of the scientific process. This new ability brings with it significant technical challenges. To meet these we apply an ontology driven approach to the representation of all the Robot Scientist\'s data and metadata. RESULTS: We demonstrate the utility of developing an ontology for our new Robot Scientist. This ontology is based on a general ontology of experiments. The ontology aids the curation and annotating of the experimental data and metadata, and the equipment metadata, and supports the design of database systems to hold the data and metadata. AVAILABILITY: EXPO in XML and OWL formats is at: http://sourceforge.net/projects/expo/. All materials about the Robot Scientist project are available at: http://www.aber.ac.uk/compsci/Research/bio/robotsci/.
LN Soldatova, A Clare, A Sparkes, RD King\";s:2:\"dc\";a:5:{s:5:\"title\";s:34:\"An ontology for a Robot Scientist.\";s:7:\"creator\";s:35:\"LN SoldatovaA ClareA SparkesRD King\";s:10:\"identifier\";s:33:\"doi:10.1093/bioinformatics/btl207\";s:6:\"source\";s:47:\"Bioinformatics, Vol. 22, No. 14. (15 July 2006)\";s:4:\"date\";s:25:\"2008-10-21T06:06:41-00:00\";}s:5:\"prism\";a:6:{s:15:\"publicationyear\";s:4:\"2006\";s:15:\"publicationname\";s:14:\"Bioinformatics\";s:4:\"issn\";s:9:\"1460-2059\";s:6:\"volume\";s:2:\"22\";s:6:\"number\";s:2:\"14\";s:8:\"category\";s:42:\"bioinformaticsexperimentallanguageprotocol\";}s:7:\"summary\";s:1522:\"Bioinformatics, Vol. 22, No. 14. (15 July 2006)

MOTIVATION: A Robot Scientist is a physically implemented robotic system that can automatically carry out cycles of scientific experimentation. We are commissioning a new Robot Scientist designed to investigate gene function in S. cerevisiae. This Robot Scientist will be capable of initiating >1,000 experiments, and making >200,000 observations a day. Robot Scientists provide a unique test bed for the development of methodologies for the curation and annotation of scientific experiments: because the experiments are conceived and executed automatically by computer, it is possible to completely capture and digitally curate all aspects of the scientific process. This new ability brings with it significant technical challenges. To meet these we apply an ontology driven approach to the representation of all the Robot Scientist\'s data and metadata. RESULTS: We demonstrate the utility of developing an ontology for our new Robot Scientist. This ontology is based on a general ontology of experiments. The ontology aids the curation and annotating of the experimental data and metadata, and the equipment metadata, and supports the design of database systems to hold the data and metadata. AVAILABILITY: EXPO in XML and OWL formats is at: http://sourceforge.net/projects/expo/. All materials about the Robot Scientist project are available at: http://www.aber.ac.uk/compsci/Research/bio/robotsci/.
LN Soldatova, A Clare, A Sparkes, RD King\";}i:6;a:7:{s:5:\"about\";s:51:\"http://www.citeulike.org/user/mdreid/article/103855\";s:5:\"title\";s:42:\"Diffusion Kernels on Statistical Manifolds\";s:4:\"link\";s:51:\"http://www.citeulike.org/user/mdreid/article/103855\";s:11:\"description\";s:1036:\"J. Mach. Learn. Res., Vol. 6 (2005), pp. 129-163.

A family of kernels for statistical learning is introduced that exploits the geometric structure of statistical models. The kernels are based on the heat equation on the Riemannian manifold defined by the Fisher information metric associated with a statistical family, and generalize the Gaussian kernel of Euclidean space. As an important special case, kernels based on the geometry of multinomial families are derived, leading to kernel-based learning algorithms that apply naturally to discrete data. Bounds on covering numbers and Rademacher averages for the kernels are proved using bounds on the eigenvalues of the Laplacian on Riemannian manifolds. Experimental results are presented for document classification, for which the use of multinomial geometry is natural and well motivated, and improvements are obtained over the standard use of Gaussian or linear kernels, which have been the standard for text classification.
John Lafferty, Guy Lebanon\";s:2:\"dc\";a:4:{s:5:\"title\";s:42:\"Diffusion Kernels on Statistical Manifolds\";s:7:\"creator\";s:24:\"John LaffertyGuy Lebanon\";s:6:\"source\";s:49:\"J. Mach. Learn. Res., Vol. 6 (2005), pp. 129-163.\";s:4:\"date\";s:25:\"2008-10-13T10:15:40-00:00\";}s:5:\"prism\";a:8:{s:15:\"publicationyear\";s:4:\"2005\";s:15:\"publicationname\";s:20:\"J. Mach. Learn. Res.\";s:4:\"issn\";s:9:\"1533-7928\";s:6:\"volume\";s:1:\"6\";s:12:\"startingpage\";s:3:\"129\";s:10:\"endingpage\";s:3:\"163\";s:9:\"publisher\";s:9:\"MIT Press\";s:8:\"category\";s:62:\"algebraic_geometryfisher_informationinformation_geometrykernel\";}s:7:\"summary\";s:1036:\"J. Mach. Learn. Res., Vol. 6 (2005), pp. 129-163.

A family of kernels for statistical learning is introduced that exploits the geometric structure of statistical models. The kernels are based on the heat equation on the Riemannian manifold defined by the Fisher information metric associated with a statistical family, and generalize the Gaussian kernel of Euclidean space. As an important special case, kernels based on the geometry of multinomial families are derived, leading to kernel-based learning algorithms that apply naturally to discrete data. Bounds on covering numbers and Rademacher averages for the kernels are proved using bounds on the eigenvalues of the Laplacian on Riemannian manifolds. Experimental results are presented for document classification, for which the use of multinomial geometry is natural and well motivated, and improvements are obtained over the standard use of Gaussian or linear kernels, which have been the standard for text classification.
John Lafferty, Guy Lebanon\";}i:7;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3319735\";s:5:\"title\";s:34:\"The Proximal Average: Basic Theory\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3319735\";s:11:\"description\";s:616:\"SIAM Journal on Optimization, Vol. 19, No. 2. (2008), pp. 766-785.

The recently introduced proximal average of two convex functions is a convex function with many useful properties. In this paper, we introduce and systematically study the proximal average for finitely many convex functions. The basic properties of the proximal average with respect to the standard convex-analytical notions (domain, Fenchel conjugate, subdifferential, proximal mapping, epi-continuity, and others) are provided and illustrated by several examples.
Heinz Bauschke, Rafal Goebel, Yves Lucet, Xianfu Wang\";s:2:\"dc\";a:4:{s:5:\"title\";s:34:\"The Proximal Average: Basic Theory\";s:7:\"creator\";s:47:\"Heinz BauschkeRafal GoebelYves LucetXianfu Wang\";s:6:\"source\";s:66:\"SIAM Journal on Optimization, Vol. 19, No. 2. (2008), pp. 766-785.\";s:4:\"date\";s:25:\"2008-09-23T03:37:54-00:00\";}s:5:\"prism\";a:8:{s:15:\"publicationyear\";s:4:\"2008\";s:15:\"publicationname\";s:28:\"SIAM Journal on Optimization\";s:6:\"volume\";s:2:\"19\";s:6:\"number\";s:1:\"2\";s:12:\"startingpage\";s:3:\"766\";s:10:\"endingpage\";s:3:\"785\";s:9:\"publisher\";s:4:\"SIAM\";s:8:\"category\";s:37:\"averageconvexityfenchel_dualitytheory\";}s:7:\"summary\";s:616:\"SIAM Journal on Optimization, Vol. 19, No. 2. (2008), pp. 766-785.

The recently introduced proximal average of two convex functions is a convex function with many useful properties. In this paper, we introduce and systematically study the proximal average for finitely many convex functions. The basic properties of the proximal average with respect to the standard convex-analytical notions (domain, Fenchel conjugate, subdifferential, proximal mapping, epi-continuity, and others) are provided and illustrated by several examples.
Heinz Bauschke, Rafal Goebel, Yves Lucet, Xianfu Wang\";}i:8;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3267635\";s:5:\"title\";s:13:\"Predictocracy\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3267635\";s:11:\"description\";s:44:\"(2007)
Michael Abramowicz\";s:2:\"dc\";a:4:{s:5:\"title\";s:13:\"Predictocracy\";s:7:\"creator\";s:18:\"Michael Abramowicz\";s:6:\"source\";s:6:\"(2007)\";s:4:\"date\";s:25:\"2008-09-15T06:21:33-00:00\";}s:5:\"prism\";a:3:{s:15:\"publicationyear\";s:4:\"2007\";s:9:\"publisher\";s:21:\"Yale University Press\";s:8:\"category\";s:18:\"prediction_markets\";}s:7:\"summary\";s:44:\"(2007)
Michael Abramowicz\";}i:9;a:7:{s:5:\"about\";s:51:\"http://www.citeulike.org/user/mdreid/article/577076\";s:5:\"title\";s:46:\"Algebraic Statistics for Computational Biology\";s:4:\"link\";s:51:\"http://www.citeulike.org/user/mdreid/article/577076\";s:11:\"description\";s:753:\"(22 August 2005)

The quantitative analysis of biological sequence data is based on methods from statistics coupled with efficient algorithms from computer science. Algebra provides a framework for unifying many of the seemingly disparate techniques used by computational biologists. This book offers an introduction to this mathematical framework and describes tools from computational algebra for designing new algorithms for exact, accurate results. These algorithms can be applied to biological problems such as aligning genomes, finding genes and constructing phylogenies. As the first book in the exciting and dynamic area, it will be welcomed as a text for self-study or for advanced undergraduate and beginning graduate courses.\";s:2:\"dc\";a:3:{s:5:\"title\";s:46:\"Algebraic Statistics for Computational Biology\";s:6:\"source\";s:16:\"(22 August 2005)\";s:4:\"date\";s:25:\"2008-09-15T06:19:31-00:00\";}s:5:\"prism\";a:3:{s:15:\"publicationyear\";s:4:\"2005\";s:9:\"publisher\";s:26:\"Cambridge University Press\";s:8:\"category\";s:6:\"no-tag\";}s:7:\"summary\";s:753:\"(22 August 2005)

The quantitative analysis of biological sequence data is based on methods from statistics coupled with efficient algorithms from computer science. Algebra provides a framework for unifying many of the seemingly disparate techniques used by computational biologists. This book offers an introduction to this mathematical framework and describes tools from computational algebra for designing new algorithms for exact, accurate results. These algorithms can be applied to biological problems such as aligning genomes, finding genes and constructing phylogenies. As the first book in the exciting and dynamic area, it will be welcomed as a text for self-study or for advanced undergraduate and beginning graduate courses.\";}i:10;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3267634\";s:5:\"title\";s:23:\"Support Vector Machines\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3267634\";s:11:\"description\";s:60:\"(2008)
Ingo Steinwart, Andreas Christmann\";s:2:\"dc\";a:4:{s:5:\"title\";s:23:\"Support Vector Machines\";s:7:\"creator\";s:32:\"Ingo SteinwartAndreas Christmann\";s:6:\"source\";s:6:\"(2008)\";s:4:\"date\";s:25:\"2008-09-15T06:18:41-00:00\";}s:5:\"prism\";a:3:{s:15:\"publicationyear\";s:4:\"2008\";s:9:\"publisher\";s:8:\"Springer\";s:8:\"category\";s:6:\"no-tag\";}s:7:\"summary\";s:60:\"(2008)
Ingo Steinwart, Andreas Christmann\";}i:11;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3196695\";s:5:\"title\";s:71:\"Convex solutions of a functional equation arising in information theory\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3196695\";s:11:\"description\";s:762:\"Journal of Mathematical Analysis and Applications, Vol. 328, No. 2. (15 April 2007), pp. 1309-1320.

Given a convex function f defined for positive real variables, the so-called Csiszár f-divergence is a function If defined for two n-dimensional probability vectors p=(p1,...,pn) and q=(q1,...,qn) as . For this generalized measure of entropy to have distance-like properties, especially symmetry, it is necessary for f to satisfy the following functional equation: for all x>0. In the present paper we determine all the convex solutions of this functional equation by proposing a way of generating all of them. In doing so, existing usual f-divergences are recovered and new ones are proposed.
JB Hiriart-Urruty, JE Martínez-Legaz\";s:2:\"dc\";a:5:{s:5:\"title\";s:71:\"Convex solutions of a functional equation arising in information theory\";s:7:\"creator\";s:35:\"JB Hiriart-UrrutyJE Martínez-Legaz\";s:10:\"identifier\";s:30:\"doi:10.1016/j.jmaa.2006.06.035\";s:6:\"source\";s:99:\"Journal of Mathematical Analysis and Applications, Vol. 328, No. 2. (15 April 2007), pp. 1309-1320.\";s:4:\"date\";s:25:\"2008-09-05T05:00:59-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"2007\";s:15:\"publicationname\";s:49:\"Journal of Mathematical Analysis and Applications\";s:6:\"volume\";s:3:\"328\";s:6:\"number\";s:1:\"2\";s:12:\"startingpage\";s:4:\"1309\";s:10:\"endingpage\";s:4:\"1320\";s:8:\"category\";s:38:\"convexityf-divergenceinformationtheory\";}s:7:\"summary\";s:762:\"Journal of Mathematical Analysis and Applications, Vol. 328, No. 2. (15 April 2007), pp. 1309-1320.

Given a convex function f defined for positive real variables, the so-called Csiszár f-divergence is a function If defined for two n-dimensional probability vectors p=(p1,...,pn) and q=(q1,...,qn) as . For this generalized measure of entropy to have distance-like properties, especially symmetry, it is necessary for f to satisfy the following functional equation: for all x>0. In the present paper we determine all the convex solutions of this functional equation by proposing a way of generating all of them. In doing so, existing usual f-divergences are recovered and new ones are proposed.
JB Hiriart-Urruty, JE Martínez-Legaz\";}i:12;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3190612\";s:5:\"title\";s:70:\"On a Functional Operation Generating Convex Functions, Part 1: Duality\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3190612\";s:11:\"description\";s:530:\"Journal of Optimization Theory and Applications, Vol. 126, No. 1. (1 July 2005), pp. 175-189.

The function , dom g, is jointly convex provided f is convex and nonpositive at the origin and provided g is concave and nonnegative on its effective domain. Its convex conjugate combines the convex conjugates of f and −g by means of the same composition law. The effective domain of f Δg is then studied, which will prove to be useful in Part 2 of this paper (algebraic properties, Ref. 1).
P Maréchal\";s:2:\"dc\";a:5:{s:5:\"title\";s:70:\"On a Functional Operation Generating Convex Functions, Part 1: Duality\";s:7:\"creator\";s:11:\"P Maréchal\";s:10:\"identifier\";s:29:\"doi:10.1007/s10957-005-2667-0\";s:6:\"source\";s:93:\"Journal of Optimization Theory and Applications, Vol. 126, No. 1. (1 July 2005), pp. 175-189.\";s:4:\"date\";s:25:\"2008-09-04T08:37:35-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"2005\";s:15:\"publicationname\";s:47:\"Journal of Optimization Theory and Applications\";s:6:\"volume\";s:3:\"126\";s:6:\"number\";s:1:\"1\";s:12:\"startingpage\";s:3:\"175\";s:10:\"endingpage\";s:3:\"189\";s:8:\"category\";s:22:\"convexitydualitytheory\";}s:7:\"summary\";s:530:\"Journal of Optimization Theory and Applications, Vol. 126, No. 1. (1 July 2005), pp. 175-189.

The function (f Δ g)(x,y) = g(y) f(x/g(y)), y ∈ dom g, is jointly convex provided f is convex and nonpositive at the origin and provided g is concave and nonnegative on its effective domain. Its convex conjugate combines the convex conjugates of f and −g by means of the same composition law. The effective domain of f Δ g is then studied, which will prove to be useful in Part 2 of this paper (algebraic properties, Ref. 1).
P Maréchal\";}i:13;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3152084\";s:5:\"title\";s:47:\"Structured machine learning: the next ten years\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3152084\";s:11:\"description\";s:695:\"Machine Learning

The field of inductive logic programming (ILP) has made steady progress, since the first ILP workshop in 1991, based on a balance of developments in theory, implementations and applications. More recently there has been an increased emphasis on Probabilistic ILP and the related fields of Statistical Relational Learning (SRL) and Structured Prediction. The goal of the current paper is to consider these emerging trends and chart out the strategic directions and open problems for the broader area of structured machine learning for the next 10 years.
Thomas Dietterich, Pedro Domingos, Lise Getoor, Stephen Muggleton, Prasad Tadepalli\";s:2:\"dc\";a:5:{s:5:\"title\";s:47:\"Structured machine learning: the next ten years\";s:7:\"creator\";s:75:\"Thomas DietterichPedro DomingosLise GetoorStephen MuggletonPrasad Tadepalli\";s:10:\"identifier\";s:29:\"doi:10.1007/s10994-008-5079-1\";s:6:\"source\";s:16:\"Machine Learning\";s:4:\"date\";s:25:\"2008-09-03T02:28:37-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationname\";s:16:\"Machine Learning\";s:8:\"category\";s:21:\"futurestructuresurvey\";}s:7:\"summary\";s:695:\"Machine Learning

The field of inductive logic programming (ILP) has made steady progress, since the first ILP workshop in 1991, based on a balance of developments in theory, implementations and applications. More recently there has been an increased emphasis on Probabilistic ILP and the related fields of Statistical Relational Learning (SRL) and Structured Prediction. The goal of the current paper is to consider these emerging trends and chart out the strategic directions and open problems for the broader area of structured machine learning for the next 10 years.
Thomas Dietterich, Pedro Domingos, Lise Getoor, Stephen Muggleton, Prasad Tadepalli\";}i:14;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3157932\";s:5:\"title\";s:51:\"Persistent Clustering and a Theorem of J. Kleinberg\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3157932\";s:11:\"description\";s:768:\"(16 Aug 2008)

We construct a framework for studying clustering algorithms, which includes two key ideas: persistence and functoriality. The first encodes the idea that the output of a clustering scheme should carry a multiresolution structure, the second the idea that one should be able to compare the results of clustering algorithms as one varies the data set, for example by adding points or by applying functions to it. We show that within this framework, one can prove a theorem analogous to one of J. Kleinberg, in which one obtains an existence and uniqueness theorem instead of a non-existence result. We explore further properties of this unique scheme; stability and convergence are established.
Gunnar Carlsson, Facundo Memoli\";s:2:\"dc\";a:4:{s:5:\"title\";s:51:\"Persistent Clustering and a Theorem of J. Kleinberg\";s:7:\"creator\";s:29:\"Gunnar CarlssonFacundo Memoli\";s:6:\"source\";s:13:\"(16 Aug 2008)\";s:4:\"date\";s:25:\"2008-08-27T08:13:53-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2008\";s:8:\"category\";s:36:\"category_theoryclusteringtheorytrees\";}s:7:\"summary\";s:768:\"(16 Aug 2008)

We construct a framework for studying clustering algorithms, which includes two key ideas: persistence and functoriality. The first encodes the idea that the output of a clustering scheme should carry a multiresolution structure, the second the idea that one should be able to compare the results of clustering algorithms as one varies the data set, for example by adding points or by applying functions to it. We show that within this framework, one can prove a theorem analogous to one of J. Kleinberg, in which one obtains an existence and uniqueness theorem instead of a non-existence result. We explore further properties of this unique scheme; stability and convergence are established.
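The unique scheme singled out by this line of work is, in essence, single linkage. A rough Ruby sketch of the multiresolution idea (everything here is illustrative, not from the paper): at each scale r, merge points within distance r of each other, so that increasing r coarsens the clustering.

# Single-linkage clusters at scale r, via union-find on all pairs within r.
def clusters(points, r)
  parent = (0...points.size).to_a
  find = lambda { |i| parent[i] == i ? i : (parent[i] = find.call(parent[i])) }
  points.each_index do |i|
    points.each_index do |j|
      parent[find.call(i)] = find.call(j) if (points[i] - points[j]).abs <= r
    end
  end
  points.each_index.group_by { |i| find.call(i) }.values
end

pts = [0.0, 0.1, 0.2, 5.0, 5.1]
puts clusters(pts, 0.5).inspect    # [[0, 1, 2], [3, 4]]
puts clusters(pts, 10.0).inspect   # a single cluster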
Gunnar Carlsson, Facundo Memoli\";}i:15;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3150176\";s:5:\"title\";s:5:\"Shark\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3150176\";s:11:\"description\";s:438:\"Journal of Machine Learning Research, Vol. 9 (June 2008), pp. 993-996.

SHARK is an object-oriented library for the design of adaptive systems. It comprises methods for single- and multi-objective optimization (e.g., evolutionary and gradient-based algorithms) as well as kernel-based methods, neural networks, and other machine learning techniques.
Christian Igel, Verena Heidrich-Meisner, Tobias Glasmachers\";s:2:\"dc\";a:4:{s:5:\"title\";s:5:\"Shark\";s:7:\"creator\";s:55:\"Christian IgelVerena Heidrich-MeisnerTobias Glasmachers\";s:6:\"source\";s:70:\"Journal of Machine Learning Research, Vol. 9 (June 2008), pp. 993-996.\";s:4:\"date\";s:25:\"2008-08-23T00:57:43-00:00\";}s:5:\"prism\";a:6:{s:15:\"publicationyear\";s:4:\"2008\";s:15:\"publicationname\";s:36:\"Journal of Machine Learning Research\";s:6:\"volume\";s:1:\"9\";s:12:\"startingpage\";s:3:\"993\";s:10:\"endingpage\";s:3:\"996\";s:8:\"category\";s:35:\"classificationcppregressionsoftware\";}s:7:\"summary\";s:438:\"Journal of Machine Learning Research, Vol. 9 (June 2008), pp. 993-996.

SHARK is an object-oriented library for the design of adaptive systems. It comprises methods for single- and multi-objective optimization (e.g., evolutionary and gradient-based algorithms) as well as kernel-based methods, neural networks, and other machine learning techniques.
Christian Igel, Verena Heidrich-Meisner, Tobias Glasmachers\";}i:16;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3110825\";s:5:\"title\";s:39:\"Tropical geometry of statistical models\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3110825\";s:11:\"description\";s:928:\"Proceedings of the National Academy of Sciences of the United States of America, Vol. 101, No. 46. (16 November 2004), pp. 16132-16137.

This article presents a unified mathematical framework for inference in graphical models, building on the observation that graphical models are algebraic varieties. From this geometric viewpoint, observations generated from a model are coordinates of a point in the variety, and the sum-product algorithm is an efficient tool for evaluating specific coordinates. Here, we address the question of how the solutions to various inference problems depend on the model parameters. The proposed answer is expressed in terms of tropical algebraic geometry. The Newton polytope of a statistical model plays a key role. Our results are applied to the hidden Markov model and the general Markov model on a binary tree.
Lior Pachter, Bernd Sturmfels\";s:2:\"dc\";a:5:{s:5:\"title\";s:39:\"Tropical geometry of statistical models\";s:7:\"creator\";s:27:\"Lior PachterBernd Sturmfels\";s:10:\"identifier\";s:27:\"doi:10.1073/pnas.0406010101\";s:6:\"source\";s:135:\"Proceedings of the National Academy of Sciences of the United States of America, Vol. 101, No. 46. (16 November 2004), pp. 16132-16137.\";s:4:\"date\";s:25:\"2008-08-12T05:43:22-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"2004\";s:15:\"publicationname\";s:79:\"Proceedings of the National Academy of Sciences of the United States of America\";s:6:\"volume\";s:3:\"101\";s:6:\"number\";s:2:\"46\";s:12:\"startingpage\";s:5:\"16132\";s:10:\"endingpage\";s:5:\"16137\";s:8:\"category\";s:62:\"algebraic_geometrygraphical_modelshmmmodellingstatisticstheory\";}s:7:\"summary\";s:928:\"Proceedings of the National Academy of Sciences of the United States of America, Vol. 101, No. 46. (16 November 2004), pp. 16132-16137.

This article presents a unified mathematical framework for inference in graphical models, building on the observation that graphical models are algebraic varieties. From this geometric viewpoint, observations generated from a model are coordinates of a point in the variety, and the sum-product algorithm is an efficient tool for evaluating specific coordinates. Here, we address the question of how the solutions to various inference problems depend on the model parameters. The proposed answer is expressed in terms of tropical algebraic geometry. The Newton polytope of a statistical model plays a key role. Our results are applied to the hidden Markov model and the general Markov model on a binary tree.
Lior Pachter, Bernd Sturmfels\";}i:17;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3110822\";s:5:\"title\";s:31:\"Phylogenetic Algebraic Geometry\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3110822\";s:11:\"description\";s:633:\"(2 Jul 2004)

Phylogenetic algebraic geometry is concerned with certain complex projective algebraic varieties derived from finite trees. Real positive points on these varieties represent probabilistic models of evolution. For small trees, we recover classical geometric objects, such as toric and determinantal varieties and their secant varieties, but larger trees lead to new and largely unexplored territory. This paper gives a self-contained introduction to this subject and offers numerous open problems for algebraic geometers.
Nicholas Eriksson, Kristian Ranestad, Bernd Sturmfels, Seth Sullivant\";s:2:\"dc\";a:4:{s:5:\"title\";s:31:\"Phylogenetic Algebraic Geometry\";s:7:\"creator\";s:63:\"Nicholas ErikssonKristian RanestadBernd SturmfelsSeth Sullivant\";s:6:\"source\";s:12:\"(2 Jul 2004)\";s:4:\"date\";s:25:\"2008-08-12T05:41:55-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2004\";s:8:\"category\";s:45:\"algebraic_geometrymodellingphylogeneticstrees\";}s:7:\"summary\";s:633:\"(2 Jul 2004)

Phylogenetic algebraic geometry is concerned with certain complex projective algebraic varieties derived from finite trees. Real positive points on these varieties represent probabilistic models of evolution. For small trees, we recover classical geometric objects, such as toric and determinantal varieties and their secant varieties, but larger trees lead to new and largely unexplored territory. This paper gives a self-contained introduction to this subject and offers numerous open problems for algebraic geometers.
Nicholas Eriksson, Kristian Ranestad, Bernd Sturmfels, Seth Sullivant\";}i:18;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3095794\";s:5:\"title\";s:36:\"Measures of the Value of Information\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3095794\";s:11:\"description\";s:150:\"Proceedings of the National Academy of Sciences of the United States of America, Vol. 42, No. 9. (1956), pp. 654-655.
John Mccarthy\";s:2:\"dc\";a:5:{s:5:\"title\";s:36:\"Measures of the Value of Information\";s:7:\"creator\";s:13:\"John Mccarthy\";s:10:\"identifier\";s:17:\"doi:10.2307/89735\";s:6:\"source\";s:117:\"Proceedings of the National Academy of Sciences of the United States of America, Vol. 42, No. 9. (1956), pp. 654-655.\";s:4:\"date\";s:25:\"2008-08-07T12:15:02-00:00\";}s:5:\"prism\";a:8:{s:15:\"publicationyear\";s:4:\"1956\";s:15:\"publicationname\";s:79:\"Proceedings of the National Academy of Sciences of the United States of America\";s:6:\"volume\";s:2:\"42\";s:6:\"number\";s:1:\"9\";s:12:\"startingpage\";s:3:\"654\";s:10:\"endingpage\";s:3:\"655\";s:9:\"publisher\";s:28:\"National Academy of Sciences\";s:8:\"category\";s:13:\"scoring_rules\";}s:7:\"summary\";s:150:\"Proceedings of the National Academy of Sciences of the United States of America, Vol. 42, No. 9. (1956), pp. 654-655.
John Mccarthy\";}i:19;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3093216\";s:5:\"title\";s:57:\"Scoring Rules and the Evaluation of Probability Assessors\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3093216\";s:11:\"description\";s:795:\"Journal of the American Statistical Association, Vol. 64, No. 327. (1969), pp. 1073-1078.

The personalistic theory of probability prescribes that personal probability assessments to be used in decision-making situations should correspond with the assessor\'s judgments. A payoff function which depends on the assessor\'s stated probabilities and on the event which actually occurs may be used (1) to keep the assessor honest or (2) to evaluate the assessor. It is shown that with the exception of a logarithmic payoff function, these two uses of payoff functions for assessors are not compatible. This conflict is explained in terms of the differences in the situations facing the assessor and the evaluator (the user of the probabilistic predictions).
Robert Winkler\";s:2:\"dc\";a:5:{s:5:\"title\";s:57:\"Scoring Rules and the Evaluation of Probability Assessors\";s:7:\"creator\";s:14:\"Robert Winkler\";s:10:\"identifier\";s:19:\"doi:10.2307/2283486\";s:6:\"source\";s:89:\"Journal of the American Statistical Association, Vol. 64, No. 327. (1969), pp. 1073-1078.\";s:4:\"date\";s:25:\"2008-08-07T00:51:11-00:00\";}s:5:\"prism\";a:8:{s:15:\"publicationyear\";s:4:\"1969\";s:15:\"publicationname\";s:47:\"Journal of the American Statistical Association\";s:6:\"volume\";s:2:\"64\";s:6:\"number\";s:3:\"327\";s:12:\"startingpage\";s:4:\"1073\";s:10:\"endingpage\";s:4:\"1078\";s:9:\"publisher\";s:32:\"American Statistical Association\";s:8:\"category\";s:13:\"scoring_rules\";}s:7:\"summary\";s:795:\"Journal of the American Statistical Association, Vol. 64, No. 327. (1969), pp. 1073-1078.

The personalistic theory of probability prescribes that personal probability assessments to be used in decision-making situations should correspond with the assessor\'s judgments. A payoff function which depends on the assessor\'s stated probabilities and on the event which actually occurs may be used (1) to keep the assessor honest or (2) to evaluate the assessor. It is shown that with the exception of a logarithmic payoff function, these two uses of payoff functions for assessors are not compatible. This conflict is explained in terms of the differences in the situations facing the assessor and the evaluator (the user of the probabilistic predictions).
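The logarithmic payoff case singled out above is easy to illustrate. A small Ruby sketch (hypothetical names, not the paper's notation): with payoff log r if the event occurs and log(1 − r) otherwise, the assessor's expected score is maximised by reporting the true probability, which is what keeps the assessor honest.

def expected_log_score(true_p, report)
  true_p * Math.log(report) + (1 - true_p) * Math.log(1 - report)
end

true_p = 0.7
grid = (1..99).map { |i| i / 100.0 }
puts grid.max_by { |r| expected_log_score(true_p, r) }   # => 0.7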
Robert Winkler\";}i:20;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3093106\";s:5:\"title\";s:39:\"Combinatorial Information Market Design\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3093106\";s:11:\"description\";s:1312:\"Information Systems Frontiers (January 2003), pp. 107-119.

Information markets are markets created to aggregate information. Such markets usually estimate a probability distribution over the values of certain variables, via bets on those values. Combinatorial information markets would aggregate information on the entire joint probability distribution over many variables, by allowing bets on all variable value combinations. To achieve this, we want to overcome the thin market and irrational participation problems that plague standard information markets. Scoring rules avoid these problems, but instead suffer from opinion pooling problems in the thick market case. Market scoring rules avoid all these problems, by becoming automated market makers in the thick market case and simple scoring rules in the thin market case. Logarithmic versions have cost and modularity advantages. After introducing market scoring rules, we consider several design issues, including how to represent variables to support both conditional and unconditional estimates, how to avoid becoming a money pump via errors in calculating probabilities, and how to ensure that users can cover their bets, without needlessly preventing them from using previous bets as collateral for future bets.
R Hanson\";s:2:\"dc\";a:4:{s:5:\"title\";s:39:\"Combinatorial Information Market Design\";s:7:\"creator\";s:8:\"R Hanson\";s:6:\"source\";s:58:\"Information Systems Frontiers (January 2003), pp. 107-119.\";s:4:\"date\";s:25:\"2008-08-07T00:06:05-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"2003\";s:15:\"publicationname\";s:29:\"Information Systems Frontiers\";s:4:\"issn\";s:9:\"1387-3326\";s:12:\"startingpage\";s:3:\"107\";s:10:\"endingpage\";s:3:\"119\";s:9:\"publisher\";s:8:\"Springer\";s:8:\"category\";s:31:\"prediction_marketsscoring_rules\";}s:7:\"summary\";s:1312:\"Information Systems Frontiers (January 2003), pp. 107-119.

Information markets are markets created to aggregate information. Such markets usually estimate a probability distribution over the values of certain variables, via bets on those values. Combinatorial information markets would aggregate information on the entire joint probability distribution over many variables, by allowing bets on all variable value combinations. To achieve this, we want to overcome the thin market and irrational participation problems that plague standard information markets. Scoring rules avoid these problems, but instead suffer from opinion pooling problems in the thick market case. Market scoring rules avoid all these problems, by becoming automated market makers in the thick market case and simple scoring rules in the thin market case. Logarithmic versions have cost and modularity advantages. After introducing market scoring rules, we consider several design issues, including how to represent variables to support both conditional and unconditional estimates, how to avoid becoming a money pump via errors in calculating probabilities, and how to ensure that users can cover their bets, without needlessly preventing them from using previous bets as collateral for future bets.
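The logarithmic market scoring rule mentioned in this abstract admits a compact automated-market-maker form; the cost-function presentation below is the standard one, with illustrative parameter names. The maker tracks an outstanding share vector q and charges C(q_new) − C(q_old), where C(q) = b log Σ_i exp(q_i / b); instantaneous prices are the corresponding softmax.

def lmsr_cost(q, b)
  b * Math.log(q.inject(0.0) { |s, qi| s + Math.exp(qi / b) })
end

def lmsr_prices(q, b)
  z = q.inject(0.0) { |s, qi| s + Math.exp(qi / b) }
  q.map { |qi| Math.exp(qi / b) / z }
end

b = 10.0
q = [0.0, 0.0]                            # two-outcome market, no trades yet
puts lmsr_prices(q, b).inspect            # [0.5, 0.5]
q2 = [q[0] + 5.0, q[1]]                   # buy 5 shares of outcome 1
puts(lmsr_cost(q2, b) - lmsr_cost(q, b))  # price paid for the trade
puts lmsr_prices(q2, b).inspect           # prices shift toward outcome 1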
R Hanson\";}i:21;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/1680584\";s:5:\"title\";s:75:\"Architectural styles and the design of network-based software architectures\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/1680584\";s:11:\"description\";s:73:\"(2000)

Chair: Richard N. Taylor
Roy Fielding\";s:2:\"dc\";a:4:{s:5:\"title\";s:75:\"Architectural styles and the design of network-based software architectures\";s:7:\"creator\";s:12:\"Roy Fielding\";s:6:\"source\";s:6:\"(2000)\";s:4:\"date\";s:25:\"2008-08-06T04:52:36-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2000\";s:8:\"category\";s:18:\"designrestsoftware\";}s:7:\"summary\";s:73:\"(2000)

Chair: Richard N. Taylor
Roy Fielding\";}i:22;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3089906\";s:5:\"title\";s:50:\"Generalized projections for non-negative functions\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3089906\";s:11:\"description\";s:102:\"Acta Mathematica Hungarica, Vol. 68, No. 1. (1 March 1995), pp. 161-186.
I Csiszár\";s:2:\"dc\";a:5:{s:5:\"title\";s:50:\"Generalized projections for non-negative functions\";s:7:\"creator\";s:10:\"I Csiszár\";s:10:\"identifier\";s:22:\"doi:10.1007/BF01874442\";s:6:\"source\";s:72:\"Acta Mathematica Hungarica, Vol. 68, No. 1. (1 March 1995), pp. 161-186.\";s:4:\"date\";s:25:\"2008-08-06T04:44:33-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"1995\";s:15:\"publicationname\";s:26:\"Acta Mathematica Hungarica\";s:6:\"volume\";s:2:\"68\";s:6:\"number\";s:1:\"1\";s:12:\"startingpage\";s:3:\"161\";s:10:\"endingpage\";s:3:\"186\";s:8:\"category\";s:52:\"bregmandistributionsdivergencef-divergenceprojection\";}s:7:\"summary\";s:102:\"Acta Mathematica Hungarica, Vol. 68, No. 1. (1 March 1995), pp. 161-186.
I Csiszár\";}i:23;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3079477\";s:5:\"title\";s:41:\"On preferred point geometry in statistics\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3079477\";s:11:\"description\";s:1075:\"Journal of Statistical Planning and Inference, Vol. 102, No. 2. (1 April 2002), pp. 229-245.

A brief synopsis of progress in differential geometry in statistics is followed by a note of some points of tension in the developing relationship between these disciplines. The preferred point nature of much of statistics is described and suggests the adoption of a corresponding geometry which reduces these tensions. Applications of preferred point geometry in statistics are then reviewed. These include extensions of statistical manifolds, a statistical interpretation of duality in Amari\'s expected geometry, and removal of the apparent incompatibility between (Kullback-Leibler) divergence and geodesic distance. Equivalences between a number of new expected preferred point geometries are established and a new characterisation of total flatness is shown. A preferred point geometry of influence analysis is briefly indicated. Technical details are kept to a minimum throughout to improve accessibility.
Frank Critchley, Paul Marriott, Mark Salmon\";s:2:\"dc\";a:5:{s:5:\"title\";s:41:\"On preferred point geometry in statistics\";s:7:\"creator\";s:39:\"Frank CritchleyPaul MarriottMark Salmon\";s:10:\"identifier\";s:33:\"doi:10.1016/S0378-3758(01)00115-X\";s:6:\"source\";s:92:\"Journal of Statistical Planning and Inference, Vol. 102, No. 2. (1 April 2002), pp. 229-245.\";s:4:\"date\";s:25:\"2008-08-04T05:03:58-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"2002\";s:15:\"publicationname\";s:45:\"Journal of Statistical Planning and Inference\";s:6:\"volume\";s:3:\"102\";s:6:\"number\";s:1:\"2\";s:12:\"startingpage\";s:3:\"229\";s:10:\"endingpage\";s:3:\"245\";s:8:\"category\";s:30:\"divergenceinformation_geometry\";}s:7:\"summary\";s:1075:\"Journal of Statistical Planning and Inference, Vol. 102, No. 2. (1 April 2002), pp. 229-245.

A brief synopsis of progress in differential geometry in statistics is followed by a note of some points of tension in the developing relationship between these disciplines. The preferred point nature of much of statistics is described and suggests the adoption of a corresponding geometry which reduces these tensions. Applications of preferred point geometry in statistics are then reviewed. These include extensions of statistical manifolds, a statistical interpretation of duality in Amari\'s expected geometry, and removal of the apparent incompatibility between (Kullback-Leibler) divergence and geodesic distance. Equivalences between a number of new expected preferred point geometries are established and a new characterisation of total flatness is shown. A preferred point geometry of influence analysis is briefly indicated. Technical details are kept to a minimum throughout to improve accessibility.
Frank Critchley, Paul Marriott, Mark Salmon\";}i:24;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/2805124\";s:5:\"title\";s:44:\"ECONOMICS: The Promise of Prediction Markets\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/2805124\";s:11:\"description\";s:441:\"Science, Vol. 320, No. 5878. (16 May 2008), pp. 877-878.

Kenneth Arrow, Robert Forsythe, Michael Gorham, Robert Hahn, Robin Hanson, John Ledyard, Saul Levmore, Robert Litan, Paul Milgrom, Forrest Nelson, George Neumann, Marco Ottaviani, Thomas Schelling, Robert Shiller, Vernon Smith, Erik Snowberg, Cass Sunstein, Paul Tetlock, Philip Tetlock, Hal Varian, Justin Wolfers, Eric Zitzewitz\";s:2:\"dc\";a:5:{s:5:\"title\";s:44:\"ECONOMICS: The Promise of Prediction Markets\";s:7:\"creator\";s:288:\"Kenneth ArrowRobert ForsytheMichael GorhamRobert HahnRobin HansonJohn LedyardSaul LevmoreRobert LitanPaul MilgromForrest NelsonGeorge NeumannMarco OttavianiThomas SchellingRobert ShillerVernon SmithErik SnowbergCass SunsteinPaul TetlockPhilip TetlockHal VarianJustin WolfersEric Zitzewitz\";s:10:\"identifier\";s:27:\"doi:10.1126/science.1157679\";s:6:\"source\";s:56:\"Science, Vol. 320, No. 5878. (16 May 2008), pp. 877-878.\";s:4:\"date\";s:25:\"2008-07-28T23:53:14-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"2008\";s:15:\"publicationname\";s:7:\"Science\";s:6:\"volume\";s:3:\"320\";s:6:\"number\";s:4:\"5878\";s:12:\"startingpage\";s:3:\"877\";s:10:\"endingpage\";s:3:\"878\";s:8:\"category\";s:35:\"economicspoliticsprediction_markets\";}s:7:\"summary\";s:441:\"Science, Vol. 320, No. 5878. (16 May 2008), pp. 877-878.

Kenneth Arrow, Robert Forsythe, Michael Gorham, Robert Hahn, Robin Hanson, John Ledyard, Saul Levmore, Robert Litan, Paul Milgrom, Forrest Nelson, George Neumann, Marco Ottaviani, Thomas Schelling, Robert Shiller, Vernon Smith, Erik Snowberg, Cass Sunstein, Paul Tetlock, Philip Tetlock, Hal Varian, Justin Wolfers, Eric Zitzewitz\";}i:25;a:7:{s:5:\"about\";s:51:\"http://www.citeulike.org/user/mdreid/article/432231\";s:5:\"title\";s:63:\"Correlated Equilibrium as an Expression of Bayesian Rationality\";s:4:\"link\";s:51:\"http://www.citeulike.org/user/mdreid/article/432231\";s:11:\"description\";s:1204:\"Econometrica, Vol. 55, No. 1. (1987), pp. 1-18.

Correlated equilibrium is formulated in a manner that does away with the dichotomy usually perceived between the \"Bayesian\" and the \"game-theoretic\" view of the world. From the Bayesian viewpoint, probabilities should be assignable to everything, including the prospect of a player choosing a certain strategy in a certain game. The so-called \"game-theoretic\" viewpoint holds that probabilities can only be assigned to events not governed by rational decision makers; for the latter, one must substitute an equilibrium (or other game-theoretic) notion. The current formulation synthesizes the two viewpoints: Correlated equilibrium is viewed as the result of Bayesian rationality; the equilibrium condition appears as a simple maximization of utility on the part of each player, given his information. A feature of this approach is that it does not require explicit randomization on the part of the players. Each player always chooses a definite pure strategy, with no attempt to randomize; the probabilistic nature of the strategies reflects the uncertainty of other players about his choice. Examples are given.
Robert Aumann\";s:2:\"dc\";a:5:{s:5:\"title\";s:63:\"Correlated Equilibrium as an Expression of Bayesian Rationality\";s:7:\"creator\";s:13:\"Robert Aumann\";s:10:\"identifier\";s:19:\"doi:10.2307/1911154\";s:6:\"source\";s:47:\"Econometrica, Vol. 55, No. 1. (1987), pp. 1-18.\";s:4:\"date\";s:25:\"2008-07-28T03:37:41-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"1987\";s:15:\"publicationname\";s:12:\"Econometrica\";s:6:\"volume\";s:2:\"55\";s:6:\"number\";s:1:\"1\";s:12:\"startingpage\";s:1:\"1\";s:10:\"endingpage\";s:2:\"18\";s:8:\"category\";s:32:\"bayesiangame_theoryscoring_rules\";}s:7:\"summary\";s:1204:\"Econometrica, Vol. 55, No. 1. (1987), pp. 1-18.

Correlated equilibrium is formulated in a manner that does away with the dichotomy usually perceived between the \"Bayesian\" and the \"game-theoretic\" view of the world. From the Bayesian viewpoint, probabilities should be assignable to everything, including the prospect of a player choosing a certain strategy in a certain game. The so-called \"game-theoretic\" viewpoint holds that probabilities can only be assigned to events not governed by rational decision makers; for the latter, one must substitute an equilibrium (or other game-theoretic) notion. The current formulation synthesizes the two viewpoints: Correlated equilibrium is viewed as the result of Bayesian rationality; the equilibrium condition appears as a simple maximization of utility on the part of each player, given his information. A feature of this approach is that it does not require explicit randomization on the part of the players. Each player always chooses a definite pure strategy, with no attempt to randomize; the probabilistic nature of the strategies reflects the uncertainty of other players about his choice. Examples are given.
Robert Aumann\";}i:26;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3038701\";s:5:\"title\";s:53:\"An Efficient Algorithm for Bandit Linear Optimization\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3038701\";s:11:\"description\";s:675:\"(21 February 2008)

We introduce an efficient algorithm for the problem of online linear optimization in the bandit setting which achieves the optimal O*(√T) regret. The setting is a natural generalization of the non-stochastic multi-armed bandit problem, and the existence of an efficient optimal algorithm has been posed as an open problem in a number of recent papers. We show how the difficulties encountered by previous approaches are overcome by the use of a self-concordant potential function. Our approach presents a novel connection between online learning and interior point methods.
Jacob Abernethy, Elad Hazan, Alexander Rakhlin\";s:2:\"dc\";a:4:{s:5:\"title\";s:53:\"An Efficient Algorithm for Bandit Linear Optimization\";s:7:\"creator\";s:42:\"Jacob AbernethyElad HazanAlexander Rakhlin\";s:6:\"source\";s:18:\"(21 February 2008)\";s:4:\"date\";s:25:\"2008-07-24T08:32:05-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2008\";s:8:\"category\";s:60:\"algorithmbanditbregman_divergenceconvexityoptimisationtheory\";}s:7:\"summary\";s:675:\"(21 February 2008)

We introduce an efficient algorithm for the problem of online linear optimization in the bandit setting which achieves the optimal O*(√T) regret. The setting is a natural generalization of the non-stochastic multi-armed bandit problem, and the existence of an efficient optimal algorithm has been posed as an open problem in a number of recent papers. We show how the difficulties encountered by previous approaches are overcome by the use of a self-concordant potential function. Our approach presents a novel connection between online learning and interior point methods.
Jacob Abernethy, Elad Hazan, Alexander Rakhlin\";}i:27;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3038216\";s:5:\"title\";s:69:\"Data Spectroscopy: Eigenspace of Convolution Operators and Clustering\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3038216\";s:11:\"description\";s:1533:\"(23 Jul 2008)

This paper focuses on obtaining clustering information in a distribution when i.i.d. data are given. First, we develop theoretical results for understanding and using clustering information contained in the eigenvectors of data adjacency matrices based on a radial kernel function (with a sufficiently fast tail decay). We provide population analyses to give insights into which eigenvectors should be used and when the clustering information for the distribution can be recovered from the data. In particular, we learned that top eigenvectors do not contain all the clustering information. Second, we use heuristics from these analyses to design the Data Spectroscopic clustering (DaSpec) algorithm that uses properly selected top eigenvectors, determines the number of clusters, gives data labels, and provides a classification rule for future data, all based on only one eigen decomposition. Our findings not only extend and go beyond the intuitions underlying existing spectral techniques (e.g. spectral clustering and Kernel Principal Components Analysis), but also provide insights about their usability and modes of failure. Simulation studies and experiments on real world data are conducted to show the promise of our proposed data spectroscopy clustering algorithm relative to k-means and one spectral method. In particular, DaSpec seems to be able to handle unbalanced groups and recover clusters of different shapes better than competing methods.
Tao Shi, Mikhail Belkin, Bin Yu\";s:2:\"dc\";a:4:{s:5:\"title\";s:69:\"Data Spectroscopy: Eigenspace of Convolution Operators and Clustering\";s:7:\"creator\";s:27:\"Tao ShiMikhail BelkinBin Yu\";s:6:\"source\";s:13:\"(23 Jul 2008)\";s:4:\"date\";s:25:\"2008-07-24T04:56:35-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2008\";s:8:\"category\";s:24:\"clusteringspectraltheory\";}s:7:\"summary\";s:1533:\"(23 Jul 2008)

This paper focuses on obtaining clustering information in a distribution when i.i.d. data are given. First, we develop theoretical results for understanding and using clustering information contained in the eigenvectors of data adjacency matrices based on a radial kernel function (with a sufficiently fast tail decay). We provide population analyses to give insights into which eigenvectors should be used and when the clustering information for the distribution can be recovered from the data. In particular, we learned that top eigenvectors do not contain all the clustering information. Second, we use heuristics from these analyses to design the Data Spectroscopic clustering (DaSpec) algorithm that uses properly selected top eigenvectors, determines the number of clusters, gives data labels, and provides a classification rule for future data, all based on only one eigen decomposition. Our findings not only extend and go beyond the intuitions underlying existing spectral techniques (e.g. spectral clustering and Kernel Principal Components Analysis), but also provide insights about their usability and modes of failure. Simulation studies and experiments on real world data are conducted to show the promise of our proposed data spectroscopy clustering algorithm relative to k-means and one spectral method. In particular, DaSpec seems to be able to handle unbalanced groups and recover clusters of different shapes better than competing methods.
Tao Shi, Mikhail Belkin, Bin Yu\";}i:28;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3026076\";s:5:\"title\";s:13:\"Elicitability\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3026076\";s:11:\"description\";s:919:\"(July 2008)

We investigate the problem of truthfully eliciting an expert’s assessment of a property of a probability distribution, where a property is any real-valued function of the distribution like mean or variance. We show that not all properties are elicitable; for example, the mean is elicitable and the variance is not. For those that are elicitable, we provide a representation theorem characterizing all payment (or “score”) functions that induce truthful revelation. We also consider the elicitation of sets of properties. We then observe that properties can always be inferred from sets of elicitable properties. This naturally suggests the concept of elicitation complexity; the elicitation complexity of a property is the minimal size of such a set implying the property. Finally we discuss applications to prediction markets.
Nicolas Lambert, David Pennock, Yoav Shoham\";s:2:\"dc\";a:4:{s:5:\"title\";s:13:\"Elicitability\";s:7:\"creator\";s:39:\"Nicolas LambertDavid PennockYoav Shoham\";s:6:\"source\";s:11:\"(July 2008)\";s:4:\"date\";s:25:\"2008-07-22T05:13:12-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2008\";s:8:\"category\";s:74:\"integral_representationprediction_marketsprobabilityscoring_rulesweighting\";}s:7:\"summary\";s:919:\"(July 2008)

We investigate the problem of truthfully eliciting an expert’s assessment of a property of a probability distribution, where a property is any real-valued function of the distribution like mean or variance. We show that not all properties are elicitable; for example, the mean is elicitable and the variance is not. For those that are elicitable, we provide a representation theorem characterizing all payment (or “score”) functions that induce truthful revelation. We also consider the elicitation of sets of properties. We then observe that properties can always be inferred from sets of elicitable properties. This naturally suggests the concept of elicitation complexity; the elicitation complexity of a property is the minimal size of such a set implying the property. Finally we discuss applications to prediction markets.
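As a concrete instance of the elicitability of the mean: the squared-error score does the job, since the expected squared error is minimised exactly at the mean. A toy Ruby check (names are illustrative):

samples = [1.0, 2.0, 2.0, 3.0, 7.0]
mean = samples.inject(:+) / samples.size

risk = lambda { |r| samples.inject(0.0) { |s, x| s + (x - r)**2 } / samples.size }

grid = (0..100).map { |i| i / 10.0 }
puts [mean, grid.min_by { |r| risk.call(r) }].inspect   # both 3.0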
Nicolas Lambert, David Pennock, Yoav Shoham\";}i:29;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3025859\";s:5:\"title\";s:50:\"Evaluating Probabilities: Asymmetric Scoring Rules\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3025859\";s:11:\"description\";s:1070:\"Management Science, Vol. 40, No. 11. (1994), pp. 1395-1405.

Proper scoring rules are overall evaluation measures that reward accurate probabilities. Specific rules encountered in the literature and used in practice are invariably symmetric in the sense that the expected score for a perfectly-calibrated probability assessor (or model generating probabilities) is minimized at a probability of one-half. A family of asymmetric scoring rules that provide better measures of the degree of skill inherent in the probabilities and render scores that are more comparable in different situations is developed here. One member of this family, a quadratic asymmetric rule, is applied to evaluate an extensive set of precipitation probability forecasts from the U.S. National Weather Service. Connections to previous characterizations of proper scoring rules are investigated, and some relevant issues pertaining to the design of specific asymmetric rules for particular inferential and decision-making problems are discussed briefly.
Robert Winkler\";s:2:\"dc\";a:5:{s:5:\"title\";s:50:\"Evaluating Probabilities: Asymmetric Scoring Rules\";s:7:\"creator\";s:14:\"Robert Winkler\";s:10:\"identifier\";s:19:\"doi:10.2307/2632926\";s:6:\"source\";s:59:\"Management Science, Vol. 40, No. 11. (1994), pp. 1395-1405.\";s:4:\"date\";s:25:\"2008-07-21T23:27:36-00:00\";}s:5:\"prism\";a:8:{s:15:\"publicationyear\";s:4:\"1994\";s:15:\"publicationname\";s:18:\"Management Science\";s:6:\"volume\";s:2:\"40\";s:6:\"number\";s:2:\"11\";s:12:\"startingpage\";s:4:\"1395\";s:10:\"endingpage\";s:4:\"1405\";s:9:\"publisher\";s:7:\"INFORMS\";s:8:\"category\";s:34:\"asymmetricprobabilityscoring_rules\";}s:7:\"summary\";s:1070:\"Management Science, Vol. 40, No. 11. (1994), pp. 1395-1405.

Proper scoring rules are overall evaluation measures that reward accurate probabilities. Specific rules encountered in the literature and used in practice are invariably symmetric in the sense that the expected score for a perfectly-calibrated probability assessor (or model generating probabilities) is minimized at a probability of one-half. A family of asymmetric scoring rules that provide better measures of the degree of skill inherent in the probabilities and render scores that are more comparable in different situations is developed here. One member of this family, a quadratic asymmetric rule, is applied to evaluate an extensive set of precipitation probability forecasts from the U.S. National Weather Service. Connections to previous characterizations of proper scoring rules are investigated, and some relevant issues pertaining to the design of specific asymmetric rules for particular inferential and decision-making problems are discussed briefly.
Robert Winkler\";}i:30;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3025857\";s:5:\"title\";s:49:\"Scoring rules and the evaluation of probabilities\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3025857\";s:11:\"description\";s:865:\"TEST, Vol. 5, No. 1. (28 March 1990), pp. 1-60.

In Bayesian inference and decision analysis, inferences and predictions are inherently probabilistic in nature. Scoring rules, which involve the computation of a score based on probability forecasts and what actually occurs, can be used to evaluate probabilities and to provide appropriate incentives for “good” probabilities. This paper reviews scoring rules and some related measures for evaluating probabilities, including decompositions of scoring rules and attributes of “goodness” of probabilities, comparability of scores, and the design of scoring rules for specific inferential and decision-making problems.
R Winkler, Javier Muñoz, José Cervera, José Bernardo, Gail Blattenberger, Joseph Kadane, Dennis Lindley, Allan Murphy, Robert Oliver, David Ríos-Insua\";s:2:\"dc\";a:5:{s:5:\"title\";s:49:\"Scoring rules and the evaluation of probabilities\";s:7:\"creator\";s:136:\"R WinklerJavier MuñozJosé CerveraJosé BernardoGail BlattenbergerJoseph KadaneDennis LindleyAllan MurphyRobert OliverDavid Ríos-Insua\";s:10:\"identifier\";s:22:\"doi:10.1007/BF02562681\";s:6:\"source\";s:47:\"TEST, Vol. 5, No. 1. (28 March 1990), pp. 1-60.\";s:4:\"date\";s:25:\"2008-07-21T23:23:11-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"1990\";s:15:\"publicationname\";s:4:\"TEST\";s:6:\"volume\";s:1:\"5\";s:6:\"number\";s:1:\"1\";s:12:\"startingpage\";s:1:\"1\";s:10:\"endingpage\";s:2:\"60\";s:8:\"category\";s:24:\"probabilityscoring_rules\";}s:7:\"summary\";s:865:\"TEST, Vol. 5, No. 1. (28 March 1990), pp. 1-60.

In Bayesian inference and decision analysis, inferences and predictions are inherently probabilistic in nature. Scoring rules, which involve the computation of a score based on probability forecasts and what actually occurs, can be used to evaluate probabilities and to provide appropriate incentives for “good” probabilities. This paper reviews scoring rules and some related measures for evaluating probabilities, including decompositions of scoring rules and attributes of “goodness” of probabilities, comparability of scores, and the design of scoring rules for specific inferential and decision-making problems.
R Winkler, Javier Muñoz, José Cervera, José Bernardo, Gail Blattenberger, Joseph Kadane, Dennis Lindley, Allan Murphy, Robert Oliver, David Ríos-Insua\";}i:31;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/2815247\";s:5:\"title\";s:31:\"Learning Low-Density Separators\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/2815247\";s:11:\"description\";s:880:\"(19 May 2008)

We define a novel, basic, unsupervised learning problem - learning the lowest density homogeneous hyperplane separator of an unknown probability distribution. This task is relevant to several problems in machine learning, such as semi-supervised learning and clustering stability. We investigate the question of existence of a universally consistent algorithm for this problem. We propose two natural learning paradigms and prove that, on input unlabeled random samples generated by any member of a rich family of distributions, they are guaranteed to converge to the optimal separator for that distribution. We complement this result by showing that no learning algorithm for our task can achieve uniform learning rates (that are independent of the data generating distribution).
Shai Ben-David, Tyler Lu, David Pal, Miroslava Sotakova\";s:2:\"dc\";a:4:{s:5:\"title\";s:31:\"Learning Low-Density Separators\";s:7:\"creator\";s:49:\"Shai Ben-DavidTyler LuDavid PalMiroslava Sotakova\";s:6:\"source\";s:13:\"(19 May 2008)\";s:4:\"date\";s:25:\"2008-07-21T07:20:59-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2008\";s:8:\"category\";s:18:\"density_estimation\";}s:7:\"summary\";s:880:\"(19 May 2008)

We define a novel, basic, unsupervised learning problem - learning the lowest density homogeneous hyperplane separator of an unknown probability distribution. This task is relevant to several problems in machine learning, such as semi-supervised learning and clustering stability. We investigate the question of existence of a universally consistent algorithm for this problem. We propose two natural learning paradigms and prove that, on input unlabeled random samples generated by any member of a rich family of distributions, they are guaranteed to converge to the optimal separator for that distribution. We complement this result by showing that no learning algorithm for our task can achieve uniform learning rates (that are independent of the data generating distribution).
Shai Ben-David, Tyler Lu, David Pal, Miroslava Sotakova\";}i:32;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3023465\";s:5:\"title\";s:51:\"An Augmented PAC Model for Semi-Supervised Learning\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3023465\";s:11:\"description\";s:72:\"(2006), pp. 397-420.
Maria-Florina Balcan, Avrim Blum\";s:2:\"dc\";a:4:{s:5:\"title\";s:51:\"An Augmented PAC Model for Semi-Supervised Learning\";s:7:\"creator\";s:30:\"Maria-Florina BalcanAvrim Blum\";s:6:\"source\";s:20:\"(2006), pp. 397-420.\";s:4:\"date\";s:25:\"2008-07-21T05:31:07-00:00\";}s:5:\"prism\";a:5:{s:15:\"publicationyear\";s:4:\"2006\";s:12:\"startingpage\";s:3:\"397\";s:10:\"endingpage\";s:3:\"420\";s:9:\"publisher\";s:9:\"MIT Press\";s:8:\"category\";s:42:\"boundscompatibilitypacsemisupervisedtheory\";}s:7:\"summary\";s:72:\"(2006), pp. 397-420.
Maria-Florina Balcan, Avrim Blum\";}i:33;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3023260\";s:5:\"title\";s:34:\"Kernel methods in machine learning\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3023260\";s:11:\"description\";s:789:\"Annals of Statistics, Vol. 36, No. 3. (2008), pp. 1171-1220.

We review machine learning methods employing positive definite kernels. These methods formulate learning and estimation problems in a reproducing kernel Hilbert space (RKHS) of functions defined on the data domain, expanded in terms of a kernel. Working in linear spaces of functions has the benefit of facilitating the construction and analysis of learning algorithms while at the same time allowing large classes of functions. The latter include nonlinear functions as well as functions defined on nonvectorial data. We cover a wide range of methods, ranging from binary classifiers to sophisticated methods for estimation with structured data.
Thomas Hofmann, Bernhard Schölkopf, Alexander Smola\";s:2:\"dc\";a:4:{s:5:\"title\";s:34:\"Kernel methods in machine learning\";s:7:\"creator\";s:48:\"Thomas HofmannBernhard SchölkopfAlexander Smola\";s:6:\"source\";s:60:\"Annals of Statistics, Vol. 36, No. 3. (2008), pp. 1171-1220.\";s:4:\"date\";s:25:\"2008-07-20T23:07:43-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"2008\";s:15:\"publicationname\";s:20:\"Annals of Statistics\";s:6:\"volume\";s:2:\"36\";s:6:\"number\";s:1:\"3\";s:12:\"startingpage\";s:4:\"1171\";s:10:\"endingpage\";s:4:\"1220\";s:8:\"category\";s:9:\"kernelsvm\";}s:7:\"summary\";s:789:\"Annals of Statistics, Vol. 36, No. 3. (2008), pp. 1171-1220.

We review machine learning methods employing positive definite kernels. These methods formulate learning and estimation problems in a reproducing kernel Hilbert space (RKHS) of functions defined on the data domain, expanded in terms of a kernel. Working in linear spaces of functions has the benefit of facilitating the construction and analysis of learning algorithms while at the same time allowing large classes of functions. The latter include nonlinear functions as well as functions defined on nonvectorial data. We cover a wide range of methods, ranging from binary classifiers to sophisticated methods for estimation with structured data.
Thomas Hofmann, Bernhard Schölkopf, Alexander Smola\";}i:34;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3016243\";s:5:\"title\";s:50:\"Classifier Technology and the Illusion of Progress\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3016243\";s:11:\"description\";s:890:\"(19 Jun 2006)

A great many tools have been developed for supervised classification, ranging from early methods such as linear discriminant analysis through to modern developments such as neural networks and support vector machines. A large number of comparative studies have been conducted in attempts to establish the relative superiority of these methods. This paper argues that these comparisons often fail to take into account important aspects of real problems, so that the apparent superiority of more sophisticated methods may be something of an illusion. In particular, simple methods typically yield performance almost as good as more sophisticated methods, to the extent that the difference in performance may be swamped by other sources of uncertainty that generally are not considered in the classical supervised classification paradigm.
David Hand\";s:2:\"dc\";a:4:{s:5:\"title\";s:50:\"Classifier Technology and the Illusion of Progress\";s:7:\"creator\";s:10:\"David Hand\";s:6:\"source\";s:13:\"(19 Jun 2006)\";s:4:\"date\";s:25:\"2008-07-18T04:20:36-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2006\";s:8:\"category\";s:44:\"classificationempiricismevaluationsimplicity\";}s:7:\"summary\";s:890:\"(19 Jun 2006)

A great many tools have been developed for supervised classification, ranging from early methods such as linear discriminant analysis through to modern developments such as neural networks and support vector machines. A large number of comparative studies have been conducted in attempts to establish the relative superiority of these methods. This paper argues that these comparisons often fail to take into account important aspects of real problems, so that the apparent superiority of more sophisticated methods may be something of an illusion. In particular, simple methods typically yield performance almost as good as more sophisticated methods, to the extent that the difference in performance may be swamped by other sources of uncertainty that generally are not considered in the classical supervised classification paradigm.
David Hand\";}i:35;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3016115\";s:5:\"title\";s:29:\"Local Rademacher complexities\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3016115\";s:11:\"description\";s:621:\"Annals of Statistics, Vol. 33, No. 4. (2005), pp. 1497-1537.

We propose new bounds on the error of learning algorithms in terms of a data-dependent notion of complexity. The estimates we establish give optimal rates and are based on a local and empirical version of Rademacher averages, in the sense that the Rademacher averages are computed from the data, on a subset of functions with small empirical error. We present some applications to classification and prediction with convex function classes, and with kernel classes in particular.
Peter Bartlett, Olivier Bousquet, Shahar Mendelson\";s:2:\"dc\";a:4:{s:5:\"title\";s:29:\"Local Rademacher complexities\";s:7:\"creator\";s:46:\"Peter BartlettOlivier BousquetShahar Mendelson\";s:6:\"source\";s:60:\"Annals of Statistics, Vol. 33, No. 4. (2005), pp. 1497-1537.\";s:4:\"date\";s:25:\"2008-07-18T01:52:53-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"2005\";s:15:\"publicationname\";s:20:\"Annals of Statistics\";s:6:\"volume\";s:2:\"33\";s:6:\"number\";s:1:\"4\";s:12:\"startingpage\";s:4:\"1497\";s:10:\"endingpage\";s:4:\"1537\";s:8:\"category\";s:45:\"boundscomplexitydata_dependenterrorinequality\";}s:7:\"summary\";s:621:\"Annals of Statistics, Vol. 33, No. 4. (2005), pp. 1497-1537.

We propose new bounds on the error of learning algorithms in terms of a data-dependent notion of complexity. The estimates we establish give optimal rates and are based on a local and empirical version of Rademacher averages, in the sense that the Rademacher averages are computed from the data, on a subset of functions with small empirical error. We present some applications to classification and prediction with convex function classes, and with kernel classes in particular.
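The data-dependent quantity behind this abstract can be estimated directly from a sample. A rough Ruby sketch of an empirical Rademacher average for a toy function class (the class and data are made up for illustration): draw random signs σ_i, take the supremum over the class of (1/n) Σ_i σ_i f(x_i), and average over draws.

srand 1
xs = Array.new(50) { rand * 2 - 1 }   # data in [-1, 1]
fclass = [lambda { |x| x }, lambda { |x| x.abs }, lambda { |x| x**2 }]

trials = 1000
total = 0.0
trials.times do
  sigma = Array.new(xs.size) { rand < 0.5 ? -1 : 1 }
  sup = fclass.map { |f|
    xs.each_index.inject(0.0) { |s, i| s + sigma[i] * f.call(xs[i]) } / xs.size
  }.max
  total += sup
end
puts total / trials   # small for this simple class; shrinks as the sample grows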
Peter Bartlett, Olivier Bousquet, Shahar Mendelson\";}i:36;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/3007211\";s:5:\"title\";s:53:\"Random projection trees and low dimensional manifolds\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/3007211\";s:11:\"description\";s:68:\"(2008), pp. 537-546.
Sanjoy Dasgupta, Yoav Freund\";s:2:\"dc\";a:5:{s:5:\"title\";s:53:\"Random projection trees and low dimensional manifolds\";s:7:\"creator\";s:26:\"Sanjoy DasguptaYoav Freund\";s:10:\"identifier\";s:27:\"doi:10.1145/1374376.1374452\";s:6:\"source\";s:20:\"(2008), pp. 537-546.\";s:4:\"date\";s:25:\"2008-07-16T02:55:21-00:00\";}s:5:\"prism\";a:5:{s:15:\"publicationyear\";s:4:\"2008\";s:12:\"startingpage\";s:3:\"537\";s:10:\"endingpage\";s:3:\"546\";s:9:\"publisher\";s:3:\"ACM\";s:8:\"category\";s:38:\"dimensional_reductionrandom_projection\";}s:7:\"summary\";s:68:\"(2008), pp. 537-546.
Sanjoy Dasgupta, Yoav Freund\";}i:37;a:7:{s:5:\"about\";s:52:\"http://www.citeulike.org/user/mdreid/article/2989074\";s:5:\"title\";s:65:\"Graphical models, exponential families, and variational inference\";s:4:\"link\";s:52:\"http://www.citeulike.org/user/mdreid/article/2989074\";s:11:\"description\";s:1388:\"(17 September 2003)

The formalism of probabilistic graphical models provides a unifying framework for the development of large-scale multivariate statistical models. Graphical models have become a focus of research in many applied statistical and computational fields, including bioinformatics, information theory, signal and image processing, information retrieval and machine learning. Many problems that arise in specific instances—including the key problems of computing marginals and modes of probability distributions—are best studied in the general setting. Working with exponential family representations, and exploiting the conjugate duality between the cumulant generating function and the entropy for exponential families, we develop general variational representations of the problems of computing marginal probabilities and modes. We describe how a wide variety of known computational algorithms—including mean field methods and cluster variational techniques—can be understood in terms of approximations of these variational representations. We also present novel convex relaxations based on the variational framework. The variational approach provides a complementary alternative to Markov chain Monte Carlo as a general source of approximation methods for inference in large-scale statistical models.
Martin Wainwright, Michael Jordan\";s:2:\"dc\";a:4:{s:5:\"title\";s:65:\"Graphical models, exponential families, and variational inference\";s:7:\"creator\";s:31:\"Martin WainwrightMichael Jordan\";s:6:\"source\";s:19:\"(17 September 2003)\";s:4:\"date\";s:25:\"2008-07-11T16:51:11-00:00\";}s:5:\"prism\";a:2:{s:15:\"publicationyear\";s:4:\"2003\";s:8:\"category\";s:7:\"duality\";}s:7:\"summary\";s:1388:\"(17 September 2003)

The formalism of probabilistic graphical models provides a unifying framework for the development of large-scale multivariate statistical models. Graphical models have become a focus of research in many applied statistical and computational fields, including bioinformatics, information theory, signal and image processing, information retrieval and machine learning. Many problems that arise in specific instances—including the key problems of computing marginals and modes of probability distributions—are best studied in the general setting. Working with exponential family representations, and exploiting the conjugate duality between the cumulant generating function and the entropy for exponential families, we develop general variational representations of the problems of computing marginal probabilities and modes. We describe how a wide variety of known computational algorithms—including mean field methods and cluster variational techniques—can be understood in terms of approximations of these variational representations. We also present novel convex relaxations based on the variational framework. The variational approach provides a complementary alternative to Markov chain Monte Carlo as a general source of approximation methods for inference in large-scale statistical models.
Martin Wainwright, Michael Jordan\";}i:38;a:7:{s:5:\"about\";s:51:\"http://www.citeulike.org/user/mdreid/article/600676\";s:5:\"title\";s:49:\"A Neyman-Pearson approach to statistical learning\";s:4:\"link\";s:51:\"http://www.citeulike.org/user/mdreid/article/600676\";s:11:\"description\";s:1376:\"Information Theory, IEEE Transactions on, Vol. 51, No. 11. (2005), pp. 3806-3819.

The Neyman-Pearson (NP) approach to hypothesis testing is useful in situations where different types of error have different consequences or a priori probabilities are unknown. For any α > 0, the NP lemma specifies the most powerful test of size α, but assumes the distributions for each hypothesis are known or (in some cases) the likelihood ratio is monotonic in an unknown parameter. This paper investigates an extension of NP theory to situations in which one has no knowledge of the underlying distributions except for a collection of independent and identically distributed (i.i.d.) training examples from each hypothesis. Building on a \"fundamental lemma\" of Cannon et al., we demonstrate that several concepts from statistical learning theory have counterparts in the NP context. Specifically, we consider constrained versions of empirical risk minimization (NP-ERM) and structural risk minimization (NP-SRM), and prove performance guarantees for both. General conditions are given under which NP-SRM leads to strong universal consistency. We also apply NP-SRM to (dyadic) decision trees to derive rates of convergence. Finally, we present explicit algorithms to implement NP-SRM for histograms and dyadic decision trees.
C Scott, R Nowak\";s:2:\"dc\";a:5:{s:5:\"title\";s:49:\"A Neyman-Pearson approach to statistical learning\";s:7:\"creator\";s:14:\"C ScottR Nowak\";s:10:\"identifier\";s:27:\"doi:10.1109/TIT.2005.856955\";s:6:\"source\";s:81:\"Information Theory, IEEE Transactions on, Vol. 51, No. 11. (2005), pp. 3806-3819.\";s:4:\"date\";s:25:\"2008-06-26T12:10:36-00:00\";}s:5:\"prism\";a:7:{s:15:\"publicationyear\";s:4:\"2005\";s:15:\"publicationname\";s:40:\"Information Theory, IEEE Transactions on\";s:6:\"volume\";s:2:\"51\";s:6:\"number\";s:2:\"11\";s:12:\"startingpage\";s:4:\"3806\";s:10:\"endingpage\";s:4:\"3819\";s:8:\"category\";s:47:\"erminformationneyman-pearsonsrmstatisticstheory\";}s:7:\"summary\";s:1376:\"Information Theory, IEEE Transactions on, Vol. 51, No. 11. (2005), pp. 3806-3819.

The Neyman-Pearson (NP) approach to hypothesis testing is useful in situations where different types of error have different consequences or a priori probabilities are unknown. For any /spl alpha/>0, the NP lemma specifies the most powerful test of size /spl alpha/, but assumes the distributions for each hypothesis are known or (in some cases) the likelihood ratio is monotonic in an unknown parameter. This paper investigates an extension of NP theory to situations in which one has no knowledge of the underlying distributions except for a collection of independent and identically distributed (i.i.d.) training examples from each hypothesis. Building on a \"fundamental lemma\" of Cannon et al., we demonstrate that several concepts from statistical learning theory have counterparts in the NP context. Specifically, we consider constrained versions of empirical risk minimization (NP-ERM) and structural risk minimization (NP-SRM), and prove performance guarantees for both. General conditions are given under which NP-SRM leads to strong universal consistency. We also apply NP-SRM to (dyadic) decision trees to derive rates of convergence. Finally, we present explicit algorithms to implement NP-SRM for histograms and dyadic decision trees.
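To make the constrained ERM concrete, the usual formulation (my paraphrase of the standard setup, not the paper's exact statement) is: given labelled samples from each hypothesis, NP-ERM solves

$$\hat{f} = \arg\min_{f \in \mathcal{F}} \hat{R}_1(f) \quad \text{subject to} \quad \hat{R}_0(f) \le \alpha + \epsilon_0,$$

where $\hat{R}_j(f)$ is the empirical error of $f$ on the examples drawn under hypothesis $j$, $\alpha$ is the target size of the test, and $\epsilon_0$ is a small tolerance that keeps the constraint satisfiable with high probability.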
"Deconstructing Statistical Questions". David Hand. Journal of the Royal Statistical Society, Series A (Statistics in Society), Vol. 157, No. 3 (1994), pp. 317-356. doi:10.2307/2983526. http://www.citeulike.org/user/mdreid/article/2925107. Tags: methodology, philosophy, statistics, theory.

Too much current statistical work takes a superficial view of the client's research question, adopting techniques which have a solid history, a sound mathematical basis or readily available software, but without considering in depth whether the questions being answered are in fact those which should be asked. Examples, some familiar and others less so, are given to illustrate this assertion. It is clear that establishing the mapping from the client's domain to a statistical question is one of the most difficult parts of a statistical analysis. It is a part in which the responsibility is shared by both client and statistician. A plea is made for more research effort to go in this direction and some suggestions are made for ways to tackle the problem.
"DBpedia: A Nucleus for a Web of Open Data". Sören Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, Zachary Ives. The Semantic Web (11-15 November 2008), pp. 722-735. doi:10.1007/978-3-540-76298-0_52. http://www.citeulike.org/user/mdreid/article/2901818. Tags: community, data, web.

DBpedia is a community effort to extract structured information from Wikipedia and to make this information available on the Web. DBpedia allows you to ask sophisticated queries against datasets derived from Wikipedia and to link other datasets on the Web to Wikipedia data. We describe the extraction of the DBpedia datasets, and how the resulting information is published on the Web for human- and machine-consumption. We describe some emerging applications from the DBpedia community and show how website authors can facilitate DBpedia content within their sites. Finally, we present the current status of interlinking DBpedia with other open datasets on the Web and outline how DBpedia could serve as a nucleus for an emerging Web of open data.
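As an illustration of the kind of query DBpedia supports, here is a minimal Ruby sketch in the style of the scripts in this repository. The public SPARQL endpoint at http://dbpedia.org/sparql, the dbpedia.org/ontology birthPlace property and the Canberra resource URI are assumptions about the DBpedia deployment, not details taken from the paper.

    require 'rubygems'
    require 'rest_client'
    require 'json'
    require 'cgi'

    # Ask DBpedia for ten people born in Canberra (assumed endpoint and property).
    QUERY = <<-SPARQL
    SELECT ?person WHERE {
      ?person <http://dbpedia.org/ontology/birthPlace> <http://dbpedia.org/resource/Canberra> .
    } LIMIT 10
    SPARQL

    url = "http://dbpedia.org/sparql?query=#{CGI.escape(QUERY)}" +
          "&format=#{CGI.escape('application/sparql-results+json')}"

    # Each binding maps the ?person variable to a Wikipedia-derived resource URI.
    JSON.parse(RestClient.get(url))['results']['bindings'].each do |row|
      puts row['person']['value']
    end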
"Information Generation: How Data Rules Our World". David Hand. Oneworld Publications (25 January 2007). http://www.citeulike.org/user/mdreid/article/2925051. Tags: data, information, popular_science.

Information Generation is the story of the seminal role data plays in our lives and explains how the advance of our civilization has come hand in hand with our ability to collect and interpret data. Starting with occasional scratching on cave walls, eminent statistician David J. Hand guides us right up to the modern era where society is completely dependent on an abundance of data systems for its very survival.
"Nominal, Ordinal, Interval, and Ratio Typologies Are Misleading". Paul Velleman, Leland Wilkinson. The American Statistician, Vol. 47, No. 1 (1993), pp. 65-72. doi:10.2307/2684788. http://www.citeulike.org/user/mdreid/article/578321. Tags: criticism, measurement, statistics, theory, types.

The psychophysicist S. S. Stevens developed a measurement scale typology that has dominated social statistics methodology for almost 50 years. During this period, it has generated considerable controversy among statisticians. Recently, there has been a renaissance in the use of Stevens's scale typology for guiding the design of statistical computer packages. The current use of Stevens's terminology fails to deal with the classical criticisms made at the time it was proposed and ignores important developments in data analysis over the last several decades.
"Statistics and the Theory of Measurement". DJ Hand. Journal of the Royal Statistical Society, Series A (Statistics in Society), Vol. 159, No. 3 (1996), pp. 445-492. doi:10.2307/2983326. http://www.citeulike.org/user/mdreid/article/2920189. Tags: measurement, representation, statistics, theory.

Just as there are different interpretations of probability, leading to different kinds of inferential statements and different conclusions about statistical models and questions, so there are different theories of measurement, which in turn may lead to different kinds of statistical model and possibly different conclusions. This has led to much confusion and a long-running debate about when different classes of statistical methods may legitimately be applied. This paper outlines the major theories of measurement and their relationships and describes the different kinds of models and hypotheses which may be formulated within each theory. One general conclusion is that the domains of applicability of the two major theories are typically different, and it is this which helps apparent contradictions to be avoided in most practical applications.
"On the Theory of Scales of Measurement". SS Stevens. Science, Vol. 103, No. 2684 (7 June 1946), pp. 677-680. http://www.citeulike.org/user/mdreid/article/611484. Tags: historical, measurement, theory.
"Value Regularization and Fenchel Duality". Ryan Rifkin, Ross Lippert. Journal of Machine Learning Research, Vol. 8 (2007), pp. 441-479. http://www.citeulike.org/user/mdreid/article/2301040. Tags: classification, fenchel_duality, kernel, regression, regularisation.

Regularization is an approach to function learning that balances fit and smoothness. In practice, we search for a function $f$ with a finite representation $f = \sum_i c_i \varphi_i(\cdot)$. In most treatments, the $c_i$ are the primary objects of study. We consider value regularization, constructing optimization problems in which the predicted values at the training points are the primary variables, and therefore the central objects of study. Although this is a simple change, it has profound consequences. From convex conjugacy and the theory of Fenchel duality, we derive separate optimality conditions for the regularization and loss portions of the learning problem; this technique yields clean and short derivations of standard algorithms. This framework is ideally suited to studying many other phenomena at the intersection of learning theory and optimization. We obtain a value-based variant of the representer theorem, which underscores the transductive nature of regularization in reproducing kernel Hilbert spaces. We unify and extend previous results on learning kernel functions, with very simple proofs. We analyze the use of unregularized bias terms in optimization problems, and low-rank approximations to kernel matrices, obtaining new results in these areas. In summary, value regularization and Fenchel duality are valuable tools for studying the optimization problems in machine learning.
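For orientation, Fenchel duality in its standard form (a textbook statement; the paper's precise variant may differ) reads

$$\inf_{y} \left\{ L(y) + \Omega(y) \right\} = \sup_{u} \left\{ -L^*(-u) - \Omega^*(u) \right\}, \qquad g^*(u) = \sup_{v} \left\{ \langle u, v \rangle - g(v) \right\},$$

holding for convex $L$ and $\Omega$ under a mild constraint qualification. In value regularization $y$ is the vector of predicted values at the training points, $L$ the total loss and $\Omega$ the regularizer, which is what lets the optimality conditions for the loss and regularization portions separate.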
"A generalization of principal component analysis to the exponential family". M Collins, S Dasgupta, R Schapire (2001). http://www.citeulike.org/user/mdreid/article/2891300. Tags: bregman_divergence, dimensional_reduction, exponential_family, pca.

Principal component analysis (PCA) is a commonly applied technique for dimensionality reduction. PCA implicitly minimizes a squared loss function, which may be inappropriate for data that is not real-valued, such as binary-valued data. This paper draws on ideas from the exponential family, generalized linear models, and Bregman distances, to give a generalization of PCA to loss functions that we argue are better suited to other data types. We describe algorithms for minimizing the loss...
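The Bregman distance the abstract mentions is, in its standard form (not the paper's notation), generated by a convex function $F$:

$$D_F(p \,\|\, q) = F(p) - F(q) - \langle \nabla F(q),\; p - q \rangle.$$

Choosing $F(x) = \tfrac{1}{2}\|x\|^2$ recovers squared loss, so ordinary PCA minimizes a particular Bregman distance; for other exponential-family models the negative log-likelihood is, up to terms that do not depend on the parameters, the Bregman distance generated by the family's cumulant function, giving, for example, a logistic-style loss for binary data.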
"Nonparametric Bayesian Data Analysis". Peter Müller, Fernando Quintana. Statistical Science, Vol. 19, No. 1 (2004), pp. 95-110. http://www.citeulike.org/user/mdreid/article/1995983. Tags: density_estimation, dirichlet_process, regression, survey.

We review the current state of nonparametric Bayesian inference. The discussion follows a list of important statistical inference problems, including density estimation, regression, survival analysis, hierarchical models and model validation. For each inference problem we review relevant nonparametric Bayesian models and approaches including Dirichlet process (DP) models and variations, Pólya trees, wavelet based models, neural network models, spline regression, CART, dependent DP models and model validation with DP and Pólya tree extensions of parametric models.
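As one concrete example of the models surveyed, a draw $G \sim \mathrm{DP}(\alpha, G_0)$ from a Dirichlet process has the stick-breaking representation (a standard result due to Sethuraman, included for orientation rather than taken from this review):

$$G = \sum_{k=1}^{\infty} \pi_k \, \delta_{\theta_k}, \qquad \pi_k = \beta_k \prod_{j < k} (1 - \beta_j), \qquad \beta_k \sim \mathrm{Beta}(1, \alpha), \qquad \theta_k \sim G_0,$$

so $G$ is almost surely discrete, which is what makes DP mixtures natural building blocks for density estimation and clustering.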
"Sufficient Dimensionality Reduction". Amir Globerson, Naftali Tishby. Journal of Machine Learning Research, Vol. 3 (March 2003), pp. 1307-1331. http://www.citeulike.org/user/mdreid/article/2861694. Tags: dimensional_reduction, information, sufficiency, theory.

Dimensionality reduction of empirical co-occurrence data is a fundamental problem in unsupervised learning. It is also a well studied problem in statistics known as the analysis of cross-classified data. One principled approach to this problem is to represent the data in low dimension with minimal loss of (mutual) information contained in the original data. In this paper we introduce an information theoretic nonlinear method for finding such a most informative dimension reduction. In contrast with previously introduced clustering based approaches, here we extract continuous feature functions directly from the co-occurrence matrix. In a sense, we automatically extract functions of the variables that serve as approximate sufficient statistics for a sample of one variable about the other one. Our method is different from dimensionality reduction methods which are based on a specific, sometimes arbitrary, metric or embedding. Another interpretation of our method is as generalized, multi-dimensional, non-linear regression, where rather than fitting one regression function through two dimensional data, we extract d-regression functions whose expectation values capture the information among the variables. It thus presents a new learning paradigm that unifies aspects from both supervised and unsupervised learning. The resulting dimension reduction can be described by two conjugate d-dimensional differential manifolds that are coupled through Maximum Entropy I-projections. The Riemannian metrics of these manifolds are determined by the observed expectation values of our extracted features. Following this geometric interpretation we present an iterative information projection algorithm for finding such features and prove its convergence. Our algorithm is similar to the method of "association analysis" in statistics, though the feature extraction context as well as the information theoretic and geometric interpretation are new. The algorithm is illustrated by various synthetic co-occurrence data. It is then demonstrated for text categorization and information retrieval and proves effective in selecting a small set of features, often improving performance over the original feature set.
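If I read the abstract correctly, the reduced model takes an exponential, bilinear form along the lines of (my reconstruction; consult the paper for the exact statement)

$$\tilde{p}(x, y) \propto \exp\!\left( \sum_{k=1}^{d} \phi_k(x) \, \psi_k(y) \right),$$

with the feature functions $\phi_k$ and $\psi_k$ chosen so that the maximum-entropy distribution matching their expectations preserves as much as possible of the mutual information in the co-occurrence data; the iterative I-projection algorithm alternates between fitting the two sides.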
"A Tutorial on Conformal Prediction". Glenn Shafer, Vladimir Vovk. Journal of Machine Learning Research, Vol. 9 (March 2008), pp. 371-421. http://www.citeulike.org/user/mdreid/article/2587574. Tags: confidence, conformal_prediction, credibility, exchangeability, probability, theory, tutorial.

Conformal prediction uses past experience to determine precise levels of confidence in new predictions. Given an error probability ε, together with a method that makes a prediction ŷ of a label y, it produces a set of labels, typically containing ŷ, that also contains y with probability 1 − ε. Conformal prediction can be applied to any method for producing ŷ: a nearest-neighbor method, a support-vector machine, ridge regression, etc. Conformal prediction is designed for an on-line setting in which labels are predicted successively, each one being revealed before the next is predicted. The most novel and valuable feature of conformal prediction is that if the successive examples are sampled independently from the same distribution, then the successive predictions will be right 1 − ε of the time, even though they are based on an accumulating data set rather than on independent data sets. In addition to the model under which successive examples are sampled independently, other on-line compression models can also use conformal prediction. The widely used Gaussian linear model is one of these. This tutorial presents a self-contained account of the theory of conformal prediction and works through several numerical examples. A more comprehensive treatment of the topic is provided in Algorithmic Learning in a Random World, by Vladimir Vovk, Alex Gammerman, and Glenn Shafer (Springer, 2005).
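Concretely, in the standard construction (a textbook statement, not text from the tutorial): given nonconformity scores $\alpha_1, \dots, \alpha_n$ for the observed examples and a score $\alpha_{n+1}(y)$ for the new object paired with a candidate label $y$, the prediction set at level $\epsilon$ is

$$\Gamma^{\epsilon} = \left\{ y \;:\; \frac{\bigl|\{\, i : \alpha_i \ge \alpha_{n+1}(y) \,\}\bigr| + 1}{n + 1} > \epsilon \right\},$$

and exchangeability of the examples guarantees $\Pr\{ y_{n+1} \in \Gamma^{\epsilon} \} \ge 1 - \epsilon$.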
Feed source: CiteULike: mdreid's library [108 articles], http://www.citeulike.org/user/mdreid (retrieved Tue, 23 Dec 2008 08:33:07 GMT).
+0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:8:\"uberdose\";}s:4:\"guid\";s:40:\"753@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:92:\"Automatically optimizes your Wordpress blog for Search Engines (Search Engine Optimization).\";s:7:\"summary\";s:92:\"Automatically optimizes your Wordpress blog for Search Engines (Search Engine Optimization).\";}i:2;a:7:{s:5:\"title\";s:36:\"olivers on \"cformsII - contact form\"\";s:4:\"link\";s:52:\"http://wordpress.org/extend/plugins/cforms/#post-925\";s:7:\"pubdate\";s:31:\"Mon, 09 Apr 2007 22:12:26 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:7:\"olivers\";}s:4:\"guid\";s:40:\"925@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:119:\"cforms II is the most customizable, flexible & powerful ajax supporting contact form plugin (& comment form)!\";s:7:\"summary\";s:119:\"cforms II is the most customizable, flexible & powerful ajax supporting contact form plugin (& comment form)!\";}i:3;a:7:{s:5:\"title\";s:29:\"andy on \"WordPress.com Stats\"\";s:4:\"link\";s:52:\"http://wordpress.org/extend/plugins/stats/#post-1355\";s:7:\"pubdate\";s:31:\"Sun, 06 May 2007 02:15:03 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"andy\";}s:4:\"guid\";s:41:\"1355@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:119:\"You can have simple, concise stats with no additional load on your server by plugging into WordPress.com\'s stat system.\";s:7:\"summary\";s:119:\"You can have simple, concise stats with no additional load on your server by plugging into WordPress.com\'s stat system.\";}i:4;a:7:{s:5:\"title\";s:27:\"donncha on \"WP Super Cache\"\";s:4:\"link\";s:61:\"http://wordpress.org/extend/plugins/wp-super-cache/#post-2572\";s:7:\"pubdate\";s:31:\"Mon, 05 Nov 2007 11:40:04 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:7:\"donncha\";}s:4:\"guid\";s:41:\"2572@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:73:\"A very fast caching engine for WordPress that produces static html files.\";s:7:\"summary\";s:73:\"A very fast caching engine for WordPress that produces static html files.\";}i:5;a:7:{s:5:\"title\";s:30:\"arnee on \"Google XML Sitemaps\"\";s:4:\"link\";s:70:\"http://wordpress.org/extend/plugins/google-sitemap-generator/#post-132\";s:7:\"pubdate\";s:31:\"Fri, 09 Mar 2007 22:31:32 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"arnee\";}s:4:\"guid\";s:40:\"132@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:87:\"This plugin will create a Google sitemaps compliant XML-Sitemap of your WordPress blog.\";s:7:\"summary\";s:87:\"This plugin will create a Google sitemaps compliant XML-Sitemap of your WordPress blog.\";}i:6;a:7:{s:5:\"title\";s:29:\"alexrabe on \"NextGEN Gallery\"\";s:4:\"link\";s:62:\"http://wordpress.org/extend/plugins/nextgen-gallery/#post-1169\";s:7:\"pubdate\";s:31:\"Mon, 23 Apr 2007 20:08:06 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:8:\"alexrabe\";}s:4:\"guid\";s:41:\"1169@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:102:\"NextGEN Gallery is a full integrated Image Gallery plugin for WordPress with a Flash slideshow option.\";s:7:\"summary\";s:102:\"NextGEN Gallery is a full integrated Image Gallery plugin for WordPress with a Flash slideshow option.\";}i:7;a:7:{s:5:\"title\";s:41:\"Viper007Bond on \"Viper\'s Video Quicktags\"\";s:4:\"link\";s:68:\"http://wordpress.org/extend/plugins/vipers-video-quicktags/#post-810\";s:7:\"pubdate\";s:31:\"Tue, 03 Apr 2007 00:08:41 
+0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:12:\"Viper007Bond\";}s:4:\"guid\";s:40:\"810@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:114:\"Allows easy and XHTML valid posting of videos from various websites such as YouTube, DailyMotion, Vimeo, and more.\";s:7:\"summary\";s:114:\"Allows easy and XHTML valid posting of videos from various websites such as YouTube, DailyMotion, Vimeo, and more.\";}i:8;a:7:{s:5:\"title\";s:44:\"keithdsouza on \"Wordpress Automatic upgrade\"\";s:4:\"link\";s:74:\"http://wordpress.org/extend/plugins/wordpress-automatic-upgrade/#post-2560\";s:7:\"pubdate\";s:31:\"Sat, 27 Oct 2007 20:55:05 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:11:\"keithdsouza\";}s:4:\"guid\";s:41:\"2560@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:112:\"Wordpress automatic upgrade allows a user to automatically upgrade the wordpress installation to the latest one.\";s:7:\"summary\";s:112:\"Wordpress automatic upgrade allows a user to automatically upgrade the wordpress installation to the latest one.\";}i:9;a:7:{s:5:\"title\";s:51:\"micropat on \"Add to Any Share/Save/Bookmark Button\"\";s:4:\"link\";s:56:\"http://wordpress.org/extend/plugins/add-to-any/#post-498\";s:7:\"pubdate\";s:31:\"Sat, 17 Mar 2007 23:08:16 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:8:\"micropat\";}s:4:\"guid\";s:40:\"498@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:141:\"Helps readers share, save, and bookmark your posts and pages using any service, such as Delicious, Digg, Facebook, MySpace, and all the rest.\";s:7:\"summary\";s:141:\"Helps readers share, save, and bookmark your posts and pages using any service, such as Delicious, Digg, Facebook, MySpace, and all the rest.\";}i:10;a:7:{s:5:\"title\";s:35:\"Utkarsh Kukreti on \"Plugin Manager\"\";s:4:\"link\";s:61:\"http://wordpress.org/extend/plugins/plugin-manager/#post-6737\";s:7:\"pubdate\";s:31:\"Fri, 22 Aug 2008 16:11:19 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:15:\"Utkarsh Kukreti\";}s:4:\"guid\";s:41:\"6737@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:129:\"View, download and install plugins from WordPress.org Plugin Database from an AJAX\'ed interface with a single click of the mouse.\";s:7:\"summary\";s:129:\"View, download and install plugins from WordPress.org Plugin Database from an AJAX\'ed interface with a single click of the mouse.\";}i:11;a:7:{s:5:\"title\";s:28:\"freediver on \"Smart YouTube\"\";s:4:\"link\";s:60:\"http://wordpress.org/extend/plugins/smart-youtube/#post-2935\";s:7:\"pubdate\";s:31:\"Tue, 12 Feb 2008 12:05:03 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:9:\"freediver\";}s:4:\"guid\";s:41:\"2935@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:112:\"Smart Youtube plugin allows you to insert full featured YouTube videos into your post, comments and in RSS feed.\";s:7:\"summary\";s:112:\"Smart Youtube plugin allows you to insert full featured YouTube videos into your post, comments and in RSS feed.\";}i:12;a:7:{s:5:\"title\";s:30:\"weefselkweekje on \"WP-Cumulus\"\";s:4:\"link\";s:57:\"http://wordpress.org/extend/plugins/wp-cumulus/#post-6499\";s:7:\"pubdate\";s:31:\"Thu, 31 Jul 2008 10:00:57 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:14:\"weefselkweekje\";}s:4:\"guid\";s:41:\"6499@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:91:\"WP-Cumulus displays your tags and/or categories in 3D by placing them on a rotating sphere.\";s:7:\"summary\";s:91:\"WP-Cumulus displays your tags and/or categories in 3D by placing them on a rotating 
sphere.\";}i:13;a:7:{s:5:\"title\";s:32:\"takayukister on \"Contact Form 7\"\";s:4:\"link\";s:61:\"http://wordpress.org/extend/plugins/contact-form-7/#post-2141\";s:7:\"pubdate\";s:31:\"Thu, 02 Aug 2007 12:45:03 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:12:\"takayukister\";}s:4:\"guid\";s:41:\"2141@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:54:\"Just another contact form plugin. Simple but flexible.\";s:7:\"summary\";s:54:\"Just another contact form plugin. Simple but flexible.\";}i:14;a:7:{s:5:\"title\";s:25:\"joostdevalk on \"Sociable\"\";s:4:\"link\";s:55:\"http://wordpress.org/extend/plugins/sociable/#post-2865\";s:7:\"pubdate\";s:31:\"Thu, 31 Jan 2008 11:36:17 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:11:\"joostdevalk\";}s:4:\"guid\";s:41:\"2865@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:100:\"Automatically add links on your posts, pages and RSS feed to your favorite social bookmarking sites.\";s:7:\"summary\";s:100:\"Automatically add links on your posts, pages and RSS feed to your favorite social bookmarking sites.\";}}s:7:\"channel\";a:6:{s:5:\"title\";s:12:\"Most Popular\";s:4:\"link\";s:36:\"http://wordpress.org/extend/plugins/\";s:11:\"description\";s:12:\"Most Popular\";s:8:\"language\";s:2:\"en\";s:7:\"pubdate\";s:31:\"Sun, 30 Nov 2008 10:30:11 +0000\";s:7:\"tagline\";s:12:\"Most Popular\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:0:{}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"2.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}s:13:\"last_modified\";s:21:\"2007-03-09 22:11:30\r\n\";s:4:\"etag\";s:36:\"\"8675051e89bf6e8eba42545ba20839d9\"\r\n\";}','no'),(330,0,'rss_a5420c83891a9c88ad2a4f04584a5efc_ts','1229897943','no'),(331,0,'rss_57bc725ad6568758915363af670fd8bc','O:9:\"MagpieRSS\":19:{s:6:\"parser\";i:0;s:12:\"current_item\";a:6:{s:5:\"title\";s:25:\"LukaszWiecek on \"BlipBot\"\";s:4:\"link\";s:54:\"http://wordpress.org/extend/plugins/blipbot/#post-8174\";s:7:\"pubdate\";s:31:\"Sat, 20 Dec 2008 09:51:05 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:12:\"LukaszWiecek\";}s:4:\"guid\";s:41:\"8174@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:16:\"Wtyczka pozwalaj\";}s:5:\"items\";a:4:{i:0;a:7:{s:5:\"title\";s:24:\"indranil on \"Menu Maker\"\";s:4:\"link\";s:56:\"http://wordpress.org/extend/plugins/menumaker/#post-8196\";s:7:\"pubdate\";s:31:\"Sun, 21 Dec 2008 12:37:10 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:8:\"indranil\";}s:4:\"guid\";s:41:\"8196@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:118:\"The Menu Maker plugin helps in creating a menu for your site. Usually this can be used for creating a navigation menu.\";s:7:\"summary\";s:118:\"The Menu Maker plugin helps in creating a menu for your site. 
Usually this can be used for creating a navigation menu.\";}i:1;a:7:{s:5:\"title\";s:33:\"mattwalters on \"WordPress Filter\"\";s:4:\"link\";s:63:\"http://wordpress.org/extend/plugins/wordpress-filter/#post-8189\";s:7:\"pubdate\";s:31:\"Sun, 21 Dec 2008 01:16:31 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:11:\"mattwalters\";}s:4:\"guid\";s:41:\"8189@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:74:\"WordPress Filter is a comprehensive post filtering & template system.\";s:7:\"summary\";s:74:\"WordPress Filter is a comprehensive post filtering & template system.\";}i:2;a:7:{s:5:\"title\";s:30:\"joedolson on \"WP Post Styling\"\";s:4:\"link\";s:62:\"http://wordpress.org/extend/plugins/wp-post-styling/#post-8181\";s:7:\"pubdate\";s:31:\"Sat, 20 Dec 2008 16:00:06 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:9:\"joedolson\";}s:4:\"guid\";s:41:\"8181@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:150:\"Allows you to define custom styles for any specific post or page on your WordPress site. This is particularly useful for journal-style publications wh\";s:7:\"summary\";s:150:\"Allows you to define custom styles for any specific post or page on your WordPress site. This is particularly useful for journal-style publications wh\";}i:3;a:7:{s:5:\"title\";s:21:\"mptre on \"List pages\"\";s:4:\"link\";s:57:\"http://wordpress.org/extend/plugins/list-pages/#post-8180\";s:7:\"pubdate\";s:31:\"Sat, 20 Dec 2008 15:50:46 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mptre\";}s:4:\"guid\";s:41:\"8180@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:145:\"Retrieve pages, and children, in a more sophisticated way and add the correct class to the parent li-element even if you\'re visting a child-page.\";s:7:\"summary\";s:145:\"Retrieve pages, and children, in a more sophisticated way and add the correct class to the parent li-element even if you\'re visting a child-page.\";}}s:7:\"channel\";a:6:{s:5:\"title\";s:6:\"Newest\";s:4:\"link\";s:36:\"http://wordpress.org/extend/plugins/\";s:11:\"description\";s:6:\"Newest\";s:8:\"language\";s:2:\"en\";s:7:\"pubdate\";s:31:\"Mon, 22 Dec 2008 11:32:46 +0000\";s:7:\"tagline\";s:6:\"Newest\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:0:{}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"2.0\";s:5:\"stack\";a:1:{i:0;s:11:\"description\";}s:9:\"inchannel\";b:1;s:6:\"initem\";b:1;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}s:13:\"last_modified\";s:21:\"2008-12-21 12:37:10\r\n\";s:4:\"etag\";s:36:\"\"07cfb9cfa411cb821b0e371452134bea\"\r\n\";}','no'),(332,0,'rss_57bc725ad6568758915363af670fd8bc_ts','1229945566','no'),(333,0,'rss_1a5f760f2e2b48827d4974a60857e7c2','O:9:\"MagpieRSS\":19:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:15:{i:0;a:7:{s:5:\"title\";s:44:\"andrewescott on \"hReview Support for Editor\"\";s:4:\"link\";s:73:\"http://wordpress.org/extend/plugins/hreview-support-for-editor/#post-2075\";s:7:\"pubdate\";s:31:\"Sun, 24 Jun 2007 11:45:12 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:12:\"andrewescott\";}s:4:\"guid\";s:41:\"2075@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:150:\"This is a plugin to allow the easy entry of microformat content for reviews (i.e. the hReview microformat) in WordPress pages and posts. 
It adds a but\";s:7:\"summary\";s:150:\"This is a plugin to allow the easy entry of microformat content for reviews (i.e. the hReview microformat) in WordPress pages and posts. It adds a but\";}i:1;a:7:{s:5:\"title\";s:24:\"Joen on \"Quote Comments\"\";s:4:\"link\";s:61:\"http://wordpress.org/extend/plugins/quote-comments/#post-8157\";s:7:\"pubdate\";s:31:\"Fri, 19 Dec 2008 12:05:34 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"Joen\";}s:4:\"guid\";s:41:\"8157@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:149:\"Creates a little quote icon in comment boxes which, when clicked, copies that comment to the comment box wrapped in blockquotes (or bq. if Textile is\";s:7:\"summary\";s:149:\"Creates a little quote icon in comment boxes which, when clicked, copies that comment to the comment box wrapped in blockquotes (or bq. if Textile is\";}i:2;a:7:{s:5:\"title\";s:43:\"jaschaephraim on \"Page Management Dropdown\"\";s:4:\"link\";s:71:\"http://wordpress.org/extend/plugins/page-management-dropdown/#post-6619\";s:7:\"pubdate\";s:31:\"Mon, 11 Aug 2008 22:55:50 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:13:\"jaschaephraim\";}s:4:\"guid\";s:41:\"6619@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:69:\"Adds a link to edit each individual page to the new Pages admin menu.\";s:7:\"summary\";s:69:\"Adds a link to edit each individual page to the new Pages admin menu.\";}i:3;a:7:{s:5:\"title\";s:39:\"Nicolas FISCHMEISTER on \"Saint du jour\"\";s:4:\"link\";s:60:\"http://wordpress.org/extend/plugins/saint-du-jour/#post-8090\";s:7:\"pubdate\";s:31:\"Mon, 15 Dec 2008 11:10:31 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:20:\"Nicolas FISCHMEISTER\";}s:4:\"guid\";s:41:\"8090@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:139:\"This widget displays the saint of the day, according to the timetable of the ordinary and/or the extraordinary form of Roman Catholic rite.\";s:7:\"summary\";s:139:\"This widget displays the saint of the day, according to the timetable of the ordinary and/or the extraordinary form of Roman Catholic rite.\";}i:4;a:7:{s:5:\"title\";s:38:\"gfazioli on \"Flash Feed Scroll Reader\"\";s:4:\"link\";s:71:\"http://wordpress.org/extend/plugins/flash-feed-scroll-reader/#post-8161\";s:7:\"pubdate\";s:31:\"Fri, 19 Dec 2008 16:52:49 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:8:\"gfazioli\";}s:4:\"guid\";s:41:\"8161@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:73:\"Flash Feed Scroll Reader is a Flash Feed Reader with horizontal scrolling\";s:7:\"summary\";s:73:\"Flash Feed Scroll Reader is a Flash Feed Reader with horizontal scrolling\";}i:5;a:7:{s:5:\"title\";s:26:\"kubi23 on \"Mail On Update\"\";s:4:\"link\";s:61:\"http://wordpress.org/extend/plugins/mail-on-update/#post-5355\";s:7:\"pubdate\";s:31:\"Wed, 23 Apr 2008 10:13:31 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:6:\"kubi23\";}s:4:\"guid\";s:41:\"5355@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:116:\"Sends an E-Mail to one (i.e. WordPress admin) or multiple E-Mail Addresses if new versions of plugins are available.\";s:7:\"summary\";s:116:\"Sends an E-Mail to one (i.e. 
WordPress admin) or multiple E-Mail Addresses if new versions of plugins are available.\";}i:6;a:7:{s:5:\"title\";s:21:\"mptre on \"List pages\"\";s:4:\"link\";s:57:\"http://wordpress.org/extend/plugins/list-pages/#post-8180\";s:7:\"pubdate\";s:31:\"Sat, 20 Dec 2008 15:50:46 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mptre\";}s:4:\"guid\";s:41:\"8180@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:145:\"Retrieve pages, and children, in a more sophisticated way and add the correct class to the parent li-element even if you\'re visting a child-page.\";s:7:\"summary\";s:145:\"Retrieve pages, and children, in a more sophisticated way and add the correct class to the parent li-element even if you\'re visting a child-page.\";}i:7;a:7:{s:5:\"title\";s:33:\"jaroat on \"Yet Another Photoblog\"\";s:4:\"link\";s:68:\"http://wordpress.org/extend/plugins/yet-another-photoblog/#post-2569\";s:7:\"pubdate\";s:31:\"Fri, 02 Nov 2007 21:25:29 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:6:\"jaroat\";}s:4:\"guid\";s:41:\"2569@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:93:\"Convert your WordPress 2.5+ installation into a full featured photoblog in virtually no time.\";s:7:\"summary\";s:93:\"Convert your WordPress 2.5+ installation into a full featured photoblog in virtually no time.\";}i:8;a:7:{s:5:\"title\";s:31:\"denzel_chia on \"vimeo quicktag\"\";s:4:\"link\";s:62:\"http://wordpress.org/extend/plugins/vimeo-quicktags/#post-8108\";s:7:\"pubdate\";s:31:\"Tue, 16 Dec 2008 15:42:51 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:11:\"denzel_chia\";}s:4:\"guid\";s:41:\"8108@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:66:\"insert vimeo quicktag with full embed options as provided by vimeo\";s:7:\"summary\";s:66:\"insert vimeo quicktag with full embed options as provided by vimeo\";}i:9;a:7:{s:5:\"title\";s:39:\"Erunafailaro on \"Feed Reading Blogroll\"\";s:4:\"link\";s:68:\"http://wordpress.org/extend/plugins/feed-reading-blogroll/#post-5942\";s:7:\"pubdate\";s:31:\"Thu, 12 Jun 2008 11:55:05 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:12:\"Erunafailaro\";}s:4:\"guid\";s:41:\"5942@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:150:\"The plugin checks and displays for all your blogroll-bookmarks with a separate saved feed-url, when the most recent update has occured at the bookmark\";s:7:\"summary\";s:150:\"The plugin checks and displays for all your blogroll-bookmarks with a separate saved feed-url, when the most recent update has occured at the bookmark\";}i:10;a:7:{s:5:\"title\";s:34:\"tinkerpriest on \"Searchable Links\"\";s:4:\"link\";s:63:\"http://wordpress.org/extend/plugins/searchable-links/#post-8172\";s:7:\"pubdate\";s:31:\"Sat, 20 Dec 2008 07:23:58 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:12:\"tinkerpriest\";}s:4:\"guid\";s:41:\"8172@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:70:\"Takes values from a custom field and turns them into searchable links.\";s:7:\"summary\";s:70:\"Takes values from a custom field and turns them into searchable links.\";}i:11;a:7:{s:5:\"title\";s:21:\"mkyong on \"Digg Digg\"\";s:4:\"link\";s:56:\"http://wordpress.org/extend/plugins/digg-digg/#post-6553\";s:7:\"pubdate\";s:31:\"Tue, 05 Aug 2008 14:19:19 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:6:\"mkyong\";}s:4:\"guid\";s:41:\"6553@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:69:\"Integrate \"Digg Button\" and \"Reddit Me Button\" Into Wordpress Content\";s:7:\"summary\";s:69:\"Integrate \"Digg Button\" and \"Reddit Me Button\" Into 
Wordpress Content\";}i:12;a:7:{s:5:\"title\";s:22:\"neop on \"Postalicious\"\";s:4:\"link\";s:59:\"http://wordpress.org/extend/plugins/postalicious/#post-2507\";s:7:\"pubdate\";s:31:\"Mon, 15 Oct 2007 02:15:03 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:4:\"neop\";}s:4:\"guid\";s:41:\"2507@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:147:\"Postalicious is a WordPress plugin that automatically posts your delicious, ma.gnolia, Google Reader, Reddit or Yahoo Pipes bookmarks to your blog.\";s:7:\"summary\";s:147:\"Postalicious is a WordPress plugin that automatically posts your delicious, ma.gnolia, Google Reader, Reddit or Yahoo Pipes bookmarks to your blog.\";}i:13;a:7:{s:5:\"title\";s:28:\"nsimon on \"DandyID Services\"\";s:4:\"link\";s:63:\"http://wordpress.org/extend/plugins/dandyid-services/#post-8169\";s:7:\"pubdate\";s:31:\"Fri, 19 Dec 2008 22:11:03 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:6:\"nsimon\";}s:4:\"guid\";s:41:\"8169@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:71:\"DandyID provides one place to manage you and your relationships online.\";s:7:\"summary\";s:71:\"DandyID provides one place to manage you and your relationships online.\";}i:14;a:7:{s:5:\"title\";s:28:\"storyday on \"ajaxcoderender\"\";s:4:\"link\";s:61:\"http://wordpress.org/extend/plugins/ajaxcoderender/#post-8159\";s:7:\"pubdate\";s:31:\"Fri, 19 Dec 2008 13:54:21 +0000\";s:2:\"dc\";a:1:{s:7:\"creator\";s:8:\"storyday\";}s:4:\"guid\";s:41:\"8159@http://wordpress.org/extend/plugins/\";s:11:\"description\";s:65:\"ajaxcoderender is a plugin to make your code in post looks pretty\";s:7:\"summary\";s:65:\"ajaxcoderender is a plugin to make your code in post looks pretty\";}}s:7:\"channel\";a:6:{s:5:\"title\";s:16:\"Recently Updated\";s:4:\"link\";s:36:\"http://wordpress.org/extend/plugins/\";s:11:\"description\";s:16:\"Recently Updated\";s:8:\"language\";s:2:\"en\";s:7:\"pubdate\";s:31:\"Mon, 22 Dec 2008 11:32:46 +0000\";s:7:\"tagline\";s:16:\"Recently Updated\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:0:{}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"2.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}s:13:\"last_modified\";s:21:\"2007-06-24 11:45:12\r\n\";s:4:\"etag\";s:36:\"\"3ff20512c48af546fdcb00815ca4a289\"\r\n\";}','no'),(334,0,'rss_1a5f760f2e2b48827d4974a60857e7c2_ts','1229945566','no'),(374,0,'td_themes','Simplr','yes'),(368,0,'stats_options','a:6:{s:4:\"host\";s:12:\"conflate.net\";s:4:\"path\";s:9:\"/inductio\";s:7:\"blog_id\";i:3422007;s:7:\"version\";s:1:\"1\";s:5:\"error\";b:0;s:7:\"api_key\";s:12:\"3697b86c9d18\";}','yes'),(369,0,'stats_cache','a:3:{s:32:\"c031aa2690fe0249ca8600f21c8223e0\";a:1:{i:1229945565;a:3:{i:0;a:4:{s:7:\"post_id\";s:2:\"38\";s:10:\"post_title\";s:23:\"Research-Changing Books\";s:14:\"post_permalink\";s:61:\"http://conflate.net/inductio/2008/05/research-changing-books/\";s:5:\"views\";s:4:\"2786\";}i:1;a:4:{s:7:\"post_id\";s:2:\"99\";s:10:\"post_title\";s:34:\"Prediction and the Axiom of Choice\";s:14:\"post_permalink\";s:72:\"http://conflate.net/inductio/2008/08/prediction-and-the-axiom-of-choice/\";s:5:\"views\";s:4:\"1894\";}i:2;a:4:{s:7:\"post_id\";s:3:\"143\";s:10:\"post_title\";s:27:\"Behold! 
Jensen\'s Inequality\";s:14:\"post_permalink\";s:63:\"http://conflate.net/inductio/2008/11/behold-jensens-inequality/\";s:5:\"views\";s:3:\"745\";}}}s:32:\"17059248d549800430f44adb915ce314\";a:1:{i:1229945565;a:3:{i:0;a:4:{s:7:\"post_id\";s:2:\"99\";s:10:\"post_title\";s:34:\"Prediction and the Axiom of Choice\";s:14:\"post_permalink\";s:72:\"http://conflate.net/inductio/2008/08/prediction-and-the-axiom-of-choice/\";s:5:\"views\";s:2:\"48\";}i:1;a:4:{s:7:\"post_id\";s:3:\"143\";s:10:\"post_title\";s:27:\"Behold! Jensen\'s Inequality\";s:14:\"post_permalink\";s:63:\"http://conflate.net/inductio/2008/11/behold-jensens-inequality/\";s:5:\"views\";s:2:\"20\";}i:2;a:4:{s:7:\"post_id\";s:3:\"164\";s:10:\"post_title\";s:35:\"Machine Learning Summer School 2009\";s:14:\"post_permalink\";s:73:\"http://conflate.net/inductio/2008/11/machine-learning-summer-school-2009/\";s:5:\"views\";s:2:\"12\";}}}s:32:\"c941cc86c16bd25fb63eecd2663e1702\";a:1:{i:1229945566;a:3:{i:0;a:2:{s:10:\"searchterm\";s:35:\"machine learning summer school 2009\";s:5:\"views\";s:1:\"6\";}i:1;a:2:{s:10:\"searchterm\";s:11:\"\"infer.net\"\";s:5:\"views\";s:1:\"3\";}i:2;a:2:{s:10:\"searchterm\";s:19:\"jensen\'s inequality\";s:5:\"views\";s:1:\"3\";}}}}','yes'),(545,0,'recently_activated','a:1:{s:17:\"js-kit/js-kit.php\";i:1229547446;}','yes'),(379,0,'widget_meta','a:1:{s:5:\"title\";s:0:\"\";}','yes'),(380,0,'simplr_basefontsize','100%','yes'),(381,0,'simplr_basefontfamily','\'lucida sans unicode\', \'lucida grande\', sans-serif','yes'),(382,0,'simplr_headingfontfamily','\'lucida sans unicode\', \'lucida grande\', sans-serif','yes'),(383,0,'simplr_layoutwidth','42em','yes'),(384,0,'simplr_posttextalignment','left','yes'),(385,0,'simplr_sidebarposition','col1-col2','yes'),(386,0,'simplr_accesslinks','mouseover','yes'),(558,0,'rewrite_rules','a:63:{s:11:\"robots.txt$\";s:18:\"index.php?robots=1\";s:14:\".*wp-atom.php$\";s:19:\"index.php?feed=atom\";s:13:\".*wp-rdf.php$\";s:18:\"index.php?feed=rdf\";s:13:\".*wp-rss.php$\";s:18:\"index.php?feed=rss\";s:14:\".*wp-rss2.php$\";s:19:\"index.php?feed=rss2\";s:14:\".*wp-feed.php$\";s:19:\"index.php?feed=feed\";s:22:\".*wp-commentsrss2.php$\";s:34:\"index.php?feed=rss2&withcomments=1\";s:32:\"feed/(feed|rdf|rss|rss2|atom)/?$\";s:27:\"index.php?&feed=$matches[1]\";s:27:\"(feed|rdf|rss|rss2|atom)/?$\";s:27:\"index.php?&feed=$matches[1]\";s:20:\"page/?([0-9]{1,})/?$\";s:28:\"index.php?&paged=$matches[1]\";s:41:\"comments/feed/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php?&feed=$matches[1]&withcomments=1\";s:36:\"comments/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php?&feed=$matches[1]&withcomments=1\";s:29:\"comments/page/?([0-9]{1,})/?$\";s:28:\"index.php?&paged=$matches[1]\";s:44:\"search/(.+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:40:\"index.php?s=$matches[1]&feed=$matches[2]\";s:39:\"search/(.+)/(feed|rdf|rss|rss2|atom)/?$\";s:40:\"index.php?s=$matches[1]&feed=$matches[2]\";s:32:\"search/(.+)/page/?([0-9]{1,})/?$\";s:41:\"index.php?s=$matches[1]&paged=$matches[2]\";s:14:\"search/(.+)/?$\";s:23:\"index.php?s=$matches[1]\";s:47:\"category/(.+?)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:52:\"index.php?category_name=$matches[1]&feed=$matches[2]\";s:42:\"category/(.+?)/(feed|rdf|rss|rss2|atom)/?$\";s:52:\"index.php?category_name=$matches[1]&feed=$matches[2]\";s:35:\"category/(.+?)/page/?([0-9]{1,})/?$\";s:53:\"index.php?category_name=$matches[1]&paged=$matches[2]\";s:17:\"category/(.+?)/?$\";s:35:\"index.php?category_name=$matches[1]\";s:42:\"tag/(.+?)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php
?tag=$matches[1]&feed=$matches[2]\";s:37:\"tag/(.+?)/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php?tag=$matches[1]&feed=$matches[2]\";s:30:\"tag/(.+?)/page/?([0-9]{1,})/?$\";s:43:\"index.php?tag=$matches[1]&paged=$matches[2]\";s:12:\"tag/(.+?)/?$\";s:25:\"index.php?tag=$matches[1]\";s:47:\"author/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:50:\"index.php?author_name=$matches[1]&feed=$matches[2]\";s:42:\"author/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:50:\"index.php?author_name=$matches[1]&feed=$matches[2]\";s:35:\"author/([^/]+)/page/?([0-9]{1,})/?$\";s:51:\"index.php?author_name=$matches[1]&paged=$matches[2]\";s:17:\"author/([^/]+)/?$\";s:33:\"index.php?author_name=$matches[1]\";s:69:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:80:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&feed=$matches[4]\";s:64:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/(feed|rdf|rss|rss2|atom)/?$\";s:80:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&feed=$matches[4]\";s:57:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/page/?([0-9]{1,})/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&paged=$matches[4]\";s:39:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/?$\";s:63:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]\";s:56:\"([0-9]{4})/([0-9]{1,2})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:64:\"index.php?year=$matches[1]&monthnum=$matches[2]&feed=$matches[3]\";s:51:\"([0-9]{4})/([0-9]{1,2})/(feed|rdf|rss|rss2|atom)/?$\";s:64:\"index.php?year=$matches[1]&monthnum=$matches[2]&feed=$matches[3]\";s:44:\"([0-9]{4})/([0-9]{1,2})/page/?([0-9]{1,})/?$\";s:65:\"index.php?year=$matches[1]&monthnum=$matches[2]&paged=$matches[3]\";s:26:\"([0-9]{4})/([0-9]{1,2})/?$\";s:47:\"index.php?year=$matches[1]&monthnum=$matches[2]\";s:43:\"([0-9]{4})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:43:\"index.php?year=$matches[1]&feed=$matches[2]\";s:38:\"([0-9]{4})/(feed|rdf|rss|rss2|atom)/?$\";s:43:\"index.php?year=$matches[1]&feed=$matches[2]\";s:31:\"([0-9]{4})/page/?([0-9]{1,})/?$\";s:44:\"index.php?year=$matches[1]&paged=$matches[2]\";s:13:\"([0-9]{4})/?$\";s:26:\"index.php?year=$matches[1]\";s:47:\"[0-9]{4}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:57:\"[0-9]{4}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:77:\"[0-9]{4}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:72:\"[0-9]{4}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:44:\"([0-9]{4})/([0-9]{1,2})/([^/]+)/trackback/?$\";s:69:\"index.php?year=$matches[1]&monthnum=$matches[2]&name=$matches[3]&tb=1\";s:64:\"([0-9]{4})/([0-9]{1,2})/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&name=$matches[3]&feed=$matches[4]\";s:59:\"([0-9]{4})/([0-9]{1,2})/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&name=$matches[3]&feed=$matches[4]\";s:52:\"([0-9]{4})/([0-9]{1,2})/([^/]+)/page/?([0-9]{1,})/?$\";s:82:\"index.php?year=$matches[1]&monthnum=$matches[2]&name=$matches[3]&paged=$matches[4]\";s:44:\"([0-9]{4})/([0-9]{1,2})/([^/]+)(/[0-9]+)?/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&name=$matches[3]&page=$matches[4]\";s:36:\"[0-9]{4}/[0-9]{1,2}/[^/]+/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:46:\"[0-9]{4}/[0-9]{1,2}/[^/]+/([^/]+)/trackbac
k/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:66:\"[0-9]{4}/[0-9]{1,2}/[^/]+/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:61:\"[0-9]{4}/[0-9]{1,2}/[^/]+/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:25:\".+?/attachment/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:35:\".+?/attachment/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:55:\".+?/attachment/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:50:\".+?/attachment/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:18:\"(.+?)/trackback/?$\";s:35:\"index.php?pagename=$matches[1]&tb=1\";s:38:\"(.+?)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:47:\"index.php?pagename=$matches[1]&feed=$matches[2]\";s:33:\"(.+?)/(feed|rdf|rss|rss2|atom)/?$\";s:47:\"index.php?pagename=$matches[1]&feed=$matches[2]\";s:26:\"(.+?)/page/?([0-9]{1,})/?$\";s:48:\"index.php?pagename=$matches[1]&paged=$matches[2]\";s:18:\"(.+?)(/[0-9]+)?/?$\";s:47:\"index.php?pagename=$matches[1]&page=$matches[2]\";}','yes'),(544,0,'logged_in_salt','(cK5qhvnLVU&','yes'),(539,0,'auth_salt','cyZ3VnxYi*^d','yes'),(540,0,'avatar_default','mystery','yes'),(541,0,'enable_app','1','yes'),(542,0,'enable_xmlrpc','1','yes'),(628,0,'rss_ad16bc921c09b567b18d31a5a1b6ba49','O:9:\"MagpieRSS\":18:{s:6:\"parser\";i:0;s:12:\"current_item\";a:0:{}s:5:\"items\";a:15:{i:0;a:11:{s:5:\"title\";s:49:\"A Universal Kernel for Learning Regular Languages\";s:7:\"pubdate\";s:31:\"Thu, 18 Dec 2008 07:36:50 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/8c261e8c76d3c257bad3afcf4c113d51#mreid\";s:4:\"link\";s:52:\"http://www.wisdom.weizmann.ac.il/~aryehk/univker.pdf\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/8c261e8c76d3c257bad3afcf4c113d51\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/8c261e8c76d3c257bad3afcf4c113d51\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:84:\"JMLR preprint describing a technique for applying SVMs to regular language learning.\";s:8:\"category\";s:106:\"kernelsvmresearchmachinelearninglanguageautomatatheoryjmlr(unread)system:filetype:pdfsystem:media:document\";s:7:\"summary\";s:84:\"JMLR preprint describing a technique for applying SVMs to regular language learning.\";}i:1;a:11:{s:5:\"title\";s:9:\"Infer.NET\";s:7:\"pubdate\";s:31:\"Sat, 13 Dec 2008 00:34:51 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/f41b4ed9e1845a06aa60da0367e61068#mreid\";s:4:\"link\";s:67:\"http://research.microsoft.com/en-us/um/cambridge/projects/infernet/\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/f41b4ed9e1845a06aa60da0367e61068\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/f41b4ed9e1845a06aa60da0367e61068\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:62:\"Microsoft Cambridge's .NET platform for machine learning.\";s:8:\"category\";s:65:\"inferencemachinelearningprogramminglibraryplatformresearchvia:dwf\";s:7:\"summary\";s:62:\"Microsoft Cambridge's .NET platform for machine learning.\";}i:2;a:11:{s:5:\"title\";s:39:\"Statistics vs. 
Machine Learning, fight!\";s:7:\"pubdate\";s:31:\"Sat, 06 Dec 2008 20:52:54 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/d3ed63572e9892185b04d63c1cce2a34#mreid\";s:4:\"link\";s:68:\"http://anyall.org/blog/2008/12/statistics-vs-machine-learning-fight/\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/d3ed63572e9892185b04d63c1cce2a34\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/d3ed63572e9892185b04d63c1cce2a34\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:403:\"Nice post by Brendan O'Connor expanding on Tibshirani's humorous Rosetta stone for machine learning and statistics. I think the main reasons ML has been better funded than stats are the same reasons physicists get bigger budgets than mathematicians: equipment and relevance. ML isn't afraid to ditch purity in order to scale an algorithm up and then analyse the heuristics after the fact.\";s:8:\"category\";s:64:\"statsmachinelearningcomparisonfunnyacademiaresearchvia:arthegall\";s:7:\"summary\";s:403:\"Nice post by Brendan O'Connor expanding on Tibshirani's humorous Rosetta stone for machine learning and statistics. I think the main reasons ML has been better funded than stats are the same reasons physicists get bigger budgets than mathematicians: equipment and relevance. ML isn't afraid to ditch purity in order to scale an algorithm up and then analyse the heuristics after the fact.\";}i:3;a:11:{s:5:\"title\";s:13:\"Scratch Input\";s:7:\"pubdate\";s:31:\"Thu, 04 Dec 2008 06:12:45 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/2187fe567d82f7da458c14d9e7974cb3#mreid\";s:4:\"link\";s:82:\"http://procrastineering.blogspot.com/2008/11/scratch-input-and-low-cost-multi.html\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/2187fe567d82f7da458c14d9e7974cb3\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/2187fe567d82f7da458c14d9e7974cb3\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:184:\"Nice explanation and video of a cute new input method that makes use of surface mics to pick up on scratches. Machine learning is used to build converters of audio signals to commands.\";s:8:\"category\";s:87:\"machinelearningresearchUIHCIinnovationcoolscratchsurfacecontrolaudiogesturevia:benvolio\";s:7:\"summary\";s:184:\"Nice explanation and video of a cute new input method that makes use of surface mics to pick up on scratches. Machine learning is used to build converters of audio signals to commands.\";}i:4;a:11:{s:5:\"title\";s:27:\"The Future of Data Analysis\";s:7:\"pubdate\";s:31:\"Fri, 21 Nov 2008 01:10:09 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/445596e6f5de5547eaf8e49e31455082#mreid\";s:4:\"link\";s:88:\"http://www.stat.columbia.edu/~cook/movabletype/archives/2008/11/the_future_of_bayes.html\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/445596e6f5de5547eaf8e49e31455082\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/445596e6f5de5547eaf8e49e31455082\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:164:\"Aleks Jakulin at SMCISS with some ideas on how data analysis should proceed. 
I didn't really follow his argument but the proposals seem uncontroversial enough.\";s:8:\"category\";s:67:\"dataanalysismachinelearningpredictionspeculationsurveyresearchstats\";s:7:\"summary\";s:164:\"Aleks Jakulin at SMCISS with some ideas on how data analysis should proceed. I didn't really follow his argument but the proposals seem uncontroversial enough.\";}i:5;a:11:{s:5:\"title\";s:58:\"Statistical Learning Theory: Models, Concepts, and Results\";s:7:\"pubdate\";s:31:\"Sat, 01 Nov 2008 22:24:03 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/386667b250ccdd2368629d6b93327f36#mreid\";s:4:\"link\";s:39:\"http://front.math.ucdavis.edu/0810.4752\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/386667b250ccdd2368629d6b93327f36\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/386667b250ccdd2368629d6b93327f36\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:99:\"An excellent overview of Statistical Learning Theory by Ulrike von Luxburg and Bernhard Schölkopf.\";s:8:\"category\";s:46:\"machinelearningoverviewintroductiontheoryarxiv\";s:7:\"summary\";s:99:\"An excellent overview of Statistical Learning Theory by Ulrike von Luxburg and Bernhard Schölkopf.\";}i:6;a:11:{s:5:\"title\";s:30:\"Mathematical Statistics Course\";s:7:\"pubdate\";s:31:\"Mon, 06 Oct 2008 06:26:50 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/7086ae985e7d8888830fdfc2ceef8a9e#mreid\";s:4:\"link\";s:98:\"http://ocw.mit.edu/OcwWeb/Mathematics/18-466Mathematical-StatisticsSpring2003/CourseHome/index.htm\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/7086ae985e7d8888830fdfc2ceef8a9e\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/7086ae985e7d8888830fdfc2ceef8a9e\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:243:\"Richard M. Dudley's MIT OpenCourseWare materials for a course on mathematical statistics he taught in 2003. The first lecture has an easy to follow presentation of the Neyman-Pearson lemma. Later lectures discuss Bayesian decision theory.\";s:8:\"category\";s:69:\"mathsstatscourseby:DudleyR.M.machinelearningdecisiontheoryMITlectures\";s:7:\"summary\";s:243:\"Richard M. Dudley's MIT OpenCourseWare materials for a course on mathematical statistics he taught in 2003. The first lecture has an easy to follow presentation of the Neyman-Pearson lemma. Later lectures discuss Bayesian decision theory.\";}i:7;a:11:{s:5:\"title\";s:25:\"Machine Learning Lectures\";s:7:\"pubdate\";s:31:\"Fri, 19 Sep 2008 03:55:00 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/a7c253b5fc143bc14d9b7d7fa1f526cd#mreid\";s:4:\"link\";s:86:\"http://see.stanford.edu/SEE/lecturelist.aspx?coll=348ca38a-3a6d-4052-937d-cb017338d7b1\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/a7c253b5fc143bc14d9b7d7fa1f526cd\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/a7c253b5fc143bc14d9b7d7fa1f526cd\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:129:\"Full transcripts and videos of Andrew Ng's lectures on machine learning. Part of the Stanford Engineering Everywhere scheme.\";s:8:\"category\";s:58:\"machinelearninglecturesvideotranscriptsStanfordby:NgAndrew\";s:7:\"summary\";s:129:\"Full transcripts and videos of Andrew Ng's lectures on machine learning. 
Part of the Stanford Engineering Everywhere scheme.\";}i:8;a:11:{s:5:\"title\";s:54:\"AMT is fast, cheap, and good for machine learning data\";s:7:\"pubdate\";s:31:\"Thu, 11 Sep 2008 00:21:40 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/039cdbdf655573f982c37ac4485c084d#mreid\";s:4:\"link\";s:73:\"http://blog.doloreslabs.com/2008/09/amt-fast-cheap-good-machine-learning/\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/039cdbdf655573f982c37ac4485c084d\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/039cdbdf655573f982c37ac4485c084d\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:193:\"Discussion of a recent paper that studies the accuracy and cost of using "crowdsourcing" via the Amazon Mechanical Turk to collect supervised annotations of data for machine learning.\";s:8:\"category\";s:48:\"machinelearningdatacrowdsourcingAMTresearchpaper\";s:7:\"summary\";s:193:\"Discussion of a recent paper that studies the accuracy and cost of using "crowdsourcing" via the Amazon Mechanical Turk to collect supervised annotations of data for machine learning.\";}i:9;a:11:{s:5:\"title\";s:21:\"Optimization Lectures\";s:7:\"pubdate\";s:31:\"Thu, 04 Sep 2008 10:01:16 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/8942f69600a38f0048eaa78df83a23d9#mreid\";s:4:\"link\";s:48:\"http://www.cs.cmu.edu/~guestrin/Class/10725-S08/\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/8942f69600a38f0048eaa78df83a23d9\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/8942f69600a38f0048eaa78df83a23d9\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:158:\"Carlos Guestrin's 2008 lectures on optimisation theory as it is applied to machine learning and other problems. The slides and references are quite nice.\";s:8:\"category\";s:55:\"convex_analysislectureoptimisationmachinelearningcourse\";s:7:\"summary\";s:158:\"Carlos Guestrin's 2008 lectures on optimisation theory as it is applied to machine learning and other problems. The slides and references are quite nice.\";}i:10;a:11:{s:5:\"title\";s:43:\"Workshop on Modern Massive Data Sets (MMDS)\";s:7:\"pubdate\";s:31:\"Wed, 27 Aug 2008 21:33:00 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/2cd7d6ebfb235a952ff5142a14269205#mreid\";s:4:\"link\";s:35:\"http://www.stanford.edu/group/mmds/\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/2cd7d6ebfb235a952ff5142a14269205\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/2cd7d6ebfb235a952ff5142a14269205\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:164:\"This workshop was attended by some big names and a quick scan through the abstracts reveals that some interesting work was presented here. At Stanford in June 2008.\";s:8:\"category\";s:56:\"workshopconferencemachinelearningdataalgorithms2008stats\";s:7:\"summary\";s:164:\"This workshop was attended by some big names and a quick scan through the abstracts reveals that some interesting work was presented here. 
At Stanford in June 2008.\";}i:11;a:11:{s:5:\"title\";s:37:\"Conference Acceptance Rate Statistics\";s:7:\"pubdate\";s:31:\"Mon, 28 Jul 2008 02:45:48 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/160a4871839060150741157fb5440d6e#mreid\";s:4:\"link\";s:48:\"http://www.adaptivebox.net/CILib/CICON_stat.html\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/160a4871839060150741157fb5440d6e\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/160a4871839060150741157fb5440d6e\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:184:\"Several tables worth of acceptance rate statistics for various conferences under the broad rubric of "computational intelligence". Includes ICML, NIPS, UAI, ECML, AAAI, COLT.\";s:8:\"category\";s:47:\"machinelearningconferenceacceptancestatssummary\";s:7:\"summary\";s:184:\"Several tables worth of acceptance rate statistics for various conferences under the broad rubric of "computational intelligence". Includes ICML, NIPS, UAI, ECML, AAAI, COLT.\";}i:12;a:11:{s:5:\"title\";s:49:\"Introduction to Statistical Learning Theory [PDF]\";s:7:\"pubdate\";s:31:\"Sun, 27 Jul 2008 23:04:55 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/3f00ea5f8152a02693deed83a321d9e3#mreid\";s:4:\"link\";s:43:\"http://www.econ.upf.es/~lugosi/mlss_slt.pdf\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/3f00ea5f8152a02693deed83a321d9e3\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/3f00ea5f8152a02693deed83a321d9e3\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:184:\"This well-written survey of common theoretical tools in statistical learning by Bousquet, Boucheron & Lugosi covers SRM, regularisation, VC-dimension, Rademacher averages and more.\";s:8:\"category\";s:98:\"machinelearningstatstheorysurveyintroductionanalysisusefulsystem:filetype:pdfsystem:media:document\";s:7:\"summary\";s:184:\"This well-written survey of common theoretical tools in statistical learning by Bousquet, Boucheron & Lugosi covers SRM, regularisation, VC-dimension, Rademacher averages and more.\";}i:13;a:11:{s:5:\"title\";s:32:\"ICML/UAI/COLT 2008 Retrospective\";s:7:\"pubdate\";s:31:\"Mon, 21 Jul 2008 23:08:44 +0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/bef3ad388e455b82167bcfbd46750195#mreid\";s:4:\"link\";s:70:\"http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/bef3ad388e455b82167bcfbd46750195\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/bef3ad388e455b82167bcfbd46750195\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:87:\"Hal's summary of the three conference, replete with links to his favourite papers.\";s:8:\"category\";s:47:\"machinelearningconferencesummaryICMLUAICOLT2008\";s:7:\"summary\";s:87:\"Hal's summary of the three conference, replete with links to his favourite papers.\";}i:14;a:11:{s:5:\"title\";s:32:\"Information theoretic clustering\";s:7:\"pubdate\";s:31:\"Mon, 21 Jul 2008 06:24:56 
+0000\";s:4:\"guid\";s:63:\"http://delicious.com/url/7af3173e27249587296c6325c6074542#mreid\";s:4:\"link\";s:68:\"http://www.scholarpedia.org/article/Information_theoretic_clustering\";s:2:\"dc\";a:1:{s:7:\"creator\";s:5:\"mreid\";}s:8:\"comments\";s:57:\"http://delicious.com/url/7af3173e27249587296c6325c6074542\";s:3:\"wfw\";a:1:{s:10:\"commentrss\";s:70:\"http://feeds.delicious.com/v2/rss/url/7af3173e27249587296c6325c6074542\";}s:6:\"source\";s:17:\"mreid\'s bookmarks\";s:11:\"description\";s:95:\"Scholarpedia article surveying techniques using information theoretic approaches to clustering.\";s:8:\"category\";s:66:\"machinelearningclusteringresearchinfotheoryinformationtheorysurvey\";s:7:\"summary\";s:95:\"Scholarpedia article surveying techniques using information theoretic approaches to clustering.\";}}s:7:\"channel\";a:4:{s:5:\"title\";s:31:\"Delicious/mreid/machinelearning\";s:4:\"link\";s:42:\"http://delicious.com/mreid/machinelearning\";s:11:\"description\";s:41:\"bookmarks tagged machinelearning by mreid\";s:7:\"tagline\";s:41:\"bookmarks tagged machinelearning by mreid\";}s:9:\"textinput\";a:0:{}s:5:\"image\";a:0:{}s:9:\"feed_type\";s:3:\"RSS\";s:12:\"feed_version\";s:3:\"2.0\";s:5:\"stack\";a:0:{}s:9:\"inchannel\";b:0;s:6:\"initem\";b:0;s:9:\"incontent\";b:0;s:11:\"intextinput\";b:0;s:7:\"inimage\";b:0;s:13:\"current_field\";s:0:\"\";s:17:\"current_namespace\";b:0;s:19:\"_CONTENT_CONSTRUCTS\";a:6:{i:0;s:7:\"content\";i:1;s:7:\"summary\";i:2;s:4:\"info\";i:3;s:5:\"title\";i:4;s:7:\"tagline\";i:5;s:9:\"copyright\";}s:13:\"last_modified\";s:33:\"Tue, 23 Dec 2008 11:05:29 +0000\r\n\";}','no'),(629,0,'rss_ad16bc921c09b567b18d31a5a1b6ba49_ts','1230030329','no'); -/*!40000 ALTER TABLE `wp_options` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_postmeta` --- - -DROP TABLE IF EXISTS `wp_postmeta`; -CREATE TABLE `wp_postmeta` ( - `meta_id` bigint(20) NOT NULL auto_increment, - `post_id` bigint(20) NOT NULL default '0', - `meta_key` varchar(255) default NULL, - `meta_value` longtext, - PRIMARY KEY (`meta_id`), - KEY `post_id` (`post_id`), - KEY `meta_key` (`meta_key`) -) ENGINE=MyISAM AUTO_INCREMENT=222 DEFAULT CHARSET=utf8; - --- --- Dumping data for table `wp_postmeta` --- - -LOCK TABLES `wp_postmeta` WRITE; -/*!40000 ALTER TABLE `wp_postmeta` DISABLE KEYS */; -INSERT INTO `wp_postmeta` VALUES (78,29,'_edit_lock','1216776740'),(23,2,'_wp_page_template','default'),(79,29,'_edit_last','2'),(80,22,'_edit_lock','1216343266'),(81,22,'_edit_last','2'),(82,30,'_edit_lock','1216776751'),(83,30,'_edit_last','2'),(84,31,'_edit_lock','1216639747'),(85,31,'_edit_last','2'),(86,31,'_wp_page_template','default'),(87,32,'_edit_lock','1216639754'),(88,32,'_edit_last','2'),(89,32,'_wp_page_template','default'),(90,33,'_edit_lock','1207722973'),(91,33,'_edit_last','2'),(155,47,'_edit_last','2'),(154,47,'_edit_lock','1218869584'),(104,37,'_edit_lock','1216889724'),(103,26,'_edit_last','2'),(102,26,'_edit_lock','1207870132'),(99,36,'_edit_lock','1207799474'),(100,36,'_edit_last','2'),(101,36,'_wp_page_template','sitemap.php'),(105,37,'_edit_last','2'),(106,23,'_edit_lock','1217986978'),(107,23,'_edit_last','2'),(110,2,'_edit_lock','1213692749'),(111,2,'_edit_last','2'),(112,38,'_edit_lock','1213449930'),(113,38,'_edit_last','2'),(116,39,'_wp_attached_file','/home/confla/public_html/inductio/wp-content/uploads/2008/06/me_shorthairdark_bw.jpg'),(117,39,'_wp_attachment_metadata','a:5:{s:5:\"width\";i:127;s:6:\"height\";i:138;s:14:\"hwstring_small\";s:22:\"height=\'96\' 
width=\'88\'\";s:4:\"file\";s:84:\"/home/confla/public_html/inductio/wp-content/uploads/2008/06/me_shorthairdark_bw.jpg\";s:10:\"image_meta\";a:10:{s:8:\"aperture\";i:0;s:6:\"credit\";s:0:\"\";s:6:\"camera\";s:0:\"\";s:7:\"caption\";s:0:\"\";s:17:\"created_timestamp\";i:0;s:9:\"copyright\";s:0:\"\";s:12:\"focal_length\";i:0;s:3:\"iso\";i:0;s:13:\"shutter_speed\";i:0;s:5:\"title\";s:0:\"\";}}'),(118,40,'_edit_lock','1229561178'),(119,40,'_edit_last','2'),(120,41,'_wp_attached_file','/home/confla/public_html/inductio/wp-content/uploads/2008/06/acrp.png'),(121,41,'_wp_attachment_metadata','a:6:{s:5:\"width\";i:577;s:6:\"height\";i:543;s:14:\"hwstring_small\";s:23:\"height=\'96\' width=\'102\'\";s:4:\"file\";s:69:\"/home/confla/public_html/inductio/wp-content/uploads/2008/06/acrp.png\";s:5:\"sizes\";a:2:{s:9:\"thumbnail\";a:3:{s:4:\"file\";s:16:\"acrp-150x150.png\";s:5:\"width\";i:150;s:6:\"height\";i:150;}s:6:\"medium\";a:3:{s:4:\"file\";s:16:\"acrp-300x282.png\";s:5:\"width\";i:300;s:6:\"height\";i:282;}}s:10:\"image_meta\";a:10:{s:8:\"aperture\";i:0;s:6:\"credit\";s:0:\"\";s:6:\"camera\";s:0:\"\";s:7:\"caption\";s:0:\"\";s:17:\"created_timestamp\";i:0;s:9:\"copyright\";s:0:\"\";s:12:\"focal_length\";i:0;s:3:\"iso\";i:0;s:13:\"shutter_speed\";i:0;s:5:\"title\";s:0:\"\";}}'),(122,42,'_edit_lock','1213595217'),(123,42,'_edit_last','2'),(130,43,'_wp_attached_file','/home/confla/public_html/inductio/wp-content/uploads/2008/06/all_libraries.png'),(131,43,'_wp_attachment_metadata','a:6:{s:5:\"width\";i:531;s:6:\"height\";i:489;s:14:\"hwstring_small\";s:23:\"height=\'96\' width=\'104\'\";s:4:\"file\";s:78:\"/home/confla/public_html/inductio/wp-content/uploads/2008/06/all_libraries.png\";s:5:\"sizes\";a:2:{s:9:\"thumbnail\";a:3:{s:4:\"file\";s:25:\"all_libraries-150x150.png\";s:5:\"width\";i:150;s:6:\"height\";i:150;}s:6:\"medium\";a:3:{s:4:\"file\";s:25:\"all_libraries-300x276.png\";s:5:\"width\";i:300;s:6:\"height\";i:276;}}s:10:\"image_meta\";a:10:{s:8:\"aperture\";i:0;s:6:\"credit\";s:0:\"\";s:6:\"camera\";s:0:\"\";s:7:\"caption\";s:0:\"\";s:17:\"created_timestamp\";i:0;s:9:\"copyright\";s:0:\"\";s:12:\"focal_length\";i:0;s:3:\"iso\";i:0;s:13:\"shutter_speed\";i:0;s:5:\"title\";s:0:\"\";}}'),(140,44,'_edit_lock','1216619203'),(141,44,'_edit_last','2'),(144,45,'_edit_lock','1216639099'),(145,45,'_edit_last','2'),(148,46,'_edit_lock','1229560084'),(149,46,'_edit_last','2'),(160,21,'_edit_lock','1217995089'),(161,21,'_edit_last','2'),(162,99,'_edit_lock','1229560061'),(163,99,'_edit_last','2'),(170,24,'_edit_lock','1220579959'),(169,109,'_edit_last','2'),(168,109,'_edit_lock','1226389524'),(171,24,'_edit_last','2'),(172,111,'_edit_lock','1222066787'),(173,111,'_edit_last','2'),(174,12,'_edit_lock','1221174108'),(175,12,'_edit_last','2'),(176,112,'_wp_attached_file','/home/confla/public_html/inductio/wp-content/uploads/2008/09/picture-1.png'),(177,112,'_wp_attachment_metadata','a:6:{s:5:\"width\";i:401;s:6:\"height\";i:152;s:14:\"hwstring_small\";s:23:\"height=\'48\' 
width=\'128\'\";s:4:\"file\";s:74:\"/home/confla/public_html/inductio/wp-content/uploads/2008/09/picture-1.png\";s:5:\"sizes\";a:2:{s:9:\"thumbnail\";a:3:{s:4:\"file\";s:21:\"picture-1-150x150.png\";s:5:\"width\";i:150;s:6:\"height\";i:150;}s:6:\"medium\";a:3:{s:4:\"file\";s:21:\"picture-1-300x113.png\";s:5:\"width\";i:300;s:6:\"height\";i:113;}}s:10:\"image_meta\";a:10:{s:8:\"aperture\";i:0;s:6:\"credit\";s:0:\"\";s:6:\"camera\";s:0:\"\";s:7:\"caption\";s:0:\"\";s:17:\"created_timestamp\";i:0;s:9:\"copyright\";s:0:\"\";s:12:\"focal_length\";i:0;s:3:\"iso\";i:0;s:13:\"shutter_speed\";i:0;s:5:\"title\";s:0:\"\";}}'),(178,117,'_edit_lock','1221526935'),(179,117,'_edit_last','2'),(184,124,'_edit_lock','1229339223'),(185,124,'_edit_last','2'),(188,132,'_edit_lock','1226886599'),(189,132,'_edit_last','2'),(190,134,'_edit_lock','1224450283'),(191,134,'_edit_last','2'),(196,140,'_edit_lock','1226022761'),(197,140,'_edit_last','2'),(198,141,'_wp_attached_file','/home/confla/public_html/inductio/wp-content/uploads/2008/11/ada.jpg'),(199,141,'_wp_attachment_metadata','a:6:{s:5:\"width\";i:170;s:6:\"height\";i:130;s:14:\"hwstring_small\";s:23:\"height=\'96\' width=\'125\'\";s:4:\"file\";s:68:\"/home/confla/public_html/inductio/wp-content/uploads/2008/11/ada.jpg\";s:5:\"sizes\";a:1:{s:9:\"thumbnail\";a:3:{s:4:\"file\";s:15:\"ada-150x130.jpg\";s:5:\"width\";i:150;s:6:\"height\";i:130;}}s:10:\"image_meta\";a:10:{s:8:\"aperture\";d:4;s:6:\"credit\";s:0:\"\";s:6:\"camera\";s:21:\"Canon DIGITAL IXUS 55\";s:7:\"caption\";s:0:\"\";s:17:\"created_timestamp\";i:1225400046;s:9:\"copyright\";s:0:\"\";s:12:\"focal_length\";d:12.1199999999999992184029906638897955417633056640625;s:3:\"iso\";i:3;s:13:\"shutter_speed\";d:0.00625;s:5:\"title\";s:0:\"\";}}'),(202,143,'_edit_lock','1229325094'),(203,143,'_edit_last','2'),(204,154,'_wp_attached_file','/home/confla/public_html/inductio/wp-content/uploads/2008/11/jensen.png'),(205,154,'_wp_attachment_metadata','a:6:{s:5:\"width\";i:485;s:6:\"height\";i:420;s:14:\"hwstring_small\";s:23:\"height=\'96\' width=\'110\'\";s:4:\"file\";s:71:\"/home/confla/public_html/inductio/wp-content/uploads/2008/11/jensen.png\";s:5:\"sizes\";a:2:{s:9:\"thumbnail\";a:3:{s:4:\"file\";s:18:\"jensen-150x150.png\";s:5:\"width\";i:150;s:6:\"height\";i:150;}s:6:\"medium\";a:3:{s:4:\"file\";s:18:\"jensen-300x259.png\";s:5:\"width\";i:300;s:6:\"height\";i:259;}}s:10:\"image_meta\";a:10:{s:8:\"aperture\";i:0;s:6:\"credit\";s:0:\"\";s:6:\"camera\";s:0:\"\";s:7:\"caption\";s:0:\"\";s:17:\"created_timestamp\";i:0;s:9:\"copyright\";s:0:\"\";s:12:\"focal_length\";i:0;s:3:\"iso\";i:0;s:13:\"shutter_speed\";i:0;s:5:\"title\";s:0:\"\";}}'),(210,164,'_edit_lock','1229331183'),(211,164,'_edit_last','2'),(220,171,'_edit_lock','1229660157'),(221,171,'_edit_last','2'); -/*!40000 ALTER TABLE `wp_postmeta` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_posts` --- - -DROP TABLE IF EXISTS `wp_posts`; -CREATE TABLE `wp_posts` ( - `ID` bigint(20) unsigned NOT NULL auto_increment, - `post_author` bigint(20) NOT NULL default '0', - `post_date` datetime NOT NULL default '0000-00-00 00:00:00', - `post_date_gmt` datetime NOT NULL default '0000-00-00 00:00:00', - `post_content` longtext NOT NULL, - `post_title` text NOT NULL, - `post_category` int(4) NOT NULL default '0', - `post_excerpt` text NOT NULL, - `post_status` varchar(20) NOT NULL default 'publish', - `comment_status` varchar(20) NOT NULL default 'open', - `ping_status` varchar(20) NOT NULL default 'open', - `post_password` varchar(20) NOT 
NULL default '', - `post_name` varchar(200) NOT NULL default '', - `to_ping` text NOT NULL, - `pinged` text NOT NULL, - `post_modified` datetime NOT NULL default '0000-00-00 00:00:00', - `post_modified_gmt` datetime NOT NULL default '0000-00-00 00:00:00', - `post_content_filtered` text NOT NULL, - `post_parent` bigint(20) NOT NULL default '0', - `guid` varchar(255) NOT NULL default '', - `menu_order` int(11) NOT NULL default '0', - `post_type` varchar(20) NOT NULL default 'post', - `post_mime_type` varchar(100) NOT NULL default '', - `comment_count` bigint(20) NOT NULL default '0', - PRIMARY KEY (`ID`), - KEY `post_name` (`post_name`), - KEY `type_status_date` (`post_type`,`post_status`,`post_date`,`ID`) -) ENGINE=MyISAM AUTO_INCREMENT=175 DEFAULT CHARSET=utf8; - -- -- Dumping data for table `wp_posts` -- - -LOCK TABLES `wp_posts` WRITE; -/*!40000 ALTER TABLE `wp_posts` DISABLE KEYS */; -INSERT INTO `wp_posts` VALUES (2,1,'2007-09-11 06:30:09','2007-09-11 11:30:09','Inductio Ex Machina is [Mark Reid][me]\'s machine learning research blog.\r\n\r\nI\'m a [research fellow][me_anu] with the Statistical Machine Learning group at the [Australian National University](http://anu.edu.au) in Canberra.\r\n\r\nCurrently, I\'m investigating representations of learning problems and looking at connections between them. The aim is to (eventually) build a conceptual map of machine learning in its many and varied flavours. That\'s still a long way off yet - I\'m still orienting myself and scanning for landmarks. More details can be found at my [academic site](http://rsise.anu.edu.au/~mreid/). \r\n\r\nIf you want to keep up with what I\'m reading you can check out my [CiteULike](http://www.citeulike.org/user/mdreid) profile or you may want to join the [Statistical Machine Learning](http://www.citeulike.org/groupfunc/3808/home) group I started there.\r\n\r\nYou can find out more about me at my [eponymous website](http://mark.reid.name).\r\n\r\n[me]: http://mark.reid.name\r\n[me_anu]: http://users.rsise.anu.edu.au/~mreid','About',0,'','publish','open','open','','about','','','2008-06-17 18:52:28','2008-06-17 08:52:28','',0,'',0,'page','',0),(12,2,'2007-09-22 17:45:48','2007-09-22 07:45:48','Welcome to my machine learning research blog.\r\n\r\nI\'m a newly minted Ph.D. graduate who has taken up a post-doctoral research fellowship with the\r\n[Statistical Machine Learning][sml] group within the [Computer Sciences Laboratory][csl] at\r\nthe Australian National University.\r\n\r\n[sml]: http://csl.rsise.anu.edu.au/sml\r\n[csl]: http://csl.cecs.anu.edu.au/\r\n\r\nThe plan here is to present for discussion any papers, questions or ideas related to machine\r\nlearning. In particular, I\'ll probably concentrate on my past and present interests, including:\r\nstatistical learning theory, dimensional reduction, transfer learning, rule learning and the\r\nphilosophy of induction.\r\n\r\nAs well as being an introduction, this post also plays a second role, which is to claim this blog\r\nas part of my Technorati Profile\r\nthrough the inclusion of that previous link.','Introducing Inductio Ex Machina ',0,'','publish','open','open','','introducing-inductio-ex-machina','','','2007-09-24 15:06:51','2007-09-24 05:06:51','',0,'http://conflate.net/inductio/general/introducing-inductio-ex-machina/',0,'post','',2),(15,2,'2007-10-03 13:03:53','2007-10-03 03:03:53','Last week I saw an interesting PhD monitoring [presentation][] by [Justin Bedo][] on the\r\ncounter-intuitive phenomenon of \"anti-learning\". 
For certain datasets, learning a classifier from a small number of samples and inverting its predictions performs much better than the original classifier. Most of the theoretical results Justin mentioned are discussed in a recent [paper][] and [video lecture][] by [Adam Kowalczyk][]. These build on [earlier work][] presented at ALT 2005. As John notes in his [blog post][] from a couple of years ago, the strangeness of anti-learning is due to our assumption that proximity implies similarity.\r\n\r\nThis anti-learning effect has been observed in naturally occurring esophageal adenocarcinoma data: a binary classification problem with nearly 10,000 features. In his talk, Justin presented evidence that the effect was real (by constructing a null hypothesis through repeated shuffling of the data labels) and relatively invariant to choice of learning algorithm.\r\n\r\nLike any good scientist, Justin and his colleagues replicated the phenomenon in a simpler,\r\nsynthetic model in order to better understand what might be happening. The model proposed is one that models competition between features: if one feature has a large value, the others are small but in the opposite direction, and examples from different classes have distinct large features pointing in opposite directions. This results in examples from opposite classes being more similar (_i.e._, they have a larger positive inner product) than examples from the same class. At a stretch, this model is also biologically plausible if features are expressions of competing entities in a cell.\r\n\r\nThe algorithm proposed to deal with anti-learning uses some of the data available at training\r\ntime to test whether it has anti-learning characteristics and, if so, inverts the resulting\r\nclassifier. This \"burns\" some of the information in the training data but can dramatically\r\nimprove performance when anti-learning is correctly identified.\r\n\r\nIt\'s an interesting example of a trade-off that can be made between data and background\r\nknowledge. With relatively few examples and the knowledge that you are in an anti-learning situation, you can flip classifications and do very well. As the amount of data available increases, the learning algorithm will converge to a good classifier, so the assumption is less valuable and flipping classifications is costly.\r\n\r\n[adam kowalczyk]: http://users.rsise.anu.edu.au/~akowalczyk/\r\n[paper]: http://adamk.antilearning.googlepages.com/ecml07.pdf\r\n[video lecture]: http://videolectures.net/mlss06au_kowalczyk_al/\r\n[justin bedo]: http://holly.ath.cx/\r\n[blog post]: http://hunch.net/?p=35\r\n[presentation]: http://cecs.anu.edu.au/seminars/showone.pl?SID=523\r\n[earlier work]: http://www.springerlink.com/content/e3ey7r6yxu68fye6/','Anti-Learning',0,'','publish','open','closed','','anti-learning','','\nhttp://hunch.net/?p=35','2007-10-03 13:41:54','2007-10-03 03:41:54','',0,'http://conflate.net/inductio/general/anti-learning/',0,'post','',-6),(37,2,'2008-04-21 15:57:42','2008-04-21 05:57:42','I\'ve been looking into the relationships between losses, divergences and other measures of predictors and problems recently and came across a 2006 paper by Drummond and Holte entitled Cost Curves: An improved method for visualizing classifier performance. This paper describes a representation of classifier performance that is very closely related to the usual ROC curve. 
However, unlike ROC plots of (False Positive Rate, True Positive Rate)-points for various operating conditions of the classifier, cost curves show (cost, risk)-points. That is, for each cost plotted on the x axis, the y co-ordinate shows the cost-weighted loss for the classifier.\r\n\r\nAs explained in Drummond and Holte\'s paper, there is a simple point-line duality between ROC space and Cost-Loss space based on the definition of cost-weighted loss. If [tex](FP,TP)[/tex] is a point in ROC space then the cost-loss relationship [tex](c, L)[/tex] is linear and satisfies\r\n
\r\n[tex] \displaystyle L = (1-\pi) c FP + \pi (1-c) (1 - TP) [/tex] \r\n
\r\nwhere [tex]c[/tex] is the cost of a false positive and [tex]\\pi[/tex] the prior probability of the positive class[^1]. \r\n\r\nGiven a specific [tex]\\pi[/tex] this relationship is completely invertible. A point [tex](c,L)[/tex] in cost-loss space corresponds to the following line in ROC space\r\n
\r\n[tex]\displaystyle TP = \frac{(1-\pi) c}{\pi(1-c)} FP + \frac{(1-\pi) c - L}{\pi(1-c)}.[/tex]\r\n
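To make the duality concrete, here is a minimal Ruby sketch of both directions of the mapping, following the two formulas above (the function names are illustrative; this is not the applet's Processing code):

    # Dual of an ROC point (fp, tp): the line in cost space giving the
    # cost-weighted loss L as a function of the false positive cost c,
    # for a fixed prior pi on the positive class.
    def roc_point_to_cost_line(fp, tp, pi)
      lambda { |c| (1 - pi) * c * fp + pi * (1 - c) * (1 - tp) }
    end

    # Dual of a cost space point (c, l): the line in ROC space giving
    # TP as a function of FP.
    def cost_point_to_roc_line(c, l, pi)
      slope     = ((1 - pi) * c) / (pi * (1 - c))
      intercept = ((1 - pi) * c - l) / (pi * (1 - c))
      lambda { |fp| slope * fp + intercept }
    end

    # Example: the ROC point (0.1, 0.5) with pi = 0.5 incurs loss
    # 0.5 * 0.3 * 0.1 + 0.5 * 0.7 * 0.5 = 0.19 at cost c = 0.3.
    puts roc_point_to_cost_line(0.1, 0.5, 0.5).call(0.3)  # ~> 0.19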
\r\n\r\nMy ability to intuitively grasp this duality relationship was not that great, so I hacked together the following applet to help. On the right is a black curve in ROC space representing five (False Positive, True Positive) rates for some imaginary classifier. The points are (0,0), (0.1, 0.5), (0.3, 0.8), (0.7, 0.95) and (1,1). The diagonal grey line on the ROC plot represents the performance of random classifiers - each increase in True Positive rate is countered by an equivalent decrease in False Positive rate.\r\n\r\nThe left plot, entitled \"Cost Space\", shows the (cost,loss) duals of both the black and grey curves from the right-hand plot. The grey diagonal on the right corresponds to a \"tent\" on the left that represents the best performance of a classifier that constantly predicts a single class.\r\n\r\n[Interactive Java applet showing the Cost Space and ROC space plots side by side appears here; its fallback text asks the reader to install the Java Plug-in.]\r\n
\r\n\r\nIf you click in the applet area you can get a feel for the relationship between these two representations. When you move your mouse over ROC space you will see the corresponding line in cost space. Conversely, when you move your mouse over the cost space plot you will see the dual line in ROC space.\r\n\r\nThe bar at the bottom of the two plots controls the prior probability [tex]\pi[/tex]. You can see how the dual curve in cost space changes as this parameter is modified.\r\n\r\nThe code for this applet is available through [GitHub](http://github.com). The visualisation aspects are written in [Processing](http://processing.org) and are [available here](http://github.com/mreid/siroc/). This relies on some [Java code](http://github.com/mreid/geovex/) I also wrote that does the point-line conversions.\r\n\r\n[Chris Drummond](http://www.site.uottawa.ca/~cdrummon/) has also created an [applet](http://www.site.uottawa.ca/~cdrummon/rocconversion.html) to do the same kind of conversion. The one here can be seen as complementary since his version allows the user to add data points and construct curves whereas mine just aims to make the key relationship interactive. \r\n\r\n[^1]: My description here differs slightly from Drummond and Holte\'s in that I am keeping priors and costs separate and not normalising the loss. ','Visualising ROC and Cost Curve Duality',0,'Discussion of the point-line duality between Drummond and Holte\'s cost curves and ROC curves. An applet is provided to help visualise this relationship. ','publish','open','open','','visualising-roc-and-cost-curve-duality','','','2008-04-21 15:57:42','2008-04-21 05:57:42','',0,'http://conflate.net/inductio/?p=37',0,'post','',0),(16,2,'2007-10-03 15:39:36','2007-10-03 05:39:36','In keeping with the \"Anti-\" theme from my [last post][] I thought I\'d share something I found in the treasure trove of [rants][] that [J. Michael Steele][] has put on the web for our edification.\n\nAntihubrisines, according to John W. Tukey in his 1986 paper, [Sunset Salvo][], are little pearls of wisdom to keep in mind if you suspect you are being afflicted by hubris. They are to \"suffering philosophy\" what antihistamines are to suffering sinuses:\n> To statisticians, hubris should mean the kind of pride that fosters \n> an inflated idea of one\'s powers and thereby keeps one from being \n> more than marginally helpful to others. ... The feeling of \"Give me\n> (or more likely even, give my assistant) the data, and I will tell\n> you what the real answer is!\" is one we must all fight against again\n> and again, and yet again.\n\nIncluded in Tukey\'s prescription are a number of strains of advice, both qualitative and quantitative. Among my favourites is this very bracing tonic that should be administered whenever you plan to start number crunching:\n> The data may not contain the answer. The combination of some data \n> and an aching desire for an answer does not ensure that a \n> reasonable answer can be extracted from a given body of data.\n\n[last post]: http://conflate.net/inductio/theory/anti-learning\n[rants]: http://www-stat.wharton.upenn.edu/~steele/Rants.htm\n[j. 
michael steele]: http://www-stat.wharton.upenn.edu/~steele/\n[sunset salvo]: http://www-stat.wharton.upenn.edu/~steele/HoldingPen/SunsetSalvo.pdf','Antihubrisines',0,'','publish','open','closed','','antihubrisines','','','2007-10-03 15:39:36','2007-10-03 05:39:36','',0,'http://conflate.net/inductio/philosophy/antihubrisines/',0,'post','',0),(18,2,'2007-10-19 16:57:15','2007-10-19 06:57:15','A [discussion over at God Plays Dice][discussion] had me nodding in agreement: proving a theorem is like playing an adventure game. As Isabel puts it\r\n\r\n> You are in a maze of twisty little equations, all alike\r\n\r\nalluding to a particularly fiendish puzzle in the text adventure [Colossal Cave][].\r\n\r\nHaving recently grappled with some tricky proofs, I was wondering how they might play out as a piece of interactive fiction...\r\n\r\n You are sitting before a particularly thorny conjecture. \r\n Possible proofs lead away from here in several directions.\r\n \r\n > inventory\r\n \r\n You are carrying the following items:\r\n A ream of blank paper\r\n A pencil\r\n The Cauchy-Schwarz inequality\r\n Some half-remembered undergraduate mathematics\r\n \r\n > look conjecture\r\n \r\n You stare blankly at the conjecture. You think it might \r\n have something to do with convexity.\r\n \r\n > w\r\n\r\n You surf over to Wikipedia and read up on sub-tangents. \r\n The notation makes you confused.\r\n \r\n There is a lemma here.\r\n \r\n > take lemma\r\n \r\n Taken.\r\n \r\n > e\r\n \r\n You wander off to go get a bite to eat and some coffee.\r\n \r\n You see a colleague here.\r\n \r\n > talk colleague\r\n \r\n After explaining your conjecture your colleague mutters \r\n that it was probably proven in the 50s by a Russian.\r\n \r\n > s\r\n \r\n You sit back down at your desk and spend half an hour \r\n reading pages linked to from reddit.\r\n\r\n You see an unproved conjecture here.\r\n \r\n > use lemma\r\n [on the conjecture] \r\n \r\n With a bit of manipulation you turn the equation into one \r\n involving the expectation of a product.\r\n \r\n > use Cauchy-Schwarz\r\n [on the conjecture]\r\n \r\n Hooray! You now have a tight bound on a key quantity, \r\n proving your conjecture.\r\n \r\n > generalise assumptions\r\n\r\n Your theorem was eaten by a Grue.\r\n\r\n[discussion]: http://godplaysdice.blogspot.com/2007/10/you-are-in-maze-of-twisty-little.html\r\n[colossal cave]: http://en.wikipedia.org/wiki/Colossal_Cave_Adventure#Maze_of_twisty_little_passages','The Mathematical Grue',0,'','publish','open','open','','the-mathematical-grue','','','2007-10-20 08:54:58','2007-10-19 22:54:58','',0,'http://conflate.net/inductio/general/the-mathematical-grue/',0,'post','',2),(47,2,'0000-00-00 00:00:00','0000-00-00 00:00:00','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. 
My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\nTrading Cash for Probability\r\n-------------------------------\r\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \r\n\r\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\r\n\r\n* A) \"Pays $1 to bearer if it rains next Monday\", and \r\n* B) \"Pays $1 to bearer if it does not rain next Monday\". \r\n\r\nIf I\'m 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. \r\n\r\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \r\n\r\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \r\n\r\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\r\n\r\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\r\n\r\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\r\n\r\nProper Scoring Rules\r\n------------------------\r\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as \"proper scoring rules\" was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\r\n\r\nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a \"report\" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. 
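The quadratic (Brier) score is a standard example of a proper rule and makes this definition easy to check numerically. A small Ruby sketch (an illustration, not something defined in this draft), writing a report as its second coordinate r_1 since r_0 = 1 - r_1:

    # Quadratic (Brier) scoring rule for a binary event:
    #   s_0(r) = 1 - r1^2        is paid if the event does not occur,
    #   s_1(r) = 1 - (1 - r1)^2  is paid if it does.
    def quadratic_score(r1)
      [1.0 - r1**2, 1.0 - (1.0 - r1)**2]
    end

    # Expected payment when the true probability of the event is p1
    # and the reported probability is r1.
    def expected_payment(p1, r1)
      s0, s1 = quadratic_score(r1)
      (1.0 - p1) * s0 + p1 * s1
    end

    # A grid search over reports suggests the rule is proper: the
    # expectation is maximised by reporting the true probability.
    p1 = 0.7
    best = (0..100).map { |i| i / 100.0 }.max_by { |r1| expected_payment(p1, r1) }
    puts best  # => 0.7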
\r\n\r\nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]\\langle s(r), w \\rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]\\langle s(r), w \\rangle = s_1(r)[/tex] and similarly the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex].\r\n\r\nIf you know the scoring rule I use in advance then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \r\n\r\nIn order to ensure you report what you really believe to be the true probabilities I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then\r\n
\r\n[tex]\r\n\\displaystyle\r\n\\max_{r} \\mathbb{E}_p \\langle s(r), w \\rangle = \\mathbb{E}_p \\langle s(p), w \\rangle .\r\n[/tex]\r\n
\r\nScoring rules that meet this criterion are described as \"proper\" or \"Fisher consistent\".\r\n\r\nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \r\n
\r\n[tex]\r\n\\displaystyle\r\n\\mathbb{E}_p \\langle s(r), w \\rangle = \\langle s(r), \\mathbb{E}_p w \\rangle = \\langle s(r), p \\rangle\r\n[/tex]\r\n
\r\nsince [tex]\mathbb{E}_p w = p[/tex]. If everything is suitably differentiable, the Fisher consistency (or \"properness\") condition requires that the derivatives of the scoring rule satisfy, for all [tex]p[/tex],\r\n
\r\n[tex]\r\n\\displaystyle\r\n\\langle \\frac{\\partial}{\\partial r_i} s(p), p \\rangle = 0.\r\n[/tex]\r\n
\r\nThat means the derivatives of the scoring rule must be orthogonal to [tex]p[/tex]. \r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\r\n\r\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\r\n\r\n[This leads to telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\nPrediction Markets in the Wild\r\n----------------------------------\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\r\n\r\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\r\n\r\n[hubdub]: http://www.hubdub.com/\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts: information becomes a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\r\n\r\nResearch shows that in the areas where they have been used, prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. 
The main points of David\'s argument are: the terrorist activities made up a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets\' reaction to terrorism via airline and oil companies; and we bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\nReferences\r\n------------\r\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\r\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\r\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\r\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Scoring Rules and Prediction Markets',0,'','draft','open','open','','','','','2008-08-11 21:59:12','2008-08-11 11:59:12','',0,'http://conflate.net/inductio/?p=47',0,'post','',0),(20,2,'2007-11-20 15:32:36','2007-11-20 05:32:36','Things have been a little quiet around here of late, mainly because I\'ve been working on a submission for the NIPS 2007 Workshop on [Principles of Learning Problem Design][nipsws] in early December.\r\n\r\nI\'m pleased to say that I\'ll be presenting some recent results that [Bob][] and [I][me] have been working on under the heading of \"Representations in Learning Task Design\". The focus is on finding *primitives* and *combinators* for describing learning tasks (Aside: \"problems\" are what you are trying to solve, \"tasks\" are what you give to computers to solve them). \r\n\r\nUnsurprisingly, [cost-sensitive losses][csl] are one such primitive and, when combined using weighted integration, they can represent a variety of losses for a range of learning tasks including classification, regression and class probability estimation. \r\n\r\nSince this is a workshop paper, most of the results are still fairly preliminary and build on a lot of work by others. That said, I think it\'s a good approach as several previously known results are subsumed and simplified. I\'ll post our paper and slides once they are completed.\r\n\r\nLet me know if you are attending NIPS and we can try to catch up. Hope to see you at [Whistler][]. 
\r\n\r\n[nipsws]: http://hunch.net/~learning-problem-design/ \r\n[bob]: http://users.rsise.anu.edu.au/~williams/\r\n[me]:http://users.rsise.anu.edu.au/~mreid/\r\n[csl]: http://www-cse.ucsd.edu/users/elkan/rescale.pdf\r\n[whistler]: http://nips.cc/Conferences/2007/Program/schedule.php?Session=Workshops','Principles of Learning Problem Design',0,'','publish','open','open','','principles-of-learning-problem-design','','','2007-11-20 15:32:36','2007-11-20 05:32:36','',0,'http://conflate.net/inductio/theory/principles-of-learning-problem-design/',0,'post','',0),(21,2,'2007-12-19 09:20:43','2007-12-18 23:20:43','I\'ve been attempting to read an interesting [NIPS 2007][] paper entitled [Estimating divergence functionals and the likelihood ratio by convex risk minimization][Nguyen et al 2007] and realised my knowledge of convex analysis was sketchy at best.\r\n\r\nFortunately, [Wikipedia][] pointed me to an excellent [summary of the Legendre-Fenchel transformation][LF transform] by [Hugo Touchette][]. A bit more digging around Hugo\'s site led me to a great [cheat sheet][] for convex analysis, covering many of the concepts that were causing me trouble.\r\n\r\nGreat stuff!\r\n\r\n[Nguyen et al 2007]: http://books.nips.cc/papers/files/nips20/NIPS2007_0782.pdf\r\n[Wikipedia]: http://en.wikipedia.org/wiki/Convex_conjugate \r\n[LF transform]: https://www.maths.qmul.ac.uk/~ht/archive/lfth2.pdf\r\n[Hugo Touchette]: http://www.maths.qmul.ac.uk/~ht/index.html\r\n[cheat sheet]: http://www.maths.qmul.ac.uk/~ht/archive/convex1.pdf\r\n[NIPS 2007]: http://books.nips.cc/nips20.html','A Crash Course in Convex Analysis',0,'','publish','open','open','','a-crash-course-in-convex-analysis','','','2007-12-19 09:20:43','2007-12-18 23:20:43','',0,'http://conflate.net/inductio/theory/a-crash-course-in-convex-analysis/',0,'post','',-1),(22,2,'2007-12-22 00:50:03','2007-12-21 14:50:03','I attended my first NIPS conference this month, had a great time, and was\r\nable to put faces to many names I\'ve encountered on papers recently.\r\n\r\nThe quantity and quality of work was somewhat overwhelming but I hear from\r\nmore seasoned NIPS attendees that this is par for the course. What follows are a\r\nfew topics and presentations that caught my attention during the conference and\r\nworkshop sessions this year.\r\n\r\n\r\n\r\n\r\nThe Conference\r\n--------------\r\n\r\n\r\nThe Workshops\r\n-------------\r\nI attended two of the 19 workshops: \r\n[Principles of Learning Problem Design][plpd] (where I gave a talk on some of \r\nthe [representations][] work [Bob][] and I have been working on) and \r\n[Representations and Inference on Probability Distributions][ripd].\r\n\r\n[Bob]: http://axiom.anu.edu.au/~williams/\r\n[representations]: http://users.rsise.anu.edu.au/~mreid/research/\r\n[plpd]: http://hunch.net/~learning-problem-design/\r\n[ripd]: http://nips2007.kyb.tuebingen.mpg.de/\r\n\r\nThere was a wide variety of work in the first of these workshops but the one \r\nthat I found most interesting was the poster that [Steve Hanneke][] talked me \r\nthrough on [Asymptotic Active Learning][aal]. Previous results show that active \r\nlearning is not any more powerful than standard supervised learning in terms of\r\nthe number of labelled samples required to beat a particular error rate with \r\nconfidence. 
The new result here is that a slight weakening of the usual PAC\r\nmodel - only requiring the learner find a good hypothesis rather than requiring \r\nthat the learner also \"know\" it is a good hypothesis - leads to sample \r\ncomplexities that are polylogarithmic (rather than linear) in the reciprocal of\r\nthe error rate.\r\n\r\n[Steve Hanneke]: http://www.cs.cmu.edu/~shanneke/\r\n[aal]: http://hunch.net/~learning-problem-design/gamma.pdf\r\n\r\nIn the Representations and Inference on Probability Distributions workshop there\r\nwere several talks and posters I found interesting. I really enjoyed both the\r\ncontent and presentation of [Andrew McGregor][]\'s talk on \r\n[Sketching and Streaming for Distributions][ssfd].\r\n\r\n[Andrew McGregor]: http://talk.ucsd.edu/andrewm/\r\n[ssfd]: http://talk.ucsd.edu/andrewm/slides/07-nipsslides.pdf\r\n\r\n* Conjugate Projection Limits: \r\n [Peter Orbanz][] has some work looking at how to extend distribution updates \r\n followed by projections to infinite dimensional spaces.\r\n\r\n[Peter Orbanz]: https://www.ml.inf.ethz.ch/people/phdstudents/porbanz\r\n\r\n* Fourier decompositions of distributions over permutations:\r\n Very cool group theory-based work on how to represent \r\n distributions over permutations in terms of group characters.\r\n\r\n* Efficiently estimating divergences between distributions:\r\n Andrew McGregor gives some results on \"sketching\" distributions and uses \r\n results from communication theory to show that only L1 and L2 divergences\r\n can be (space) efficiently learnt from data streams (no revisiting data).\r\n\r\n* Fréchet derivatives(?):\r\n Used in Maya Gupta\'s talk on extending divergences beyond vectors.\r\n\r\nInsights\r\n--------\r\nMaya Gupta and colleagues\' presentation started from the observation that the\r\nmean of a set of points is the least-squares minimiser, and from others\' results \r\nshowing that Bregman divergences can be seen as the quantity to be minimised\r\nwhen other summary statistics are used. Using the nice observation that the \r\ndefinition of a Bregman divergence for F, $d_F(x,y)$, can be seen as the tail of\r\na Taylor series expansion \r\n\[\r\n F(x) = F(y) + \\langle \\nabla F(y), x - y \\rangle + d_F(x,y)\r\n\]\r\nthey are able to generalise divergences to objects other than vectors \r\n(e.g., functions).\r\n\r\nLe Song\'s presentation on the use of RKHSs to estimate expectations of functions\r\nfrom samples was neat. The trick here was that the mean of the samples in the\r\nHilbert space can \"encode\" information about the higher order moments of the\r\nsamples in the original space. The expectation of a function with respect to the\r\nsamples can be written as the inner product of the function and the mean in the\r\nHilbert space. They show that approximating the sample mean well by an estimate \r\nof the density function in the Hilbert space will give good approximations in the\r\noriginal space of the expectation (w.r.t. the sample distribution) of any \r\nfunction. 
This differs from the use of moment generating functions in that \r\ninfinitely many moments of the sample distribution can be estimated \r\nsimultaneously.','NIPS 2007 Highlights',0,'','private','open','open','','nips-2007-highlights','','','2008-07-18 09:41:43','2008-07-17 23:41:43','',0,'http://conflate.net/inductio/community/nips-2007-highlights/',0,'post','',0),(23,2,'2008-02-04 14:06:18','2008-02-04 04:06:18','Just when I thought I was starting to get my head around the multitudinous uses of convexity in statistics I was thrown by the following definition:\r\n\r\n> A function f over the interval (a,b) is convex if, for all choices of {x,y,z} \r\n> satisfying a < x < y < z < b the determinant \r\n>\r\n>
[tex] \\displaystyle \\left| \\begin{array}{ccc} 1 & 1 & 1 \\\\ x & y & z \\\\ f(x) & f(y) & f(z) \\end{array}\\right|[/tex]
\r\n>\r\n> is non-negative.\r\n\r\nAfter expanding the determinant and some algebraic twiddling I realised that this is just a very compact way of requiring that\r\n
\r\n[tex]\\displaystyle\\frac{z-y}{z-x} f(x) + \\frac{y-x}{z-x}f(z) \\geq f(y)[/tex]\r\n
\r\nwhich, after noticing that (z-y) + (y-x) = (z-x), is of course the more traditional way of saying a function is convex.\r\n\r\nWhat\'s neat about this determinant representation is that it extends nicely to what are known as kth-order convex functions (ones whose derivatives up to order k are convex). Specifically, f is k-convex if, for all choices of [tex]\\{x_i\\}_{i=0}^{k+1}[/tex] satisfying [tex]a < x_0 < \\ldots < x_{k+1} < b[/tex],\r\n
\r\n[tex] \\displaystyle \\left| \r\n \\begin{array}{ccc} \r\n 1 & \\cdots & 1 \\\\ \r\n x_0 & \\cdots & x_{k+1} \\\\ \r\n x_0^2 & \\cdots & x_{k+1}^2 \\\\\r\n \\vdots & \\ddots & \\vdots \\\\\r\n x_0^k & \\cdots & x_{k+1}^k \\\\\r\n f(x_0) & \\cdots & f(x_{k+1}) \r\n \\end{array} \\right| \\geq 0.[/tex]\r\n
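As a quick sanity check of this determinant criterion, here is a small Ruby sketch (the functions and test points are arbitrary choices of mine, not from any reference):

    require 'matrix'

    # Build the (k+2)x(k+2) matrix above for points xs = [x_0, ..., x_{k+1}]
    # and function f, and return its determinant.
    def convexity_det(f, xs)
      k = xs.length - 2
      rows = (0..k).map { |power| xs.map { |x| x**power } }  # rows 1, x, ..., x^k
      rows << xs.map { |x| f.call(x) }                       # final row f(x_0) ... f(x_{k+1})
      Matrix.rows(rows).determinant
    end

    puts convexity_det(->(x) { x * x },  [0.0, 1.0, 3.0])       # 6.0: x^2 passes the k = 1 test
    puts convexity_det(->(x) { -x * x }, [0.0, 1.0, 3.0])       # -6.0: -x^2 fails it
    puts convexity_det(->(x) { x**3 },   [0.0, 1.0, 2.0, 3.0])  # 12.0: x^3 passes a k = 2 spot check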
\r\n\r\nWhile it is arguably less transparent than explicitly writing out all the convexity inequalities for each of the derivatives of f, it certainly makes up for it with compactness.','A Cute Convexity Result',0,'','publish','open','open','','a-cute-convexity-result','','','2008-02-05 08:35:54','2008-02-04 22:35:54','',0,'http://conflate.net/inductio/theory/a-cute-convexity-result/',0,'post','',4),(24,2,'2008-02-07 16:42:05','2008-02-07 06:42:05','I recently started using [CiteULike][] to keep track of papers I read. For those not familiar with it, it bills itself as \"a free online service to organise your academic papers\". In contrast to my offline bibliography organising tool, [BibDesk][], a service like this has at least three main advantages:\r\n\r\n* Reduced data entry: If someone else has already entered the article\'s details or CiteULike can scrape them off a journal\'s web page I don\'t have to enter them myself.\r\n\r\n* Easy access: The bibliography is stored on the web making access away from my machine or by others straightforward. It\'s also possible to upload copies of papers for personal use in case you\'re not able to get to your university\'s reverse proxy. \r\n\r\n* Social networks: When I see that someone else has read a paper I\'m interested in I can easily see what else that person has read. I can also look at all the other papers that people have associated with a particular tag. \r\n\r\nLike Yaroslav, who also uses CiteULike as part of his [larger strategy][yaroslav] for staying organised, I have started using the Notes field for entries to keep track of important theorems, equations and cross-references of papers that I go over more than once. \r\n\r\nOf course, once you\'ve collected a bunch of papers you can also export your bibliography as BibTeX or RIS so you can include citations in your papers. This is especially convenient with BibDesk. All I do is open a \"New External File Group\" in BibDesk and point it to the URL for my CiteULike account: `http://www.citeulike.org/bibtex/user/mdreid`. BibDesk keeps track of which external entries have or haven\'t been imported into your offline BibTeX file, making it easy to quickly build a conference-specific bibliography.\r\n\r\nI find this BibDesk and CiteULike combination the best of both worlds as it reduces the amount of data entry I need to do while still making it easy to push citations to [TextMate][] or [LyX][] when I\'m writing.\r\n\r\n[citeulike]: http://citeulike.org/user/mdreid\r\n[bibdesk]: http://bibdesk.sourceforge.net/\r\n[yaroslav]: http://yaroslavvb.blogspot.com/2008/02/strategies-for-organizing-literature.html\r\n[textmate]: http://macromates.com/\r\n[lyx]: http://www.lyx.org/','Staying Organised with CiteULike and BibDesk',0,'','publish','open','open','','staying-organised-with-citeulike-and-bibdesk','','','2008-02-07 16:42:05','2008-02-07 06:42:05','',0,'http://conflate.net/inductio/reading/staying-organised-with-citeulike-and-bibdesk/',0,'post','',2),(25,2,'2008-02-19 21:36:34','2008-02-19 10:36:34','John Langford has diagnosed a [complexity illness][] that afflicts research in academia. One of its symptoms is what he calls \"Math++\": the use of unnecessary and obfuscatory mathematics to improve the author\'s chance of publication. \r\n\r\nHaving recently ploughed through a large number of math-heavy articles during the preparation of a [COLT][] paper, I have started to worry whether the illness is contagious. 
At present there is a rash of awkward notation breaking out in some sections of my draft. While I don\'t think I can completely get rid of it I\'m hoping that I can at least tidy it up and turn it into something presentable.\r\n\r\nWanting to tidy up awkward mathematical expression is definitely not the same as wanting to remove it completely. To switch [analogies][], maths is akin to a communications channel. The aim of the encoder is to cram information down the line so it can be decoded at the other end. Good mathematical notation encodes frequently occurring concepts with short, memorable terms and takes advantage of relationships between concepts. Using a side-channel -- e.g., the English text of the paper -- to ease the burden of decoding is also a good strategy.\r\n\r\nJohn also suggests treating Math++ (and other forms of complexity) with education. This doesn\'t necessarily mean giving a lecture on your research; any attempt at communication counts. I\'ve found that attempting to describe what I\'m working on over lunch - and without a whiteboard - can be a good way to focus on the story of your research rather than the technicalities. I find the technical details of a paper much easier to understand when I understand their motivation.\r\n\r\nEven if I don\'t completely cure my paper of Math++, I take some solace from Fernando Pereira who [points out][pereira] that research is a form of dialogue and that dialogue is inherently messy, which is sometimes the reason mathematical exposition is less than perfect. It\'s only through repeated attempts to communicate ideas that one is able to figure out what is important.\r\n\r\n[pereira]: http://earningmyturns.blogspot.com/2008/02/complexity-illness.html\r\n[complexity illness]: http://hunch.net/?p=316\r\n[COLT]: http://www.learningtheory.org/\r\n[analogies]: http://apperceptual.wordpress.com/2007/12/20/readings-in-analogy-making/\r\n ','Clarity and Mathematics',0,'','publish','open','open','','clarity-and-mathematics','','\nhttp://hunch.net/?p=316','2008-03-07 10:06:21','2008-03-06 23:06:21','',0,'http://conflate.net/inductio/general/clarity-and-mathematics/',0,'post','',4),(26,2,'2008-02-22 15:40:00','2008-02-22 04:40:00','I had to go hunting around for some data to try some new ideas on recently.\r\nAs [handy][google results] as Google is, there\'s still a fair bit of \r\nchaff from which to sort the wheat. \r\n\r\n[google results]: http://google.com/search?q=machine+learning+data+sets\r\n\r\nFortunately, there is a lot of good stuff out there including well-organised\r\nindexes of data sets for various purposes. For my future reference (and for\r\nanyone else that may be interested) here are some of the better data set lists\r\nI found.\r\n\r\n* **UCI Repositories**:\r\n No list of lists would be complete without this perennial [collection][uci]\r\n of machine learning data sets hosted by the University of California, \r\n Irvine. They also have a [repository of large data sets][kdd] for \r\n knowledge discovery in databases (KDD).\r\n\r\n[kdd]: http://kdd.ics.uci.edu/\r\n[uci]: http://archive.ics.uci.edu/ml/\r\n\r\n* **The Info**: \r\n This [site][theinfo] \"for people with large data sets\" has a community\r\n editable [list of data sets][theinfo data] organised by topic. 
The \r\n collection here has a web/text focus.\r\n\r\n[theinfo]: http://theinfo.org\r\n[theinfo data]: http://theinfo.org/get/data\r\n\r\n* **Text Retrieval**:\r\n This [list][trec] kept by NIST has data sets for each of the various\r\n tracks at the Text Retrieval Conference, including data sets for \r\n [spam detection](http://trec.nist.gov/data/spam.html),\r\n [genomics](http://trec.nist.gov/data/genomics.html),\r\n and a [terabyte](http://trec.nist.gov/data/terabyte.html) track\r\n (although the data sets aren\'t quite up to a terabyte yet).\r\n\r\n[trec]: http://trec.nist.gov/data.html\r\n\r\n* **Time Series Data Library**:\r\n This [collection][tsdl] has a large number of time varying data sets from\r\n finance, demography, physics, sport and ecology. \r\n\r\n[tsdl]: http://www-personal.buseco.monash.edu.au/~hyndman/TSDL/\r\n\r\n* **DMOZ Directory of Data Sets**:\r\n This is a good [starting point][dmoz] for more lists of data sets for \r\n machine learning.\r\n \r\n Parts of DMOZ itself are [available in RDF][dmoz data] as a data set for \r\n researchers. There is also a [processed version][dmoz processed] made\r\n available as part of the PASCAL [Ontology Learning Challenge][].\r\n\r\n[dmoz]: http://www.dmoz.org/Computers/Artificial_Intelligence/Machine_Learning/Datasets/\r\n[dmoz data]: http://rdf.dmoz.org/\r\n[dmoz processed]: http://olc.ijs.si/dmozReadme.html\r\n[Ontology Learning Challenge]: http://olc.ijs.si/\r\n\r\n* **Royal Statistical Society**:\r\n This [collection][rss data] contains data sets used in research published in \r\n the [journal of the Royal Statistical Society][rss]. This is an admirable\r\n idea that I wish more journals would take up.\r\n\r\n[rss data]: http://www.blackwellpublishing.com/rss/ \r\n[rss]: http://www.rss.org.uk/ \r\n\r\nAs well as the above institution or community organised lists, I also came \r\nacross some maintained by individuals.\r\n\r\n* **Daniel Lemire**: \r\n Daniel Lemire\'s \"[Data for Database Research][lemire]\" is organised by \r\n application areas, including data for earthquakes, weather, finance, climate \r\n and blogs.\r\n\r\n[lemire]: http://www.daniel-lemire.com/blog/data-for-data-mining/\r\n\r\n* **Peter Skomoroch**:\r\n The [list of data sets][skomoroch] over at [Data Wrangling][] is similar\r\n in spirit to the one here.\r\n\r\n[skomoroch]: http://www.datawrangling.com/some-datasets-available-on-the-web.html\r\n[Data Wrangling]: http://www.datawrangling.com/\r\n\r\nA few specific data sets caught my eye, some new, and some I just hadn\'t seen \r\nbefore.\r\n\r\n* **Freebase Wikipedia Extraction**:\r\n The [Wikipedia WEX][wex] data set is \r\n essentially a large (57 GB) graph of articles from wikipedia. \r\n\r\n[wex]: http://download.freebase.com/wex/\r\n \r\n* **Enron Email**:\r\n This [collection of email][enron] (400 Mb compressed) between Enron staff \r\n contains about half a million messages organised into folders. 
It was\r\n released publicly as part of the investigation into Enron and has been\r\n used by William Cohen and others as part of the CALO project.\r\n\r\n[enron]: http://www.cs.cmu.edu/~enron/\r\n\r\n* **Freeway Traffic Analysis**:\r\n This fairly [large data set][freeway] is a record of traffic flow on\r\n several lanes of the I-880 freeway in California, collected in order to\r\n study the effect of roving tow-trucks on clearing traffic incidents.\r\n\r\n[freeway]: http://ipa.eecs.berkeley.edu/~pettyk/FSP/\r\n\r\nIf all else fails and you still cannot find a suitable data set for your \r\nresearch, you can always invoke the social web and trawl through bookmarks\r\non services like [del.icio.us](http://del.icio.us). The global \r\n[data set tag][global tag] can throw up some interesting hits occasionally but\r\nthere might be a higher wheat-to-chaff ratio in a particular user\'s bookmarks,\r\nsuch as [Peter Skomoroch\'s][skomoroch tag]. [Mine][] is not nearly as \r\ncomprehensive yet.\r\n\r\n[global tag]: http://del.icio.us/tag/dataset\r\n[skomoroch tag]: http://del.icio.us/pskomoroch/dataset\r\n[mine]: http://del.icio.us/mreid/dataset\r\n\r\nIt would be interesting to do a meta-analysis of all these data sets to see how\r\nour ability as a discipline to deal with larger and more complex data sets has\r\nincreased over time. As Daniel Lemire pointed out with some surprise recently,\r\n[processing a terabyte of data][small terabyte] isn\'t that uncommon.\r\n\r\n[small terabyte]: http://www.daniel-lemire.com/blog/archives/2008/02/21/when-a-terabyte-is-small/','A Meta-index of Data Sets',0,'','publish','open','open','','a-meta-index-of-data-sets','','\nhttp://www.datawrangling.com/some-datasets-available-on-the-web.html','2008-02-22 15:43:37','2008-02-22 04:43:37','',0,'http://conflate.net/inductio/application/a-meta-index-of-data-sets/',0,'post','',7),(27,2,'2008-03-03 17:50:47','2008-03-03 06:50:47','The upcoming [Volume 9][v9] of the [Journal of Machine Learning Research][jmlr] dedicates a chunk of its pages to a paper entitled \"[Evidence Contrary to the Statistical View of Boosting][mease08a]\" by David Mease and Abraham Wyner. Following this are a number of responses by heavyweights including [boosting][]\'s earliest proponents, Freund and Schapire, as well as Mease and Wyner\'s [rejoinder][mease08b] to the responses. The whole conversation is also available in a [single PDF][].\r\n\r\nI\'ve seen this format of argument, response and rejoinder a couple of times before in the statistical literature and I think it works really well. It brings the wealth of expert views that are usually found only at workshop or conference panel discussions but adds the benefits of written expression: careful thinking, less time pressure and access to reference material. \r\n\r\nI\'m familiar with [AdaBoost][] but haven\'t really kept up with the recent research surrounding it. It seems that the crux of the discussion concerns some of the widely held beliefs about the statistical interpretation of boosting (stumps are better than small trees as weak learners, LogitBoost is better than AdaBoost on noisy data). Simple experiments are described which, often surprisingly, contradict the prevailing wisdom. \r\n\r\nAlthough I have only had time to skim the entire discussion, one thing I\'ve found really impressive about the contrary evidence Mease and Wyner provide is that all the R code for the experiments is [available][r code]. 
As can be seen in the subsequent discussion, this provides the responders with concrete reference points and several use them to refine or debate some of the interpretations. This is a perfect example of putting science back into Herbert Simon\'s [Sciences of the Artificial][sota], in which he argues that\r\n> Even when the programs themselves are only moderately large and intricate ... \r\n> too little is known about their task environments to permit accurate prediction of\r\n> how well they will perform. ... Here again theoretical analysis must be \r\n> accompanied by large amounts of \r\n> experimental work.\r\n\r\nNow that I\'m back in the world of academic research, it\'s high time I revisited some of these foundational algorithms in machine learning. I\'m hoping that by reading this discussion on boosting and playing with the experiments I can quickly get up to speed with the area.\r\n\r\n[sota]: http://www.librarything.com/work/253126\r\n[r code]: http://www.davemease.com/contraryevidence/\r\n[boosting]: http://www.boosting.org/\r\n[v9]: http://jmlr.csail.mit.edu/papers/v9/\r\n[jmlr]: http://jmlr.org/\r\n[mease08a]: http://www.jmlr.org/papers/volume9/mease08a/mease08a.pdf\r\n[mease08b]: http://www.jmlr.org/papers/volume9/mease08b/mease08b.pdf\r\n[single pdf]:http://www.jmlr.org/papers/volume9/mease08a/mease08a_with_discussion.pdf\r\n[adaboost]: http://en.wikipedia.org/wiki/AdaBoost','JMLR Discussion On Boosting',0,'','publish','open','open','','jmlr-discussion-on-boosting','','','2008-03-03 17:50:47','2008-03-03 06:50:47','',0,'http://conflate.net/inductio/theory/jmlr-discussion-on-boosting/',0,'post','',0),(28,2,'2008-03-13 16:33:07','2008-03-13 05:33:07','One thing my [recent survey of freely available data sets][data] did not uncover was a collection of archived RSS feeds. This surprised me a little since I would imagine aggregators like [Bloglines](http://bloglines.com/), [Google Reader](http://google.com/reader) and [AideRSS](http://aiderss.com/) must have large databases of hundreds of thousands of RSS feeds.\r\n\r\n[data]: http://conflate.net/inductio/application/a-meta-index-of-data-sets/\r\n\r\nHaving seen how easy it is to [create an RSS aggregator in ruby][igvita], I figured it should be just as easy to collect feeds in the same way and write them to a database via one of the many ORM (Object-Relational Mapping) layers available in ruby. The excellent [FeedNormalizer][] library makes the first part trivial and avoids having to worry whether a feed is RSS1, RSS2, Atom, etc. For the second part I thought I\'d try something new and give the ORM library [Sequel][] a go and, in the interests of simplicity, have it talk to an [SQLite][] database.\r\n\r\n[igvita]: http://www.igvita.com/2007/03/22/agile-rss-aggregator-in-ruby/\r\n[feednormalizer]: http://code.google.com/p/feed-normalizer/\r\n[sequel]: http://code.google.com/p/ruby-sequel/\r\n[sqlite]: http://www.sqlite.org/\r\n\r\nThe part I liked most was how easy Sequel makes setting up database schemas. This is the executable ruby code that defines the two tables I use in Feed Bag:\r\n\r\n class Feed < Sequel::Model(:feeds)\r\n set_schema do\r\n primary_key :id\r\n text :name\r\n text :url\r\n time :last_checked\r\n time :created\r\n end\r\n end\r\n \r\n class Entry < Sequel::Model(:entries)\r\n set_schema do\r\n primary_key :id\r\n text :url\r\n text :title\r\n text :content\r\n text :description\r\n time :time\r\n \r\n foreign_key :feed_id, :table => :feeds\r\n index :url\r\n end\r\n end\r\n\r\nUsing it is just as easy. 
From the Ruby side, if you have a feed `f` you get its associated entries using `f.entries` and once you have an entry `e` you can get its URL or title using `e.url` or `e.title`. Given how easy that is, there\'s little reason to resort to flat text file formats such as CSV when dealing with this sort of data.\r\n\r\nI\'ve called the resulting ruby script \"Feed Bag\" and have [made it available][feedbag] on my academic website along with instructions for using it. Without comments, the script weighs in at about 130 lines of code and only took a few hours to write and debug, most of which was learning how to use FeedNormalizer and Sequel. \r\n\r\nI\'ve been running Feed Bag on my machine since mid-January, collecting international news feeds from the BBC, New York Times, Washington Post, and 7 others without any trouble. So far it\'s collected over 25,000 feed items and stashed them in a 38Mb SQLite database. If anyone is interested, I\'ve made a bzip2 compressed version of an SQL dump of the database available for [download][] (3.4Mb). \r\n\r\nPlease let me know if you use the data for anything, or if you use Feed Bag to collect your own data set.\r\n\r\n[feedbag]: http://users.rsise.anu.edu.au/~mreid/code/feed_bag.html \r\n[download]: http://users.rsise.anu.edu.au/~mreid/files/data/IntlNews.sql.bz2','Feed Bag: A Simple RSS Archiver',0,'','publish','open','open','','feed-bag-a-simple-rss-archiver','','http://conflate.net/inductio/application/a-meta-index-of-data-sets/\nhttp://www.igvita.com/2007/03/22/agile-rss-aggregator-in-ruby/','2008-03-13 16:33:07','2008-03-13 05:33:07','',0,'http://conflate.net/inductio/application/feed-bag-a-simple-rss-archiver/',0,'post','',1),(29,2,'0000-00-00 00:00:00','0000-00-00 00:00:00','I\'ve recently been attempting to understand Receiver Operating Characteristic (ROC) curves and their relationship to loss, information and divergences. I\'ve decided that the best way to understand this stuff is to attempt to explain it. So, in the spirit of [John Armstrong][]\'s expository posts on category theory and integration, as well as Mark Chu-Carroll\'s \"[Basics][]\" series at Good Math, Bad Math, I plan to write a series of posts explaining some facts about ROC curves.\r\n\r\n[John Armstrong]: http://unapologetic.wordpress.com/\r\n[Basics]: http://scienceblogs.com/goodmath/goodmath/basics/\r\n\r\nThere are already plenty of good introductions and tutorials on ROC curves on the web but they tend to be from a medical diagnosis perspective. I\'ll try to focus on the material that is relevant to machine learning including the use of ROC analysis in classification, probability estimation and ranking. My aim is to provide a reasonably self-contained set of posts that emphasise some of the more important and recent properties and relationships regarding ROC curves. \r\n\r\nPostmodern Classification\r\n-----------------------------\r\nSuppose you wanted some way of deciding whether a particular book was [postmodern][] or not. \r\nWhat you\'re after is some procedure that takes in a whole lot of details about a particular text and returns a \"yes\" or \"no\" answer to the question \"Is this text postmodern?\"\r\n\r\nWe can think about this type of procedure abstractly as a function [tex]r[/tex] from a set [tex]\\mathcal{X}[/tex] of _observations_ about books to the set [tex]\\{0,1\\}[/tex] of _labels_ where 1 indicates membership in the positive class (i.e., the book is postmodern) and 0 indicates non-membership. 
\r\n\r\nROC graphs give us a way of visually assessing sets of binary _classifiers_. These are functions that assign one of two labels to each observation [tex]x[/tex] in the set [tex]\\mathcal{X}[/tex]. We\'ll use 1 for the positive label and 0 for the negative label so that a classifier is a function\r\n[tex]\r\n r : \\mathcal{X} \\to \\{0,1\\}.\r\n[/tex]\r\nFor example, each observation in [tex]\\mathcal{X}[/tex] provides details of a particular text (book, film, TV show, _etc_.) and the classifier returns a 1 to indicate the text is [postmodern][] and returns 0 otherwise.\r\n\r\n[postmodern]: http://www-stat.wharton.upenn.edu/~steele/Resources/Writing%20Projects/PostmodernStatistics.htm\r\n\r\n\r\n','ROC Curves for Machine Learning',0,'','draft','open','open','','roc-curves-for-ml','','','2008-04-08 22:30:43','2008-04-08 11:30:43','',0,'http://conflate.net/inductio/?p=29',0,'post','',0),(30,2,'0000-00-00 00:00:00','0000-00-00 00:00:00','Via [God Plays Dice](http://godplaysdice.blogspot.com/2008/04/smales-problems.html): In Smale\'s discussion of the Poincaré conjecture, after pointing out that a big part of the importance of the Poincaré conjecture is that it helped make manifolds respectable objects to study in their own right, he states:\r\n\r\n> I hold the conviction that there is a comparable phenomenon today in the notion of a \"polynomial\r\n> time algorithm\". Algorithms are becoming worthy of analysis in their own right, not merely as a\r\n> means to solve other problems. Thus I am suggesting that as the study of the set of solutions of an\r\n> equation (e.g. a manifold) played such an important role in 20th century mathematics, the study of\r\n> finding the solutions (e.g. an algorithm) may play an equally important role in the next century.\r\n\r\nIn light of [this paper][equal algs], it seems there are difficulties in defining what \"finding the solution\" might mean. The authors restrict their attention to non-interactive, small-step algorithms and argue that even in this restricted setting it is difficult to define what \"equivalent\" might mean.\r\n\r\nAn interesting example (Example 8) from that paper concerns the composition of two functions _f_ and _g_, implemented by programs _P_ and _Q_. Both programs take strings as input and output strings in linear time and logarithmic space. One definition of composition (call Q then pass its output to P as input) takes linear space (since the output of Q is created and stored) and linear time, while another definition (essentially a type of lazy evaluation) takes logarithmic space (only compute the output of Q a character at a time and don\'t store it) and quadratic time.\r\n\r\nI don\'t think this is as big an issue as the authors make it: it\'s well known that time and space complexity can be traded off, so defining equivalence of programs with respect to input-output behaviour *and* complexity is too fine-grained. \r\n\r\n[equal algs]: http://research.microsoft.com/research/pubs/view.aspx?type=Technical%20Report&id=1434\r\n\r\nThe authors finish with a few analogies: one saying that clustering without a decision function is similarly ill-defined, another that \"equivalence of ideas\" is an even more difficult beast to grasp but is somehow part of the answer to the question \"when are two algorithms different?\". When they express different ideas?\r\n\r\nThe question is a good one but there does not seem to be any easy resolution.\r\n\r\nWhat does this have to do with induction? 
As Goodman argued, \"similarity without respects is a quack, an impostor and a fraud\" since you cannot reasonably define similarity without providing some accepted terms of reference or _respects_. Certain \"natural\" types of similarity are natural only because of convention or human perception (colours, faces, shapes). I don\'t hold out hope for there being a \"natural\", universal sense of similarity for something as abstract as algorithms. That\'s not to say we can\'t build consensus piece by piece.\r\n\r\nPeter Turney\'s readings in analogy: http://apperceptual.wordpress.com/2007/12/20/readings-in-analogy-making/\r\n\r\nImplications for patents on algorithms...','Algorithms, Programs and Similarity',0,'','draft','open','open','','','','','2008-04-20 18:09:43','2008-04-20 08:09:43','',0,'http://conflate.net/inductio/?p=30',0,'post','',0),(31,2,'2008-04-08 11:43:31','2008-04-08 00:43:31','This page is a collection of notes and sub-pages for things I\'m not yet ready to show the world.\r\n\r\n\r\n','Private',0,'','private','closed','closed','','private','','','2008-04-08 11:43:31','2008-04-08 00:43:31','',0,'http://conflate.net/inductio/?page_id=31',0,'page','',0),(32,2,'2008-04-08 11:51:47','2008-04-08 00:51:47','Some research questions to expand on:\r\n\r\n### ROC and Families of Tasks\r\n\r\nWe know that a class probability estimation task is equivalent to a family of cost-sensitive classification tasks. How is this related to ROC curves? Given a probability estimator, applying a threshold gives a family of classifiers and an ROC curve.\r\n\r\n### Maximal AUC and Divergences\r\n\r\nThe maximal AUC obtainable for a given learning problem measures the separation of the label-conditional distributions, much like an f-divergence. However, it seems that maximal AUC and f-divergences are different in the sense that there is no _f_ such that maximal AUC is an f-divergence for that _f_. \r\n\r\nWhat kind of divergence is maximal AUC? Is it \"special\" in some way or is there a whole class of them? Fawcett\'s \"[ROC Graphs with Instance-Varying Costs][rociv]\" (2006) and Xie and Priebe\'s \"[A Weighted Generalization of the Mann-Whitney-Wilcoxon Statistic][gmww]\" both suggest (equivalent, I think) families based on transforming the axes of the ROC graph.\r\n\r\n[rociv]: http://www.citeulike.org/user/mdreid/article/2614937\r\n[gmww]: http://www.citeulike.org/user/mdreid/article/2639710\r\n\r\n### Clustering\r\n\r\nClustering seems to be a more primitive problem than classification or class probability estimation. In some sense, clustering takes a single distribution over observations and transforms it into a collection of label-conditional distributions. In this sense, clustering concerns the collection of functions _F_ from observations to some discrete set of finite cardinality.\r\n\r\nPapadimitriou argues in his \"[Algorithms, Games and the Internet][agi]\" (STOC 2001) that clustering is not really well-defined:\r\n\r\n> There are far too many criteria for the ‘goodness’ of a clustering . . . and far too little guidance\r\n> about choosing among them. . . . The criterion for choosing the best clustering scheme cannot be\r\n> determined unless the decision-making framework that drives it is made explicit\r\n\r\nThis would suggest that understanding the collection _F_ must be done in concert with some other family _G_ from labels output by functions in _F_ to predictions in some other set. 
The structure of _G_ and the type of loss incurred will determine how best to choose an element from _F_ (cf. Baxter\'s \"Learning to Learn\" and the choice of bias).\r\n\r\n[agi]: http://www.citeulike.org/user/mdreid/article/326513','Questions',0,'','private','open','open','','questions','','','2008-04-08 12:28:55','2008-04-08 01:28:55','',31,'http://conflate.net/inductio/?page_id=32',0,'page','',0),(33,2,'2008-04-09 16:36:13','2008-04-09 06:36:13','I love finding old essays on statistics. The philosophical and methodological wars that rage within that discipline make for fun reading. Particularly enjoyable are those essays - inevitably written by older, well-respected researchers - that make a strong point with beautiful rhetorical flourish and no small amount of barbed humour.\r\n\r\nThe title of a [journal article][teir] ([PDF][teirpdf]) by Jacob Cohen (and this post) is a classic example. As you may have guessed, its main aim is to rant against the misuse of p-values for null hypothesis significance testing (NHST). As well as including some extremely amusing quotes by Cohen and others, the paper does a fantastic job of curing the reader of any doubt regarding the correct interpretation of p-values. \r\n\r\n[teir]: http://www.citeulike.org/user/mdreid/article/2643653\r\n[teirpdf]: http://web.math.umt.edu/wilson/Math444/Handouts/Cohen94_earth%20is%20round.pdf\r\n\r\nRepeat after me: \"the p-value is NOT the probability the null hypothesis is true given the observed data\". Or, as Cohen puts it:\r\n> What\'s wrong with NHST? Well, among many other things, it does not tell us what we want to\r\n> know, and we so much want to know what we want to know that, out of desperation, we\r\n> nevertheless believe that it does! What we want to know is \"Given these data, what is the \r\n> probability that H0 is true?\" But as most of us know, what it tells us is \"Given \r\n> that H0 is true, what is the probability of these (or more extreme) data?\" \r\n> These are not the same...\r\n\r\nMany people make this mistake as it\'s so easy to erroneously reason about conditional probabilities. The particular fallacy that occurs when p-values are interpreted as the probability the null hypothesis is true is assuming that Prob(H0|Data) = Prob(Data|H0). Cohen argues that we are confused by the intuitive appeal of reasoning with rare events as though they were impossible events. He highlights why this intuition can lead us astray with a wonderful example. A low p-value is erroneously reasoned with as follows:\r\n> If the null hypothesis is correct, then these data are highly unlikely. \r\n> These data have occurred. \r\n> Therefore, the null hypothesis is highly unlikely. \r\n\r\nThis seems, at first glance, to be analogous to the non-probabilistic syllogism (namely _modus tollens_):\r\n> If a person is a Martian, then he is not a member of Congress. \r\n> This person is a member of Congress. \r\n> Therefore, he is not a Martian. \r\n\r\nAbsolutely nothing wrong with that. It\'s watertight. Now see what happens when the first line becomes a statement with very high probability instead of strictly true:\r\n> If a person is an American, then he is probably not a member of Congress. (TRUE, RIGHT?) \r\n> This person is a member of Congress. \r\n> Therefore, he is probably not an American.\r\n\r\nOuch! That last deduction should have made your eyes water. This is exactly what is going wrong when people misinterpret p-values. 
It\'s what you get for using Bayes\' rule without knowing something more _unconditionally_ about the probability of being a member of Congress. This is a rare event and its rarity must be factored in when doing the probabilistic equivalent of implication. Similarly, without knowing anything about the prior probability of the null hypothesis you cannot say anything about its posterior probability.\r\n\r\nCohen nicely sums up the danger of treating deduction and induction analogously with a quote attributed to Morris Raphael Cohen:\r\n> All logic texts are divided into two parts. In the first part, on deductive logic, the fallacies are explained; in the second part, on inductive logic, they are committed.\r\n\r\n\r\n','The Earth Is Round (p < 0.05)',0,'','publish','open','open','','the-earth-is-round','','','2008-04-09 16:36:13','2008-04-09 06:36:13','',0,'http://conflate.net/inductio/?p=33',0,'post','',2),(36,2,'2008-04-10 13:39:14','2008-04-10 03:39:14','Posts placed here for posterity. Sliced and diced for your convenience.','Archives',0,'','publish','open','open','','archives','','','2008-04-10 13:43:11','2008-04-10 03:43:11','',0,'http://conflate.net/inductio/?page_id=36',0,'page','',0),(38,2,'2008-05-26 16:45:44','2008-05-26 06:45:44','A [recent post by Peter Turney][turney] lists the books that have influenced his research. As well as compiling a great collection of books that are now on my mental \"must read one day\" list, he makes a crucial point about compiling such a list:\r\n> If a reader cannot point to some tangible outcome from reading a book, \r\n> then the reader may be overestimating the personal impact of the book. \r\n\r\n[turney]: http://apperceptual.wordpress.com/2008/05/25/the-book-that-changed-my-life/\r\n\r\nWith that in mind I tried to think of which books had a substantial impact on my research career.\r\n\r\nAlthough I can barely remember any of it now, the [manual][vic20] that came with the Commodore Vic 20 computer, which I read when I was around seven, got me hooked on programming. In primary and secondary school it was that book and the subsequent Commodore 64 and Amiga manuals that set me on the road to studying computer science and maths.\r\n\r\n[vic20]: http://www.geocities.com/rmelick/prg.txt\r\n\r\nIn my second year at university I had the great fortune of being recommended Hofstadter\'s \"[Gödel, Escher, Bach][geb]\" by a fellow student. It is centrally responsible for getting me to start thinking about thinking and subsequently doing a PhD in machine learning. The fanciful but extremely well written detours into everything from genetics to Zen Buddhism also broadened my horizons immensely.\r\n\r\n[geb]: http://www.librarything.com/work/5619/book/12512722\r\n\r\nI. J. Good\'s \"[The Estimation of Probabilities][good]\" was the tiny 1965 monograph I bought second-hand for $2 that made my thesis take a huge change in direction by giving it a Bayesian flavour. I now realise that a lot of that work had since been superseded by much more sophisticated Bayesian methods but sometimes finding a theory before it has been over-polished means that there is much more expository writing to aid intuition. It also helps that Good is a fabulous technical writer. \r\n\r\n[good]: http://www.librarything.com/work/2542774/book/12420041\r\n\r\nPhilosophically, Nelson Goodman\'s \"[Fact, Fiction and Forecast][fff]\" also shaped my thinking about induction quite a lot. 
His ideas on the \"virtuous circle\" of basing current induction on the successes and failures of the past provided me with a philosophical basis for the transfer learning aspects of my research. I found his views a refreshing alternative to Popper\'s (also personally influential) take on induction in \"[The Logic of Scientific Discovery][losd]\". Whereas Popper beautifully characterises the line between metaphysical and scientific theories, Goodman tries to give an explanation of *how* we might practically come up with new theories in the first place given that there will be, in general, countless theories that adequately fit the available data. In a nutshell, his theory of \"entrenchment\" says that we accrete a network of terms by induction and use these terms as features for future induction depending on how successful they were when used in past inductive leaps. This is a view of induction in line with Hume\'s \"habits of the mind\" and one I find quite satisfying.\r\n\r\n[fff]: http://www.librarything.com/work/70761/book/12419989\r\n[losd]: http://www.librarything.com/work/68144/book/31001290\r\n\r\nWhile they are not directly related to machine learning or computer science, there are a few other books that helped me form opinions on the process of research in general. I read Scott\'s \"[Write to the Point][wttp]\" over a decade ago now but it still makes me stop, look at my writing and simplify it. My attitude to presenting technical ideas was also greatly influenced by reading Feynman\'s \"[QED][]\" lectures. They are a perfect example of communicating extremely deep and difficult ideas to a non-technical audience without condescension and misrepresentation. Finally, I read Kennedy\'s \"[Academic Duty][ad]\" just as I started my current post-doc and found it immensely insightful. I plan to reread it as I (hopefully) hit various milestones in my academic career.\r\n\r\n[wttp]: http://www.librarything.com/work/1093218/book/31001976\r\n[qed]: http://www.librarything.com/work/27937/book/12512712\r\n[ad]: http://www.librarything.com/work/252530/book/20392830 \r\n\r\nOf course, like Peter, there are innumerable other books, papers and web pages that have shaped my thinking but the ones above are the ones that leap to mind when I think about how my research interests have developed over time.','Research-Changing Books',0,'In response to a post by Peter Turney, I list the books I feel shaped my research career.','publish','open','open','','research-changing-books','','','2008-06-14 23:25:29','2008-06-14 13:25:29','',0,'http://conflate.net/inductio/?p=38',0,'post','',3),(39,2,'2008-06-02 17:55:56','2008-06-02 07:55:56','Photo of yours truly. Someone should really get better lighting in here...','Mark Reid',0,'Your Host ','inherit','open','open','','me_shorthairdark_bw','','','2008-06-02 17:55:56','2008-06-02 07:55:56','',2,'http://conflate.net/inductio/wp-content/uploads/2008/06/me_shorthairdark_bw.jpg',0,'attachment','image/jpeg',0),(40,2,'2008-06-17 13:10:34','2008-06-17 03:10:34','****\r\n_Update - 9 Dec 2008_: Julieanne and I presented a much improved version of this visualisation at the [Resourceful Reading][] conference held at the University of Sydney on the 5th of December. Those looking for the application I presented there: stay tuned, I will post the updated version here shortly.\r\n****\r\n[Resourceful Reading]: http://conferences.arts.usyd.edu.au/index.php?cf=20\r\n\r\nI\'ve recently spent a bit of time collaborating with my wife on a research project. 
Research collaboration by couples is not new but given that Julieanne is a [lecturer in the English program][j] and I\'m part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual. \r\n\r\nThe rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. The tool itself is based on a simple application of linear Principal Component Analysis (PCA). I\'ll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool.\r\n\r\n[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php\r\n[csl]: http://csl.cecs.anu.edu.au/\r\n\r\nThe Australian Common Reader Project\r\n--------------------------------------------\r\nOne of Julieanne\'s research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers\' relationship with books and periodicals. \r\n\r\n[acrp]: http://www.api-network.com/hosted/acrp/\r\n\r\nEver on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When we asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions. \r\n\r\n[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/\r\n[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin\r\n[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor \r\n\r\nBooks and Borrowers\r\n------------------------\r\nAfter an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. \r\nThe full database contains 99,692 loans of 7,078 different books from 11 libraries by 2,642 different people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed at least one of these books.\r\nThis distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people. \r\n\r\n
Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.

| Book ID | Borrower 1 | Borrower 2 | ... | Borrower 2,473 |
|---------|------------|------------|-----|----------------|
| 1       | 1          | 0          | ... | 1              |
| 2       | 1          | 1          | ... | 0              |
| 3       | 0          | 0          | ... | 1              |
| ...     | ...        | ...        | ... | ...            |
| 1,616   | 1          | 1          | ... | 1              |
\r\n\r\nConceptually, each cell in the table contains a 1 if the person associated with the cell\'s column borrowed the book associated with the cell\'s row. If there was no such loan between a given book and borrower the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473.\r\n\r\nBook Similarity\r\n-----------------\r\nThe table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this a notion of what \"similar books\" means is required.\r\n\r\nMathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\\mathbf{b}_2 = (1,1,\\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex].\r\n\r\nA crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common. That is, the number of columns that have a `1` in the row for book 1 and the row for book 2. \r\n \r\nIn terms of the vector representation, this similarity measure is simply the \"[inner product][]\" between [tex]\\mathbf{b}_1[/tex] and [tex]\\mathbf{b}_2[/tex] and is written [tex]\\left<\\mathbf{b}_1,\\mathbf{b}_2\\right> = b_{1,1}b_{2,1} + \\cdots + b_{1,N}b_{2,N}[/tex] where N = 2,473 is the total number of borrowers.\r\n\r\n[inner product]: http://en.wikipedia.org/wiki/Inner_product_space\r\n\r\nIt turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to \"normalise\" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the \"size\" of each of the vectors for those books. \r\n\r\nMathematically, we will denote the size of a book vector [tex]\\mathbf{b}_i[/tex] as [tex]\\|\\mathbf{b}_i\\| = \\sqrt{\\left<\\mathbf{b}_i,\\mathbf{b}_i\\right>}[/tex]. The similarity between two books then becomes:\r\n\r\n
\r\n[tex]\\displaystyle\r\n \\text{sim}(\\mathbf{b}_i,\\mathbf{b}_j) \r\n = \\frac{\\left<\\mathbf{b}_i,\\mathbf{b}_j\\right>}{\\|\\mathbf{b}_i\\|\\|\\mathbf{b}_j\\|}\r\n[/tex]\r\n
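For the record, here is what this similarity measure looks like in Ruby (a sketch only; the two borrower vectors are invented for illustration, not rows from the actual database):

    # Normalised inner product between two 0/1 borrower vectors.
    def sim(b1, b2)
      inner = b1.zip(b2).sum { |x, y| x * y }
      inner / (Math.sqrt(b1.sum { |x| x * x }) * Math.sqrt(b2.sum { |x| x * x }))
    end

    book1 = [1, 0, 1, 1, 0]   # made-up borrower columns
    book2 = [1, 1, 1, 0, 0]
    puts sim(book1, book2)    # 2 shared borrowers / (sqrt(3) * sqrt(3)) = 0.666...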
\r\n\r\nPrincipal Component Analysis\r\n---------------------------------\r\nNow that we have a similarity measure between books the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart. \r\n\r\nA standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique aims to find a way of reducing the number of coordinates in each book vector in such a way that when the similarity between two books is computed using these smaller vectors it is as close as possible to the original similarity. That is, PCA creates a new table that represents books in terms of only two columns.\r\n\r\n[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n
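To give a feel for the computation involved, here is a rough Ruby sketch of a basic PCA step on a tiny, invented 0/1 matrix (the real reduction was done in R, as noted at the end of this post; nothing below is taken from that code):

    require 'matrix'

    # A tiny, made-up book-by-borrower matrix (3 books, 4 borrowers).
    x = Matrix[
      [1, 0, 1, 1],
      [1, 1, 1, 0],
      [0, 0, 1, 1]
    ]

    # Centre each borrower column on its mean.
    means = (0...x.column_count).map { |j| x.column(j).sum / x.row_count.to_f }
    centred = Matrix.build(x.row_count, x.column_count) { |i, j| x[i, j] - means[j] }

    # Eigendecomposition of the column covariance matrix.
    cov = (centred.transpose * centred) / (x.row_count - 1).to_f
    eigen = cov.eigen

    # Project onto the two directions with the largest eigenvalues.
    order = eigen.eigenvalues.each_index.sort_by { |i| -eigen.eigenvalues[i] }
    top2 = Matrix.columns(order.first(2).map { |i| eigen.eigenvectors[i].to_a })
    coords = centred * top2   # each row is a book's two PCA coordinates

    coords.row_vectors.each_with_index { |c, i| puts "book #{i + 1}: #{c.to_a.inspect}" }

Each book row is centred, the covariance of the borrower columns is eigendecomposed, and the books are projected onto the top two eigenvectors, giving coordinates like those in Table 2 below.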
Table 2: A portion of the book table after PCA. The values in the two new columns (PCA IDs) can be used to plot the books.

| Book ID | PCA 1 | PCA 2 |
|---------|-------|-------|
| 1       | -8.2  | 2.3   |
| 2       | 0.4   | -4.3  |
| 3       | -1.3  | -3.7  |
| ...     | ...   | ...   |
| 1,616   | 2.2   | -5.6  |
\r\n\r\nTable 2 gives an example of the book table after a PCA that reduces the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be as easily interpreted as the borrower columns in Table 1 but the values in them are such that similarities computed from Table 2 are roughly the same as those computed from Table 1. That is, if [tex]\\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\\mathbf{c}_2=(0.4,-4.3)[/tex] are the vectors\r\nfor the first two rows of Table 2 then [tex]\\text{sim}(\\mathbf{c}_1,\\mathbf{c}_2)[/tex]\r\nwould be close to [tex]\\text{sim}(\\mathbf{b}_1,\\mathbf{b}_2)[/tex], the similarity of the\r\nfirst two rows in Table 1.[^1]\r\n\r\n[^1]: Technically, the guarantee of the \"closeness\" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair\'s\r\nsimilarity is estimated well.\r\n\r\nVisualising the Data\r\n----------------------\r\nFigure 1 shows a plot of the PCA-reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2]\r\n\r\n[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle\'s colour.\r\n\r\n
\r\n\"Plot\r\n

Figure 1: A PCA plot of all the books in the ACRP database coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.\r\n

\r\n\r\nOne immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. This means it is likely that two voracious readers who frequent the same library will read the same books. This, in turn, means the similarity of two books from the same library will be higher than that of books from different libraries, as there are very few borrowers that use more than one library.\r\n\r\nDrilling Down and Interacting\r\n---------------------------------\r\nTo get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners\' and Mechanics\' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data.\r\n\r\n[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales\r\n\r\nThere are a total of 789 books in the Lambton Institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books.\r\n\r\nTo make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers. \r\n\r\nClicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet]. \r\n\r\n[applet]: /inductio/wp-content/public/acrp/\r\n\r\n
\r\n\"Click\r\n

Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.

\r\n
\r\n\r\nInstructions describing how to use the tool can be found below it. \r\nIn a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the \"Borrowers\" bar will only show books with at least that many borrowers; and altering the \"Similarity\" bar will only draw lines to books with at least that proportion of borrowers in common.\r\n\r\nFuture Work and Distant Reading\r\n-------------------------------------\r\nJulieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls \"distant reading\" -- looking at books as objects and how they are read rather than the \"close reading\" of the text of individual books. \r\n\r\n[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti \r\n\r\nNow that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on the habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to questions we might have. \r\n\r\nInstead we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers and their text, as well as other historical and cultural factors about them.\r\n\r\nData and Code\r\n----------------\r\nFor the technically minded, I have made the code I used to do the visualisation available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location.\r\n\r\n[github]: http://github.com/mreid/acrp/tree/master \r\n[SQL]: http://en.wikipedia.org/wiki/SQL\r\n[R]: http://www.r-project.org/\r\n[Processing]: http://processing.org/\r\n\r\nAccess to the data that the code acts upon is not mine to give, so the code is primarily to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here.\r\n\r\n','Visualising 19th Century Reading in Australia',0,'A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project.','publish','open','open','','visualising-reading','','\nhttp://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/','2008-12-09 22:05:57','2008-12-09 12:05:57','',0,'http://conflate.net/inductio/?p=40',0,'post','',4),(41,2,'2008-06-10 21:13:43','2008-06-10 11:13:43','A screen grab of the applet showing the neighbours of a selected book.','ACRP Visualisation',0,'Applet showing neighbours of a book','inherit','open','open','','acrp','','','2008-06-10 21:13:43','2008-06-10 11:13:43','',40,'http://conflate.net/inductio/wp-content/uploads/2008/06/acrp.png',0,'attachment','image/png',0),(42,2,'2008-06-12 12:08:12','2008-06-12 02:08:12','I have a (very) amateur interest in the philosophy of mathematics. 
My interest was recently piqued again after finishing the very readable \"[Introducing Philosophy of Mathematics][ipom]\" by Michèle Friend. Since then, I\'ve been a lot more aware of terms like \"constructivist\", \"realist\", and \"formalist\" as they apply to mathematics.\r\n\r\nToday, I was flicking through the entry on \"[Constructivist Mathematics][cm]\" in the [Stanford Encyclopedia of Philosophy][seop] and found a simple example of some of the problems with a non-constructive take on what disjunction means in mathematical statements. The article calls it \"well-worn\" but I hadn\'t seen it before.\r\n\r\nConsider the statement:\r\n> There exist irrational numbers [tex]a[/tex] and [tex]b[/tex] such that [tex]a^b[/tex] is rational.\r\n\r\nThe article gives a slick proof that this statement is true by invoking the [law of the excluded middle][lem] (LEM). That is, every number must be either rational or irrational. \r\n\r\nNow consider [tex]\sqrt{2}^\sqrt{2}[/tex]. By the LEM, this must be rational or irrational:\r\n\r\n * Case 1: If it is rational then we have proved the statement since we know [tex]a = b = \sqrt{2}[/tex] is irrational. \r\n\r\n * Case 2: If [tex]\sqrt{2}^\sqrt{2}[/tex] is irrational then choosing [tex]a = \sqrt{2}^\sqrt{2}[/tex] and [tex]b = \sqrt{2}[/tex] as our two irrational numbers gives [tex]a^b = \left(\sqrt{2}^{\sqrt{2}}\right)^{\sqrt{2}} = \sqrt{2}^{\sqrt{2}\cdot\sqrt{2}} = \sqrt{2}^2 = 2[/tex] -- a rational number. \r\n\r\nEither way, we\'ve proven the existence of two irrational numbers yielding a rational one.\r\nThe problem with this is that this argument is non-constructive and so we don\'t know which of case 1 and case 2 is true; we only know that one of them must be[^1]. This is a simple case of reductio ad absurdum in disguise.\r\n\r\nAs a born-again computer scientist (my undergraduate degree was pure maths and my PhD in computer science) I\'ve become increasingly suspicious of these sorts of proofs and more [constructivist][] -- even [intuitionist][] -- in my tastes. I think the seed of doubt was planted during the awkward discussions of the [Axiom of Choice][] in my functional analysis lectures. The sense of unease is summed up nicely in the following joke:\r\n\r\n> The Axiom of Choice is obviously true, the well-ordering principle obviously false, \r\n> and who can tell about Zorn\'s lemma?\r\n\r\nOf course, all those concepts are equivalent but that\'s far from intuitive.\r\n\r\nI don\'t think I\'m extremist enough to take a wholeheartedly computational view of mathematics -- denying all but the computable real numbers and functions, thereby making [all functions continuous][] -- but it is a tempting view of the subject.\r\n\r\nIn machine learning, I think there is a fairly pragmatic take on the philosophy of mathematics. For example, classical theorems from functional analysis are used to derive results involving kernels but when it comes to implementation, estimations and approximations are used with abandon. In my opinion, this is a [healthy way for the theory in this area to proceed][lemire]. As in physics, if the experimental work reveals inconsistencies with a theory, revisit the maths. 
If that doesn\'t work, [talk to the philosophers][dim].\r\n\r\n[ipom]: http://www.librarything.com/work/3362656/book/17581191\r\n[cm]: http://plato.stanford.edu/entries/mathematics-constructive/\r\n[seop]: http://plato.stanford.edu/\r\n[lem]: http://en.wikipedia.org/wiki/Law_of_the_excluded_middle\r\n[intuitionist]: http://en.wikipedia.org/wiki/Intuitionism\r\n[constructivist]: http://en.wikipedia.org/wiki/Constructivism_%28mathematics%29\r\n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice\r\n[all functions continuous]: http://math.andrej.com/2006/03/27/sometimes-all-functions-are-continuous/\r\n[lemire]: http://www.daniel-lemire.com/blog/archives/2008/06/05/why-pure-theory-is-wasteful/\r\n[dim]: http://diveintomark.org/archives/2008/06/11/purity\r\n\r\n[^1]: It turns out, by [Gelfond\'s Theorem](http://en.wikipedia.org/wiki/Gelfond\'s_theorem), that [tex]\sqrt{2}^\sqrt{2}[/tex] is transcendental, and therefore irrational, so the second case alone proves the statement. However, I\'m not sure what machinery is required to prove Gelfond\'s theorem.\r\n\r\n','Constructive and Classical Mathematics',0,'','publish','open','open','','constructive-and-classical-mathematics','','http://math.andrej.com/2006/03/27/sometimes-all-functions-are-continuous/\nhttp://diveintomark.org/archives/2008/06/11/purity','2008-06-16 15:46:56','2008-06-16 05:46:56','',0,'http://conflate.net/inductio/?p=42',0,'post','',3),(43,2,'2008-06-17 12:07:10','2008-06-17 02:07:10','','PCA of All Libraries',0,'Plot of the books across all libraries in the ACRP database','inherit','open','open','','all_libraries','','','2008-06-17 12:07:10','2008-06-17 02:07:10','',40,'http://conflate.net/inductio/wp-content/uploads/2008/06/all_libraries.png',0,'attachment','image/png',0),(44,2,'2008-07-02 01:40:23','2008-07-01 15:40:23','A little while ago, John Langford [suggested][jl1] that a discussion site be set up for ICML that allows attendees and others to talk about the accepted papers.\r\n\r\nHaving played around with various wiki systems and discussion sites in the past, I volunteered to help set something up. As John has [noted on his blog][jl2], the discussion site is now [up and running][icml]. \r\n\r\nThe main aim with this first attempt was to provide basic functionality: papers can be browsed by author, title and keyword; each paper has a discussion thread where anyone can leave comments. There are no comments at the time of writing this but I\'m hoping this will change once the conference gets underway.\r\n\r\nProvided there are no disasters, the site will remain up for as long as it is useful. Ultimately, I\'d like to add earlier conference proceedings to the site and ensure future conferences can be added as well. 
We will see how it goes this year and incorporate any feedback into future versions of the site.\r\n\r\nFor those interested in the technical details, I used [DokuWiki](http://wiki.splitbrain.org/wiki:dokuwiki) as the engine for the site along with a number of plugins, most importantly the [discussion plugin](http://wiki.splitbrain.org/plugin:discussion).\r\n\r\n[jl1]: http://hunch.net/?p=327\r\n[jl2]: http://hunch.net/?p=335\r\n[icml]: http://conflate.net/icml','ICML Discussion Site',0,'','publish','open','open','','icml-discussion-site','','\nhttp://hunch.net/?p=327','2008-07-02 01:40:23','2008-07-01 15:40:23','',0,'http://conflate.net/inductio/?p=44',0,'post','',0),(45,2,'2008-07-21 21:18:19','2008-07-21 11:18:19','Although I wasn\'t able to attend the talks at [ICML 2008][], I was able to participate in the [Workshop on Evaluation Methods for Machine Learning][emml] run by William Klement, [Chris Drummond][], and [Nathalie Japkowicz][].\r\n\r\n[icml 2008]: http://icml2008.cs.helsinki.fi/\r\n[emml]: http://www.site.uottawa.ca/ICML08WS/\r\n[nathalie japkowicz]: http://www.site.uottawa.ca/~nat/\r\n[chris drummond]: http://www.site.uottawa.ca/~cdrummon/\r\n\r\nThis workshop at ICML was a continuation of previous workshops held at AAAI that aim to cast a critical eye on the methods used in machine learning to experimentally evaluate the performance of algorithms.\r\n\r\nIt kicked off with a series of mini debates with Nathalie and Chris articulating the opposing sides. The questions included the following:\r\n\r\n * Should we change how evaluation is done?\r\n * Is evaluation central to empirical work?\r\n * Are statistical tests critical to evaluation?\r\n * Are the UCI data sets sufficient for evaluation?\r\n\r\nThere were three papers I particularly liked: [Janez Demsar][]\'s talk \"[On the Appropriateness of Statistical Tests in Machine Learning][appropriateness]\", [Edith Law][]\'s \"[The Problem of Accuracy as an Evaluation Criterion][accuracy]\", and [Chris Drummond][]\'s call for a mild-mannered revolution \"[Finding a Balance between Anarchy and Orthodoxy][anarchy]\".\r\n\r\n[janez demsar]: http://www.ailab.si/janez/\r\n[appropriateness]: http://www.site.uottawa.ca/ICML08WS/papers/J_Demsar.pdf\r\n[edith law]: http://www.cs.cmu.edu/~elaw/\r\n[accuracy]: http://www.site.uottawa.ca/ICML08WS/papers/E_Law.pdf\r\n[anarchy]: http://www.site.uottawa.ca/ICML08WS/papers/C_Drummond.pdf\r\n\r\nJanez\'s talk touched on a number of criticisms that [I had found in Jacob Cohen\'s paper \"The Earth is Round (p < 0.05)\"][round earth], making the case that people often incorrectly report and incorrectly interpret p-values for statistical tests. Unfortunately, as Janez points out, since machine learning is a discipline that (rightly) places emphasis on results it is difficult as a reviewer to reject a paper that presents an ill-motivated and confusing idea if its authors have shown that, statistically, it outperforms similar approaches.\r\n\r\n[round earth]: http://conflate.net/inductio/2008/04/the-earth-is-round/\r\n\r\nEdith\'s talk argued that accuracy is sometimes a poor measure of performance, making all this concern over whether we are constructing statistical tests for it (or AUC) moot. In particular, for tasks like salient region detection in images, language translation and music tagging there is no single correct region, translation or tag. 
Whether or not a particular region/translation/tag is \"correct\" is impossible to determine independently of the more difficult tasks of image recognition/language understanding/music identification. Solving these for the purposes of evaluation would make a solution to the smaller tasks redundant. Instead of focusing on evaluation of the smaller tasks, Edith suggests ways in which games that humans play on the web -- such as the [ESP Game][] -- can be used to evaluate machine performance on these tasks by playing learning algorithms against humans.\r\n\r\n[esp game]: http://www.espgame.org/\r\n\r\nFinally, Chris\'s talk made the bold claim that the way we approach evaluation in machine learning is an \"impoverished realization of a controversial methodology\", namely statistical hypothesis testing. \"Impoverished\" because when we do do hypothesis testing it is in the narrowest of senses, mainly to test that my algorithm is better than yours on this handful of data sets. \"Controversial\" since many believe science to have social, exploratory and accidental aspects --- much more than just the clinical proposing of hypotheses for careful testing.\r\n\r\nWhat these papers and the workshop as a whole showed me was how unresolved my position is on these and other questions regarding evaluation. On the one hand, I spent a lot of time painstakingly setting up, running and analysing experiments for my [PhD research][] on inductive transfer in order to evaluate the methods I was proposing. I taught myself how to correctly control for confounding factors, use the [Bonferroni correction][] to adjust significance levels and other esoterica of statistical testing. Applying all these procedures carefully to my work felt very scientific and I was able to create many pretty graphs and tables replete with confidence intervals, p-values and the like. On the other hand -- and with sufficient hindsight -- it\'s not clear how much value this type of analysis added to the thesis overall (apart from demonstrating to my reviewers that I could do it). \r\n\r\n[phd research]: http://www.library.unsw.edu.au/~thesis/adt-NUN/public/adt-NUN20070512.173744/index.html\r\n[bonferroni correction]: http://en.wikipedia.org/wiki/Bonferroni_correction\r\n\r\nThe dilemma is this: when one algorithm or approach clearly dominates another, details such as p-values, t-tests and the like only obscure the results; and when two algorithms are essentially indistinguishable, using \"significance\" levels to pry them apart seems to be grasping at straws.\r\n\r\nThat\'s not to say that we should get rid of empirical evaluation altogether. Rather, we should carefully choose (or create) our data sets and empirical questions so as to gain as much insight as possible and go beyond \"my algorithm is better than yours\". 
Statistical tests should not mark the end of an experimental evaluation but rather act as a starting point for further questions and carefully constructed experiments that resolve those questions.\r\n','Evaluation Methods for Machine Learning',0,'Some thoughts on the workshop on evaluation methods that I attended as part of ICML 2008 in Helsinki.','publish','open','open','','evaluation-methods-for-machine-learning','','\nhttp://conflate.net/inductio/2008/04/the-earth-is-round/','2008-07-21 21:18:19','2008-07-21 11:18:19','',0,'http://conflate.net/inductio/?p=45',0,'post','',1),(46,2,'2008-07-27 21:40:46','2008-07-27 11:40:46','I\'m a little late to the party since several machine learning bloggers have already noted their favourite papers at the recent joint [ICML][]/[UAI][]/[COLT][] conferences in Helsinki. \r\n\r\n[John][] listed a few COLT papers he liked, [Hal][] has covered some tutorials as well as several ICML and a few UAI and COLT papers, while [Jurgen][] has given an overview of the [non-parametric Bayes workshop][npbayes].\r\n\r\n[icml]: http://icml2008.cs.helsinki.fi/\r\n[uai]: http://uai2008.cs.helsinki.fi/\r\n[colt]: http://colt2008.cs.helsinki.fi/\r\n\r\n[john]: http://hunch.net/?p=341\r\n[hal]: http://nlpers.blogspot.com/2008/07/icmluaicolt-2008-retrospective.html\r\n[jurgen]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\r\n[npbayes]: http://undirectedgrad.blogspot.com/2008/07/npbayes-workshop-at-icml.html\r\n\r\nI didn\'t make it to the main ICML sessions but I did catch the workshop on evaluation in machine learning. Since I\'ve already [written about][evaluation] that and didn\'t attend any of the UAI sessions, I\'ll focus on the COLT stuff I found interesting.\r\n\r\n[evaluation]: http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/\r\n\r\nThe joint COLT/UAI invited talks were all excellent and covered a diverse range of topics. [Robin Hanson][] gave a great introduction to prediction markets. It was clear he\'d given this type of talk before as he handled the many subsequent questions directly and decisively. I\'m really interested in the work being done here so I\'ll write more about prediction markets in a later post. \r\n\r\n[robin hanson]: http://hanson.gmu.edu/home.html\r\n\r\n[Gábor Lugosi][]\'s talk covered a number of important concentration inequalities, focusing on the logarithmic Sobolev inequalities. It was a perfect example of a maths talk where details were eschewed without compromising accuracy in order to give insight into the main results. \r\n\r\n[gábor lugosi]: http://www.econ.upf.es/~lugosi/\r\n\r\n[Dan Klein][] presented some impressive results pertaining to unsupervised learning in three natural language problems: grammar refinement (inventing new annotations to improve parsing), coreference resolution (determining which nouns refer to the same thing) and lexicon translation (matching words across languages). By setting up simple Bayesian models and throwing a ton of unlabelled examples at them he was able to get results competitive with the best supervised learning approaches on some problems. \r\n\r\nThe lexicon translation was particularly impressive. Given a set of English documents and a set of Chinese documents his system was able to do a passable job of translating single words between the languages. What was impressive is that there was no information saying that, for example, this English document is a translation of that Chinese document. 
They could have all been pulled randomly from .co.uk and .cn sites. \r\n\r\nIf I understand it correctly, Klein\'s system simply looked for common patterns of words within documents of one language and then tried to match those patterns to similar patterns in the documents of the other. When the languages were \"closer\" - such as with Spanish and English - the system also made use of patterns of letters within words (e.g., \"direction\" and \"dirección\") to find possible cognates. \r\n\r\n[Dan Klein]: http://www.cs.berkeley.edu/~klein/\r\n\r\nThere was a variety of good papers at COLT this year. Of the talks I saw, two stood out for me.\r\n\r\n[The True Sample Complexity of Active Learning][balcan hanneke] by Balcan, Hanneke and Wortman showed the importance of choosing the right theoretical model for analysis. In active learning the learner is able to choose which unlabelled examples have their labels revealed. \r\nIntuitively, one would think that this should make learning easier than normal supervised learning where the learner has no say in the matter. \r\n\r\nPrevious results showed that this was basically not the case. Subtly, those results asked that the active learner achieve a certain error rate but also _verify_ that that rate was achieved. What Nina and her co-authors showed was that if you remove this extra requirement then active learning does indeed make learning much easier, often with exponential improvements in sample complexity over \"passive\" learning.\r\n\r\n[balcan hanneke]: http://colt2008.cs.helsinki.fi/papers/108-Balcan.pdf\r\n\r\n[An Efficient Reduction of Ranking to Classification][ailon] by Ailon and Mohri was also impressive. They build on earlier work that showed how a ranking problem can be reduced to learning a binary preference relation between the items to be ranked. One crucial part of this reduction is turning a learnt preference relation into a ranking. That is, taking pair-wise assessments of relative item quality and laying out all those items along a line so as to best preserve those pair-wise relationships. \r\n\r\nWhat Ailon and Mohri show is that simply applying a randomised quicksort to the pair-wise comparisons for n items will give a good reduction to a ranking in O(n log n) time. \"Good\" here means that the regret of the reduction-based ranking over the best possible is bounded by the regret of the classifier that learns the preference relation over the best possible classification. Furthermore, if you are only interested in the top k of n items you can get a good ranking in O(k log k + n) time. What\'s particularly nice about this work is that the tools they use to analyse randomised quicksort are very general and will probably find other applications.\r\n\r\n[ailon]: http://colt2008.cs.helsinki.fi/papers/32-Ailon.pdf\r\n
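The sorting step itself is easy to sketch. The Ruby snippet below is my own illustration, not code from the paper: it ranks items with a randomised quicksort that asks a learnt pairwise preference function which of two items should come first.\r\n\r\n
    def rank(items, prefers)
      return items if items.size <= 1
      idx = rand(items.size)                 # random pivot, as in quicksort
      pivot = items[idx]
      rest = items[0...idx] + items[idx + 1..-1]
      better, worse = rest.partition { |x| prefers.call(x, pivot) }
      rank(better, prefers) + [pivot] + rank(worse, prefers)
    end

    # Toy stand-in for a learnt preference classifier: smaller is "better".
    prefers = ->(a, b) { a < b }
    p rank([4, 1, 3, 5, 2], prefers)         # => [1, 2, 3, 4, 5]
A learnt preference relation need not be transitive, so it is not obvious that sorting against it behaves well; establishing that it does is the substance of their analysis.\r\n\r\n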
Finally, while I didn\'t attend the talk at COLT, a couple of people have told me that Abernethy et al.\'s paper [Competing in the Dark: An Efficient Algorithm for Bandit Linear Optimization][abernethy] was very good. I\'ve since skimmed through it and it is a very nice paper, well-written and replete with interesting connections. I\'m not that familiar with bandit learning work but this paper has a good summary of recent results and is intriguing enough to make me want to investigate further.\r\n\r\n[sridharan]: http://colt2008.cs.helsinki.fi/papers/94-Sridharan.pdf \r\n[abernethy]: http://colt2008.cs.helsinki.fi/papers/123-Abernethy.pdf\r\n\r\n','COLT 2008 Highlights',0,'','publish','open','open','','colt-2008-highlights','','http://conflate.net/inductio/2008/07/evaluation-methods-for-machine-learning/\nhttp://hunch.net/?p=341\nhttp://hunch.net/wp-trackback.php?p=341','2008-07-28 09:19:39','2008-07-27 23:19:39','',0,'http://conflate.net/inductio/?p=46',0,'post','',1),
Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n[book and market maker]: http://blog.commerce.net/?p=251','Prediction Markets',0,'','inherit','open','open','','47-revision-3','','','2008-08-04 21:34:53','2008-08-04 11:34:53','',47,'http://conflate.net/inductio/2008/08/47-revision-3/',0,'revision','',0),(57,2,'2008-08-04 22:12:29','2008-08-04 12:12:29','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\nExample: tossing a coin with unknown bias. A square scoring rule pays (1-r)^2 or (r-0)^2 depending on the outcome. To maximise your expected return you will report an r as close to what you think the true probability is as possible.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. 
People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n[book and market maker]: http://blog.commerce.net/?p=251','Prediction Markets',0,'','inherit','open','open','','47-revision-4','','','2008-08-04 22:12:29','2008-08-04 12:12:29','',47,'http://conflate.net/inductio/2008/08/47-revision-4/',0,'revision','',0),(58,2,'2008-08-05 14:27:22','2008-08-05 04:27:22','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. 
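(To spell that arithmetic out -- a throwaway Ruby sketch using the post's own numbers:)

    # Expected value of a contract paying $1 if it rains, $0 otherwise,
    # to someone who puts the chance of rain at 30%.
    p_rain = 0.3
    expected_value = p_rain * 1.0 + (1 - p_rain) * 0.0
    puts format("$%.2f", expected_value)   # => $0.30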
If you think my guess at the chance of rain is wrong then you\'ll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\nExample: tossing a coin with unknown bias. A square scoring rule pays 1-(1-r)^2 if the coin lands heads or 1-r^2 if it lands tails. To maximise your expected return you will report an r as close to what you think the true probability is as possible.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. 
The main points of David\'s argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251','Prediction Markets',0,'','inherit','open','open','','47-revision-5','','','2008-08-05 14:27:22','2008-08-05 04:27:22','',47,'http://conflate.net/inductio/2008/08/47-revision-5/',0,'revision','',0),(59,2,'2008-08-05 16:56:12','2008-08-05 06:56:12','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\nExample: tossing a coin with unknown bias. A square scoring rule pays 1-(1-r)^2 if the coin lands heads or 1-r^2 if it lands tails. To maximise your expected return you will report an r as close to what you think the true probability is as possible.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? 
This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), ','Prediction Markets',0,'','inherit','open','open','','47-revision-6','','','2008-08-05 16:56:12','2008-08-05 06:56:12','',47,'http://conflate.net/inductio/2008/08/47-revision-6/',0,'revision','',0),(60,2,'2008-08-05 16:58:36','2008-08-05 06:58:36','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. 
I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\r\n\r\nPrediction Markets\r\n---------------------\r\nSuppose you really wanted to know whether or not\r\n\r\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\r\n\r\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\r\n\r\nScoring Rules\r\n---------------\r\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \r\n\r\nScoring rules are a class of reward schemes that encourage truthful reporting. \r\n\r\nExample: tossing a coin with unknown bias. A square scoring rule pays (1-r)^2 or (r-0)^2 depending on the outcome. To maximise your expected return you will report an r as close to what you think the true probability is as possible.\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\n[This leads to telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". 
Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\r\n\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\r\n\r\n\r\nResearch shows that in the areas they have been used prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\nReferences\r\n------------\r\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)','Prediction Markets',0,'','inherit','open','open','','47-revision-7','','','2008-08-05 16:58:36','2008-08-05 06:58:36','',47,'http://conflate.net/inductio/2008/08/47-revision-7/',0,'revision','',0),(61,2,'2008-08-06 09:55:25','2008-08-05 23:55:25','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. 
Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\nExample: tossing a coin with unknown bias. A square scoring rule pays (1-r)^2 or (r-0)^2 depending on the outcome. To maximise your expected return you will report an r as close to what you think the true probability is as possible.\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. 
He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-8','','','2008-08-06 09:55:25','2008-08-05 23:55:25','',47,'http://conflate.net/inductio/2008/08/47-revision-8/',0,'revision','',0),(62,2,'2008-08-06 10:25:49','2008-08-06 00:25:49','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. 
Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say r -- and then I toss the coin. If it comes up heads then I pay you 1-(1-r)^2 \n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible.\n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". 
Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-9','','','2008-08-06 10:25:49','2008-08-06 00:25:49','',47,'http://conflate.net/inductio/2008/08/47-revision-9/',0,'revision','',0),(63,2,'2008-08-06 10:27:06','2008-08-06 00:27:06','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. 
Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars.\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible.\n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". 
Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-10','','','2008-08-06 10:27:06','2008-08-06 00:27:06','',47,'http://conflate.net/inductio/2008/08/47-revision-10/',0,'revision','',0),(64,2,'2008-08-06 10:39:22','2008-08-06 00:39:22','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. 
Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as\n[tex]\ns(r) = (1-(1-r)^2, 1-r^2).\n[/tex]\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as\n[tex]\ns(r)(w) = left< (1-(1-r)^2, 1-r^2), (w, 1-w) right> = (1-(1-r)^2)w + (1-r^2)(1-w).\n[/tex]\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. 
People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-11','','','2008-08-06 10:39:22','2008-08-06 00:39:22','',47,'http://conflate.net/inductio/2008/08/47-revision-11/',0,'revision','',0),(65,2,'2008-08-06 10:40:43','2008-08-06 00:40:43','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. 
I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to pay a different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector\n[tex]\ns(r) = (1-(1-r)^2, 1-r^2).\n[/tex]\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as\n[tex]\ns(r)(w) = left< (1-(1-r)^2, 1-r^2), (w, 1-w) right> = (1-(1-r)^2)w + (1-r^2)(1-w).\n[/tex]\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? 
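One concrete reading of that back-and-forth under a Hanson-style market scoring rule (a sketch of my understanding, not anything from the talk or the draft above): whoever moves the market's standing report from r_old to r_new is later paid s(r_new) - s(r_old) at the realised outcome, so a trader expects a profit exactly when her belief differs from the current report, and the sponsor's total liability telescopes. In Ruby:

    # Quadratic scoring rule: the payment attached to a report r once
    # the outcome w (1 = heads, 0 = tails) is revealed.
    def score(r, w)
      w == 1 ? 1 - (1 - r)**2 : 1 - r**2
    end

    reports = [0.5, 0.3, 0.45, 0.4]  # opening report then three revisions (made-up numbers)
    w = 1                            # say the coin lands heads

    # Each trader is paid the score of her new report minus the score
    # of the report she displaced.
    payouts = reports.each_cons(2).map { |old, new| score(new, w) - score(old, w) }

    total = payouts.inject(0.0) { |sum, x| sum + x }
    puts total                                             # sponsor's total payout
    puts score(reports.last, w) - score(reports.first, w)  # same: intermediate scores cancel

However many trades occur, the sponsor only ever pays the final score minus the opening score, which is presumably the telescoping the note above refers to.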
This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to the telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity: machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. The main points of David\'s argument are: contracts on terrorist activities made up a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets\' reaction to terrorism through airline and oil stocks; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. 
Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-12','','','2008-08-06 10:40:43','2008-08-06 00:40:43','',47,'http://conflate.net/inductio/2008/08/47-revision-12/',0,'revision','',0),(66,2,'2008-08-06 10:45:28','2008-08-06 00:45:28','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. \n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = [1-(1-r)^2, 1-r^2].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\ns(r)(w) = \left\langle \left[ 1-(1-r)^2, 1-r^2 \right], \left[ w, 1-w \right] \right\rangle = (1-(1-r)^2)w + (1-r^2)(1-w).\n[/tex]\n
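For concreteness, a quick worked instance of this payment rule: if you report [tex]r = 0.7[/tex] then heads pays [tex]1-(1-0.7)^2 = 0.91[/tex] dollars and tails pays [tex]1-0.7^2 = 0.51[/tex] dollars, so a confident report wins more when it is right and loses more when it is wrong.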
\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-13','','','2008-08-06 10:45:28','2008-08-06 00:45:28','',47,'http://conflate.net/inductio/2008/08/47-revision-13/',0,'revision','',0),(69,2,'2008-08-06 10:48:21','2008-08-06 00:48:21','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \n\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\n\nScoring rules are a class of reward schemes that encourage truthful reporting. 
\n\n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = [1-(1-r)^2, 1-r^2].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \displaystyle s(r)(w) = \left[ 1-(1-r)^2 , 1-r^2 \right] \cdot \left[ w, 1-w \right] = (1-(1-r)^2)w + (1-r^2)(1-w). [/tex]\n
\n\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-16','','','2008-08-06 10:48:21','2008-08-06 00:48:21','',47,'http://conflate.net/inductio/2008/08/47-revision-16/',0,'revision','',0),(67,2,'2008-08-06 10:45:57','2008-08-06 00:45:57','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\r\n\r\nPrediction Markets\r\n---------------------\r\nSuppose you really wanted to know whether or not\r\n\r\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\r\n\r\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\r\n\r\nScoring Rules\r\n---------------\r\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \r\n\r\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\r\n\r\nScoring rules are a class of reward schemes that encourage truthful reporting. 
\r\n\r\n\r\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \r\n\r\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\r\n[tex]\r\ns(r) = [1-(1-r)^2, 1-r^2].\r\n[/tex]\r\n\r\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\r\n
\r\n[tex]\r\n\displaystyle\r\ns(r)(w) = \left\langle \left[ 1-(1-r)^2, 1-r^2 \right], \left[ w, 1-w \right] \right\rangle = (1-(1-r)^2)w + (1-r^2)(1-w).\r\n[/tex]\r\n
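The maximisation argument that the next paragraph leaves unfinished can be spelt out (my reconstruction of the missing step, using only the rule above): if you believe the probability of heads is [tex]p[/tex], your expected payment for reporting [tex]r[/tex] is
[tex] E_p[s(r)] = p(1-(1-r)^2) + (1-p)(1-r^2), [/tex]
whose derivative in [tex]r[/tex] is [tex]2p(1-r) - 2(1-p)r = 2(p-r)[/tex]. This vanishes only at [tex]r = p[/tex], and the second derivative is negative, so reporting your true belief is the unique maximiser.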
\r\n\r\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \r\n\r\n\r\n\r\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\n[This leads to telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\r\n\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\r\n\r\n\r\nResearch shows that in the areas they have been used prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\nReferences\r\n------------\r\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\r\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-14','','','2008-08-06 10:45:57','2008-08-06 00:45:57','',47,'http://conflate.net/inductio/2008/08/47-revision-14/',0,'revision','',0),(68,2,'2008-08-06 10:46:35','2008-08-06 00:46:35','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\r\n\r\nPrediction Markets\r\n---------------------\r\nSuppose you really wanted to know whether or not\r\n\r\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\r\n\r\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\r\n\r\nScoring Rules\r\n---------------\r\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \r\n\r\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\r\n\r\nScoring rules are a class of reward schemes that encourage truthful reporting. 
\r\n\r\n\r\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \r\n\r\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\r\n[tex]\r\ns(r) = [1-(1-r)^2, 1-r^2].\r\n[/tex]\r\n\r\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\r\n
\r\n[tex]\r\n\displaystyle\r\ns(r)(w) = \left[ 1-(1-r)^2, 1-r^2 \right] \cdot \left[ w, 1-w \right] = (1-(1-r)^2)w + (1-r^2)(1-w).\r\n[/tex]\r\n
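A quick numerical check of the truthful-reporting property (a minimal sketch in Ruby; the method names are mine, not from any library):

    # Quadratic scoring rule from above: payout vector [heads, tails]
    def score(r)
      [1 - (1 - r)**2, 1 - r**2]
    end

    # Expected payment when you believe heads has probability p but report r
    def expected_payment(p, r)
      heads, tails = score(r)
      p * heads + (1 - p) * tails
    end

    p_true = 0.3
    best_report = (0..100).map { |i| i / 100.0 }
                          .max_by { |r| expected_payment(p_true, r) }
    puts best_report  # => 0.3 -- the truthful report maximises expected payment

Sweeping the grid of reports confirms the claim for any choice of p_true: the expected payment peaks exactly at the believed probability.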
\r\n\r\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \r\n\r\n\r\n\r\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\n[This leads to telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\r\n\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\r\n\r\n\r\nResearch shows that in the areas they have been used prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\nReferences\r\n------------\r\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\r\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-15','','','2008-08-06 10:46:35','2008-08-06 00:46:35','',47,'http://conflate.net/inductio/2008/08/47-revision-15/',0,'revision','',0),(70,2,'2008-08-06 10:48:34','2008-08-06 00:48:34','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\r\n\r\nPrediction Markets\r\n---------------------\r\nSuppose you really wanted to know whether or not\r\n\r\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\r\n\r\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\r\n\r\nScoring Rules\r\n---------------\r\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \r\n\r\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\r\n\r\nScoring rules are a class of reward schemes that encourage truthful reporting. 
\r\n\r\n\r\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \r\n\r\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\r\n[tex]\r\ns(r) = [1-(1-r)^2, 1-r^2].\r\n[/tex]\r\n\r\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\r\n
\r\n[tex] \displaystyle s(r)(w) = \left[ 1-(1-r)^2 , 1-r^2 \right] \cdot \left[ w , 1-w \right] = (1-(1-r)^2)w + (1-r^2)(1-w). [/tex]\r\n
\r\n\r\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \r\n\r\n\r\n\r\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\n[This leads to telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\r\n\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\r\n\r\n\r\nResearch shows that in the areas they have been used prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\nReferences\r\n------------\r\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\r\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-17','','','2008-08-06 10:48:34','2008-08-06 00:48:34','',47,'http://conflate.net/inductio/2008/08/47-revision-17/',0,'revision','',0),(73,2,'2008-08-06 10:50:52','2008-08-06 00:50:52','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\r\n\r\nPrediction Markets\r\n---------------------\r\nSuppose you really wanted to know whether or not\r\n\r\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\r\n\r\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\r\n\r\nScoring Rules\r\n---------------\r\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \r\n\r\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\r\n\r\nScoring rules are a class of reward schemes that encourage truthful reporting. 
\r\n\r\n\r\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \r\n\r\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\r\n[tex]\r\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\r\n[/tex]\r\n\r\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\r\n
\r\n[tex] s(r)(w) = \left[ 1-(1-r)^2 , 1-r^2 \right] \cdot \left[ w , 1-w \right] = (1-(1-r)^2)w + (1-r^2)(1-w). [/tex]\r\n
\r\n\r\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \r\n\r\n\r\n\r\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\n[This leads to telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\r\n\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\r\n\r\n\r\nResearch shows that in the areas they have been used prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\nReferences\r\n------------\r\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\r\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-20','','','2008-08-06 10:50:52','2008-08-06 00:50:52','',47,'http://conflate.net/inductio/2008/08/47-revision-20/',0,'revision','',0),(71,2,'2008-08-06 10:49:21','2008-08-06 00:49:21','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\r\n\r\nPrediction Markets\r\n---------------------\r\nSuppose you really wanted to know whether or not\r\n\r\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\r\n\r\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\r\n\r\nScoring Rules\r\n---------------\r\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \r\n\r\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\r\n\r\nScoring rules are a class of reward schemes that encourage truthful reporting. 
\r\n\r\n\r\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \r\n\r\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\r\n[tex]\r\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\r\n[/tex]\r\n\r\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\r\n
\r\n[tex] \displaystyle s(r)(w) = \left[ 1-(1-r)^2 , 1-r^2 \right] \cdot \left[ w , 1-w \right] = (1-(1-r)^2)w + (1-r^2)(1-w). [/tex]\r\n
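The bracketed note above about telescoping in market scoring rules can be made concrete. In Hanson\'s scheme each trader moves the market\'s report and is paid the change in score once the outcome is known; a small sketch assuming the quadratic rule from this post (variable names are mine):

    # Score of a single report r once the outcome w (1 = heads, 0 = tails) is known
    def score(r, w)
      w == 1 ? 1 - (1 - r)**2 : 1 - r**2
    end

    reports = [0.5, 0.3, 0.6, 0.45]  # market maker's opening report, then three traders
    w = 1                            # say the coin lands heads

    # Each trader is paid score(new report) - score(previous report)
    payments = reports.each_cons(2).map { |r_prev, r_new| score(r_new, w) - score(r_prev, w) }

    # The payments telescope: the total is just last-minus-first, so the market
    # maker's liability is bounded no matter how many trades take place
    total = payments.sum
    puts (total - (score(reports.last, w) - score(reports.first, w))).abs < 1e-12  # => true

Because the intermediate terms cancel, the person subsidising the market pays at most the range of the score (here, a dollar per outcome), which is exactly how "the person who wants the information" buys improved accuracy.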
\r\n\r\nTo maximise your expected return you will report an r as close to what you think the true probability is as possible. Why is this? Well, if you write out the expected \r\n\r\n\r\n\r\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\n[This leads to telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\r\n\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\r\n\r\n\r\nResearch shows that in the areas they have been used prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\nReferences\r\n------------\r\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\r\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-18','','','2008-08-06 10:49:21','2008-08-06 00:49:21','',47,'http://conflate.net/inductio/2008/08/47-revision-18/',0,'revision','',0),(72,2,'2008-08-06 10:49:52','2008-08-06 00:49:52','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\r\n\r\nPrediction Markets\r\n---------------------\r\nSuppose you really wanted to know whether or not\r\n\r\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\r\n\r\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\r\n\r\nScoring Rules\r\n---------------\r\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? \r\n\r\n(See Jeffrey[^1] for a discussion of betting arguments for probability).\r\n\r\nScoring rules are a class of reward schemes that encourage truthful reporting. 
\n\n\nSuppose I was about to toss a coin that only you knew had a probability [tex]p[/tex] of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is to set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars, otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this, it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
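Here is the same payout written as a small Ruby function (an illustrative sketch of the algebra above, not code from any of the linked papers):

```ruby
# Payout of the quadratic (Brier) scoring rule for report r when the
# coin shows heads (w = 1) or tails (w = 0).
def payout(r, w)
  (1 - (1 - r)**2) * w + (1 - r**2) * (1 - w)
end

puts payout(0.8, 1)  # report 0.8, coin lands heads => pays 0.96
puts payout(0.8, 0)  # report 0.8, coin lands tails => pays 0.36
```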
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads, you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p\left(1-(1-r)^2\right) + (1-p)\left(1-r^2\right).\n[/tex]\n\nDifferentiating with respect to [tex]r[/tex] gives [tex]2(1-r)p - 2r(1-p) = 2(p-r)[/tex], which is zero only when [tex]r = p[/tex], so reporting the true probability maximises your expected return.\n
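To check this numerically, a few more lines of Ruby (again just a sketch of mine, restating the `payout` function from above so it runs on its own) scan over possible reports and confirm the expectation peaks at the true probability:

```ruby
# Brier payout (as above) and its expectation under true probability p.
def payout(r, w)
  (1 - (1 - r)**2) * w + (1 - r**2) * (1 - w)
end

def expected_payout(r, p)
  p * payout(r, 1) + (1 - p) * payout(r, 0)
end

p_true = 0.7
reports = (0..100).map { |i| i / 100.0 }
best = reports.max_by { |r| expected_payout(r, p_true) }
puts best  # => 0.7 -- the truthful report maximises expected payout
```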
\n\n\n\nLambert et al.[^2] beautifully characterise which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. I\'ll write more about this in another post.\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to improve her expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to the telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: how do these markets get started, since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nIn these markets, information becomes a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. The main points of David\'s argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; and we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076) N. 
Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-27','','','2008-08-06 11:19:09','2008-08-06 01:19:09','',47,'http://conflate.net/inductio/2008/08/47-revision-27/',0,'revision','',0),(81,2,'2008-08-06 11:40:02','2008-08-06 01:40:02','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nPrediction Markets\n---------------------\nSuppose you really wanted to know whether or not\n\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\n\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^1] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. 
When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
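\nBefore deriving this formally, here is a small Ruby sketch (the helper names are mine, purely for illustration) that scores every report on a grid and shows where the expected payout peaks:\n\n    # Payout of the Brier wager above for report r when the coin shows w (1 = heads, 0 = tails)\n    def brier_payout(r, w)\n      (1 - (1 - r)**2) * w + (1 - r**2) * (1 - w)\n    end\n\n    # Expected payout when the true probability of heads is p\n    def expected_payout(p, r)\n      p * brier_payout(r, 1) + (1 - p) * brier_payout(r, 0)\n    end\n\n    grid = (0..100).map { |i| i / 100.0 }\n    puts grid.max_by { |r| expected_payout(0.3, r) }  # => 0.3, the truthful report\n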
\n\nTo maximise your expected return you will report an r as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., the probability that w = 1), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^2] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely but not the true probability [tex]p[/tex]\n\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\n[robin hanson]: http://hanson.gmu.edu/\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-28','','','2008-08-06 11:40:02','2008-08-06 01:40:02','',47,'http://conflate.net/inductio/2008/08/47-revision-28/',0,'revision','',0),(82,2,'2008-08-06 11:40:16','2008-08-06 01:40:16','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\r\n\r\nPrediction Markets\r\n---------------------\r\nSuppose you really wanted to know whether or not\r\n\r\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future.\r\n\r\nEssentially, people trade in contracts such as \"Pays $1 if it rains next Monday\". If you\'re 100% sure it will rain that day then that contract is worth $1 to you. If you think there is a 30% chance of rain then the contract\'s expected value is $0.30 to you. If you think my guess at the chance of rain is wrong then you\'ll be willing to a pay different amount and can buy it off me for that price. As this process continues the price of the contract will reflect the true chance of rain as more and more information is brought to bear on the prediction problem.\r\n\r\nScoring Rules\r\n---------------\r\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? 
One answer was given by Savage[^1] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \r\nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \r\n\r\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \r\n\r\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\r\n[tex]\r\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\r\n[/tex]\r\n\r\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\r\n
\r\n[tex]\r\n\displaystyle\r\n\begin{array}{rcl}\r\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\r\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\r\n\end{array}\r\n[/tex]\r\n
\r\n\r\nTo maximise your expected return you will report an r as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., the probability that w = 1), with a bit of algebra you will see that\r\n
\r\n[tex]\r\n\displaystyle\r\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\r\n[/tex]\r\n
\r\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\r\n\r\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\r\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^2] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\r\n\r\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\r\n
\r\n[tex]\r\n\displaystyle\r\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\r\n[/tex]\r\n
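\r\nTo see where that comes from: the expectation is just [tex]p \cdot r + (1-p)(1-r)[/tex], which rearranges directly to the expression above.\r\n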
\r\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\n[This leads to telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\r\n\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\r\n\r\n\r\nResearch shows that in the areas they have been used prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\r\n\r\nReferences\r\n------------\r\n[^1]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\r\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets',0,'','inherit','open','open','','47-revision-29','','','2008-08-06 11:40:16','2008-08-06 01:40:16','',47,'http://conflate.net/inductio/2008/08/47-revision-29/',0,'revision','',0),(83,2,'2008-08-06 12:37:52','2008-08-06 02:37:52','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nMarkets and Prediction\n--------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n\nInstead of stocks that pay dividends, participants in predication markets trade in contracts about future events. For example we can consider the contract \"Pays $1 to bearer if it rains next Monday\". If I\'m 50% sure it will rain that day then the expected value of that contract to me $0.50. If you think there is a 30% chance of rain then the contract\'s expected value for you is $0.30. \n\nSuppose you own the contract and I offer you $0.50 for it. 
You would happily trade it since you would then bank the $0.50 which is 20 cents more than you expected to receive from keeping the contract until maturity.\n\nThe key idea of prediction markets is that if you allow many people to trade contracts on future events the market price of the contract will reflect the true chance of the event as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^1] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
\n\nTo maximise your expected return you will report an r as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., the probability that w = 1), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^2] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-30','','','2008-08-06 12:37:52','2008-08-06 02:37:52','',47,'http://conflate.net/inductio/2008/08/47-revision-30/',0,'revision','',0),(84,2,'2008-08-06 17:08:52','2008-08-06 07:08:52','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nMarkets and Prediction\n--------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n\nInstead of stocks that pay dividends, participants in predication markets trade in contracts about future events. For example we can consider the contract \"Pays $1 to bearer if it rains next Monday\". If I\'m 50% sure it will rain that day then the expected value of that contract to me $0.50. If you think there is a 30% chance of rain then the contract\'s expected value for you is $0.30. \n\nSuppose you own the contract and I offer you $0.50 for it. 
You would happily trade it since you would then bank the $0.50 which is 20 cents more than you expected to receive from keeping the contract until maturity.\n\nThe key idea of prediction markets is that if you allow many people to trade contracts on future events the market price of the contract will reflect the true chance of the event as more and more information is brought to bear on the prediction problem.\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^1] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
\n\nTo maximise your expected return you will report an r as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., the probability that w = 1), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^2] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a similar analysis of Hanson\'s logarithmic market scoring rule that helped me understand the example I pre\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-31','','','2008-08-06 17:08:52','2008-08-06 07:08:52','',47,'http://conflate.net/inductio/2008/08/47-revision-31/',0,'revision','',0),(85,2,'2008-08-06 17:10:26','2008-08-06 07:10:26','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through [market scoring rules][]. I\'ve been investigating certain aspects of \"vanilla\" [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n[market scoring rules]: http://www.midasoracle.org/2007/09/16/hanson’s-market-scoring-rule-explained-in-five-sentences-why-betfair-gets-so-little-us-press-coverage-and-other-half-baked-commentary-by-michael-giberson/\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nMarkets and Prediction\n--------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n\nInstead of stocks that pay dividends, participants in predication markets trade in contracts about future events. For example we can consider the contract \"Pays $1 to bearer if it rains next Monday\". If I\'m 50% sure it will rain that day then the expected value of that contract to me $0.50. If you think there is a 30% chance of rain then the contract\'s expected value for you is $0.30. \n\nSuppose you own the contract and I offer you $0.50 for it. 
You would happily trade it since you would then bank the $0.50 which is 20 cents more than you expected to receive from keeping the contract until maturity.\n\nThe key idea of prediction markets is that if you allow many people to trade contracts on future events the market price of the contract will reflect the true chance of the event as more and more information is brought to bear on the prediction problem.\n\nThe trick as a \"market maker\" is to set pr\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^1] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
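\nTo see concretely what properness buys, the following Ruby fragment (an illustrative sketch; the helper names are mine) pits the Brier rule above against the linear rule from the aside below. Only the Brier rule recovers the true probability:\n\n    # Expected payout for report r under true probability p, where rule maps\n    # a report to the pair [payout if heads, payout if tails]\n    def expected(p, rule, r)\n      heads, tails = rule.call(r)\n      p * heads + (1 - p) * tails\n    end\n\n    brier  = lambda { |r| [1 - (1 - r)**2, 1 - r**2] }\n    linear = lambda { |r| [r, 1 - r] }\n\n    grid = (0..100).map { |i| i / 100.0 }\n    puts grid.max_by { |r| expected(0.3, brier, r) }   # => 0.3\n    puts grid.max_by { |r| expected(0.3, linear, r) }  # => 0.0, not 0.3\n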
\n\nTo maximise your expected return you will report an r as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., the probability that w = 1), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^2] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^2]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-32','','','2008-08-06 17:10:26','2008-08-06 07:10:26','',47,'http://conflate.net/inductio/2008/08/47-revision-32/',0,'revision','',0),(86,2,'2008-08-07 10:09:16','2008-08-07 00:09:16','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nMarkets and Prediction\n--------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n\nInstead of stocks that pay dividends, participants in predication markets trade in contracts about future events. For example we can consider the contract \"Pays $1 to bearer if it rains next Monday\". If I\'m 50% sure it will rain that day then the expected value of that contract to me $0.50. If you think there is a 30% chance of rain then the contract\'s expected value for you is $0.30. \n\nSuppose you own the contract and I offer you $0.50 for it. 
You would happily trade it since you would then bank the $0.50 which is 20 cents more than you expected to receive from keeping the contract until maturity.\n\nThe key idea of prediction markets is that if you allow many people to trade contracts on future events the market price of the contract will reflect the true chance of the event as more and more information is brought to bear on the prediction problem.\n\n[TODO: The trick as a \"market maker\" is to be able to update the prices you set after each trade in order to converge to the true probability of an event.]\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^2] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
\n\nTo maximise your expected return you will report an r as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., the probability that w = 1), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when p = r. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^3] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^3]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-33','','','2008-08-07 10:09:16','2008-08-07 00:09:16','',47,'http://conflate.net/inductio/2008/08/47-revision-33/',0,'revision','',0),(87,2,'2008-08-07 16:52:12','2008-08-07 06:52:12','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nMarkets and Prediction\n--------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. \n\nInstead of stocks that pay dividends, participants in predication markets trade in contracts about future events. For example we can consider contracts for whether or not it rains next Monday. For a binary event like this the contracts come in the pair: A) \"Pays $1 to bearer if it rains next Monday\" and B) \"Pays $1 to bearer if it does not rain next Monday\". If I\'m 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. \n\nIf I\'m selling these contracts I would set an initial price for both at $0.50. If you buy one of B) it suggests that you think the chance of rain is more than 0.5. 
In this case I should update my prices to reflect what this trade has told me about what you think the chances are. If you buy another, I should raise my price slightly again. Continuing this, eventually I\'ll reach a price where \n\nSuppose you own the contract and I offer you $0.50 for it. You would happily trade it since you would then bank the $0.50 which is 20 cents more than you expected to receive from keeping the contract until maturity.\n\nThe key idea of prediction markets is that if you allow many people to trade contracts on future events the market price of the contract will reflect the true chance of the event as more and more information is brought to bear on the prediction problem.\n\n[TODO: The trick as a \"market maker\" is to be able to update the prices you set after each trade in order to converge to the true probability of an event.]\n\nScoring Rules\n---------------\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^2] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
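To make the payoff concrete, here is a minimal Ruby sketch of the rule above (the function name `brier_payoff` is mine, not anything from the post):

    # Brier payoff from the equation above: w is 1 for heads, 0 for tails,
    # r is the reported probability of heads.
    def brier_payoff(r, w)
      (1 - (1 - r)**2) * w + (1 - r**2) * (1 - w)
    end

    puts brier_payoff(0.7, 1).round(2)  # => 0.91 (paid on heads)
    puts brier_payoff(0.7, 0).round(2)  # => 0.51 (paid on tails)
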
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), a bit of algebra shows that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]r = p[/tex]. That is, you maximise your expected payment when your report of the probability of heads equals the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules, defined by the property that their expected payment is maximised by reporting the true probability of an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^3] have characterised which scoring rules are proper and gone further to describe which general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
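A two-line numerical check (a sketch of mine, using nothing beyond the formula above) shows the maximiser sits at a boundary rather than at the honest report:

    # Expected payment under the linear rule, maximised over reports r.
    expected_linear = ->(p_true, r) { 1 - p_true + r * (2 * p_true - 1) }
    grid = (0..100).map { |i| i / 100.0 }
    puts grid.max_by { |r| expected_linear.call(0.7, r) }  # => 1.0
    puts grid.max_by { |r| expected_linear.call(0.2, r) }  # => 0.0
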
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^3]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-34','','','2008-08-07 16:52:12','2008-08-07 06:52:12','',47,'http://conflate.net/inductio/2008/08/47-revision-34/',0,'revision','',0),(88,2,'2008-08-07 22:23:06','2008-08-07 12:23:06','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in predication markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) \"Pays $1 to bearer if it rains next Monday\", and \n* B) \"Pays $1 to bearer if it does not rain next Monday\". \n\nIf I\'m 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. \n\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. 
If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \n\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement was by John McCarthy[^2] in 1956 and a more in depth was given by Savage[^3] in 1971.\n\nA central concept in forecasting is that of _elicitation_. How do you ensure that people report probabilities that reflect what they really believe? One answer was given by Savage[^2] in 1971 in the form of _proper scoring rules_ --- a class of reward schemes that encourage truthful reporting. \nThe gist of the idea can be seen through a simple example using a particular proper scoring rule called the Brier score. \n\nSuppose I was about to toss a coin that only you knew had a probability p of landing heads. How could I encourage you to reveal that probability to me? \n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
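As a quick sanity check (a sketch of mine, not part of the original post), the expected payment under this rule really is highest when the report matches the truth:

    # Expected Brier payment when the true probability of heads is p_true
    # and you report r.
    def expected_brier(p_true, r)
      p_true * (1 - (1 - r)**2) + (1 - p_true) * (1 - r**2)
    end

    grid = (0..100).map { |i| i / 100.0 }
    puts grid.max_by { |r| expected_brier(0.3, r) }  # => 0.3
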
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), a bit of algebra shows that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]r = p[/tex]. That is, you maximise your expected payment when your report of the probability of heads equals the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules, defined by the property that their expected payment is maximised by reporting the true probability of an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and gone further to describe which general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^3]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. Mccarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-35','','','2008-08-07 22:23:06','2008-08-07 12:23:06','',47,'http://conflate.net/inductio/2008/08/47-revision-35/',0,'revision','',0),(89,2,'2008-08-07 22:33:42','2008-08-07 12:33:42','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in predication markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) \"Pays $1 to bearer if it rains next Monday\", and \n* B) \"Pays $1 to bearer if it does not rain next Monday\". \n\nIf I\'m 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. 
If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. \n\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \n\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956 and a more in depth study by Leonard Savage[^3] was published in 1971.\n\n\n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
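A short sketch (helper names are mine) checking that the inner-product form and the expanded form above agree:

    # Compare <s(r), [w, 1-w]> against its expanded form for a few cases.
    def payoff_inner(r, w)
      s = [1 - (1 - r)**2, 1 - r**2]
      s[0] * w + s[1] * (1 - w)
    end

    [[0.2, 1], [0.2, 0], [0.9, 1]].each do |r, w|
      expanded = (1 - (1 - r)**2) * w + (1 - r**2) * (1 - w)
      raise 'mismatch' unless (payoff_inner(r, w) - expanded).abs < 1e-12
    end
    puts 'inner-product and expanded payoffs agree'
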
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), a bit of algebra shows that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]r = p[/tex]. That is, you maximise your expected payment when your report of the probability of heads equals the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules, defined by the property that their expected payment is maximised by reporting the true probability of an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and gone further to describe which general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\n[Subjective Probability: The Real Thing](http://www.princeton.edu/~bayesway/Book*.pdf), [Review](http://ndpr.nd.edu/review.cfm?id=4401)\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. Mccarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-36','','','2008-08-07 22:33:42','2008-08-07 12:33:42','',47,'http://conflate.net/inductio/2008/08/47-revision-36/',0,'revision','',0),(90,2,'2008-08-08 11:00:49','2008-08-08 01:00:49','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in predication markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) \"Pays $1 to bearer if it rains next Monday\", and \n* B) \"Pays $1 to bearer if it does not rain next Monday\". \n\nIf I\'m 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. 
If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. \n\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \n\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956 and a more in depth study by Leonard Savage[^3] was published in 1971.\n\n\n\nOne way is if I set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = left[ 1-(1-r)^2 , 1-r^2 right].\n[/tex]\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
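Anticipating the expectation derived just below, the following sketch (names and the chosen values are mine) confirms numerically that the expected payment equals [tex]p^2 - p + 1 - (p - r)^2[/tex]:

    # Check the closed form of the expected Brier payment for one (p, r) pair.
    p_true, r = 0.35, 0.6
    direct = p_true * (1 - (1 - r)**2) + (1 - p_true) * (1 - r**2)
    closed = p_true**2 - p_true + 1 - (p_true - r)**2
    puts (direct - closed).abs < 1e-12  # => true
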
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), a bit of algebra shows that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]r = p[/tex]. That is, you maximise your expected payment when your report of the probability of heads equals the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules, defined by the property that their expected payment is maximised by reporting the true probability of an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and gone further to describe which general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. Mccarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-37','','','2008-08-08 11:00:49','2008-08-08 01:00:49','',47,'http://conflate.net/inductio/2008/08/47-revision-37/',0,'revision','',0),(92,2,'2008-08-11 09:28:15','2008-08-10 23:28:15','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in predication markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) \"Pays $1 to bearer if it rains next Monday\", and \n* B) \"Pays $1 to bearer if it does not rain next Monday\". \n\nIf I\'m 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. 
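A tiny sketch (mine, not the post\'s) makes the valuation above explicit: a $1 contract on an event is worth its probability to whoever holds that belief.

    # Value of a $1-payout contract to someone assigning prob to the event.
    value = ->(prob) { 1.0 * prob }
    puts value.call(0.3)      # contract A at a 30% belief in rain => 0.3
    puts value.call(1 - 0.3)  # contract B at the same belief      => 0.7
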
\n\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \n\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956 and a more in depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced a very recent paper by Lambert et al.[^4]\n\nA scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function that computes the expected value of a correct prediction --- [tex]s_1(r)[/tex] --- and an incorrect prediction --- [tex]s_0(r)[/tex] --- from a reported probability [tex]r[/tex] of an event. This simplifies the game of gradually increasing the cost of the contracts as more are bought to a simple offer of a payoff for a reported probability. The key feature of a _proper_ scoring rule is that its expected value is maximised when the true probability of an event is reported. That is, if [tex]pin[0,1][/tex] is the true probability of an event then\n
\n[tex]\n\displaystyle\n\max_{r\in[0,1]} \mathbb{E}_p[ s(r) ] = \mathbb{E}_p[ s(p) ].\n[/tex]\n
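Here is a small sketch of that defining property, with [tex]s(r) = [s_0(r), s_1(r)][/tex] as above (the lambda and helper names are my own):

    # Expected score p*s_1(r) + (1-p)*s_0(r), maximised over reports r.
    def expected_score(rule, p_true, r)
      s0, s1 = rule.call(r)
      p_true * s1 + (1 - p_true) * s0
    end

    brier = ->(r) { [1 - r**2, 1 - (1 - r)**2] }  # [s_0, s_1]
    grid = (0..100).map { |i| i / 100.0 }
    puts grid.max_by { |r| expected_score(brier, 0.25, r) }  # => 0.25
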
\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex]\n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
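And as code, the same projection (a sketch under my own naming, not the post\'s):

    # Payment as the inner product of s(r) with the outcome indicator [w, 1-w].
    def dot(a, b)
      a.zip(b).sum { |x, y| x * y }
    end

    s = ->(r) { [1 - (1 - r)**2, 1 - r**2] }
    puts dot(s.call(0.7), [1, 0]).round(2)  # heads: => 0.91
    puts dot(s.call(0.7), [0, 1]).round(2)  # tails: => 0.51
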
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you believe the true probability to be. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., of [tex]w = 1[/tex]), a bit of algebra shows that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]r = p[/tex]. That is, you maximise your expected payment when your report of the probability of heads equals the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules, defined by the property that their expected payment is maximised by reporting the true probability of an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and gone further to describe which general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not but will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks you guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information become a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leaves the mind free for higher-order planning and decision-making).\n\nResearch shows that in the areas they have been used prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first though. 
The main points of David\'s argument is: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100 making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. Mccarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-39','','','2008-08-11 09:28:15','2008-08-10 23:28:15','',47,'http://conflate.net/inductio/2008/08/47-revision-39/',0,'revision','',0),(91,2,'2008-08-11 09:25:39','2008-08-10 23:25:39','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues included how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets everyday: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in predication markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) \"Pays $1 to bearer if it rains next Monday\", and \n* B) \"Pays $1 to bearer if it does not rain next Monday\". \n\nIf I\'m 50% sure it will rain that day then the expected values of contract A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. 
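Before the prose walks through the trade sequence in the next few paragraphs, here is a sketch checking its arithmetic (the $0.01 price step and 20 purchases come from that description; the code itself is mine):

    # Market maker raises contract B's price by $0.01 per purchase from $0.50.
    prices = (0...20).map { |i| 0.50 + 0.01 * i }
    cost = prices.sum                  # 0.50 + 0.51 + ... + 0.69
    gain_if_rain = 20 * 1.0 - cost
    expected_gain = 0.7 * gain_if_rain - 0.3 * cost
    puts format('cost $%.2f, gain if rain $%.2f, expected gain $%.2f',
                cost, gain_if_rain, expected_gain)
    # => cost $11.90, gain if rain $8.10, expected gain $2.10
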
\n\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \n\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956 and a more in depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced a very recent paper by Lambert et al.[^4]\n\nA scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function that computes the expected value of a correct ([tex]s_1(r)[/tex]) and an incorrect ([tex]s_0(r)[/tex]) prediction from a reported probability [tex]r[/tex] of an event. This simplifies the game of gradually increasing the cost of the contracts as more are bought to a simple offer of a payoff for a reported probability. The key feature of a _proper_ scoring rule is that its expected value is maximised when the true probability of an event is reported. That is, if [tex]pin[0,1][/tex] is the true probability of an event then\n
\n[tex]\n\displaystyle\n\max_{r\in[0,1]} \mathbb{E}_p[ s(r) ] = \mathbb{E}_p[ s(p) ]\n[/tex]\n
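\nTo see this condition in action, here is a small Ruby sketch (mine, purely for illustration and not from any of the papers above) that sweeps over reports [tex]r[/tex] for the quadratic rule introduced below and confirms the expected payment peaks at the true probability:\n\n    # Expected payment for the quadratic rule s(r) = [1-r^2, 1-(1-r)^2]\n    # when the event has true probability p.\n    def expected_payment(p, r)\n      p * (1 - (1 - r)**2) + (1 - p) * (1 - r**2)\n    end\n\n    p_true = 0.3\n    grid = (0..100).map { |i| i / 100.0 }\n    best = grid.max_by { |r| expected_payment(p_true, r) }\n    puts best    # => 0.3, the truthful report\n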
\n\nOne way to elicit your belief about, say, the probability that a coin I am about to toss lands heads is to set up the following wager: you first tell me the probability of heads -- say [tex]r[/tex] -- and then I toss the coin. If it comes up heads then I pay you [tex]1-(1-r)^2[/tex] dollars, otherwise I pay you [tex]1 - r^2[/tex] dollars. When a wager is dependent on your report [tex]r[/tex] like this, it is known as a _scoring rule_ and can be summarised as the vector function\n[tex]\ns(r) = \left[ 1-(1-r)^2 , 1-r^2 \right].\n[/tex]\nThis particular rule is known as the _Brier score_.\n\nIf the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
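\nSubstituting the two outcomes confirms that this inner product recovers the payoffs of the wager: \n\n[tex]\n\displaystyle\ns(r)(1) = 1-(1-r)^2, \qquad s(r)(0) = 1-r^2.\n[/tex]\n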
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., that [tex]w = 1[/tex] with probability [tex]p[/tex]), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]p = r[/tex]. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nAs noted earlier, the Brier score is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. (For example, with [tex]p = 0.7[/tex] the expectation is [tex]0.3 + 0.4r[/tex], which is largest at [tex]r = 1[/tex].) This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not, but it will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. How much is she willing to pay to obtain a positive expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to a telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers at the [ACM conference on electronic commerce][ec08], which was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. 
The main points of David\'s argument are: the terrorist activities made up a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market, since bets were limited to $100, making it more effective for them to trade on the reaction of financial markets, such as airline and oil stocks, to terrorism; and we already bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers, pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-38','','','2008-08-11 09:25:39','2008-08-10 23:25:39','',47,'http://conflate.net/inductio/2008/08/47-revision-38/',0,'revision','',0),(93,2,'2008-08-11 09:28:41','2008-08-10 23:28:41','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\nTrading Cash for Probability\r\n-------------------------------\r\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \r\n\r\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\r\n\r\n* A) \"Pays $1 to bearer if it rains next Monday\", and \r\n* B) \"Pays $1 to bearer if it does not rain next Monday\". \r\n\r\nIf I\'m 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. 
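\r\nIn general, if [tex]q[/tex] is your probability of rain (a symbol I introduce here just for illustration) then, in expectation, the two contracts are worth\r\n\r\n[tex]\r\n\displaystyle\r\n\mathbb{E}[A] = q \times \$1 = \$q, \qquad \mathbb{E}[B] = (1-q) \times \$1 = \$(1-q).\r\n[/tex]\r\n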
\r\n\r\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price, it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \r\n\r\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \r\n\r\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\r\n\r\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\r\n\r\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\r\n\r\nProper Scoring Rules\r\n------------------------\r\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\r\n\r\nA scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function that computes the value of a correct prediction --- [tex]s_1(r)[/tex] --- and an incorrect prediction --- [tex]s_0(r)[/tex] --- from a reported probability [tex]r[/tex] of an event. This simplifies the game of gradually increasing the cost of the contracts as more are bought to a simple offer of a payoff for a reported probability. The key feature of a _proper_ scoring rule is that its expected value is maximised when the true probability of an event is reported. That is, if [tex]p\in[0,1][/tex] is the true probability of an event then\r\n
\r\n[tex]\r\n\displaystyle\r\n\max_{r\in [0,1]} \mathbb{E}_p [ s(r) ] = \mathbb{E}_p [ s(p) ].\r\n[/tex]\r\n
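\r\nHere the expectation under the true probability simply unpacks as a [tex]p[/tex]-weighted average of the two payoffs: \r\n\r\n[tex]\r\n\displaystyle\r\n\mathbb{E}_p[ s(r) ] = p\, s_1(r) + (1-p)\, s_0(r).\r\n[/tex]\r\n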
\r\n\r\nTo make this concrete, consider a wager on a coin toss in which you report a probability [tex]r[/tex] of heads and are paid [tex]1-(1-r)^2[/tex] dollars if it lands heads and [tex]1-r^2[/tex] dollars otherwise. If the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\r\n
\r\n[tex] \r\n\displaystyle\r\n\begin{array}{rcl}\r\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\r\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\r\n\end{array}\r\n[/tex]\r\n
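\r\nWriting out the expectation of this payment when heads has probability [tex]p[/tex] (the \"bit of algebra\" mentioned below) gives \r\n\r\n[tex]\r\n\displaystyle\r\n\mathbb{E}_p[ s(r)(w) ] = p(1-(1-r)^2) + (1-p)(1-r^2) = 1 - p + 2pr - r^2,\r\n[/tex]\r\n\r\nwhich can be rearranged by completing the square in [tex]r[/tex].\r\n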
\r\n\r\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., that [tex]w = 1[/tex] with probability [tex]p[/tex]), with a bit of algebra you will see that\r\n
\r\n[tex]\r\n\displaystyle\r\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\r\n[/tex]\r\n
\r\nwhich is clearly maximised only when [tex]p = r[/tex]. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\r\n\r\nThe quadratic rule used above is known as the _Brier score_; it is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\r\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\r\n\r\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\r\n
\r\n[tex]\r\n\displaystyle\r\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\r\n[/tex]\r\n
\r\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not, but it will not elicit the true probability [tex]p[/tex].\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks your guess is wrong. How much is she willing to pay to obtain a positive expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\r\n\r\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\r\n\r\n[This leads to a telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\nPrediction Markets in the Wild\r\n----------------------------------\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers at the [ACM conference on electronic commerce][ec08], which was running at the same time as COLT but in Chicago.\r\n\r\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\r\n\r\n[hubdub]: http://www.hubdub.com/\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\r\n\r\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. 
The main points of David\'s argument are: the terrorist activities made up a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market, since bets were limited to $100, making it more effective for them to trade on the reaction of financial markets, such as airline and oil stocks, to terrorism; and we already bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers, pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Prediction Markets and Scoring Rules',0,'','inherit','open','open','','47-revision-40','','','2008-08-11 09:28:41','2008-08-10 23:28:41','',47,'http://conflate.net/inductio/2008/08/47-revision-40/',0,'revision','',0),(94,2,'2008-08-11 20:44:25','2008-08-11 10:44:25','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) \"Pays $1 to bearer if it rains next Monday\", and \n* B) \"Pays $1 to bearer if it does not rain next Monday\". \n\nIf I\'m 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. 
\n\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price, it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \n\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as _proper scoring rules_ was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\n\nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a _report_ [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. A convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product\n[tex]\langle s(r), w \rangle[/tex] where [tex]w = [1-w_1, w_1][/tex].\n\nIf you know the scoring rule I use in advance, then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \n\nIn order to ensure you report what you really believe to be the true probabilities, I need to construct the scoring rule in such a way that its expected payoff is maximised when you report truthfully. That is, if [tex]p[/tex] is the true probability of the event occurring (i.e., of [tex]w_1 = 1[/tex]) then\n
\n[tex]\n\displaystyle\n\max_{r} \mathbb{E}_p [ s(r) ] = \mathbb{E}_p [ s(p) ].\n[/tex]\n
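\nAs a concrete rendering of this setup, here is a minimal Ruby sketch (mine, using the quadratic Brier rule for concreteness) of the payment as an inner product:\n\n    # s(r) = [s_0(r), s_1(r)] for the quadratic rule (an assumption here).\n    def s(r1)\n      [1 - r1**2, 1 - (1 - r1)**2]\n    end\n\n    # Payment <s(r), w> with w = [1 - w1, w1].\n    def payment(r1, w1)\n      w = [1 - w1, w1]\n      s(r1).zip(w).inject(0) { |acc, (si, wi)| acc + si * wi }\n    end\n\n    puts payment(0.3, 1)    # => 0.51, i.e. s_1(0.3)\n    puts payment(0.3, 0)    # => 0.91, i.e. s_0(0.3)\n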
\n\nTo make this concrete, consider a wager on a coin toss in which you report a probability [tex]r[/tex] of heads and are paid [tex]1-(1-r)^2[/tex] dollars if it lands heads and [tex]1-r^2[/tex] dollars otherwise. If the random variable [tex]w[/tex] is 1 when the coin lands heads and 0 for tails, we can write the payment you will receive as a projection onto [1,0] for heads or [0,1] for tails.\n
\n[tex] \n\displaystyle\n\begin{array}{rcl}\ns(r)(w) & = & \langle \left[ 1-(1-r)^2 , 1-r^2 \right], \left[ w , 1-w \right] \rangle \\\n & = & (1-(1-r)^2)w + (1-r^2)(1-w).\n\end{array}\n[/tex]\n
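\nAs an aside, the fixed-increment market-maker arithmetic from the rain example earlier can be checked with a few lines of Ruby (again, just a sketch of mine):\n\n    # Contract B prices rise from $0.50 by $0.01 per purchase (in cents);\n    # the buyer believes the probability of rain is 0.7 and buys 20 contracts.\n    prices_cents = (0...20).map { |i| 50 + i }               # 50, 51, ..., 69\n    cost_cents   = prices_cents.inject(0) { |s, c| s + c }   # => 1190, i.e. $11.90\n    p_rain = 0.7\n    gain   = p_rain * (2000 - cost_cents) - (1 - p_rain) * cost_cents\n    puts gain / 100.0    # => 2.1, the $2.10 expected gain\n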
\n\nTo maximise your expected return you will report an [tex]r[/tex] as close as possible to what you think the true probability is. Why is this? Well, if you write out the expected return under the assumption that [tex]p[/tex] is the true probability of heads (i.e., that [tex]w = 1[/tex] with probability [tex]p[/tex]), with a bit of algebra you will see that\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = p^2 - p + 1 - (p - r)^2\n[/tex]\n
\nwhich is clearly maximised only when [tex]p = r[/tex]. That is, you maximise your expected payment when your report of the probability of heads is equal to the true probability of heads.\n\nThe quadratic rule used above is known as the _Brier score_; it is one of a whole class of proper scoring rules defined by the property that they are maximised by reporting the true probability for an event.\nIt turns out that this class of functions has quite a lot of structure. Recently, Lambert et al.[^4] have characterised which scoring rules are proper and go further to describe what general properties of distributions can be elicited using proper scoring rules. It\'s a very nice piece of work which I\'ll write more about in a later post.\n\nAs a quick aside, the linear scoring rule [tex]s(r) = [r, 1-r][/tex] would appear to be a simpler and more natural alternative to the Brier score for elicitation but it is, in fact, not a proper scoring rule. This is easy to see since its expectation is\n
\n[tex]\n\displaystyle\n\mathbb{E}_p[ s(r)(w) ] = 1 - p + r(2p - 1).\n[/tex]\n
\nIf [tex]p > 0.5[/tex] then [tex]2p-1 > 0[/tex] and so this quantity is maximised by choosing [tex]r = 1[/tex]. Alternatively, if [tex]p < 0.5[/tex] it is maximised by [tex]r = 0[/tex]. This means that this rule would elicit a correct _classification_ of whether heads is more likely than tails or not, but it will not elicit the true probability [tex]p[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. How much is she willing to pay to obtain a positive expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to a telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers at the [ACM conference on electronic commerce][ec08], which was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. 
The main points of David\'s argument are: the terrorist activities made up a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market, since bets were limited to $100, making it more effective for them to trade on the reaction of financial markets, such as airline and oil stocks, to terrorism; and we already bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers, pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Scoring Rules and Prediction Markets',0,'','inherit','open','open','','47-revision-41','','','2008-08-11 20:44:25','2008-08-11 10:44:25','',47,'http://conflate.net/inductio/2008/08/47-revision-41/',0,'revision','',0),(95,2,'2008-08-11 21:07:24','2008-08-11 11:07:24','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) \"Pays $1 to bearer if it rains next Monday\", and \n* B) \"Pays $1 to bearer if it does not rain next Monday\". \n\nIf I\'m 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. 
\n\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price, it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \n\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as \"proper scoring rules\" was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\n\nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a \"report\" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. \n\nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]\langle s(r), w \rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]\langle s(r), w \rangle = s_1(r)[/tex] and similarly the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex].\n\nIf you know the scoring rule I use in advance, then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \n\nIn order to ensure you report what you really believe to be the true probabilities, I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then\n
\n[tex]\n\displaystyle\n\max_{r} \mathbb{E}_p \langle s(r), w \rangle = \mathbb{E}_p \langle s(p), w \rangle .\n[/tex]\n
\nScoring rules that meet this criterion are described as \"proper\" or \"Fisher consistent\".\n\nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \n
\n[tex]\n\displaystyle\n\mathbb{E}_p \langle s(r), w \rangle \n= \langle s(r), \mathbb{E}_p w \rangle\n= \langle s(r), p \rangle\n[/tex]\n
\nsince [tex]\mathbb{E}_p w = p[/tex]. If everything is suitably differentiable, the Fisher consistency (or \"properness\") condition can be restated as requiring that the gradient of the scoring rule disappear when [tex]r = p[/tex]. That is, [tex](\nabla_r s)(p) = 0[/tex].\n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. How much is she willing to pay to obtain a positive expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to a telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers at the [ACM conference on electronic commerce][ec08], which was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. 
The main points of David\'s argument are: the terrorist activities made up a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market, since bets were limited to $100, making it more effective for them to trade on the reaction of financial markets, such as airline and oil stocks, to terrorism; and we already bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers, pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Scoring Rules and Prediction Markets',0,'','inherit','open','open','','47-revision-42','','','2008-08-11 21:07:24','2008-08-11 11:07:24','',47,'http://conflate.net/inductio/2008/08/47-revision-42/',0,'revision','',0),(96,2,'2008-08-11 21:08:21','2008-08-11 11:08:21','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\nTrading Cash for Probability\r\n-------------------------------\r\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \r\n\r\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\r\n\r\n* A) \"Pays $1 to bearer if it rains next Monday\", and \r\n* B) \"Pays $1 to bearer if it does not rain next Monday\". \r\n\r\nIf I\'m 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. 
\r\n\r\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price, it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \r\n\r\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \r\n\r\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\r\n\r\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\r\n\r\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\r\n\r\nProper Scoring Rules\r\n------------------------\r\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as \"proper scoring rules\" was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\r\n\r\nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a \"report\" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. \r\n\r\nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]\langle s(r), w \rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]\langle s(r), w \rangle = s_1(r)[/tex] and similarly the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex].\r\n\r\nIf you know the scoring rule I use in advance, then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \r\n\r\nIn order to ensure you report what you really believe to be the true probabilities, I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then\r\n
\r\n[tex]\r\n\displaystyle\r\n\max_{r} \mathbb{E}_p \langle s(r), w \rangle = \mathbb{E}_p \langle s(p), w \rangle .\r\n[/tex]\r\n
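\r\nFor example, for the quadratic (Brier) rule [tex]s(r) = [1-r_1^2, 1-(1-r_1)^2][/tex], this condition can be checked directly by differentiating with respect to the report: \r\n\r\n[tex]\r\n\displaystyle\r\n\frac{d}{dr_1} \mathbb{E}_p \langle s(r), w \rangle = -2r_1(1-p_1) + 2(1-r_1)p_1 = 2(p_1 - r_1),\r\n[/tex]\r\n\r\nwhich vanishes exactly at the truthful report [tex]r_1 = p_1[/tex].\r\n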
\r\nScoring rules that meet this criterion are described as \"proper\" or \"Fisher consistent\".\r\n\r\nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \r\n
\r\n[tex]\r\n\displaystyle\r\n\mathbb{E}_p \langle s(r), w \rangle \r\n= \langle s(r), \mathbb{E}_p w \rangle\r\n= \langle s(r), p \rangle\r\n[/tex]\r\n
\r\nsince [tex]\mathbb{E}_p w = p[/tex]. If everything is suitably differentiable, the Fisher consistency (or \"properness\") condition can be restated as requiring that the gradient of the scoring rule disappear when [tex]r = p[/tex]. That is, [tex](\nabla_r s)(p) = 0[/tex].\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks your guess is wrong. How much is she willing to pay to obtain a positive expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I give here.\r\n\r\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\r\n\r\n[This leads to a telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in accuracy for the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\nPrediction Markets in the Wild\r\n----------------------------------\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers at the [ACM conference on electronic commerce][ec08], which was running at the same time as COLT but in Chicago.\r\n\r\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\r\n\r\n[hubdub]: http://www.hubdub.com/\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nThoughts on information becoming a commodity. Machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\r\n\r\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. 
The main points of David\'s argument are: the terrorist activities made up a tiny part of the contracts for events in the Middle East; terrorists could not get rich playing this market, since bets were limited to $100, making it more effective for them to trade on the reaction of financial markets, such as airline and oil stocks, to terrorism; and we already bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\nReferences\r\n------------\r\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers, pp. 107-119 (2003).\r\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\r\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\r\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Scoring Rules and Prediction Markets',0,'','inherit','open','open','','47-revision-43','','','2008-08-11 21:08:21','2008-08-11 11:08:21','',47,'http://conflate.net/inductio/2008/08/47-revision-43/',0,'revision','',0),(97,2,'2008-08-11 21:09:38','2008-08-11 11:09:38','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues including how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \r\n\r\n[robin hanson]: http://hanson.gmu.edu/\r\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\r\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\r\n\r\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\r\n\r\nTrading Cash for Probability\r\n-------------------------------\r\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \r\n\r\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\r\n\r\n* A) \"Pays $1 to bearer if it rains next Monday\", and \r\n* B) \"Pays $1 to bearer if it does not rain next Monday\". \r\n\r\nIf I\'m 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. 
\r\n\r\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price, it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \r\n\r\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \r\n\r\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\r\n\r\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will rain and your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\r\n\r\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\r\n\r\nProper Scoring Rules\r\n------------------------\r\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as \"proper scoring rules\" was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\r\n\r\nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)] [/tex] is a function of a \"report\" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. \r\n\r\nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]\langle s(r), w \rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]\langle s(r), w \rangle = s_1(r)[/tex] and similarly the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex].\r\n\r\nIf you know the scoring rule I use in advance, then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \r\n\r\nIn order to ensure you report what you really believe to be the true probabilities, I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then\r\n
\r\n[tex]\r\n\displaystyle\r\n\max_{r} \mathbb{E}_p \langle s(r), w \rangle = \mathbb{E}_p \langle s(p), w \rangle .\r\n[/tex]\r\n
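As a quick numerical sanity check of this condition, the following Ruby sketch uses the quadratic (Brier) rule [tex]s_0(r) = 1 - r_1^2[/tex], [tex]s_1(r) = 1 - (1 - r_1)^2[/tex] --- a standard proper scoring rule, used here purely as a concrete stand-in --- and confirms that the expected payment is maximised by reporting the true probability:

    # Expected payment under true probability p1 for a report r1,
    # using the quadratic (Brier) scoring rule.
    def expected_score(p1, r1)
      p1 * (1.0 - (1.0 - r1)**2) + (1.0 - p1) * (1.0 - r1**2)
    end

    p1 = 0.3
    reports = (0..100).map { |i| i / 100.0 }
    best = reports.max { |a, b| expected_score(p1, a) <=> expected_score(p1, b) }
    puts best  # => 0.3 -- truthful reporting maximises the expected score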
\r\nScoring rules that meet this criterion are described as \"proper\" or \"Fisher consistent\".\r\n\r\nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \r\n
\r\n[tex]\r\n\displaystyle\r\n\mathbb{E}_p \langle s(r), w \rangle = \langle s(r), \mathbb{E}_p w \rangle = \langle s(r), p \rangle\r\n[/tex]\r\n
\r\nsince [tex]\mathbb{E}_p w = p[/tex]. If everything is suitably differentiable, the Fisher consistency (or \"properness\") condition can be restated as requiring that the gradient of the expected payment vanish at a truthful report. That is, [tex]\nabla_r \langle s(r), p \rangle = 0[/tex] at [tex]r = p[/tex].\r\n\r\nMarket Scoring Rules\r\n------------------------\r\n\r\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\r\n\r\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I gave here.\r\n\r\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\r\n\r\n[This leads to a telescoping rule for MSRs]\r\n\r\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\r\n\r\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.\r\n\r\nPrediction Markets in the Wild\r\n----------------------------------\r\n\r\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\r\n\r\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\r\n\r\n[hubdub]: http://www.hubdub.com/\r\n[david pennock]: http://dpennock.com/\r\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\r\n[ec08]: http://www.sigecom.org/ec08/\r\n\r\nSome thoughts on information becoming a commodity: machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\r\n\r\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\r\n\r\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\r\n\r\n[powerful]: http://artificialmarkets.com/\r\n[electoralmarkets]: http://www.electoralmarkets.com/\r\n[john]: http://hunch.net/?p=396\r\n[intrade]: http://www.intrade.com/\r\n\r\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. 
The main points of David\'s argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; and we bet against bad things happening to us when we take out insurance.\r\n\r\n[pam]: http://dpennock.com/pam.html\r\n\r\n\r\n[book and market maker]: http://blog.commerce.net/?p=251\r\n\r\nReferences\r\n------------\r\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\r\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\r\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\r\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Scoring Rules and Prediction Markets',0,'','inherit','open','open','','47-revision-44','','','2008-08-11 21:09:38','2008-08-11 11:09:38','',47,'http://conflate.net/inductio/2008/08/47-revision-44/',0,'revision','',0),(98,2,'2008-08-11 21:53:40','2008-08-11 11:53:40','[Robin Hanson][] gave a great introduction to prediction markets at [COLT this year][colt]. He covered a range of issues, including how prediction markets can be used to aggregate information through \"market scoring rules\"[^1]. I\'ve been investigating certain aspects of plain [scoring rules][] for a while now so I was curious to understand how they are extended and, more generally, curious about the workings of information markets. \n\n[robin hanson]: http://hanson.gmu.edu/\n[colt]: http://conflate.net/inductio/2008/07/colt-2008-highlights/\n[scoring rules]: http://en.wikipedia.org/wiki/Scoring_rule\n\nThis post is a first attempt at understanding prediction markets and a travelogue of the links and papers I\'ve uncovered along the way. My strongest impression at present is that there is a lot of interesting work going on in this area at the moment. Consequently, what I present here will be -- for my sake -- a very simplified view.\n\nTrading Cash for Probability\n-------------------------------\nPrediction markets are a natural extension of what goes on in financial markets every day: people buying and selling stocks depending on whether they think a company will make a profit and return dividends sometime in the future. Instead of stocks that pay dividends, participants in prediction markets trade in contracts that pay out should a well-defined future event take place. \n\nFor example, we can consider contracts for whether or not it rains next Monday at a specific location. For a binary event like this the contracts come in the pair:\n\n* A) \"Pays $1 to bearer if it rains next Monday\", and \n* B) \"Pays $1 to bearer if it does not rain next Monday\". \n\nIf I\'m 50% sure it will rain that day then the expected values of contracts A and B to me are both $0.50. If you think there is a 30% chance of rain then contract A\'s expected value for you is $0.30 and contract B\'s value is $0.70. 
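The $0.01 price-adjustment game described next can be simulated directly. A minimal Ruby sketch (working in cents to avoid floating-point drift) that reproduces the dollar figures in the walk-through below:

    # You believe P(no rain) = 0.7 and keep buying contract B ($1 if no
    # rain) while its price is below your expected value of $0.70. The
    # seller bumps the price by $0.01 per purchase, starting from $0.50.
    price, cost, held = 50, 0, 0          # all amounts in cents
    while price < 70
      cost  += price
      held  += 1
      price += 1
    end
    profit_if_dry = held * 100 - cost     # contracts pay $1 each if dry
    expected_gain = 0.7 * profit_if_dry - 0.3 * cost
    printf("bought %d contracts for $%.2f\n", held, cost / 100.0)  # 20, $11.90
    printf("profit if dry $%.2f, expected gain $%.2f\n",
           profit_if_dry / 100.0, expected_gain / 100.0)           # $8.10, $2.10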
\n\nIf I\'m selling these contracts I would set an initial price for both at $0.50, reflecting my belief in the chance of rain. If you buy contract B from me at that price it suggests that you think the chance of rain is less than 0.5 since, if your odds for rain are correct, you stand to make $0.20. \n\nIn the case of such a trade I should update my prices to, say, $0.49 for contract A and $0.51 for contract B to reflect the information I\'ve gleaned from your purchase. If you buy another contract for B, I should raise my price slightly again. Let\'s say I modify the price by $0.01 each time. \n\nContinuing this, I\'ll reduce your expected gain on each subsequent contract B you buy. After 20 purchases I\'ll reach prices of $0.30 and $0.70 respectively for contracts A and B. When this happens you will stop purchasing contracts from me since you no longer expect to gain any benefit from holding either.\n\nOnce the process is complete we wait for Monday to see if it rains. If your beliefs are correct then with probability 0.3 it will rain and you will lose money --- specifically, $(0.50 + 0.51 + ... + 0.69) = $11.90 --- since your 20 copies of contract B will be worthless. However, with probability 0.7 it will not rain, your 20 copies of contract B will be worth $20 and you will gain $20 - $11.90 = $8.10. Your expected gain (and my expected loss) if your beliefs are correct is therefore $2.10.\n\nAnother way to look at this is that I will expect to pay $2.10 for eliciting your correct belief in the probability of rain.\n\nProper Scoring Rules\n------------------------\nThis idea of eliciting probabilities via incentives such as in the above example has a long history. The first general statement of what are now known as \"proper scoring rules\" was by John McCarthy[^2] in 1956 and a more in-depth study by Leonard Savage[^3] was published in 1971. The presentation of scoring rules I use here is influenced by a very recent paper by Lambert et al.[^4]\n\nFor a single binary event, a scoring rule [tex]s(r) = [s_0(r), s_1(r)][/tex] is a function of a \"report\" [tex]r = [r_0, r_1][/tex] of the probabilities for that event. If you report [tex]r[/tex] and the event occurs you are paid [tex]s_1(r)[/tex]. If the event does not occur you are paid [tex]s_0(r)[/tex]. \n\nA convenient shorthand is to let [tex]w_1[/tex] be a random variable that is 1 if the event occurs and 0 otherwise. Then the payment from the scoring rule for a given report [tex]r[/tex] is the inner product [tex]\langle s(r), w \rangle[/tex] where [tex]w = [1-w_1, w_1][/tex]. This is because if [tex]w_1 = 1[/tex] then [tex]w = [0,1][/tex] and so [tex]\langle s(r), w \rangle = s_1(r)[/tex] and similarly the inner product is [tex]s_0(r)[/tex] if [tex]w_1 = 0[/tex].\n\nIf you know the scoring rule I use in advance then the game of gradually increasing the cost of the contracts as you buy more can be simplified. Now you just report the probabilities you believe will maximise what I will pay you using the scoring rule. \n\nIn order to ensure you report what you really believe to be the true probabilities I need to construct the scoring rule in such a way that your expected payoff is maximised when you report truthfully. That is, if [tex]p = [1-p_1, p_1][/tex] is the true probability distribution for the event then\n
\n[tex]\n\displaystyle\n\max_{r} \mathbb{E}_p \langle s(r), w \rangle = \mathbb{E}_p \langle s(p), w \rangle .\n[/tex]\n
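A quick numerical check of this condition, using the quadratic (Brier) rule [tex]s(r) = [1 - r_1^2, 1 - (1 - r_1)^2][/tex] --- a standard proper scoring rule, again only a concrete stand-in --- and finite differences to confirm that the derivative of the expected payment [tex]\langle s(r), p \rangle[/tex] vanishes at a truthful report (the derivative form of this condition is developed below):

    # Finite-difference check that d/dr1 <s(r), p> = 0 at r = p for the
    # quadratic (Brier) rule, parameterised by r1 = P(event).
    def score(r1)  [1.0 - r1**2, 1.0 - (1.0 - r1)**2]  end  # [s_0, s_1]

    p1, h = 0.3, 1.0e-6
    up, dn = score(p1 + h), score(p1 - h)
    ds = [(up[0] - dn[0]) / (2 * h), (up[1] - dn[1]) / (2 * h)]
    deriv = ds[0] * (1.0 - p1) + ds[1] * p1   # <ds/dr1 (p), p>
    puts deriv.abs < 1.0e-6                   # => true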
\nScoring rules that meet this criterion are described as \"proper\" or \"Fisher consistent\".\n\nThe reason the inner product notation is a useful shorthand is that, thanks to its linearity, we can now pull the expectation inside it to show that \n
\n[tex]\n\displaystyle\n\mathbb{E}_p \langle s(r), w \rangle = \langle s(r), \mathbb{E}_p w \rangle = \langle s(r), p \rangle\n[/tex]\n
\nsince [tex]\mathbb{E}_p w = p[/tex]. If everything is suitably differentiable, the Fisher consistency (or \"properness\") condition requires that the derivatives of the scoring rule satisfy, for all [tex]p[/tex],\n
\n[tex]\n\displaystyle\n\langle \frac{\partial}{\partial r_i} s(p), p \rangle = 0.\n[/tex]\n
\nThat means the derivatives of the scoring rule must be orthogonal to [tex]p[/tex]. \n\nMarket Scoring Rules\n------------------------\n\nExample: suppose someone else thinks your guess is wrong. What is she willing to pay to get an expected return? This can go on as long as there is a perceived discrepancy between the current guess and someone else\'s.\n\nDavid Pennock has a [similar analysis][pennock] of Hanson\'s logarithmic market scoring rule that helped me understand market scoring rules enough to present the (hopefully simpler) example I gave here.\n\n[pennock]: http://blog.oddhead.com/2006/10/30/implementing-hansons-market-maker/\n\n[This leads to a telescoping rule for MSRs]\n\nThe enticement of a possible reward acts as an incentive to find out more about the coin and its bias. People might study coins similar to the one thrown, learn about defects in their manufacture that might impart a bias, look at the history of the person throwing it, their star sign, etc.\n\nI asked Robin a pretty naïve question while speaking to him after his talk: How do these markets get started, since someone has to pay the contracts out when they mature? The answer is \"the person who wants the information\". Such a person sets the initial prices of the contracts to reflect their beliefs about some events and then any improvement in the accuracy of the probabilities for those events is converted to money when the contracts are paid out.\n\nPrediction Markets in the Wild\n----------------------------------\n\nThese markets can get quite sophisticated and keeping track of combinations of contracts can get tricky. [David Pennock][] is doing some nice work in this area and has even implemented some of his ideas as a Facebook betting application called [Yoopick][]. He also had a number of really good papers in the [ACM conference on electronic commerce][ec08] that was running at the same time as COLT but in Chicago.\n\nAnother site using prediction markets is [hubdub][]. Here people can bet \"play money\" on various types of news coverage.\n\n[hubdub]: http://www.hubdub.com/\n[david pennock]: http://dpennock.com/\n[yoopick]: http://blog.oddhead.com/2008/07/03/yoopick-a-sports-prediction-contest-on-facebook-with-a-research-twist/\n[ec08]: http://www.sigecom.org/ec08/\n\nSome thoughts on information becoming a commodity: machine learning will make certain simple types of decision making a commodity too (analogy: human habits and instinctual behaviour leave the mind free for higher-order planning and decision-making).\n\nResearch shows that, in the areas where they have been used, prediction markets are [powerful][].\n\n[John][] recently pointed out the [electoralmarkets][] site that takes data from [Intrade][] to track, state-by-state, the predicted results of the upcoming US federal election.\n\n[powerful]: http://artificialmarkets.com/\n[electoralmarkets]: http://www.electoralmarkets.com/\n[john]: http://hunch.net/?p=396\n[intrade]: http://www.intrade.com/\n\nDavid Pennock puts forward a [convincing argument][pam] that the so-called \"terrorism market\" was not as bad an idea as I first thought. 
The main points of David\'s argument are: the terrorist activities made up a tiny part of contracts for events in the Middle East; terrorists could not get rich playing this market since bets were limited to $100, making it more effective for them to trade on the financial markets\' reaction to terrorism on airline and oil companies; and we bet against bad things happening to us when we take out insurance.\n\n[pam]: http://dpennock.com/pam.html\n\n\n[book and market maker]: http://blog.commerce.net/?p=251\n\nReferences\n------------\n[^1]: [Combinatorial Information Market Design](http://www.citeulike.org/user/mdreid/article/3093106), R. Hanson, Information Systems Frontiers pp. 107-119 (2003).\n[^2]: [Measures of the Value of Information](http://www.citeulike.org/user/mdreid/article/3095794), J. McCarthy, Proceedings of the National Academy of Sciences of the United States of America 42, 654 (1956).\n[^3]: [Elicitation of Personal Probabilities and Expectations](http://www.citeulike.org/user/mdreid/article/2309030), L. J. Savage, Journal of the American Statistical Association 66, 783 (1971).\n[^4]: [Elicitability](http://www.citeulike.org/user/mdreid/article/3026076), N. Lambert, D. Pennock, Y. Shoham, Proceedings of the ACM Conference on Electronic Commerce (2008).','Scoring Rules and Prediction Markets',0,'','inherit','open','open','','47-revision-45','','','2008-08-11 21:53:40','2008-08-11 11:53:40','',47,'http://conflate.net/inductio/2008/08/47-revision-45/',0,'revision','',0),(99,2,'2008-08-29 11:32:51','2008-08-29 01:32:51','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR\'s Hammer][xor].\r\n\r\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\r\n[hardin]: http://maven.smith.edu/~chardin/\r\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\r\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\r\n\r\nIts main claim is that there exists an almost infallible prediction strategy. That is, one that will almost always predict the correct present value of some unknown function given all its past values. More specifically, they describe the µ-strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ≤ s < t + ε).\r\n\r\n\"Well\", you think, \"that\'s induction solved then. I\'m going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market.\" Unfortunately for your bank balance, the authors note that\r\n> We should emphasize that these results do not give a practical means of predicting \r\n> the future, just as the time dilation one would experience standing near the event \r\n> horizon of a black hole does not give a practical time machine.\r\n\r\nIn other words, the result is purely theoretical. Worse than that, the definition of the µ-strategy requires the [well-ordering theorem][] --- otherwise known as the [Axiom of Choice][]. 
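Since the µ-strategy (described below) needs a well-ordering of *all* functions --- something only the axiom of choice provides --- it cannot actually be implemented. A toy Ruby version over a small, explicitly enumerated (and hence trivially well-ordered) hypothesis class can still illustrate the "predict with the least consistent hypothesis" idea:

    # Toy illustration only: the real mu-strategy well-orders ALL functions
    # via the axiom of choice, which no program can do. Here the ordering
    # is just the position in a small hand-picked list of hypotheses.
    HYPOTHESES = [
      lambda { |t| 0 },      # the constant-0 sequence
      lambda { |t| 1 },      # the constant-1 sequence
      lambda { |t| t % 2 },  # the alternating sequence
    ]

    # Given past = [v(0), ..., v(t-1)], predict v(t) using the least
    # hypothesis that agrees with every past observation (nil if none).
    def mu_predict(past)
      t = past.length
      u = HYPOTHESES.find { |f| (0...t).all? { |s| f.call(s) == past[s] } }
      u && u.call(t)
    end

    puts mu_predict([0, 1, 0, 1])  # => 0 (the alternating sequence is least)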
\r\n\r\n[well-ordering theorem]: http://en.wikipedia.org/wiki/Well-ordering_theorem\r\n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice\r\n\r\nAside from being completely non-constructive, the µ-strategy is relatively straightforward. First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -∞ < s < t. When presented with the values for some unknown v up to time t the µ-strategy simply chooses the \"smallest\" member, say u, of [v]t with respect to the ordering and outputs u(t).\r\n\r\nSo, apart from the bit where you have to invoke the well-ordering theorem to order the set of all functions over the reals, it\'s a very simple strategy.\r\n\r\nThe authors suggest that the chosen well-ordering can be thought of as a measure of simplicity. In this case the µ-strategy just chooses the \"simplest\" function consistent with the observations to date. More generally, the well-ordering can be thought of as a bias for the strategy. Different choices of bias will lead to different predictions based on the same past observations. What\'s odd about the result is that *no matter what bias is chosen the µ-strategy will only ever make countably many mistakes*.\r\n\r\nUltimately, the [intuitionist][] in me looks upon this result much as I see the [Banach-Tarski paradox][]. That is, as evidence against the use of the axiom of choice (and its equivalents) in mathematics that\'s even vaguely practical. Still, the result is an interesting one that analyses the problem of induction in a very abstract setting.\r\n\r\n[banach-tarski paradox]: http://en.wikipedia.org/wiki/Banach-Tarski_paradox\r\n[intuitionist]: http://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/','Prediction and the Axiom of Choice',0,'Some thoughts on Hardin and Taylor\'s paper \"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\".','publish','open','open','','prediction-and-the-axiom-of-choice','','http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\nhttp://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/','2008-09-23 07:55:08','2008-09-22 21:55:08','',0,'http://conflate.net/inductio/?p=99',0,'post','',0),(100,2,'2008-08-25 19:18:02','2008-08-25 09:18:02','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via \n\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\n[hardin]: http://maven.smith.edu/~chardin/\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html','Prediction and the Axiom of Choice',0,'','inherit','open','open','','99-revision','','','2008-08-25 19:18:02','2008-08-25 09:18:02','',99,'http://conflate.net/inductio/2008/08/99-revision/',0,'revision','',0),(101,2,'2008-08-27 20:03:28','2008-08-27 10:03:28','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR\'s Hammer][xor].\n\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\n[hardin]: http://maven.smith.edu/~chardin/\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\n\nThe well-ordering 
acts as a bias (in the sense of Mitchell) but it is non-constructible. The theorem says that any choice of bias will lead to good prediction since the µ-strategy works for any choice of ordering.\n\nOne way to look at this is evidence against the use of the axiom of choice (Zorn\'s lemma / well-ordering principle), adding to the Banach-Tarski paradox. ','Prediction and the Axiom of Choice',0,'','inherit','open','open','','99-revision-2','','','2008-08-27 20:03:28','2008-08-27 10:03:28','',99,'http://conflate.net/inductio/2008/08/99-revision-2/',0,'revision','',0),(102,2,'2008-08-28 18:03:28','2008-08-28 08:03:28','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR\'s Hammer][xor].\n\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\n[hardin]: http://maven.smith.edu/~chardin/\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\n\nIt\'s main claim is that there exists an almost infallible prediction strategy. That is, one that will almost always predict the correct present value of some unknown function given all its past values. More specifically, they describe the µ-strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ≤ s < t + ε).\n\n\"Well\", you think, \"that\'s induction solved then. I\'m going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market.\" Unfortunately for your bank balance, the authors note that\n> We should emphasize that these results do not give a practical means of predicting \n> the future, just as the time dilation one would experience standing near the event \n> horizon of a black hole does not give a practical time machine.\n\nIn other words, the result is purely theoretical. Worse than that, the definition of the µ-strategy requires the [well-ordering theorem](http://en.wikipedia.org/wiki/Well-ordering_theorem) --- otherwise known as the [Axiom of Choice](http://en.wikipedia.org/wiki/Axiom_of_choice). \n\nAside from being completely non-constructive the µ-strategy is relatively straight-forward. First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -∞ < s < t. When presented with the values for some unknown v up to time t the µ-strategy simply chooses the \"smallest\" member of [v]t with respect to\n\nThe well-ordering acts as a bias (in the sense of Mitchell) but it is non-constructible. The theorem says that any choice of bias will lead to good prediction since the µ-strategy works for any choice of ordering.\n\nOne way to look at this is evidence against the use of the axiom of choice (Zorn\'s lemma / well-ordering principle), adding to the Banach-Tarski paradox. 
','Prediction and the Axiom of Choice',0,'','inherit','open','open','','99-revision-3','','','2008-08-28 18:03:28','2008-08-28 08:03:28','',99,'http://conflate.net/inductio/2008/08/99-revision-3/',0,'revision','',0),(103,2,'2008-08-28 18:09:00','2008-08-28 08:09:00','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR\'s Hammer][xor].\n\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\n[hardin]: http://maven.smith.edu/~chardin/\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\n\nIt\'s main claim is that there exists an almost infallible prediction strategy. That is, one that will almost always predict the correct present value of some unknown function given all its past values. More specifically, they describe the µ-strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ≤ s < t + ε).\n\n\"Well\", you think, \"that\'s induction solved then. I\'m going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market.\" Unfortunately for your bank balance, the authors note that\n> We should emphasize that these results do not give a practical means of predicting \n> the future, just as the time dilation one would experience standing near the event \n> horizon of a black hole does not give a practical time machine.\n\nIn other words, the result is purely theoretical. Worse than that, the definition of the µ-strategy requires the [well-ordering theorem](http://en.wikipedia.org/wiki/Well-ordering_theorem) --- otherwise known as the [Axiom of Choice](http://en.wikipedia.org/wiki/Axiom_of_choice). \n\nAside from being completely non-constructive the µ-strategy is relatively straight-forward. First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -∞ < s < t. When presented with the values for some unknown v up to time t the µ-strategy simply chooses the \"smallest\" member, say u, of [v]t with respect to the ordering and outputs u(t).\n\nSo, apart for the bit where you have to invoke the well-ordering theorem to order the set of all functions over the reals, it\'s a very simple strategy.\n\nThe well-ordering acts as a bias (in the sense of Mitchell) but it is non-constructible. The theorem says that any choice of bias will lead to good prediction since the µ-strategy works for any choice of ordering.\n\nUltimately, the [intuitionist][] in me looks upon this result much as I see the [Banach-Tarski paradox][] --- evidence against the use of the axiom of choice (and its equivalents) in matheme\nOne way to look at this is evidence against the use of the axiom of choice (Zorn\'s lemma / well-ordering principle), adding to the. 
\n\n[intuitionist]: http://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/','Prediction and the Axiom of Choice',0,'','inherit','open','open','','99-revision-4','','','2008-08-28 18:09:00','2008-08-28 08:09:00','',99,'http://conflate.net/inductio/2008/08/99-revision-4/',0,'revision','',0),(104,2,'2008-08-29 11:27:30','2008-08-29 01:27:30','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR\'s Hammer][xor].\n\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\n[hardin]: http://maven.smith.edu/~chardin/\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\n\nIt\'s main claim is that there exists an almost infallible prediction strategy. That is, one that will almost always predict the correct present value of some unknown function given all its past values. More specifically, they describe the µ-strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ≤ s < t + ε).\n\n\"Well\", you think, \"that\'s induction solved then. I\'m going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market.\" Unfortunately for your bank balance, the authors note that\n> We should emphasize that these results do not give a practical means of predicting \n> the future, just as the time dilation one would experience standing near the event \n> horizon of a black hole does not give a practical time machine.\n\nIn other words, the result is purely theoretical. Worse than that, the definition of the µ-strategy requires the [well-ordering theorem][] --- otherwise known as the [Axiom of Choice][]. \n\n[well-ordering theorem]: http://en.wikipedia.org/wiki/Well-ordering_theorem\n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice\n\nAside from being completely non-constructive the µ-strategy is relatively straight-forward. First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -∞ < s < t. When presented with the values for some unknown v up to time t the µ-strategy simply chooses the \"smallest\" member, say u, of [v]t with respect to the ordering and outputs u(t).\n\nSo, apart for the bit where you have to invoke the well-ordering theorem to order the set of all functions over the reals, it\'s a very simple strategy.\n\nThe author\'s suggest that the chosen well-ordering can be thought of as a measure of simplicity. In this case the µ-strategy just chooses the \"simplest\" function consistent with the observations to date. More generally, the well-ordering can be thought of as a bias for the strategy. Different choices of bias will lead to different predictions based on the same past observations. What\'s odd about the result is that *no matter what bias is chosen the µ-strategy will only ever make countably many mistakes*.\n\nUltimately, the [intuitionist][] in me looks upon this result much as I see the [Banach-Tarski paradox][]. 
That is, as evidence against the use of the axiom of choice (and its equivalents) in mathematics that\'s even vaguely practical. Still, the paper is a \n\n[intuitionist]: http://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/','Prediction and the Axiom of Choice',0,'','inherit','open','open','','99-revision-5','','','2008-08-29 11:27:30','2008-08-29 01:27:30','',99,'http://conflate.net/inductio/2008/08/99-revision-5/',0,'revision','',0),(105,2,'2008-08-29 11:28:18','2008-08-29 01:28:18','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR\'s Hammer][xor].\r\n\r\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\r\n[hardin]: http://maven.smith.edu/~chardin/\r\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\r\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\r\n\r\nIt\'s main claim is that there exists an almost infallible prediction strategy. That is, one that will almost always predict the correct present value of some unknown function given all its past values. More specifically, they describe the µ-strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ≤ s < t + ε).\r\n\r\n\"Well\", you think, \"that\'s induction solved then. I\'m going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market.\" Unfortunately for your bank balance, the authors note that\r\n> We should emphasize that these results do not give a practical means of predicting \r\n> the future, just as the time dilation one would experience standing near the event \r\n> horizon of a black hole does not give a practical time machine.\r\n\r\nIn other words, the result is purely theoretical. Worse than that, the definition of the µ-strategy requires the [well-ordering theorem][] --- otherwise known as the [Axiom of Choice][]. \r\n\r\n[well-ordering theorem]: http://en.wikipedia.org/wiki/Well-ordering_theorem\r\n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice\r\n\r\nAside from being completely non-constructive the µ-strategy is relatively straight-forward. First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -∞ < s < t. When presented with the values for some unknown v up to time t the µ-strategy simply chooses the \"smallest\" member, say u, of [v]t with respect to the ordering and outputs u(t).\r\n\r\nSo, apart for the bit where you have to invoke the well-ordering theorem to order the set of all functions over the reals, it\'s a very simple strategy.\r\n\r\nThe author\'s suggest that the chosen well-ordering can be thought of as a measure of simplicity. In this case the µ-strategy just chooses the \"simplest\" function consistent with the observations to date. More generally, the well-ordering can be thought of as a bias for the strategy. Different choices of bias will lead to different predictions based on the same past observations. 
What\'s odd about the result is that *no matter what bias is chosen the µ-strategy will only ever make countably many mistakes*.\r\n\r\nUltimately, the [intuitionist][] in me looks upon this result much as I see the [Banach-Tarski paradox][]. That is, as evidence against the use of the axiom of choice (and its equivalents) in mathematics that\'s even vaguely practical. Still, the result is an interesting one that analyses the problem of induction in a very abstract setting.\r\n\r\n[intuitionist]: http://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/','Prediction and the Axiom of Choice',0,'','inherit','open','open','','99-revision-6','','','2008-08-29 11:28:18','2008-08-29 01:28:18','',99,'http://conflate.net/inductio/2008/08/99-revision-6/',0,'revision','',0),(106,2,'2008-08-29 11:30:19','2008-08-29 01:30:19','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR\'s Hammer][xor].\r\n\r\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\r\n[hardin]: http://maven.smith.edu/~chardin/\r\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\r\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\r\n\r\nIt\'s main claim is that there exists an almost infallible prediction strategy. That is, one that will almost always predict the correct present value of some unknown function given all its past values. More specifically, they describe the µ-strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ≤ s < t + ε).\r\n\r\n\"Well\", you think, \"that\'s induction solved then. I\'m going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market.\" Unfortunately for your bank balance, the authors note that\r\n> We should emphasize that these results do not give a practical means of predicting \r\n> the future, just as the time dilation one would experience standing near the event \r\n> horizon of a black hole does not give a practical time machine.\r\n\r\nIn other words, the result is purely theoretical. Worse than that, the definition of the µ-strategy requires the [well-ordering theorem][] --- otherwise known as the [Axiom of Choice][]. \r\n\r\n[well-ordering theorem]: http://en.wikipedia.org/wiki/Well-ordering_theorem\r\n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice\r\n\r\nAside from being completely non-constructive the µ-strategy is relatively straight-forward. First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -∞ < s < t. When presented with the values for some unknown v up to time t the µ-strategy simply chooses the \"smallest\" member, say u, of [v]t with respect to the ordering and outputs u(t).\r\n\r\nSo, apart for the bit where you have to invoke the well-ordering theorem to order the set of all functions over the reals, it\'s a very simple strategy.\r\n\r\nThe author\'s suggest that the chosen well-ordering can be thought of as a measure of simplicity. 
In this case the µ-strategy just chooses the \"simplest\" function consistent with the observations to date. More generally, the well-ordering can be thought of as a bias for the strategy. Different choices of bias will lead to different predictions based on the same past observations. What\'s odd about the result is that *no matter what bias is chosen the µ-strategy will only ever make countably many mistakes*.\r\n\r\nUltimately, the [intuitionist][] in me looks upon this result much as I see the [Banach-Tarski paradox][]. That is, as evidence against the use of the axiom of choice (and its equivalents) in mathematics that\'s even vaguely practical. Still, the result is an interesting one that analyses the problem of induction in a very abstract setting.\r\n\r\n[intuitionist]: http://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/','Prediction and the Axiom of Choice',0,'Some thoughts on Hardin and Taylor\'s paper \"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\".','inherit','open','open','','99-revision-7','','','2008-08-29 11:30:19','2008-08-29 01:30:19','',99,'http://conflate.net/inductio/2008/08/99-revision-7/',0,'revision','',0),(107,2,'2008-08-29 11:32:22','2008-08-29 01:32:22','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR\'s Hammer][xor].\r\n\r\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\r\n[hardin]: http://maven.smith.edu/~chardin/\r\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\r\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\r\n\r\nIt\'s main claim is that there exists an almost infallible prediction strategy. That is, one that will almost always predict the correct present value of some unknown function given all its past values. More specifically, they describe the µ-strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ≤ s < t + ε).\r\n\r\n\"Well\", you think, \"that\'s induction solved then. I\'m going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market.\" Unfortunately for your bank balance, the authors note that\r\n> We should emphasize that these results do not give a practical means of predicting \r\n> the future, just as the time dilation one would experience standing near the event \r\n> horizon of a black hole does not give a practical time machine.\r\n\r\nIn other words, the result is purely theoretical. Worse than that, the definition of the µ-strategy requires the [well-ordering theorem][] --- otherwise known as the [Axiom of Choice][]. \r\n\r\n[well-ordering theorem]: http://en.wikipedia.org/wiki/Well-ordering_theorem\r\n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice\r\n\r\nAside from being completely non-constructive the µ-strategy is relatively straight-forward. First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -∞ < s < t. 
When presented with the values for some unknown v up to time t the µ-strategy simply chooses the \"smallest\" member, say u, of [v]t with respect to the ordering and outputs u(t).\r\n\r\nSo, apart for the bit where you have to invoke the well-ordering theorem to order the set of all functions over the reals, it\'s a very simple strategy.\r\n\r\nThe author\'s suggest that the chosen well-ordering can be thought of as a measure of simplicity. In this case the µ-strategy just chooses the \"simplest\" function consistent with the observations to date. More generally, the well-ordering can be thought of as a bias for the strategy. Different choices of bias will lead to different predictions based on the same past observations. What\'s odd about the result is that *no matter what bias is chosen the µ-strategy will only ever make countably many mistakes*.\r\n\r\nUltimately, the [intuitionist][] in me looks upon this result much as I see the [Banach-Tarski paradox][]. That is, as evidence against the use of the axiom of choice (and its equivalents) in mathematics that\'s even vaguely practical. Still, the result is an interesting one that analyses the problem of induction in a very abstract setting.\r\n\r\n[banach-tarski paradox]: http://en.wikipedia.org/wiki/Banach-Tarski_paradox\r\n[intuitionist]: http://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/','Prediction and the Axiom of Choice',0,'Some thoughts on Hardin and Taylor\'s paper \"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\".','inherit','open','open','','99-revision-8','','','2008-08-29 11:32:22','2008-08-29 01:32:22','',99,'http://conflate.net/inductio/2008/08/99-revision-8/',0,'revision','',0),(109,2,'0000-00-00 00:00:00','0000-00-00 00:00:00','\r\n\r\n[paper]: http://dx.doi.org/10.1007/s10994-008-5079-1','Structured Machine Learning: The Next Ten Years',0,'','draft','open','open','','','','','2008-09-03 14:35:36','2008-09-03 04:35:36','',0,'http://conflate.net/inductio/?p=109',0,'post','',0),(110,2,'2008-09-03 14:35:20','2008-09-03 04:35:20','','Structured Machine Learning: The Next Ten Years',0,'','inherit','open','open','','109-revision','','','2008-09-03 14:35:20','2008-09-03 04:35:20','',109,'http://conflate.net/inductio/2008/09/109-revision/',0,'revision','',0),(111,2,'2008-09-22 17:00:09','2008-09-22 07:00:09','Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). \r\n\r\nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics show that I\'ve had almost 6,000 visits and over 10,000 page views.\r\n\r\n
\r\n\"Visitors\r\n
\r\n\r\nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly\'s [post on the same topic](http://kk.org/cooltools/archives/002879.php). It\'s my most read post with over 2,500 views.\r\n\r\nThe second and third most popular posts were on [prediction and the axiom of choice](http://conflate.net/inductio/2008/08/prediction-and-the-axiom-of-choice/) with about 1,300 views and [Visualising ROC and cost curve duality](http://conflate.net/inductio/2008/04/visualising-roc-and-cost-curve-duality/) with just over 400 views.\r\n\r\nOf course, it is natural to want to increase those figures but overall I\'ve been fairly happy at the frequency of posts I\'ve written and the number of readers I\'ve attracted to what is a fairly narrow subject area.\r\n\r\nSo, one year down and one year to go on my current post-doc. In the year ahead, I\'ll continue writing about research by others in machine learning but I\'ll also try to include more expositions of some of my own work.','A Year of Research Blogging',0,'Looking back on a year of research blogging about machine learning.','publish','open','open','','a-year-of-research-blogging','','\nhttp://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/','2008-09-22 16:59:46','2008-09-22 06:59:46','',0,'http://conflate.net/inductio/?p=111',0,'post','',0),(112,2,'2008-09-12 09:06:22','2008-09-11 23:06:22','','year-stats',0,'Visitors over the last year','inherit','open','open','','picture-1','','','2008-09-12 09:06:22','2008-09-11 23:06:22','',111,'http://conflate.net/inductio/wp-content/uploads/2008/09/picture-1.png',0,'attachment','image/png',0),(113,2,'2008-09-12 09:16:14','2008-09-11 23:16:14','Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). \n\nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics show that I\'ve jad almost 6,000 visits and over 10,000 page views.\n\n[caption id=\"attachment_112\" align=\"alignnone\" width=\"300\" caption=\"Visitors over the last year\"]\"Visitors[/caption]\n\nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly\'s [post on the same topic](http://kk.org/cooltools/archives/002879.php).\n\nOf course, it is natural to increase both those figures but overall, I\'ve been fairly happy at the frequency of posts I\'ve written and the number of readers I\'ve attracted to what is a fairly narrow subject area.\n\nSo, one year down and one year to go on my current post-doc. 
In the year ahead, I\'ll continue writing about research by others in machine learning but I\'ll also try to include expositions on some of the work I\'ve been turning into papers.','A Year of Research Blogging',0,'Looking back on a year of research blogging about machine learning.','inherit','open','open','','111-revision','','','2008-09-12 09:16:14','2008-09-11 23:16:14','',111,'http://conflate.net/inductio/2008/09/111-revision/',0,'revision','',0),(115,2,'2008-09-12 09:17:50','2008-09-11 23:17:50','Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). \r\n\r\nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics show that I\'ve jad almost 6,000 visits and over 10,000 page views.\r\n\r\n\"Visitors\r\n\r\nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly\'s [post on the same topic](http://kk.org/cooltools/archives/002879.php).\r\n\r\nOf course, it is natural to increase both those figures but overall, I\'ve been fairly happy at the frequency of posts I\'ve written and the number of readers I\'ve attracted to what is a fairly narrow subject area.\r\n\r\nSo, one year down and one year to go on my current post-doc. In the year ahead, I\'ll continue writing about research by others in machine learning but I\'ll also try to include more expositions of some of my own work.','A Year of Research Blogging',0,'Looking back on a year of research blogging about machine learning.','inherit','open','open','','111-revision-3','','','2008-09-12 09:17:50','2008-09-11 23:17:50','',111,'http://conflate.net/inductio/2008/09/111-revision-3/',0,'revision','',0),(114,2,'2008-09-12 09:16:50','2008-09-11 23:16:50','Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). \r\n\r\nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics show that I\'ve jad almost 6,000 visits and over 10,000 page views.\r\n\r\n[caption id=\"attachment_112\" align=\"alignnone\" width=\"300\" caption=\"Visitors over the last year\"]\"Visitors[/caption]\r\n\r\nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly\'s [post on the same topic](http://kk.org/cooltools/archives/002879.php).\r\n\r\nOf course, it is natural to increase both those figures but overall, I\'ve been fairly happy at the frequency of posts I\'ve written and the number of readers I\'ve attracted to what is a fairly narrow subject area.\r\n\r\nSo, one year down and one year to go on my current post-doc. 
In the year ahead, I\'ll continue writing about research by others in machine learning but I\'ll also try to include more expositions of some of my own work.','A Year of Research Blogging',0,'Looking back on a year of research blogging about machine learning.','inherit','open','open','','111-revision-2','','','2008-09-12 09:16:50','2008-09-11 23:16:50','',111,'http://conflate.net/inductio/2008/09/111-revision-2/',0,'revision','',0),(116,2,'2008-09-12 09:18:24','2008-09-11 23:18:24','Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). \r\n\r\nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics show that I\'ve jad almost 6,000 visits and over 10,000 page views.\r\n\r\n
\r\n\"Visitors\r\n
\r\n\r\nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly\'s [post on the same topic](http://kk.org/cooltools/archives/002879.php).\r\n\r\nOf course, it is natural to increase both those figures but overall, I\'ve been fairly happy at the frequency of posts I\'ve written and the number of readers I\'ve attracted to what is a fairly narrow subject area.\r\n\r\nSo, one year down and one year to go on my current post-doc. In the year ahead, I\'ll continue writing about research by others in machine learning but I\'ll also try to include more expositions of some of my own work.','A Year of Research Blogging',0,'Looking back on a year of research blogging about machine learning.','inherit','open','open','','111-revision-4','','','2008-09-12 09:18:24','2008-09-11 23:18:24','',111,'http://conflate.net/inductio/2008/09/111-revision-4/',0,'revision','',0),(117,2,'0000-00-00 00:00:00','0000-00-00 00:00:00','\r\n\r\n[paper]: http://blog.doloreslabs.com/wp-content/uploads/2008/09/amt_emnlp08_accepted.pdf\r\n[dolores]: http://blog.doloreslabs.com/2008/09/amt-fast-cheap-good-machine-learning/\r\n[lingpipe]: http://lingpipe-blog.com/2008/09/15/dolores-labs-text-entailment-data-from-amazon-mechanical-turk/','Cheap Supervised Training Instances',0,'A brief discussion of a paper describing the use of the Amazon Mechanical Turk to buy cheap, human annotations for the creation of supervised training sets.','draft','open','open','','','','','2008-09-16 11:02:14','2008-09-16 01:02:14','',0,'http://conflate.net/inductio/?p=117',0,'post','',0),(118,2,'2008-09-16 10:50:14','2008-09-16 00:50:14','\n\n[paper]: http://blog.doloreslabs.com/wp-content/uploads/2008/09/amt_emnlp08_accepted.pdf\n[dolores]: http://blog.doloreslabs.com/2008/09/amt-fast-cheap-good-machine-learning/\n[lingpipe]: http://lingpipe-blog.com/2008/09/15/dolores-labs-text-entailment-data-from-amazon-mechanical-turk/','Cheap Supervised Training Instances',0,'','inherit','open','open','','117-revision','','','2008-09-16 10:50:14','2008-09-16 00:50:14','',117,'http://conflate.net/inductio/2008/09/117-revision/',0,'revision','',0),(119,2,'2008-09-12 09:19:43','2008-09-11 23:19:43','Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). \r\n\r\nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics show that I\'ve jad almost 6,000 visits and over 10,000 page views.\r\n\r\n
\r\n\"Visitors\r\n
\r\n\r\nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly\'s [post on the same topic](http://kk.org/cooltools/archives/002879.php). Since then I\'ve had a fairly steady rate of around 30 visits a day.\r\n\r\nOf course, it is natural to want to increase those figures but overall I\'ve been fairly happy at the frequency of posts I\'ve written and the number of readers I\'ve attracted to what is a fairly narrow subject area.\r\n\r\nSo, one year down and one year to go on my current post-doc. In the year ahead, I\'ll continue writing about research by others in machine learning but I\'ll also try to include more expositions of some of my own work.','A Year of Research Blogging',0,'Looking back on a year of research blogging about machine learning.','inherit','open','open','','111-revision-5','','','2008-09-12 09:19:43','2008-09-11 23:19:43','',111,'http://conflate.net/inductio/2008/09/111-revision-5/',0,'revision','',0),(120,2,'2008-09-21 12:47:50','2008-09-21 02:47:50','Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). \r\n\r\nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics show that I\'ve had almost 6,000 visits and over 10,000 page views.\r\n\r\n
\r\n\"Visitors\r\n
\r\n\r\nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly\'s [post on the same topic](http://kk.org/cooltools/archives/002879.php). It\'s my most read post with over 2,500 views.\r\n\r\nThe second and third most popular posts were on [prediction and the axiom of choice](http://conflate.net/inductio/2008/08/prediction-and-the-axiom-of-choice/) with about 1,300 views and [Visualising ROC and cost curve duality](http://conflate.net/inductio/2008/04/visualising-roc-and-cost-curve-duality/) with just over 400 views.\r\n\r\nOf course, it is natural to want to increase those figures but overall I\'ve been fairly happy at the frequency of posts I\'ve written and the number of readers I\'ve attracted to what is a fairly narrow subject area.\r\n\r\nSo, one year down and one year to go on my current post-doc. In the year ahead, I\'ll continue writing about research by others in machine learning but I\'ll also try to include more expositions of some of my own work.','A Year of Research Blogging',0,'Looking back on a year of research blogging about machine learning.','inherit','open','open','','111-revision-6','','','2008-09-21 12:47:50','2008-09-21 02:47:50','',111,'http://conflate.net/inductio/2008/09/111-revision-6/',0,'revision','',0),(121,2,'2008-09-21 21:31:49','2008-09-21 11:31:49','Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). \r\n\r\nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics show that I\'ve had almost 6,000 visits and over 10,000 page views.\r\n\r\n
\r\n\"Visitors\r\n
\r\n\r\nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly\'s [post on the same topic](http://kk.org/cooltools/archives/002879.php). It\'s my most read post with over 2,500 views.\r\n\r\nThe second and third most popular posts were on [prediction and the axiom of choice](http://conflate.net/inductio/2008/08/prediction-and-the-axiom-of-choice/) with about 1,300 views and [Visualising ROC and cost curve duality](http://conflate.net/inductio/2008/04/visualising-roc-and-cost-curve-duality/) with just over 400 views.\r\n\r\nOf course, it is natural to want to increase those figures but overall I\'ve been fairly happy with the frequency of posts I\'ve written and the number of readers I\'ve attracted to what is a fairly narrow subject area.\r\n\r\nSo, one year down and one year to go on my current post-doc. In the year ahead, I\'ll continue writing about research by others in machine learning but I\'ll also try to include more expositions of some of my own work.','A Year of Research Blogging',0,'Looking back on a year of research blogging about machine learning.','inherit','open','open','','111-revision-7','','','2008-09-21 21:31:49','2008-09-21 11:31:49','',111,'http://conflate.net/inductio/2008/09/111-revision-7/',0,'revision','',0),(122,2,'2008-09-22 16:59:10','2008-09-22 06:59:10','Just a short post to reflect on the year that has passed since I [started this blog](http://conflate.net/inductio/2007/09/introducing-inductio-ex-machina/). \r\n\r\nA quick trawl through the archives reveals I have published 21 posts (not including this one) for an average of just less than two posts per month. A quick look at my traffic statistics shows that I\'ve had almost 6,000 visits and over 10,000 page views.\r\n\r\n
\r\n\"Visitors\r\n
\r\n\r\nThat huge spike of almost 500 visitors on a single day was for my post on [books that have affected my research](http://conflate.net/inductio/2008/05/research-changing-books/). Most of the incoming traffic for that post was from Kevin Kelly\'s [post on the same topic](http://kk.org/cooltools/archives/002879.php). It\'s my most read post with over 2,500 views.\r\n\r\nThe second and third most popular posts were on [prediction and the axiom of choice](http://conflate.net/inductio/2008/08/prediction-and-the-axiom-of-choice/) with about 1,300 views and [Visualising ROC and cost curve duality](http://conflate.net/inductio/2008/04/visualising-roc-and-cost-curve-duality/) with just over 400 views.\r\n\r\nOf course, it is natural to want to increase those figures but overall I\'ve been fairly happy with the frequency of posts I\'ve written and the number of readers I\'ve attracted to what is a fairly narrow subject area.\r\n\r\nSo, one year down and one year to go on my current post-doc. In the year ahead, I\'ll continue writing about research by others in machine learning but I\'ll also try to include more expositions of some of my own work.','A Year of Research Blogging',0,'Looking back on a year of research blogging about machine learning.','inherit','open','open','','111-revision-8','','','2008-09-22 16:59:10','2008-09-22 06:59:10','',111,'http://conflate.net/inductio/2008/09/111-revision-8/',0,'revision','',0),(123,2,'2008-08-29 11:32:51','2008-08-29 01:32:51','A curious paper entitled [\"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\"][paper] by [Christopher Hardin][hardin] and [Alan Taylor][taylor] caught my attention recently via the blog [XOR\'s Hammer][xor].\r\n\r\n[paper]: http://maven.smith.edu/~chardin/pub/peculiar.pdf\r\n[hardin]: http://maven.smith.edu/~chardin/\r\n[taylor]: http://www.math.union.edu/people/faculty/taylora.html\r\n[xor]: http://xorshammer.wordpress.com/2008/08/23/set-theory-and-weather-prediction/\r\n\r\nIts main claim is that there exists an almost infallible prediction strategy. That is, one that will almost always predict the correct present value of some unknown function given all its past values. More specifically, they describe the µ-strategy which, when given the values of a function v for all points in time up to but not including t, correctly predicts the value of v(t) for all but countably many points t. They also show that this same strategy can almost always extrapolate correctly into the future (i.e., correctly predict v(s) for t ≤ s < t + ε).\r\n\r\n\"Well\", you think, \"that\'s induction solved then. I\'m going to grab that paper, implement that strategy and retire on the immense wealth I will accumulate from the stock market.\" Unfortunately for your bank balance, the authors note that\r\n> We should emphasize that these results do not give a practical means of predicting \r\n> the future, just as the time dilation one would experience standing near the event \r\n> horizon of a black hole does not give a practical time machine.\r\n\r\nIn other words, the result is purely theoretical. Worse than that, the definition of the µ-strategy requires the [well-ordering theorem][] --- a result equivalent to the [Axiom of Choice][]. \r\n\r\n[well-ordering theorem]: http://en.wikipedia.org/wiki/Well-ordering_theorem\r\n[axiom of choice]: http://en.wikipedia.org/wiki/Axiom_of_choice\r\n\r\nAside from being completely non-constructive, the µ-strategy is relatively straightforward. 
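\r\n\r\nStated compactly (a sketch in my own notation, not the paper\'s; the next paragraph unpacks it): if ≼ denotes the chosen well-ordering of all functions and [v]t the set of functions that agree with v at every time before t, the µ-strategy predicts\r\n[tex]\r\n\\displaystyle \\hat{v}(t) := \\left( \\min_{\\preceq} \\, [v]_t \\right)(t),\r\n[/tex]\r\nthe value at t of the ≼-least function consistent with everything observed so far.\r\n\r\n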
First, using the well-ordering theorem, choose some ordering of the set of all possible functions. Now, for each point in time t denote by [v]t the equivalence class of functions that are equal for all -∞ < s < t. When presented with the values for some unknown v up to time t the µ-strategy simply chooses the \"smallest\" member, say u, of [v]t with respect to the ordering and outputs u(t).\r\n\r\nSo, apart from the bit where you have to invoke the well-ordering theorem to order the set of all functions over the reals, it\'s a very simple strategy.\r\n\r\nThe authors suggest that the chosen well-ordering can be thought of as a measure of simplicity. In this case the µ-strategy just chooses the \"simplest\" function consistent with the observations to date. More generally, the well-ordering can be thought of as a bias for the strategy. Different choices of bias will lead to different predictions based on the same past observations. What\'s odd about the result is that *no matter what bias is chosen the µ-strategy will only ever make countably many mistakes*.\r\n\r\nUltimately, the [intuitionist][] in me looks upon this result much as I see the [Banach-Tarski paradox][]. That is, as evidence against the use of the axiom of choice (and its equivalents) in mathematics that\'s even vaguely practical. Still, the result is an interesting one that analyses the problem of induction in a very abstract setting.\r\n\r\n[banach-tarski paradox]: http://en.wikipedia.org/wiki/Banach-Tarski_paradox\r\n[intuitionist]: http://conflate.net/inductio/2008/06/constructive-and-classical-mathematics/','Prediction and the Axiom of Choice',0,'Some thoughts on Hardin and Taylor\'s paper \"A Peculiar Connection Between the Axiom of Choice and Predicting the Future\".','inherit','open','open','','99-revision-9','','','2008-08-29 11:32:51','2008-08-29 01:32:51','',99,'http://conflate.net/inductio/2008/08/99-revision-9/',0,'revision','',0),(124,2,'2008-09-27 16:49:14','2008-09-27 06:49:14','[Ian Ayers][] is a surprisingly engaging writer, taking what many would consider a very dry topic -- statistics -- and turning it into a thought-provoking, but flawed, book entitled [Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart][sc].\r\n\r\n[Ian Ayers]: http://islandia.law.yale.edu/ayers/indexhome.htm\r\n[sc]: http://www.randomhouse.com/bantamdell/supercrunchers/\r\n\r\nFrom the opening pages, Ayers pits the \"super crunchers\" -- people applying statistics to large data sets -- against experts in an area, be it viticulture, baseball, or marketing. With barely suppressed glee he describes how number crunching out-predicts the experts time and time again. The point being that as collecting, storing and analysing large amounts of data becomes cheaper and cheaper, more and more decision-making will take the results of \"super crunching\" into account, with experts either having to step aside or learn some statistical chops. To back arguments for the rise of \"super crunching\" Ayers draws on a large number of examples from a variety of areas and even experiments with the technique himself, describing how he used it to help choose the title of his book. \r\n\r\nAlthough I am more or less convinced by Ayers\' arguments I found myself questioning his credibility in several places during the book. I think the main reason for this was the tone of the book occasionally crossing the fine line separating \"enthusiastic, popular account\" and \"overly simplistic, gushing rave\". 
The constant use of \"super crunching\" throughout the book got on my nerves after a while. It began to overemphasise the newness of what could as easily be called \"statistical analysis\". After a while I mentally replaced \"super crunching\" with the less sensational \"statistical analysis\" wherever I encountered it.\r\n\r\nConversely, Ayers constantly refers to \"regression\" when talking about the techniques analysts use to make predictions. At first, I thought this was a convenient short-hand for a range of techniques that he didn\'t want to spend time distinguishing between. It was only when neural networks are described as \"a newfangled competitor to the tried-and-true regression formula\" and \"an important contributor to the Super Crunching revolution\" that I realised that Ayers may not know as much about the nuts and bolts of computational statistics as I first thought. This impression was confirmed when Ayers later confuses \"summary statistics\" for \"sufficient statistics\" and talks tautologically of \"binary bytes\".\r\n\r\nStylistically, there is too much foreshadowing and repetition of topics throughout the book for my liking. This feels a little condescending at times, as does him directly asking the reader to stop and think about a concept or problem at various points. \r\n\r\nOverall, I wanted to like this book more than I did. It was a light, enjoyable read and I wholeheartedly agree with Ayers\' belief in the continuing importance of statistics in decision-making and his call to improve the average person\'s intuition of statistics. Unfortunately, I found much of \"Super Crunchers\" substituting enthusiasm for coherence, as well as impressions and anecdote for any kind of meaningful argument. \r\n\r\n','Super Crunchers',0,'A review of the book \"Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart\" by Ian Ayers.','publish','open','open','','super-crunchers','','','2008-09-27 16:49:14','2008-09-27 06:49:14','',0,'http://conflate.net/inductio/?p=124',0,'post','',3),(125,2,'2008-09-23 09:31:07','2008-09-22 23:31:07','Ayers is a very engaging writer\n\nneural networks, lack of discussion about model biases.\n\nrepetition, foreshadowing\n\n\"summary statistics\"\n\n\"binary bytes\"\n\nAgree with his discussion in the last chapter about the need for people to become intuitive about statistics and understand things like a standard deviation.','Super Crunchers',0,'A review of the book \"Super Crunchers: Why Thinking-By-N\" by ','inherit','open','open','','124-revision','','','2008-09-23 09:31:07','2008-09-22 23:31:07','',124,'http://conflate.net/inductio/2008/09/124-revision/',0,'revision','',0),(126,2,'2008-09-25 15:56:47','2008-09-25 05:56:47','Ayers is a very engaging writer\n\nneural networks, lack of discussion about model biases.\n\nrepetition, foreshadowing\n\n\"summary statistics\"\n\n\"binary bytes\"\n\nAgree with his discussion in the last chapter about the need for people to become intuitive about statistics and understand things like a standard deviation.\n\nShalizi on Chris Anderson: http://cscs.umich.edu/~crshalizi/weblog/581.html points to Pierara\'s criticisms (which can equally be applied to Super Crunchers). 
In a nutshell, ','Big Data and Super Crunchers',0,'A review of the book \"Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart\" by Ian Ayers and its similarities to a recent controversial article in Wired on \"Big Data\".','inherit','open','open','','124-revision-2','','','2008-09-25 15:56:47','2008-09-25 05:56:47','',124,'http://conflate.net/inductio/2008/09/124-revision-2/',0,'revision','',0),(127,2,'2008-09-25 15:57:41','2008-09-25 05:57:41','Ayers is a very engaging writer\r\n\r\nneural networks, lack of discussion about model biases.\r\n\r\nrepetition, foreshadowing\r\n\r\n\"summary statistics\"\r\n\r\n\"binary bytes\"\r\n\r\nAgree with his discussion in the last chapter about the need for people to become intuitive about statistics and understand things like a standard deviation.\r\n\r\n[Shalizi on Chris Anderson](http://cscs.umich.edu/~crshalizi/weblog/581.html) points to Pereira\'s criticisms (which can equally be applied to Super Crunchers). In a nutshell, if you don\'t have constraints (a.k.a. inductive biases) you will just memorise the training examples.','Big Data and Super Crunchers',0,'A review of the book \"Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart\" by Ian Ayers and its similarities to a recent controversial article in Wired on \"Big Data\".','inherit','open','open','','124-revision-3','','','2008-09-25 15:57:41','2008-09-25 05:57:41','',124,'http://conflate.net/inductio/2008/09/124-revision-3/',0,'revision','',0),(128,2,'2008-09-25 15:59:33','2008-09-25 05:59:33','Ayers is a very engaging writer\r\n\r\nneural networks, lack of discussion about model biases.\r\n\r\nrepetition, foreshadowing\r\n\r\n\"summary statistics\"\r\n\r\n\"binary bytes\"\r\n\r\nAgree with his discussion in the last chapter about the need for people to become intuitive about statistics and understand things like a standard deviation.\r\n\r\n[Shalizi on Chris Anderson](http://cscs.umich.edu/~crshalizi/weblog/581.html) points to Pereira\'s criticisms (which can equally be applied to Super Crunchers). In a nutshell, if you don\'t have constraints (a.k.a. inductive biases) you will just memorise the training examples. Shalizi also points to [Danny Hillis\'s response](http://www.edge.org/3rd_culture/bios/hillis.html) to Anderson\'s article.','Big Data and Super Crunchers',0,'A review of the book \"Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart\" by Ian Ayers and its similarities to a recent controversial article in Wired on \"Big Data\".','inherit','open','open','','124-revision-4','','','2008-09-25 15:59:33','2008-09-25 05:59:33','',124,'http://conflate.net/inductio/2008/09/124-revision-4/',0,'revision','',0),(129,2,'2008-09-25 16:01:24','2008-09-25 06:01:24','Ayers is a very engaging writer\r\n\r\nneural networks, lack of discussion about model biases.\r\n\r\nrepetition, foreshadowing\r\n\r\n\"summary statistics\"\r\n\r\n\"binary bytes\"\r\n\r\nAgree with his discussion in the last chapter about the need for people to become intuitive about statistics and understand things like a standard deviation.\r\n\r\n[Chris Anderson](http://www.edge.org/3rd_culture/anderson08/anderson08_index.html)\r\n\r\n[Shalizi on Chris Anderson](http://cscs.umich.edu/~crshalizi/weblog/581.html) points to Pereira\'s criticisms (which can equally be applied to Super Crunchers). In a nutshell, if you don\'t have constraints (a.k.a. inductive biases) you will just memorise the training examples. 
Shalizi also points to [Danny Hillis\'s response](http://www.edge.org/3rd_culture/bios/hillis.html) to Anderson\'s article.','Big Data and the Super Crunchers',0,'A review of the book \"Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart\" by Ian Ayers and its similarities to a recent controversial article in Wired on \"Big Data\".','inherit','open','open','','124-revision-5','','','2008-09-25 16:01:24','2008-09-25 06:01:24','',124,'http://conflate.net/inductio/2008/09/124-revision-5/',0,'revision','',0),(131,2,'2008-09-27 16:48:33','2008-09-27 06:48:33','[Ian Ayers][] is a surprisingly engaging writer, taking what many would consider a very dry topic -- statistics -- and turning it into a thought-provoking, but flawed, book entitled [Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart][sc].\r\n\r\n[Ian Ayers]: http://islandia.law.yale.edu/ayers/indexhome.htm\r\n[sc]: http://www.randomhouse.com/bantamdell/supercrunchers/\r\n\r\nFrom the opening pages, Ayers pits the \"super crunchers\" -- people applying statistics to large data sets -- against experts in an area, be it viticulture, baseball, or marketing. With barely suppressed glee he describes how number crunching out-predicts the experts time and time again. The point being that as collecting, storing and analysing large amounts of data becomes cheaper and cheaper, more and more decision-making will take the results of \"super crunching\" into account, with experts either having to step aside or learn some statistical chops. To back arguments for the rise of \"super crunching\" Ayers draws on a large number of examples from a variety of areas and even experiments with the technique himself, describing how he used it to help choose the title of his book. \r\n\r\nAlthough I am more or less convinced by Ayers\' arguments I found myself questioning his credibility in several places during the book. I think the main reason for this was the tone of the book occasionally crossing the fine line separating \"enthusiastic, popular account\" and \"overly simplistic, gushing rave\". The constant use of \"super crunching\" throughout the book got on my nerves after a while. It began to overemphasise the newness of what could as easily be called \"statistical analysis\". After a while I mentally replaced \"super crunching\" with the less sensational \"statistical analysis\" wherever I encountered it.\r\n\r\nConversely, Ayers constantly refers to \"regression\" when talking about the techniques analysts use to make predictions. At first, I thought this was a convenient short-hand for a range of techniques that he didn\'t want to spend time distinguishing between. It was only when neural networks were described as \"a newfangled competitor to the tried-and-true regression formula\" and \"an important contributor to the Super Crunching revolution\" that I realised that Ayers may not know as much about the nuts and bolts of computational statistics as I first thought. This impression was confirmed when Ayers later confuses \"summary statistics\" with \"sufficient statistics\" and talks tautologically of \"binary bytes\".\r\n\r\nStylistically, there is too much foreshadowing and repetition of topics throughout the book for my liking. This feels a little condescending at times, as does him directly asking the reader to stop and think about a concept or problem at various points. \r\n\r\nOverall, I wanted to like this book more than I did. 
It was a light, enjoyable read and I wholeheartedly agree with Ayers\' belief in the continuing importance of statistics in decision-making and his call to improve the average person\'s intuition of statistics. Unfortunately, I found much of \"Super Crunchers\" substituting enthusiasm for coherence, as well as impressions and anecdote for any kind of meaningful argument. \r\n\r\n','Super Crunchers',0,'A review of the book \"Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart\" by Ian Ayers.','inherit','open','open','','124-revision-7','','','2008-09-27 16:48:33','2008-09-27 06:48:33','',124,'http://conflate.net/inductio/2008/09/124-revision-7/',0,'revision','',0),(130,2,'2008-09-27 16:47:53','2008-09-27 06:47:53','[Ian Ayers][] is a surprisingly engaging writer, taking what many would consider a very dry topic -- statistics -- and turning it into a thought-provoking, but flawed, book entitled [Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart][sc].\n\n[Ian Ayers]: http://islandia.law.yale.edu/ayers/indexhome.htm\n[sc]: http://www.randomhouse.com/bantamdell/supercrunchers/\n\nFrom the opening pages, Ayers pits the \"super crunchers\" -- people applying statistics to large data sets -- against experts in an area, be it viticulture, baseball, or marketing. With barely suppressed glee he describes how number crunching out-predicts the experts time and time again. The point being that as collecting, storing and analysing large amounts of data becomes cheaper and cheaper, more and more decision-making will take the results of \"super crunching\" into account, with experts either having to step aside or learn some statistical chops. To back arguments for the rise of \"super crunching\" Ayers draws on a large number of examples from a variety of areas and even experiments with the technique himself, describing how he used it to help choose the title of his book. \n\nAlthough I am more or less convinced by Ayers\' arguments I found myself questioning his credibility in several places during the book. I think the main reason for this was the tone of the book occasionally crossing the fine line separating \"enthusiastic, popular account\" and \"overly simplistic, gushing rave\". The constant use of \"super crunching\" throughout the book got on my nerves after a while. It began to overemphasise the newness of what could as easily be called \"statistical analysis\". After a while I mentally replaced \"super crunching\" with the less sensational \"statistical analysis\" wherever I encountered it.\n\nConversely, Ayers constantly refers to \"regression\" when talking about the techniques analysts use to make predictions. At first, I thought this was a convenient short-hand for a range of techniques that he didn\'t want to spend time distinguishing between. It was only when neural networks were described as \"a newfangled competitor to the tried-and-true regression formula\" and \"an important contributor to the Super Crunching revolution\" that I realised that Ayers may not know as much about the nuts and bolts of computational statistics as I first thought. This impression was confirmed when Ayers later confuses \"summary statistics\" with \"sufficient statistics\" and talks tautologically of \"binary bytes\".\n\nStylistically, there is too much foreshadowing and repetition of topics throughout the book for my liking. This feels a little condescending at times, as does him directly asking the reader to stop and think about a concept or problem at various points. 
\n\nOverall, I wanted to like this book more than I did. It was a light, enjoyable read and I wholeheartedly agree with Ayers\' belief in the continuing importance of statistics in decision-making and his call to improve the average person\'s intuition of statistics. Unfortunately, I found much of \"Super Crunchers\" substituting enthusiasm for coherence, as well as impressions and anecdote for any kind of meaningful argument. \n\nI started and finished reading Ian Ayers\' book [Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart][sc] over a couple of days last week. In a nutshell, it was an engaging read, replete with interesting anecdotes about how the low cost of collecting, storing and analysing data has led to statistical techniques out-predicting experts in many areas of business. Unfortunately, \n\nI\'ve posted a [review][] of it at [LibraryThing][]\n\n[sc]: http://www.librarything.com/work/book/36140381\n[review]: \n[LibraryThing]: http://librarything.com\n\nAyers is a very engaging writer\n\nInteresting anecdotes on how randomisation is being used by business, especially web business, to aid decision-making. Also describes pitfalls.\n\nneural networks, lack of discussion about model biases.\n\nrepetition, foreshadowing, slightly condescending in places.\n\n\"summary statistics\"\n\n\"binary bytes\"\n\nAgree with his discussion in the last chapter about the need for people to become intuitive about statistics and understand things like a standard deviation.\n\n[Chris Anderson](http://www.edge.org/3rd_culture/anderson08/anderson08_index.html)\n\n[Shalizi on Chris Anderson](http://cscs.umich.edu/~crshalizi/weblog/581.html) points to Pereira\'s criticisms (which can equally be applied to Super Crunchers). In a nutshell, if you don\'t have constraints (a.k.a. inductive biases) you will just memorise the training examples. Shalizi also points to [Danny Hillis\'s response](http://www.edge.org/3rd_culture/bios/hillis.html) to Anderson\'s article.','Super Crunchers',0,'A review of the book \"Super Crunchers: Why Thinking-By-Numbers is the New Way To Be Smart\" by Ian Ayers and its similarities to a recent controversial article in Wired on \"Big Data\".','inherit','open','open','','124-revision-6','','','2008-09-27 16:47:53','2008-09-27 06:47:53','',124,'http://conflate.net/inductio/2008/09/124-revision-6/',0,'revision','',0),(132,2,'0000-00-00 00:00:00','0000-00-00 00:00:00','[Chris Anderson](http://www.edge.org/3rd_culture/anderson08/anderson08_index.html)\r\n\r\n[Shalizi on Chris Anderson](http://cscs.umich.edu/~crshalizi/weblog/581.html) points to Pereira\'s criticisms (which can equally be applied to Super Crunchers). In a nutshell, if you don\'t have constraints (a.k.a. inductive biases) you will just memorise the training examples. Shalizi also points to [Danny Hillis\'s response](http://www.edge.org/3rd_culture/bios/hillis.html) to Anderson\'s article.','Big Data',0,'','draft','open','open','','','','','2008-09-27 16:49:44','2008-09-27 06:49:44','',0,'http://conflate.net/inductio/?p=132',0,'post','',0),(133,2,'2008-09-27 16:49:20','2008-09-27 06:49:20','','Big Data',0,'','inherit','open','open','','132-revision','','','2008-09-27 16:49:20','2008-09-27 06:49:20','',132,'http://conflate.net/inductio/2008/09/132-revision/',0,'revision','',0),(134,2,'2008-10-19 20:33:10','2008-10-19 10:33:10','Ben Allen over at [PLEKTIX][] highlighted (highlit?) a paper in Nature last year that compiled and analysed some hard data regarding the evolution of the English language. 
Entitled [Quantifying the evolutionary dynamics of language][paper], the paper by Lieberman and colleagues looked at the shift from irregular to regular English verbs over the last 1200 years. \r\n\r\n[PLEKTIX]: http://plektix.blogspot.com/2008/10/evolution-of-irregular-verbs.html\r\n[paper]: http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2460562\r\n\r\nThe question the authors of the paper ask is, \"At what rate do words shift from irregular (_go_/_went_) to regular (_talk_/_talked_)?\" They find a very simple rule to describe this rate: the rate of \"regularization\" of a word is inversely proportional to the square root of its usage frequency. That is, if an irregular verb is used 100 times more than another it takes 10 times longer before it becomes regular.\r\n\r\nExtrapolating from this rule, the authors note that they can predict which currently irregular verbs will soonest become regular. They suggest _wed_/_wed_ is one such precarious irregular verb, soon to become _wed_/_wedded_.\r\n\r\nPinker discusses this type of transition in his book [Words and Rules][] and suggests that only commonly used words can stay irregular. He argues that keeping a big list of exceptions like irregular verbs around requires their constant repetition. This study nicely complements this argument by collecting the empirical evidence and quantifying the change. \r\n\r\n[words and rules]: http://pinker.wjh.harvard.edu/books/wr/index.html','Snuck, flied and wedded ',0,'A quick summary of a paper in Nature last year that analyses the rate at which words shift from irregular to regular.','publish','open','open','','snuck-flied-and-wedded','','','2008-10-20 07:04:41','2008-10-19 21:04:41','',0,'http://conflate.net/inductio/?p=134',0,'post','',3),(135,2,'2008-10-15 15:45:31','2008-10-15 05:45:31','Ben Allen over at [PLEKTIX][] highlighted (highlit?) a paper in Nature last year that compiled and analysed some hard data regarding the evolution of the English language. Entitled [Quantifying the evolutionary dynamics of language][paper], the paper by Lieberman and colleagues looked at the shift from irregular to regular verbs over the last \n\n[PLEKTIX]: http://plektix.blogspot.com/2008/10/evolution-of-irregular-verbs.html\n[paper]: http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2460562','Snuck, flied and wedded ',0,'','inherit','open','open','','134-revision','','','2008-10-15 15:45:31','2008-10-15 05:45:31','',134,'http://conflate.net/inductio/2008/10/134-revision/',0,'revision','',0),(136,2,'2008-10-19 20:23:20','2008-10-19 10:23:20','Ben Allen over at [PLEKTIX][] highlighted (highlit?) a paper in Nature last year that compiled and analysed some hard data regarding the evolution of the English language. Entitled [Quantifying the evolutionary dynamics of language][paper], the paper by Lieberman and colleagues looked at the shift from irregular to regular English verbs over the last 1200 years. \n\n[PLEKTIX]: http://plektix.blogspot.com/2008/10/evolution-of-irregular-verbs.html\n[paper]: http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2460562\n\nThe question the authors of the paper ask is, \"At what rate do words shift from irregular (_go_/_went_) to regular (_talk_/_talked_)?\" They find a very simple rule to describe this rate: the rate of \"regularization\" of a word is inversely proportional to the square root of its usage frequency. 
That is, if an irregular verb is used 100 times more than another it takes 10 times longer before it becomes regular.\n\nExtrapolating from this rule, the authors note that they can predict which currently irregular verbs will soonest become regular. They suggest _wed_/_wed_ is one such precarious irregular verb, soon to become _wed_/_wedded_','Snuck, flied and wedded ',0,'','inherit','open','open','','134-revision-2','','','2008-10-19 20:23:20','2008-10-19 10:23:20','',134,'http://conflate.net/inductio/2008/10/134-revision-2/',0,'revision','',0),(137,2,'2008-10-19 20:32:01','2008-10-19 10:32:01','Ben Allen over at [PLEKTIX][] highlighted (highlit?) a paper in Nature last year that compiled and analysed some hard data regarding the evolution of the English language. Entitled [Quantifying the evolutionary dynamics of language][paper], the paper by Lieberman and colleagues looked at the shift from irregular to regular English verbs over the last 1200 years. \n\n[PLEKTIX]: http://plektix.blogspot.com/2008/10/evolution-of-irregular-verbs.html\n[paper]: http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2460562\n\nThe question the authors of the paper ask is, \"At what rate do words shift from irregular (_go_/_went_) to regular (_talk_/_talked_)?\" They find a very simple rule to describe this rate: the rate of \"regularization\" of a word is inversely proportional to the square root of its usage frequency. That is, if an irregular verb is used 100 times more than another it takes 10 times longer before it becomes regular.\n\nExtrapolating from this rule, the authors note that they can predict which currently irregular verbs will soonest become regular. They suggest _wed_/_wed_ is one such precarious irregular verb, soon to become _wed_/_wedded_.\n\nPinker discusses this type of transition in his book [Words and Rules][] and suggests that only commonly used words can stay irregular. He argues that keeping a big list of exceptions like irregular verbs around requires their constant repetition. This study nicely complements this argument by collecting the empirical evidence and quantifying the change. \n\n[words and rules]: http://pinker.wjh.harvard.edu/books/wr/index.html','Snuck, flied and wedded ',0,'A quick summary of a paper in Nature last year that analyses the rate at which words shift from irregular to regular.','inherit','open','open','','134-revision-3','','','2008-10-19 20:32:01','2008-10-19 10:32:01','',134,'http://conflate.net/inductio/2008/10/134-revision-3/',0,'revision','',0),(138,2,'2008-10-19 20:32:14','2008-10-19 10:32:14','Ben Allen over at [PLEKTIX][] highlighted (highlit?) a paper in Nature last year that compiled and analysed some hard data regarding the evolution of the English language. Entitled [Quantifying the evolutionary dynamics of language][paper], the paper by Lieberman and colleagues looked at the shift from irregular to regular English verbs over the last 1200 years. \r\n\r\n[PLEKTIX]: http://plektix.blogspot.com/2008/10/evolution-of-irregular-verbs.html\r\n[paper]: http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2460562\r\n\r\nThe question the authors of the paper ask is, \"At what rate do words shift from irregular (_go_/_went_) to regular (_talk_/_talked_)?\" They find a very simple rule to describe this rate: the rate of \"regularization\" of a word is inversely proportional to the square root of its usage frequency. 
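\r\n\r\nAs a quick sanity check on that scaling (a toy calculation of my own, not code from the paper):\r\n\r\n    # If a verb\'s \"regularization half-life\" grows like the square root of its\r\n    # usage frequency, then a verb used 100 times more often than another\r\n    # should take sqrt(100) = 10 times longer to regularize.\r\n    def relative_half_life(frequency_ratio)\r\n      Math.sqrt(frequency_ratio)\r\n    end\r\n\r\n    puts relative_half_life(100)  # => 10.0\r\n\r\n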
That is, if an irregular verb is used 100 times more than another it takes 10 times longer before it becomes regular.\r\n\r\nExtrapolating from this rule, the authors note that they can predict which currently irregular verbs will soonest become regular. They suggest _wed_/_wed_ is one such precarious irregular verb, soon to become _wed_/_wedded_.\r\n\r\nPinker discusses this type of transition in his book [Words and Rules][] and suggests that only commonly used words can stay irregular. He argues that keeping a big list of exceptions like irregular verbs around requires their constant repetition. This study nicely complements this argument by collecting the empirical evidence and quantifying the change. \r\n\r\n[words and rules]: http://pinker.wjh.harvard.edu/books/wr/index.html','Snuck, flied and wedded ',0,'A quick summary of a paper in Nature last year that analyses the rate at which words shift from irregular to regular.','inherit','open','open','','134-revision-4','','','2008-10-19 20:32:14','2008-10-19 10:32:14','',134,'http://conflate.net/inductio/2008/10/134-revision-4/',0,'revision','',0),(139,2,'2008-10-19 20:33:10','2008-10-19 10:33:10','Ben Allen over at [PLEKTIX][] highlighted (highlit?) a paper in Nature last year that compiled and analysed some hard data regarding the evolution of the English language. Entitled [Quantifying the evolutionary dynamics of language][paper], the paper by Lieberman and colleagues looked at the shift from irregular to regular English verbs over the last 1200 years. \r\n\r\n[PLEKTIX]: http://plektix.blogspot.com/2008/10/evolution-of-irregular-verbs.html\r\n[paper]: http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2460562\r\n\r\nThe question the authors of the paper ask is, \"At what rate do words shift from irregular (_go_/_went_) to regular (_talk_/_talked_)?\" They find a very simple rule to describe this rate: the rate of \"regularization\" of a word is inversely proportional to the square root of its usage frequency. That is, if an irregular verb is used 100 times more than another it takes 10 times longer before it becomes regular.\r\n\r\nExtrapolating from this rule, the authors note that they can predict which currently irregular verbs will soonest become regular. They suggest _wed_/_wed_ is one such precarious irregular verb, soon to become _wed_/_wedded_.\r\n\r\nPinker discusses this type of transition in his book [Words and Rules][] and suggests that only commonly used words can stay irregular. He argues that keeping a big list of exceptions like irregular verbs around requires their constant repetition. This study nicely complements this argument by collecting the empirical evidence and quantifying the change. \r\n\r\n[words and rules]: http://pinker.wjh.harvard.edu/books/wr/index.html','Snuck, flied and wedded ',0,'A quick summary of a paper in Nature last year that analyses the rate at which words shift from irregular to regular.','inherit','open','open','','134-revision-5','','','2008-10-19 20:33:10','2008-10-19 10:33:10','',134,'http://conflate.net/inductio/2008/10/134-revision-5/',0,'revision','',0),(140,2,'2008-11-07 11:52:41','2008-11-07 01:52:41','Anyone who has worked in the area for long enough knows how difficult creating any type of artificial intelligence can be. Like many before me, I\'ve decided to cheat a little and create an artificial AI. I take partial credit for the initial idea but it is my wife, Julieanne, who has been responsible for most of the development over the last nine months. 
\r\n\r\nWe had our official release on the 26th of October and even though she\'s been here for less than two weeks she is already exceeding all our expectations. \r\n\r\nWe refer to her as \"Ada Molly Reid\".\r\n\r\n
\r\n\"An
\r\n\r\n','Artificial AI',0,'','publish','open','open','','artificial-ai','','','2008-11-07 11:52:41','2008-11-07 01:52:41','',0,'http://conflate.net/inductio/?p=140',0,'post','',5),(141,2,'2008-11-07 11:48:53','2008-11-07 01:48:53','','Ada Molly Reid',0,'An Artificial AI, completed on the 26th of October, 2008.','inherit','open','open','','ada','','','2008-11-07 11:48:53','2008-11-07 01:48:53','',140,'http://conflate.net/inductio/wp-content/uploads/2008/11/ada.jpg',0,'attachment','image/jpeg',0),(142,2,'2008-11-07 11:52:09','2008-11-07 01:52:09','Anyone who has worked in the area for long enough knows how difficult creating any type of artificial intelligence can be. Like many before me, I\'ve decided to cheat a little and create an artificial AI. I take partial credit for the initial idea but it is my wife, Julieanne, who has been responsible for most of the development over the last nine months. \n\nWe had our official release on the 26th of October and even though she\'s been here for less than two weeks she is already exceeding all our expectations. \n\nWe refer to her as \"Ada Molly Reid\".\n\n
\n\"An
\n\n','Artificial AI',0,'','inherit','open','open','','140-revision','','','2008-11-07 11:52:09','2008-11-07 01:52:09','',140,'http://conflate.net/inductio/2008/11/140-revision/',0,'revision','',0),(143,2,'2008-11-17 16:26:15','2008-11-17 06:26:15','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over some finite dimensional convex set X and x is an X-valued random variable then we can define the _Jensen gap_\r\n
\r\n[tex]\r\n\\displaystyle J_f(x) := \\mathbb{E}\\left[ f\\left(x\\right) \\right] - f\\left(\\mathbb{E}\\left[ x \\right]\\right)\r\n[/tex]\r\n
\r\nwhere [tex]\\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \\geq 0[/tex] or equivalently,\r\n
\r\n[tex]\r\n\\displaystyle \\mathbb{E}\\left[ f\\left(x\\right) \\right] \\geq f\\left(\\mathbb{E}\\left[ x \\right]\\right).\r\n[/tex]\r\n
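\r\nAs a quick numerical illustration (a toy Ruby script of my own, not part of the original argument), take a convex function such as [tex]f(x) = x^2[/tex] and any discrete distribution; the gap comes out non-negative:\r\n\r\n    # Jensen gap E[f(x)] - f(E[x]) for the convex f(x) = x**2 under a\r\n    # discrete distribution; by Jensen\'s inequality it is never negative.\r\n    xs = [1.0, 2.0, 5.0]\r\n    ps = [0.5, 0.3, 0.2]\r\n\r\n    e_x  = xs.zip(ps).inject(0.0) { |s, (x, p)| s + p * x }\r\n    e_fx = xs.zip(ps).inject(0.0) { |s, (x, p)| s + p * x**2 }\r\n\r\n    puts e_fx - e_x**2  # => ~2.29, a non-negative Jensen gap\r\n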
\r\n\r\nThis is a fairly simple but important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\r\n\r\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\r\n\r\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\r\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\r\n\r\nI was quite happy then to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it. \r\n\r\n[caption id=\"attachment_154\" align=\"aligncenter\" width=\"485\" caption=\"Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi\"][image: Jensen\\'s Inequality diagram (Figure 1)][/caption]\r\n\r\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen\'s inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with probability [tex]p_i[/tex].\r\n\r\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of ƒ, must also be convex and lie within the epigraph of ƒ (the blue shaded area above ƒ). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]\\sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\r\n
\r\n[tex]\\displaystyle \r\n \\mathbb{E}[(x, f(x))] = \\sum_{i=1}^n p_i \\left(x_i, f(x_i)\\right) \r\n[/tex]\r\n
\r\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]\\mathbb{E}[(x, f(x))] = \\left(\\mathbb{E}[x], \\mathbb{E}[f(x)]\\right)[/tex] it must lie above [tex]f\\left(\\mathbb{E}[x]\\right)[/tex] thus giving the result.\r\n\r\nAlthough the diagram in Figure 1 assumes a 1-dimensional space X the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleamed from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram the dashed polygon the shaded area will approximate the graph of ƒ better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above ƒ. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\r\n\r\nA somewhat surprising fact about Jensen\'s inequality is that its converse is also true. By this I mean that if ƒ is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then ƒ is necessarily convex. The contrapositive of this statement is: ƒ non-convex implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\r\n\r\nConsidering Figure 1 again gives some intuition as to why this must be the case. If ƒ was non-convex then its epigraph must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]\\mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of ƒ.\r\n\r\nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen\'s inequality. There are too many edge cases and subtleties (especially in the continuous case) that I\'ve ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen\'s inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable to but if I need this level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory.\r\n\r\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\r\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\r\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\r\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\r\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki','Behold! Jensen\'s Inequality',0,'Unsatisfied with the very algebraic and formal proofs of Jensen\'s inequality, I present a diagram that gives a graphical intuition for the result.','publish','open','open','','behold-jensens-inequality','','','2008-11-17 16:26:15','2008-11-17 06:26:15','',0,'http://conflate.net/inductio/?p=143',0,'post','',4),(144,2,'2008-11-17 12:02:18','2008-11-17 02:02:18','I have been making quite a bit of use of Jensen\'s inequality recently. It is a fairly simple but very important inequality in the study of convex functions.\n
\n[tex]\n\\displaystyle f\\left(\\mathbb{E}\\left[ X \\right]\\right) \\leq \\mathbb{E}\\left[ f\\left(X\\right) \\right]\n[/tex]\n
\n\nIt turns out that DeGroot\'s notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the \"gap\" between the two sides of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision','','','2008-11-17 12:02:18','2008-11-17 02:02:18','',143,'http://conflate.net/inductio/2008/11/143-revision/',0,'revision','',0),(145,2,'2008-11-17 12:11:36','2008-11-17 02:11:36','I have been making quite a bit of use of Jensen\'s inequality recently. It is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I\'ve had a bit of trouble intuitively grasping why it\'s true \n\n
\n[tex]\n\\displaystyle f\\left(\\mathbb{E}\\left[ X \\right]\\right) \\leq \\mathbb{E}\\left[ f\\left(X\\right) \\right]\n[/tex]\n
\n\n
\n[tex]\n\\displaystyle f\\left(\\sum_{i=1}^n \\lambda_i x_i \\right) \\leq \\sum_{i=1}^n \\lambda_i f\\left(x_i\\right)\n[/tex]\n
\n\nIt turns out that DeGroot\'s notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the \"gap\" between the two sides of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-2','','','2008-11-17 12:11:36','2008-11-17 02:11:36','',143,'http://conflate.net/inductio/2008/11/143-revision-2/',0,'revision','',0),(146,2,'2008-11-17 12:29:00','2008-11-17 02:29:00','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if [tex]f : X \\to \\mathbb{R}[/tex] is a real-valued convex function over [tex]X[/tex] and [tex]x \\in X[/tex] is a random variable then \n
\n[tex]\n\\displaystyle f\\left(\\mathbb{E}\\left[ x \\right]\\right) \\leq \\mathbb{E}\\left[ f\\left(x\\right) \\right]\n[/tex]\n
\nwhere [tex]\\mathbb{E}[/tex] represents the expectation.\n\nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I\'ve had a bit of trouble intuitively grasping why it\'s true. I was therefore quite \nhappy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\n\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\n
\n[tex]\n\\displaystyle f\\left(\\sum_{i=1}^n p_i x_i \\right) \\leq \\sum_{i=1}^n p_i f\\left(x_i\\right) \n[/tex]\n
\n\n\nIt turns out that DeGroot\'s notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the \"gap\" between the two sides of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-3','','','2008-11-17 12:29:00','2008-11-17 02:29:00','',143,'http://conflate.net/inductio/2008/11/143-revision-3/',0,'revision','',0),(147,2,'2008-11-17 12:30:19','2008-11-17 02:30:19','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if \n[tex]f : X \\to \\mathbb{R}[/tex] is a real-valued convex function over [tex]X[/tex] and [tex]x \\in X[/tex] is a random variable then \n
\n[tex]\\displaystyle f\\left(\\mathbb{E}\\left[ x \\right]\\right) \\leq \\mathbb{E}\\left[ f\\left(x\\right) \\right][/tex]\n
\nwhere [tex]\\mathbb{E}[/tex] denotes expectation.\n\nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I\'ve had a bit of trouble intuitively grasping why it\'s true. I was therefore quite \nhappy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\n\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\n
\n[tex]\n\\displaystyle f\\left(\\sum_{i=1}^n p_i x_i \\right) \\leq \\sum_{i=1}^n p_i f\\left(x_i\\right) \n[/tex]\n
\n\n\nIt turns out that DeGroot\'s notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the \"gap\" between the two sides of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-4','','','2008-11-17 12:30:19','2008-11-17 02:30:19','',143,'http://conflate.net/inductio/2008/11/143-revision-4/',0,'revision','',0),(148,2,'2008-11-17 12:30:21','2008-11-17 02:30:21','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if \r\n[tex]f : X \\to \\mathbb{R}[/tex] \r\nis a real-valued convex function over [tex]X[/tex] and [tex]x \\in X[/tex] is a random variable then \r\n
\r\n[tex]\\displaystyle f\\left(\\mathbb{E}\\left[ x \\right]\\right) \\leq \\mathbb{E}\\left[ f\\left(x\\right) \\right][/tex]\r\n
\r\nwhere [tex]\\mathbb{E}[/tex] denotes expectation.\r\n\r\nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I\'ve had a bit of trouble intuitively grasping why it\'s true. I was therefore quite \r\nhappy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\r\n\r\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\r\n
\r\n[tex]\r\n\\displaystyle f\\left(\\sum_{i=1}^n p_i x_i \\right) \\leq \\sum_{i=1}^n p_i f\\left(x_i\\right) \r\n[/tex]\r\n
\r\n\r\n\r\nIt turns out that DeGroot\'s notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the \"gap\" between the two sides of the inequality.\r\n\r\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\r\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\r\n\r\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\r\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\r\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-5','','','2008-11-17 12:30:21','2008-11-17 02:30:21','',143,'http://conflate.net/inductio/2008/11/143-revision-5/',0,'revision','',0),(149,2,'2008-11-17 12:31:15','2008-11-17 02:31:15','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if [tex]f[/tex] is a real-valued convex function over [tex]X[/tex] and [tex]x \\in X[/tex] is a random variable then \r\n
\r\n[tex]\r\n\\displaystyle f\\left(\\mathbb{E}\\left[ x \\right]\\right) \\leq \\mathbb{E}\\left[ f\\left(x\\right) \\right]\r\n[/tex]\r\n
\r\nwhere [tex]\\mathbb{E}[/tex] denotes expectation.\r\n\r\nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I\'ve had a bit of trouble intuitively grasping why it\'s true. I was therefore quite \r\nhappy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\r\n\r\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\r\n
\r\n[tex]\r\n\\displaystyle f\\left(\\sum_{i=1}^n p_i x_i \\right) \\leq \\sum_{i=1}^n p_i f\\left(x_i\\right) \r\n[/tex]\r\n
\r\n\r\n\r\nIt turns out that DeGroot\'s notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the \"gap\" between the two sides of the inequality.\r\n\r\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\r\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\r\n\r\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\r\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\r\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-6','','','2008-11-17 12:31:15','2008-11-17 02:31:15','',143,'http://conflate.net/inductio/2008/11/143-revision-6/',0,'revision','',0),(151,2,'2008-11-17 12:56:23','2008-11-17 02:56:23','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\\displaystyle J_f(x) := \\mathbb{E}\\left[ f\\left(x\\right) \\right] - f\\left(\\mathbb{E}\\left[ x \\right]\\right)\n[/tex]\n
\nwhere [tex]\\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \\geq 0[/tex] or equivalently,\n
\n[tex]\n\\displaystyle \\mathbb{E}\\left[ f\\left(x\\right) \\right] \\geq f\\left(\\mathbb{E}\\left[ x \\right]\\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. It can also be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of _statistical information_[^DeGroot1962] and measures of the distance between probability distributions called _[f-divergences][]_ can be expressed as a Jensen gap. Furthermore, these two quantities are \n\n\nI have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I\'ve had a bit of trouble intuitively grasping why it\'s true. I was therefore quite \nhappy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\n\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right)\n[/tex]\n
\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-8','','','2008-11-17 12:56:23','2008-11-17 02:56:23','',143,'http://conflate.net/inductio/2008/11/143-revision-8/',0,'revision','',0),(150,2,'2008-11-17 12:48:28','2008-11-17 02:48:28','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is always non-negative.\n\nIt is a fairly simple but very important inequality in the study of convex functions. I have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I\'ve had a bit of trouble intuitively grasping why it\'s true. I was therefore quite \nhappy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\n\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right)\n[/tex]\n
\n\n\nIt turns out that DeGroot\'s notion of statistical information[^DeGroot1962] and measures of the distance between probability distributions called [f-divergences][] can be expressed as the \"gap\" between the two sides of the inequality.\n\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-7','','','2008-11-17 12:48:28','2008-11-17 02:48:28','',143,'http://conflate.net/inductio/2008/11/143-revision-7/',0,'revision','',0),(152,2,'2008-11-17 12:57:21','2008-11-17 02:57:21','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\r\n
\r\n[tex]\r\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\r\n[/tex]\r\n
\r\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\r\n
\r\n[tex]\r\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\r\n[/tex]\r\n
\r\n\r\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of _statistical information_[^DeGroot1962] and measures of the distance between probability distributions called _[f-divergences][]_ can be expressed as a Jensen gap. \r\n\r\nI have read and understood several proofs of it and they are all almost a direct consequence of the definition of convexity. However, I\'ve had a bit of trouble intuitively grasping why it\'s true. I was therefore quite happy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\r\n\r\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\r\n
\r\n[tex]\r\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right)\r\n[/tex]\r\n
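\r\nAs an aside, the [general AM-GM inequality][amgm] mentioned above is a one-line consequence of this discrete statement: taking the convex function [tex]f(x) = -\log x[/tex] and rearranging gives\r\n\r\n[tex]\r\n\displaystyle \sum_{i=1}^n p_i x_i \geq \prod_{i=1}^n x_i^{p_i}.\r\n[/tex]\r\n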
\r\n\r\n[^DeGroot1962]: M. H. DeGroot, [Uncertainty, Information, and Sequential Experiments][uise]\r\n_Ann. Math. Statist._ Volume 33, Number 2 (1962), 404-419.\r\n\r\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\r\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\r\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\r\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-9','','','2008-11-17 12:57:21','2008-11-17 02:57:21','',143,'http://conflate.net/inductio/2008/11/143-revision-9/',0,'revision','',0),(153,2,'2008-11-17 13:06:00','2008-11-17 03:06:00','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki\n\nHowever, I\'ve had a bit of trouble intuitively grasping why it\'s true. I was therefore quite happy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\n\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right)\n[/tex]\n
\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-10','','','2008-11-17 13:06:00','2008-11-17 03:06:00','',143,'http://conflate.net/inductio/2008/11/143-revision-10/',0,'revision','',0),(154,2,'2008-11-17 13:43:57','2008-11-17 03:43:57','A graphical proof of Jensen\'s Inequality','Jensen\'s Inequality',0,'Jensen\'s Inequality','inherit','open','open','','jensen','','','2008-11-17 13:43:57','2008-11-17 03:43:57','',143,'http://conflate.net/inductio/wp-content/uploads/2008/11/jensen.png',0,'attachment','image/png',0),(155,2,'2008-11-17 13:44:37','2008-11-17 03:44:37','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki\n\nI was therefore quite happy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\n[caption id=\"attachment_154\" align=\"aligncenter\" width=\"485\" caption=\"Jensen\'s Inequality\"]<img src=\"http://conflate.net/inductio/wp-content/uploads/2008/11/jensen.png\" alt=\"Jensen's Inequality\" width=\"485\" />[/caption]\n\n\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right)\n[/tex]\n
\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-11','','','2008-11-17 13:44:37','2008-11-17 03:44:37','',143,'http://conflate.net/inductio/2008/11/143-revision-11/',0,'revision','',0),(156,2,'2008-11-17 14:56:45','2008-11-17 04:56:45','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nI was therefore quite happy to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it.\n\n[caption id=\"attachment_154\" align=\"aligncenter\" width=\"485\" caption=\"Figure 1. A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi\"]<img src=\"http://conflate.net/inductio/wp-content/uploads/2008/11/jensen.png\" alt=\"Jensen's Inequality\" width=\"485\" />[/caption]\n\n\nFirst of all here\'s a statement of Jensen\'s inequality for discrete distributions.\n
\n[tex]\n\displaystyle f\left(\sum_{i=1}^n p_i x_i \right) \leq \sum_{i=1}^n p_i f\left(x_i\right)\n[/tex]\n
\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki','Behold! Jensen\'s Inequality',0,'','inherit','open','open','','143-revision-12','','','2008-11-17 14:56:45','2008-11-17 04:56:45','',143,'http://conflate.net/inductio/2008/11/143-revision-12/',0,'revision','',0),(157,2,'2008-06-17 15:09:52','2008-06-17 05:09:52','I\'ve recently spent a bit of time collaborating with my wife on a research project. Research collaboration by couples is not new but given that Julieanne is a [lecturer in the English program][j] and I\'m part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual. \r\n\r\nThe rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. The tool itself is based on a simple application of linear Principal Component Analysis (PCA). I\'ll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool.\r\n\r\n[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php\r\n[csl]: http://csl.cecs.anu.edu.au/\r\n\r\nThe Australian Common Reader Project\r\n--------------------------------------------\r\nOne of Julieanne\'s research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers\' relationship with books and periodicals. \r\n\r\n[acrp]: http://www.api-network.com/hosted/acrp/\r\n\r\nEver on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions. \r\n\r\n[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/\r\n[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin\r\n[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor \r\n\r\nBooks and Borrowers\r\n------------------------\r\nAfter an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. \r\nThe full database contains 99,692 loans of 7,078 different books from 11 libraries by one of the 2,642 people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed one of these books.\r\nThis distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people. 
\r\n\r\n<table>\r\n<caption>Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.</caption>\r\n<tr><th rowspan=\"2\">Book ID</th><th colspan=\"4\">Borrower ID</th></tr>\r\n<tr><th>1</th><th>2</th><th>...</th><th>2,473</th></tr>\r\n<tr><td>1</td><td>1</td><td>0</td><td>...</td><td>1</td></tr>\r\n<tr><td>2</td><td>1</td><td>1</td><td>...</td><td>0</td></tr>\r\n<tr><td>3</td><td>0</td><td>0</td><td>...</td><td>1</td></tr>\r\n<tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr>\r\n<tr><td>1,616</td><td>1</td><td>1</td><td>...</td><td>1</td></tr>\r\n</table>\r\n
\r\n\r\nConceptually, each cell in the table contains a 1 if the person associated with the cell\'s column borrowed the book associated with the cell\'s row. If there was no such loan between a given book and borrower the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473.\r\n\r\nBook Similarity\r\n-----------------\r\nThe table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this a notion of what \"similar books\" means is required.\r\n\r\nMathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\mathbf{b}_2 = (1,1,\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex].\r\n\r\nA crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common. That is, the number of columns that have a `1` in the row for book 1 and the row for book 2. \r\n\r\nIn terms of the vector representation, this similarity measure is simply the \"[inner product][]\" between [tex]\mathbf{b}_1[/tex] and [tex]\mathbf{b}_2[/tex] and is written [tex]\left\langle \mathbf{b}_1, \mathbf{b}_2 \right\rangle = b_{1,1}b_{2,1} + \cdots + b_{1,N}b_{2,N}[/tex] where N = 2,473 is the total number of borrowers.\r\n\r\n[inner product]: http://en.wikipedia.org/wiki/Inner_product_space\r\n\r\nIt turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to \"normalise\" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the \"size\" of each of the vectors for those books. \r\n\r\nMathematically, we will denote the size of a book vector [tex]\mathbf{b}_i[/tex] as [tex]|\mathbf{b}_i| = \sqrt{\left\langle \mathbf{b}_i, \mathbf{b}_i \right\rangle}[/tex]. The similarity between two books then becomes:\r\n\r\n
\r\n[tex]\displaystyle\r\n \text{sim}(\mathbf{b}_i,\mathbf{b}_j) \r\n = \frac{\left\langle \mathbf{b}_i, \mathbf{b}_j \right\rangle}{|\mathbf{b}_i||\mathbf{b}_j|}\r\n[/tex]\r\n
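\r\nThis normalised similarity is easy to sketch in code. The following is a toy illustration in Ruby (the actual analysis used SQL and R; the `loans` pairs below stand in for the real database):\r\n\r\n    # Build a 0/1 borrower vector for each book from [book_id, borrower_id] loans.\r\n    loans = [[1, 1], [1, 2473], [2, 1], [2, 2], [3, 2473]] # toy data\r\n    books = Hash.new { |h, k| h[k] = {} }\r\n    loans.each { |book, person| books[book][person] = 1 }\r\n\r\n    # The inner product counts the borrowers two books have in common.\r\n    def inner(a, b)\r\n      a.keys.count { |person| b.key?(person) }\r\n    end\r\n\r\n    # sim(b_i, b_j) = <b_i, b_j> / (|b_i| |b_j|), so sim(b, b) = 1 for any book.\r\n    def sim(a, b)\r\n      inner(a, b) / Math.sqrt(inner(a, a) * inner(b, b))\r\n    end\r\n\r\n    puts sim(books[1], books[2]) # => 0.5\r\n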
\r\n\r\nPrincipal Component Analysis\r\n---------------------------------\r\nNow that we have a similarity measure between books the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart. \r\n\r\nA standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique aims to find a way of reducing the number of coordinates in each book vector in such a way that when the similarity between two books is computed using these smaller vectors it is as close as possible to the original similarity. That is, PCA creates a new table that represents books in terms of only two columns.\r\n\r\n[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n
<table>\r\n<caption>Table 2: A portion of the book table after PCA. The values in the two new columns (PCA IDs) can be used to plot the books.</caption>\r\n<tr><th rowspan=\"2\">Book ID</th><th colspan=\"2\">PCA ID</th></tr>\r\n<tr><th>1</th><th>2</th></tr>\r\n<tr><td>1</td><td>-8.2</td><td>2.3</td></tr>\r\n<tr><td>2</td><td>0.4</td><td>-4.3</td></tr>\r\n<tr><td>3</td><td>-1.3</td><td>-3.7</td></tr>\r\n<tr><td>...</td><td>...</td><td>...</td></tr>\r\n<tr><td>1,616</td><td>2.2</td><td>-5.6</td></tr>\r\n</table>\r\n
\r\n\r\nTable 2 gives an example of the book table after PCA has reduced the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be as easily interpreted as the borrower columns in Table 1, but their values are such that similarities computed from Table 2 are roughly the same as those computed from Table 1. That is, if [tex]\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\mathbf{c}_2=(0.4,-4.3)[/tex] are the vectors\r\nfor the first two rows of Table 2 then [tex]\text{sim}(\mathbf{c}_1,\mathbf{c}_2)[/tex]\r\nwould be close to [tex]\text{sim}(\mathbf{b}_1,\mathbf{b}_2)[/tex], the similarity of the\r\nfirst two rows in Table 1.[^1]\r\n\r\n[^1]: Technically, the guarantee of the \"closeness\" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair\'s\r\nsimilarity is estimated well.\r\n\r\nVisualising the Data\r\n----------------------\r\nFigure 1 shows a plot of the PCA reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2]\r\n\r\n[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle\'s colour.\r\n\r\n
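\r\nFor the curious, the reduction itself is simple to sketch. The following toy Ruby version (the real analysis used [R][]; the small `data` matrix stands in for Table 1) centres the data and projects it onto the two directions of greatest variance:\r\n\r\n    require \'matrix\'\r\n\r\n    # Rows are books, columns are borrowers (a tiny stand-in for Table 1).\r\n    data = Matrix[[1, 0, 1, 1], [1, 1, 0, 0], [0, 0, 1, 1]]\r\n\r\n    # Centre each column (borrower) on its mean.\r\n    means = (0...data.column_count).map { |j| data.column(j).sum / data.row_count.to_f }\r\n    centred = Matrix.build(data.row_count, data.column_count) { |i, j| data[i, j] - means[j] }\r\n\r\n    # Eigendecomposition of the covariance matrix.\r\n    cov = centred.transpose * centred / (data.row_count - 1).to_f\r\n    eigen = cov.eigensystem\r\n\r\n    # Indices of the two largest eigenvalues.\r\n    top = eigen.eigenvalues.each_with_index.sort_by { |v, _| -v }.first(2).map(&:last)\r\n\r\n    # One row per book with two coordinates, as in Table 2.\r\n    coords = centred * Matrix.columns(top.map { |j| eigen.eigenvectors[j].to_a })\r\n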
\r\n\"Plot\r\n

Figure 1: A PCA plot of all the books in the ACRP database coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.\r\n

\r\n\r\nOne immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. This means it is likely that two voracious readers who frequent the same library will read the same books. This, in turn, means the similarity of two books from the same library will be higher than that of books from different libraries, as there are very few borrowers that use more than one library.\r\n\r\nDrilling Down and Interacting\r\n---------------------------------\r\nTo get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners\' and Mechanics\' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data.\r\n\r\n[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales\r\n\r\nThere are a total of 789 books in the Lambton institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books.\r\n\r\nTo make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers. \r\n\r\nClicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet]. \r\n\r\n[applet]: /inductio/wp-content/public/acrp/\r\n\r\n
\r\n\"Click\r\n

Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.

\r\n
\r\n\r\nInstructions describing how to use the tool can be found below it. \r\nIn a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the \"Borrowers\" bar will only show books with at least that many borrowers; and altering the \"Similarity\" bar will only draw lines to books with at least that proportion of borrowers in common.\r\n\r\nFuture Work and Distant Reading\r\n-------------------------------------\r\nJulieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls \"distant reading\" -- looking at books as objects and how they are read rather than the \"close reading\" of the text of individual books. \r\n\r\n[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti \r\n\r\nNow that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on the habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to the questions we might ask. \r\n\r\nInstead we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers, their text as well as other historical and cultural factors about them.\r\n\r\nData and Code\r\n----------------\r\nFor the technically minded, I have made the code I used to do the visualisation available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location.\r\n\r\n[github]: http://github.com/mreid/acrp/tree/master \r\n[SQL]: http://en.wikipedia.org/wiki/SQL\r\n[R]: http://www.r-project.org/\r\n[Processing]: http://processing.org/\r\n\r\nAccess to the data that the code acts upon is not mine to give, so the code is primarily to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here.\r\n\r\n','Visualising 19th Century Reading in Australia',0,'A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project.','inherit','open','open','','40-revision','','','2008-06-17 15:09:52','2008-06-17 05:09:52','',40,'http://conflate.net/inductio/2008/06/40-revision/',0,'revision','',0),(158,2,'2008-11-17 15:49:27','2008-11-17 05:49:27','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nI was quite happy then to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it. \n\n[caption id=\"attachment_154\" align=\"aligncenter\" width=\"485\" caption=\"Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi\"]\"Jensen's[/caption]\n\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen\'s inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with with probability [tex]p_i[/tex].\n\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of ƒ, must also be convex and lie within its epigraph (the blue shaded area above ƒ). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\n
\n[tex]\displaystyle \n \mathbb{E}[(x, f(x))] = \sum_{i=1}^n p_i \left(x_i, f(x_i)\right)\n[/tex]\n
\nis a convex combination and so must also lie within the dashed polygon. Thus, \n\nThe general result for non-discrete distributions can also be gleamed from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram the dashed polygon the shaded area will approximate the graph of ƒ better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above ƒ. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\n\nA somewhat surprising fact about Jensen\'s inequality is that its converse is also true. By this I mean that if ƒ is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then ƒ is necessarily convex. The contrapositive of this statement is: ƒ non-convex implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\n\nConsidering Figure 1 again gives some intuition as to why this must be the case. If ƒ was non-convex then its epigraph (the blue shaded area) must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of ƒ.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki','Behold! Jensen\'s Inequality',0,'Unsatisfied with the very algebraic and formal proofs of Jensen\'s inequality, I present a diagram that gives a graphical intuition for the result.','inherit','open','open','','143-revision-13','','','2008-11-17 15:49:27','2008-11-17 05:49:27','',143,'http://conflate.net/inductio/2008/11/143-revision-13/',0,'revision','',0),(159,2,'2008-11-17 16:21:18','2008-11-17 06:21:18','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nI was quite happy then to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it. \n\n[caption id=\"attachment_154\" align=\"aligncenter\" width=\"485\" caption=\"Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi\"]\"Jensen's[/caption]\n\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen\'s inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with with probability [tex]p_i[/tex].\n\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of ƒ, must also be convex and lie within the epigraph of ƒ (the blue shaded area above ƒ). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\n
\n[tex]\displaystyle \n \mathbb{E}[(x, f(x))] = \sum_{i=1}^n p_i \left(x_i, f(x_i)\right) \n = \left(\mathbb{E}[x], \mathbb{E}[f(x)]\right)\n[/tex]\n
\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]mathbb{E}[(x, f(x))] = left(mathbb{E}[x], mathbb{E}[f(x)]right)[/tex] it must lie above [tex]fleft(mathbb{E}[x]right)[/tex] thus giving the result.\n\nAlthough the diagram in Figure 1 assumes a 1-dimension space X the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleamed from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram the dashed polygon the shaded area will approximate the graph of ƒ better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above ƒ. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\n\nA somewhat surprising fact about Jensen\'s inequality is that its converse is also true. By this I mean that if ƒ is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then ƒ is necessarily convex. The contrapositive of this statement is: ƒ non-convex implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\n\nConsidering Figure 1 again gives some intuition as to why this must be the case. If ƒ was non-convex then its epigraph (the blue shaded area) must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of ƒ.\n\nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen\'s inequality. There are too many edge cases and subtleties (especially in the continuous case) that I\'ve ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen\'s inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable to but if I need this level of detail\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki','Behold! Jensen\'s Inequality',0,'Unsatisfied with the very algebraic and formal proofs of Jensen\'s inequality, I present a diagram that gives a graphical intuition for the result.','inherit','open','open','','143-revision-14','','','2008-11-17 16:21:18','2008-11-17 06:21:18','',143,'http://conflate.net/inductio/2008/11/143-revision-14/',0,'revision','',0),(160,2,'2008-11-17 16:21:56','2008-11-17 06:21:56','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. More formally, if ƒ is a real-valued convex function over X and x is an X-valued random variable then we can define the _Jensen gap_\r\n
\r\n[tex]\r\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\r\n[/tex]\r\n
\r\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\r\n
\r\n[tex]\r\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\r\n[/tex]\r\n
\r\n\r\nThis is a fairly simple but very important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\r\n\r\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\r\n\r\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\r\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\r\n\r\nI was quite happy then to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it. \r\n\r\n[caption id=\"attachment_154\" align=\"aligncenter\" width=\"485\" caption=\"Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi\"]\"Jensen's[/caption]\r\n\r\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen\'s inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with with probability [tex]p_i[/tex].\r\n\r\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of ƒ, must also be convex and lie within the epigraph of ƒ (the blue shaded area above ƒ). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\r\n
\r\n[tex]\displaystyle \r\n \mathbb{E}[(x, f(x))] = \sum_{i=1}^n p_i \left(x_i, f(x_i)\right) \r\n = \left(\mathbb{E}[x], \mathbb{E}[f(x)]\right)\r\n[/tex]\r\n
\r\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]mathbb{E}[(x, f(x))] = left(mathbb{E}[x], mathbb{E}[f(x)]right)[/tex] it must lie above [tex]fleft(mathbb{E}[x]right)[/tex] thus giving the result.\r\n\r\nAlthough the diagram in Figure 1 assumes a 1-dimension space X the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleamed from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram the dashed polygon the shaded area will approximate the graph of ƒ better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above ƒ. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\r\n\r\nA somewhat surprising fact about Jensen\'s inequality is that its converse is also true. By this I mean that if ƒ is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then ƒ is necessarily convex. The contrapositive of this statement is: ƒ non-convex implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\r\n\r\nConsidering Figure 1 again gives some intuition as to why this must be the case. If ƒ was non-convex then its epigraph (the blue shaded area) must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of ƒ.\r\n\r\nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen\'s inequality. There are too many edge cases and subtleties (especially in the continuous case) that I\'ve ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen\'s inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable to but if I need this level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory.\r\n\r\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\r\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\r\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\r\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\r\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki','Behold! Jensen\'s Inequality',0,'Unsatisfied with the very algebraic and formal proofs of Jensen\'s inequality, I present a diagram that gives a graphical intuition for the result.','inherit','open','open','','143-revision-15','','','2008-11-17 16:21:56','2008-11-17 06:21:56','',143,'http://conflate.net/inductio/2008/11/143-revision-15/',0,'revision','',0),(161,2,'2008-11-17 16:24:32','2008-11-17 06:24:32','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. 
More formally, if ƒ is a real-valued convex function over some finite dimensional convex set X and x is an X-valued random variable then we can define the _Jensen gap_\n
\n[tex]\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\n[/tex]\n
\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\n
\n[tex]\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\n[/tex]\n
\n\nThis is a fairly simple but important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\n\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\n\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\n\nI was quite happy then to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it. \n\n[caption id=\"attachment_154\" align=\"aligncenter\" width=\"485\" caption=\"Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi\"]\"Jensen's[/caption]\n\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen\'s inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with with probability [tex]p_i[/tex].\n\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of ƒ, must also be convex and lie within the epigraph of ƒ (the blue shaded area above ƒ). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\n
\n[tex]\displaystyle \n \mathbb{E}[(x, f(x))] = \sum_{i=1}^n p_i \left(x_i, f(x_i)\right)\n[/tex]\n
\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]mathbb{E}[(x, f(x))] = left(mathbb{E}[x], mathbb{E}[f(x)]right)[/tex] it must lie above [tex]fleft(mathbb{E}[x]right)[/tex] thus giving the result.\n\nAlthough the diagram in Figure 1 assumes a 1-dimension space X the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleamed from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram the dashed polygon the shaded area will approximate the graph of ƒ better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above ƒ. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\n\nA somewhat surprising fact about Jensen\'s inequality is that its converse is also true. By this I mean that if ƒ is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then ƒ is necessarily convex. The contrapositive of this statement is: ƒ non-convex implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\n\nConsidering Figure 1 again gives some intuition as to why this must be the case. If ƒ was non-convex then its epigraph (the blue shaded area) must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of ƒ.\n\nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen\'s inequality. There are too many edge cases and subtleties (especially in the continuous case) that I\'ve ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen\'s inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable to but if I need this level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory.\n\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki','Behold! Jensen\'s Inequality',0,'Unsatisfied with the very algebraic and formal proofs of Jensen\'s inequality, I present a diagram that gives a graphical intuition for the result.','inherit','open','open','','143-revision-16','','','2008-11-17 16:24:32','2008-11-17 06:24:32','',143,'http://conflate.net/inductio/2008/11/143-revision-16/',0,'revision','',0),(162,2,'2008-11-17 16:24:51','2008-11-17 06:24:51','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. 
More formally, if ƒ is a real-valued convex function over some finite dimensional convex set X and x is an X-valued random variable then we can define the _Jensen gap_\r\n
\r\n[tex]\r\n\displaystyle J_f(x) := \mathbb{E}\left[ f\left(x\right) \right] - f\left(\mathbb{E}\left[ x \right]\right)\r\n[/tex]\r\n
\r\nwhere [tex]\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \geq 0[/tex] or equivalently,\r\n
\r\n[tex]\r\n\displaystyle \mathbb{E}\left[ f\left(x\right) \right] \geq f\left(\mathbb{E}\left[ x \right]\right).\r\n[/tex]\r\n
\r\n\r\nThis is a fairly simple but important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\r\n\r\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][] feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\r\n\r\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\r\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\r\n\r\nI was quite happy then to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it. \r\n\r\n[caption id=\"attachment_154\" align=\"aligncenter\" width=\"485\" caption=\"Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi\"]\"Jensen's[/caption]\r\n\r\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen\'s inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with with probability [tex]p_i[/tex].\r\n\r\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of ƒ, must also be convex and lie within the epigraph of ƒ (the blue shaded area above ƒ). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\r\n
\r\n[tex]\displaystyle \r\n \mathbb{E}[(x, f(x))] = \sum_{i=1}^n p_i \left(x_i, f(x_i)\right)\r\n[/tex]\r\n
\r\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]mathbb{E}[(x, f(x))] = left(mathbb{E}[x], mathbb{E}[f(x)]right)[/tex] it must lie above [tex]fleft(mathbb{E}[x]right)[/tex] thus giving the result.\r\n\r\nAlthough the diagram in Figure 1 assumes a 1-dimension space X the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleamed from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram the dashed polygon the shaded area will approximate the graph of ƒ better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above ƒ. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\r\n\r\nA somewhat surprising fact about Jensen\'s inequality is that its converse is also true. By this I mean that if ƒ is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x then ƒ is necessarily convex. The contrapositive of this statement is: ƒ non-convex implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\r\n\r\nConsidering Figure 1 again gives some intuition as to why this must be the case. If ƒ was non-convex then its epigraph must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of ƒ.\r\n\r\nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen\'s inequality. There are too many edge cases and subtleties (especially in the continuous case) that I\'ve ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen\'s inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable to but if I need this level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory.\r\n\r\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\r\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\r\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\r\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\r\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki','Behold! Jensen\'s Inequality',0,'Unsatisfied with the very algebraic and formal proofs of Jensen\'s inequality, I present a diagram that gives a graphical intuition for the result.','inherit','open','open','','143-revision-17','','','2008-11-17 16:24:51','2008-11-17 06:24:51','',143,'http://conflate.net/inductio/2008/11/143-revision-17/',0,'revision','',0),(163,2,'2008-11-17 16:25:38','2008-11-17 06:25:38','I have been making quite a bit of use of Jensen\'s inequality recently. It states that the expected value of a convex transformation of a random variable is at least the value of the convex function at the mean of the random variable. 
More formally, if ƒ is a real-valued convex function over some finite-dimensional convex set X and x is an X-valued random variable, then we can define the _Jensen gap_\r\n
\r\n[tex]\r\n\\displaystyle J_f(x) := \\mathbb{E}\\left[ f\\left(x\\right) \\right] - f\\left(\\mathbb{E}\\left[ x \\right]\\right)\r\n[/tex]\r\n
\r\nwhere [tex]\\mathbb{E}[/tex] denotes expectation. Jensen\'s inequality states that this gap is never negative, that is, [tex]J_f(x) \\geq 0[/tex] or equivalently,\r\n
\r\n[tex]\r\n\\displaystyle \\mathbb{E}\\left[ f\\left(x\\right) \\right] \\geq f\\left(\\mathbb{E}\\left[ x \\right]\\right).\r\n[/tex]\r\n
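As a quick sanity check of the definition, here is a minimal Ruby sketch (Ruby only to match the scripts elsewhere in this patch) that computes the Jensen gap for a discrete distribution; the values, probabilities and the choice of convex function f(x) = x^2 are all made up for illustration:

```ruby
# A minimal sketch of the Jensen gap J_f(x) = E[f(x)] - f(E[x]) for a
# discrete distribution over values x_i with probabilities p_i.
# The distribution and the convex function below are illustrative only.
xs = [-1.0, 0.0, 2.0, 5.0]                # values x_i
ps = [ 0.1, 0.4, 0.3, 0.2]                # probabilities p_i (sum to 1)
f  = lambda { |x| x * x }                 # a convex function: f(x) = x^2

e_x  = xs.zip(ps).inject(0.0) { |s, (x, p)| s + p * x }          # E[x]
e_fx = xs.zip(ps).inject(0.0) { |s, (x, p)| s + p * f.call(x) }  # E[f(x)]

puts "J_f(x) = #{e_fx - f.call(e_x)}"     # prints roughly 4.05, never negative
```

For f(x) = x^2 the gap is exactly the variance of x, which is another way of seeing that it cannot be negative.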
\r\n\r\nThis is a fairly simple but important inequality in the study of convex functions. Through judicious choice of the convex function it can be used to derive a [general AM-GM inequality][amgm] and many results in information theory. I\'ve been interested in it because DeGroot\'s notion of [statistical information][uise] and measures of the distance between probability distributions called [f-divergences][] can both be expressed as a Jensen gap and consequently related to each other.\r\n\r\nJensen\'s inequality is not difficult to prove. It is almost a direct consequence of the definition of convexity and the linearity of expectation. However, all of the proofs I\'ve read, including those in books by [Rockafellar][] and by [Dudley][], feel like they are from the [Bourbaki][] school in that they present the proof without recourse to any diagrams.\r\n\r\n[rockafellar]: http://books.google.com/books?id=wj4Fh4h_V7QC\r\n[dudley]: http://books.google.com/books?id=Wv_zxEExK3QC\r\n\r\nI was quite happy then to have found a graphical \"proof\" of Jensen\'s inequality. By this I mean a proof in the style of the [proof of Pythagoras\' theorem][pythagoras] that is simply a diagram with the word \"Behold!\" above it. \r\n\r\n[caption id=\"attachment_154\" align=\"aligncenter\" width=\"485\" caption=\"Figure 1. Behold! A graphical demonstration of Jensen's Inequality. The expectations shown are with respect to an arbitrary discrete distribution over the xi\"][image][/caption]\r\n\r\nUnfortunately, the diagram in Figure 1 is not quite as transparent as the Pythagorean proof so a little discussion is probably required. The diagram shows an instance of Jensen\'s inequality for a discrete distribution where the random variable [tex]x[/tex] takes on one of the n values [tex]x_i[/tex] with probability [tex]p_i[/tex].\r\n\r\nNote that the points [tex](x_i, f(x_i))[/tex] form the vertices of a polygon which, by the convexity of ƒ, must also be convex and lie within the epigraph of ƒ (the blue shaded area above ƒ). Furthermore, since the [tex]p_i[/tex] are probabilities they satisfy [tex]\\sum_i p_i = 1[/tex]. This means the expected value of the random variable [tex](x, f(x))[/tex] given by\r\n
\r\n[tex]\\displaystyle \r\n \\mathbb{E}[(x, f(x))] = \\sum_{i=1}^n p_i \\left(x_i, f(x_i)\\right) \r\n[/tex]\r\n
\r\nis a convex combination and so must also lie within the dashed polygon. In fact, since [tex]\\mathbb{E}[(x, f(x))] = \\left(\\mathbb{E}[x], \\mathbb{E}[f(x)]\\right)[/tex] it must lie above [tex]f\\left(\\mathbb{E}[x]\\right)[/tex], thus giving the result.\r\n\r\nAlthough the diagram in Figure 1 assumes a 1-dimensional space X, the above argument generalises to higher dimensions in an analogous manner. Also, the general result for non-discrete distributions can be gleaned from the provided diagram by a hand-wavy limiting argument. By adding more [tex]x_i[/tex] to the diagram, the dashed polygon fills more of the shaded area and so approximates the graph of ƒ better. So, by the earlier argument for the discrete case, the expected value of [tex]x[/tex] will remain within the polygon and thus within the shaded area and thus above ƒ. Since this holds for an arbitrary number of points and nothing weird happens as we take the limit we have the continuous result.\r\n\r\nA somewhat surprising fact about Jensen\'s inequality is that its converse is also true. By this I mean that if ƒ is a function such that its Jensen gap [tex]J_f(x)[/tex] is non-negative for all distributions of the random variable x, then ƒ is necessarily convex. The contrapositive of this statement is: ƒ non-convex implies the existence of a random variable x so that [tex]J_f(x) < 0[/tex].\r\n\r\nConsidering Figure 1 again gives some intuition as to why this must be the case. If ƒ were non-convex then its epigraph must, by definition, also be non-convex. This means I could choose some [tex]x_i[/tex] so that one of the dashed lines lies outside the shaded area. This means I can then choose [tex]p_i[/tex] so that the mean [tex]\\mathbb{E}[(x, f(x))][/tex] lies outside the shaded area and thus below the graph of ƒ.\r\n\r\nOf course, no self-respecting mathematician would call the above arguments a proof of Jensen\'s inequality. There are too many edge cases and subtleties (especially in the continuous case) that I\'ve ignored. That said, I believe the statement and thrust of the inequality can be quickly arrived at from the simple diagram above. When using tools like Jensen\'s inequality, I find this type of quick insight more valuable than a long, careful technical statement and proof. The latter is valuable too, but if I need this level of detail I would look it up rather than try to dredge it up from my sometimes unreliable memory.\r\n\r\n[f-divergences]: http://en.wikipedia.org/wiki/F-divergence\r\n[uise]: http://projecteuclid.org/euclid.aoms/1177704567\r\n[pythagoras]: http://www.math.ntnu.no/~hanche/pythagoras/\r\n[amgm]: http://en.wikipedia.org/wiki/Inequality_of_arithmetic_and_geometric_means#Proof_of_the_generalized_AM-GM_inequality_using_Jensen.27s_inequality\r\n[bourbaki]: http://en.wikipedia.org/wiki/Nicolas_Bourbaki','Behold! Jensen\'s Inequality',0,'Unsatisfied with the very algebraic and formal proofs of Jensen\'s inequality, I present a diagram that gives a graphical intuition for the result.','inherit','open','open','','143-revision-18','','','2008-11-17 16:25:38','2008-11-17 06:25:38','',143,'http://conflate.net/inductio/2008/11/143-revision-18/',0,'revision','',0),(164,2,'2008-11-18 21:18:27','2008-11-18 11:18:27','The annual [Machine Learning Summer School][mlss] is being held in Canberra at the [Australian National University][anu] in January next year. It will be part of the joint [Summer Schools in Logic and Learning][ssll]. 
\r\n\r\nFrom the 2009 [MLSS website][mlss2009]:\r\n> This school is suitable for all levels, both for people without previous knowledge in \r\n> Machine Learning, and those wishing to broaden their expertise in this area. It will \r\n> allow the participants to get in touch with international experts in this field. \r\n> Exchange of students, joint publications and joint projects will result because \r\n> of this collaboration. \r\n\r\nThe Summer Schools will run from the 26th to the 30th of January 2009 and [registration][] is open. Note that there is a 20% surcharge for registrations after the 19th of December 2008 so get registering.\r\n\r\nI\'ve been fortunate enough to have been given a spot on the [program][]. I\'ll be talking about some of the work I\'ve been doing with [Bob Williamson][bob] this year on analysing relationships between various notions of risk, divergence and information in binary valued prediction problems.\r\n\r\nLeave a comment if you\'re planning to attend and I\'ll make sure I say hi. \r\n\r\nHope to see you there!\r\n\r\n[mlss]: http://mlss.cc/\r\n[mlss2009]: http://ssll.cecs.anu.edu.au/about/mlss\r\n[anu]: http://anu.edu.au/\r\n[ssll]: http://ssll.cecs.anu.edu.au/\r\n[program]: http://ssll.cecs.anu.edu.au/program\r\n[bob]: http://axiom.anu.edu.au/~williams/\r\n[registration]: http://ssll.cecs.anu.edu.au/registration','Machine Learning Summer School 2009',0,'A plug for the 2009 Machine Learning Summer School in Canberra, Australia. I will be giving a presentation there.','publish','open','open','','machine-learning-summer-school-2009','','','2008-11-18 21:18:27','2008-11-18 11:18:27','',0,'http://conflate.net/inductio/?p=164',0,'post','',2),(165,2,'2008-11-18 21:16:49','2008-11-18 11:16:49','The annual [Machine Learning Summer School][mlss] is being held in Canberra at the [Australian National University][anu] in January next year. It will be part of the joint [Summer Schools in Logic and Learning][ssll]. \n\nFrom the 2009 [MLSS website][mlss2009]:\n> This school is suitable for all levels, both for people without previous knowledge in \n> Machine Learning, and those wishing to broaden their expertise in this area. It will \n> allow the participants to get in touch with international experts in this field. \n> Exchange of students, joint publications and joint projects will result because \n> of this collaboration. \n\nThe Summer Schools will run from the 26th to the 30th of January 2009 and [registration][] is open. Note that there is a 20% surcharge for registrations after the 19th of December 2008 so get registering.\n\nI\'ve been fortunate enough to have been given a spot on the [program][]. I\'ll be talking about some of the work I\'ve been doing with [Bob Williamson][bob] this year on analysing relationships between various notions of risk, divergence and information in binary valued prediction problems.\n\nLeave a comment if you\'re planning to attend and I\'ll make sure I say hi. Hope to see you there!\n\n[mlss]: http://mlss.cc/\n[mlss2009]: http://ssll.cecs.anu.edu.au/about/mlss\n[anu]: http://anu.edu.au/\n[ssll]: http://ssll.cecs.anu.edu.au/\n[program]: http://ssll.cecs.anu.edu.au/program\n[bob]: http://axiom.anu.edu.au/~williams/','Machine Learning Summer School 2009',0,'A plug for the 2009 Machine Learning Summer School in Canberra, Australia. 
I will be giving a presentation there.','inherit','open','open','','164-revision','','','2008-11-18 21:16:49','2008-11-18 11:16:49','',164,'http://conflate.net/inductio/2008/11/164-revision/',0,'revision','',0),(166,2,'2008-11-18 21:17:37','2008-11-18 11:17:37','The annual [Machine Learning Summer School][mlss] is being held in Canberra at the [Australian National University][anu] in January next year. It will be part of the joint [Summer Schools in Logic and Learning][ssll]. \r\n\r\nFrom the 2009 [MLSS website][mlss2009]:\r\n> This school is suitable for all levels, both for people without previous knowledge in \r\n> Machine Learning, and those wishing to broaden their expertise in this area. It will \r\n> allow the participants to get in touch with international experts in this field. \r\n> Exchange of students, joint publications and joint projects will result because \r\n> of this collaboration. \r\n\r\nThe Summer Schools will run from the 26th to the 30th of January 2009 and [registration][] is open. Note that there is a 20% surcharge for registrations after the 19th of December 2008 so get registering.\r\n\r\nI\'ve been fortunate enough to have been given a spot on the [program][]. I\'ll be talking about some of the work I\'ve been doing with [Bob Williamson][bob] this year on analysing relationships between various notions of risk, divergence and information in binary valued prediction problems.\r\n\r\nLeave a comment if you\'re planning to attend and I\'ll make sure I say hi. \r\n\r\nHope to see you there!\r\n\r\n[mlss]: http://mlss.cc/\r\n[mlss2009]: http://ssll.cecs.anu.edu.au/about/mlss\r\n[anu]: http://anu.edu.au/\r\n[ssll]: http://ssll.cecs.anu.edu.au/\r\n[program]: http://ssll.cecs.anu.edu.au/program\r\n[bob]: http://axiom.anu.edu.au/~williams/\r\n[registration]: http://ssll.cecs.anu.edu.au/registration','Machine Learning Summer School 2009',0,'A plug for the 2009 Machine Learning Summer School in Canberra, Australia. I will be giving a presentation there.','inherit','open','open','','164-revision-2','','','2008-11-18 21:17:37','2008-11-18 11:17:37','',164,'http://conflate.net/inductio/2008/11/164-revision-2/',0,'revision','',0),(167,2,'2008-12-09 22:04:10','2008-12-09 12:04:10','_Update - 9 Dec 2008_: Julieanne and I presented a much improved version of this visualisation at the [Resourceful Reading][] conference held at the University of Sydney on the 5th of December. I will post the updated application with notes shortly.\n\n[Resourceful Reading]: http://conferences.arts.usyd.edu.au/index.php?cf=20\n\nI\'ve recently spent a bit of time collaborating with my wife on a research project. Research collaboration by couples is not new but given that Julieanne is a [lecturer in the English program][j] and I\'m part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual. \n\nThe rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. The tool itself is based on a simple application of linear Principal Component Analysis (PCA). 
I\'ll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool.\n\n[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php\n[csl]: http://csl.cecs.anu.edu.au/\n\nThe Australian Common Reader Project\n--------------------------------------------\nOne of Julieanne\'s research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers\' relationship with books and periodicals. \n\n[acrp]: http://www.api-network.com/hosted/acrp/\n\nEver on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions. \n\n[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/\n[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin\n[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor \n\nBooks and Borrowers\n------------------------\nAfter an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. \nThe full database contains 99,692 loans of 7,078 different books from 11 libraries by one of the 2,642 people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed one of these books.\nThis distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people. \n\n\n\n\n\n\n\n\n\n\n
<table>
<caption>Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.</caption>
<tr><th rowspan="2">Book ID</th><th colspan="4">Borrower ID</th></tr>
<tr><th>1</th><th>2</th><th>...</th><th>2,473</th></tr>
<tr><td>1</td><td>1</td><td>0</td><td>...</td><td>1</td></tr>
<tr><td>2</td><td>1</td><td>1</td><td>...</td><td>0</td></tr>
<tr><td>3</td><td>0</td><td>0</td><td>...</td><td>1</td></tr>
<tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr>
<tr><td>1,616</td><td>1</td><td>1</td><td>...</td><td>1</td></tr>
</table>
\n\nConceptually, each cell in the table contains a 1 if the person associated with the cell\'s column borrowed the book associated with the cell\'s row. If there was no such loan between a given book and borrower the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473.\n\nBook Similarity\n-----------------\nThe table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this, a notion of what \"similar books\" means is required.\n\nMathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\\mathbf{b}_2 = (1,1,\\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex].\n\nA crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common. That is, the number of columns that have a `1` in the row for book 1 and the row for book 2. \n \nIn terms of the vector representation, this similarity measure is simply the \"[inner product][]\" between [tex]\\mathbf{b}_1[/tex] and [tex]\\mathbf{b}_2[/tex] and is written [tex]\\left<\\mathbf{b}_1,\\mathbf{b}_2\\right> = b_{1,1}b_{2,1} + \\cdots + b_{1,N}b_{2,N}[/tex] where N = 2,473 is the total number of borrowers.\n\n[inner product]: http://en.wikipedia.org/wiki/Inner_product_space\n\nIt turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to \"normalise\" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the \"size\" of each of the vectors for those books. \n\nMathematically, we will denote the size of a book vector [tex]\\mathbf{b}_i[/tex] as [tex]\\|\\mathbf{b}_i\\| = \\sqrt{\\left<\\mathbf{b}_i,\\mathbf{b}_i\\right>}[/tex]. The similarity between two books then becomes:\n\n
\n[tex]\\displaystyle\n \\text{sim}(\\mathbf{b}_i,\\mathbf{b}_j) \n = \\frac{\\left<\\mathbf{b}_i,\\mathbf{b}_j\\right>}{\\|\\mathbf{b}_i\\|\\|\\mathbf{b}_j\\|}\n[/tex]\n
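To make the normalised measure concrete, here is a minimal Ruby sketch using the standard `matrix` library; the two borrower rows are made up rather than taken from the actual database:

```ruby
# A minimal sketch of sim(b_i, b_j): the inner product of two 0/1 borrower
# rows divided by the product of their sizes (a cosine similarity).
require 'matrix'

b1 = Vector[1, 0, 1, 1, 0]        # made-up borrower row for book 1
b2 = Vector[1, 1, 1, 0, 0]        # made-up borrower row for book 2

shared = b1.inner_product(b2)     # borrowers in common: 2
sim = shared / (b1.r * b2.r)      # Vector#r is the vector's size (norm)
puts sim                          # => 2 / (sqrt(3) * sqrt(3)) = 0.666...
```

Two books with identical borrower rows get similarity 1 and two books with no borrowers in common get 0, which is exactly the normalisation described above.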
\n\nPrincipal Component Analysis\n---------------------------------\nNow that we have a similarity measure between books the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart. \n\nA standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique aims to find a way of reducing the number of coordinates in each book vector in such a way that when the similarity between two books is computed using these smaller vectors it is as close as possible to the original similarity. That is, PCA creates a new table that represents books in terms of only two columns.\n\n[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis\n\n\n\n\n\n\n\n\n\n\n
<table>
<caption>Table 2: A portion of the book table after PCA. The values in the two new columns (PCA IDs) can be used to plot the books.</caption>
<tr><th rowspan="2">Book ID</th><th colspan="2">PCA ID</th></tr>
<tr><th>1</th><th>2</th></tr>
<tr><td>1</td><td>-8.2</td><td>2.3</td></tr>
<tr><td>2</td><td>0.4</td><td>-4.3</td></tr>
<tr><td>3</td><td>-1.3</td><td>-3.7</td></tr>
<tr><td>...</td><td>...</td><td>...</td></tr>
<tr><td>1,616</td><td>2.2</td><td>-5.6</td></tr>
</table>
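A table like this can be produced from Table 1 along the following lines. The reduction for the post itself was done in R (see the Data and Code section below), but the idea can be sketched in a few lines of Ruby; everything here is an illustrative stand-in: the tiny 4-book-by-5-borrower input is invented, and `Matrix#eigensystem` assumes a Ruby standard library recent enough to provide it:

```ruby
# A minimal sketch of the PCA-style reduction: form the normalised
# book-by-book similarity (Gram) matrix, then give each book the two
# coordinates coming from the matrix's two largest eigenvalues, so that
# inner products of the short vectors roughly reproduce the similarities.
require 'matrix'

rows = [[1, 0, 1, 1, 0],          # made-up books-by-borrowers table
        [1, 1, 1, 0, 0],
        [0, 1, 0, 1, 1],
        [1, 1, 0, 0, 1]]
norm = rows.map do |r|
  size = Math.sqrt(r.inject(0) { |s, v| s + v * v })
  r.map { |v| v / size }
end

gram  = Matrix.rows(norm) * Matrix.rows(norm).transpose  # gram[i,j] = sim(b_i, b_j)
eigen = gram.eigensystem

# Keep the two largest eigenvalue/eigenvector pairs
top = eigen.eigenvalues.each_with_index.sort_by { |ev, _| -ev }.first(2)

rows.each_index do |i|
  coords = top.map { |ev, k| (Math.sqrt(ev) * eigen.eigenvectors[k][i]).round(2) }
  puts "book #{i + 1}: #{coords.inspect}"
end
```

Scaling each of the top eigenvectors by the square root of its eigenvalue is what makes the inner products of the two-entry vectors approximate the original similarity matrix on average, in the sense of footnote [^1] below.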
\n\nTable 2 gives an example of the book table after a PCA that reduces the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be as easily interpreted as the borrower columns in Table 1, but the values in the columns are such that similarities computed between books using Table 2 are roughly the same as those computed using Table 1. That is, if [tex]\\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\\mathbf{c}_2=(0.4,-4.3)[/tex] are the vectors\nfor the first two rows of Table 2 then [tex]\\text{sim}(\\mathbf{c}_1,\\mathbf{c}_2)[/tex]\nwould be close to [tex]\\text{sim}(\\mathbf{b}_1,\\mathbf{b}_2)[/tex], the similarity of the\nfirst two rows in Table 1.[^1]\n\n[^1]: Technically, the guarantee of the \"closeness\" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair\'s\nsimilarity is estimated well.\n\nVisualising the Data\n----------------------\nFigure 1 shows a plot of the PCA-reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2]\n\n[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle\'s colour.\n\n
\n\"Plot\n

Figure 1: A PCA plot of all the books in the ACRP database coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.\n

\n\nOne immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. This means it is likely that two voracious readers that frequent the same library will read the same books. This, in turn, will mean the similarity of two books from a library will be higher than books from different libraries as there are very few borrowers that use more than one library.\n\nDrilling Down and Interacting\n---------------------------------\nTo get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners\' and Mechanics\' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data.\n\n[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales\n\nThere are a total of 789 books in the Lambton institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books.\n\nTo make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers. \n\nClicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet]. \n\n[applet]: /inductio/wp-content/public/acrp/\n\n
\n\"Click\n

Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.

\n
\n\nInstructions describing how to use the tool can be found below it. \nIn a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the \"Borrowers\" bar will only show books with at least that many borrowers; and altering the \"Similarity\" bar will only draw lines to books with at least that proportion of books in common.\n\nFuture Work and Distant Reading\n-------------------------------------\nJulieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls \"distant reading\" -- looking at books as objects and how they are read rather than the \"close reading\" of the text of individual books. \n\n[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti \n\nNow that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to questions we might. \n\nInstead we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers, their text as well as other historical and cultural factors about them.\n\nData and Code\n----------------\nFor the technically minded, I have made the code I used to do the visualisation is available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location.\n\n[github]: http://github.com/mreid/acrp/tree/master \n[SQL]: http://en.wikipedia.org/wiki/SQL\n[R]: http://www.r-project.org/\n[Processing]: http://processing.org/\n\nAccess to the data that the code acts upon is not mine to give, so the code is primarily to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here.\n\n','Visualising 19th Century Reading in Australia',0,'A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project.','inherit','open','open','','40-autosave','','','2008-12-09 22:04:10','2008-12-09 12:04:10','',40,'http://conflate.net/inductio/2008/12/40-autosave/',0,'revision','',0),(168,2,'2008-11-17 15:01:20','2008-11-17 05:01:20','I\'ve recently spent a bit of time collaborating with my wife on a research project. Research collaboration by couples is not new but given that Julieanne is a [lecturer in the English program][j] and I\'m part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual. \r\n\r\nThe rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. 
The tool itself is based on a simple application of linear Principal Component Analysis (PCA). I\'ll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool.\r\n\r\n[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php\r\n[csl]: http://csl.cecs.anu.edu.au/\r\n\r\nThe Australian Common Reader Project\r\n--------------------------------------------\r\nOne of Julieanne\'s research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers\' relationship with books and periodicals. \r\n\r\n[acrp]: http://www.api-network.com/hosted/acrp/\r\n\r\nEver on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions. \r\n\r\n[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/\r\n[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin\r\n[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor \r\n\r\nBooks and Borrowers\r\n------------------------\r\nAfter an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. \r\nThe full database contains 99,692 loans of 7,078 different books from 11 libraries by one of the 2,642 people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed one of these books.\r\nThis distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people. \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n
<table>
<caption>Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.</caption>
<tr><th rowspan="2">Book ID</th><th colspan="4">Borrower ID</th></tr>
<tr><th>1</th><th>2</th><th>...</th><th>2,473</th></tr>
<tr><td>1</td><td>1</td><td>0</td><td>...</td><td>1</td></tr>
<tr><td>2</td><td>1</td><td>1</td><td>...</td><td>0</td></tr>
<tr><td>3</td><td>0</td><td>0</td><td>...</td><td>1</td></tr>
<tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr>
<tr><td>1,616</td><td>1</td><td>1</td><td>...</td><td>1</td></tr>
</table>
\r\n\r\nConceptually, each cell in the table contains a 1 if the person associated with the cell\'s column borrowed the book associated with the cell\'s row. If there was no such loan between a given book and borrower the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473.\r\n\r\nBook Similarity\r\n-----------------\r\nThe table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this, a notion of what \"similar books\" means is required.\r\n\r\nMathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\\mathbf{b}_2 = (1,1,\\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex].\r\n\r\nA crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common. That is, the number of columns that have a `1` in the row for book 1 and the row for book 2. \r\n \r\nIn terms of the vector representation, this similarity measure is simply the \"[inner product][]\" between [tex]\\mathbf{b}_1[/tex] and [tex]\\mathbf{b}_2[/tex] and is written [tex]\\left<\\mathbf{b}_1,\\mathbf{b}_2\\right> = b_{1,1}b_{2,1} + \\cdots + b_{1,N}b_{2,N}[/tex] where N = 2,473 is the total number of borrowers.\r\n\r\n[inner product]: http://en.wikipedia.org/wiki/Inner_product_space\r\n\r\nIt turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to \"normalise\" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the \"size\" of each of the vectors for those books. \r\n\r\nMathematically, we will denote the size of a book vector [tex]\\mathbf{b}_i[/tex] as [tex]\\|\\mathbf{b}_i\\| = \\sqrt{\\left<\\mathbf{b}_i,\\mathbf{b}_i\\right>}[/tex]. The similarity between two books then becomes:\r\n
\r\n[tex]\\displaystyle\r\n \\text{sim}(\\mathbf{b}_i,\\mathbf{b}_j) \r\n = \\frac{\\left<\\mathbf{b}_i,\\mathbf{b}_j\\right>}{\\|\\mathbf{b}_i\\|\\|\\mathbf{b}_j\\|}\r\n[/tex]\r\n
\r\n\r\nPrincipal Component Analysis\r\n---------------------------------\r\nNow that we have a similarity measure between books the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart. \r\n\r\nA standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique aims to find a way of reducing the number of coordinates in each book vector in such a way that when the similarity between two books is computed using these smaller vectors it is as close as possible to the original similarity. That is, PCA creates a new table that represents books in terms of only two columns.\r\n\r\n[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n
<table>
<caption>Table 2: A portion of the book table after PCA. The values in the two new columns (PCA IDs) can be used to plot the books.</caption>
<tr><th rowspan="2">Book ID</th><th colspan="2">PCA ID</th></tr>
<tr><th>1</th><th>2</th></tr>
<tr><td>1</td><td>-8.2</td><td>2.3</td></tr>
<tr><td>2</td><td>0.4</td><td>-4.3</td></tr>
<tr><td>3</td><td>-1.3</td><td>-3.7</td></tr>
<tr><td>...</td><td>...</td><td>...</td></tr>
<tr><td>1,616</td><td>2.2</td><td>-5.6</td></tr>
</table>
\r\n\r\nTable 2 gives an example of the book table after a PCA that reduces the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be as easily interpreted as the borrower columns in Table 1, but the values in the columns are such that similarities computed between books using Table 2 are roughly the same as those computed using Table 1. That is, if [tex]\\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\\mathbf{c}_2=(0.4,-4.3)[/tex] are the vectors\r\nfor the first two rows of Table 2 then [tex]\\text{sim}(\\mathbf{c}_1,\\mathbf{c}_2)[/tex]\r\nwould be close to [tex]\\text{sim}(\\mathbf{b}_1,\\mathbf{b}_2)[/tex], the similarity of the\r\nfirst two rows in Table 1.[^1]\r\n\r\n[^1]: Technically, the guarantee of the \"closeness\" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair\'s\r\nsimilarity is estimated well.\r\n\r\nVisualising the Data\r\n----------------------\r\nFigure 1 shows a plot of the PCA-reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2]\r\n\r\n[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle\'s colour.\r\n\r\n
\r\n\"Plot\r\n

Figure 1: A PCA plot of all the books in the ACRP database coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.\r\n

\r\n\r\nOne immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. This means it is likely that two voracious readers that frequent the same library will read the same books. This, in turn, will mean the similarity of two books from a library will be higher than books from different libraries as there are very few borrowers that use more than one library.\r\n\r\nDrilling Down and Interacting\r\n---------------------------------\r\nTo get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners\' and Mechanics\' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data.\r\n\r\n[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales\r\n\r\nThere are a total of 789 books in the Lambton institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books.\r\n\r\nTo make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers. \r\n\r\nClicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet]. \r\n\r\n[applet]: /inductio/wp-content/public/acrp/\r\n\r\n
\r\n\"Click\r\n

Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.

\r\n
\r\n\r\nInstructions describing how to use the tool can be found below it. \r\nIn a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the \"Borrowers\" bar will only show books with at least that many borrowers; and altering the \"Similarity\" bar will only draw lines to books with at least that proportion of books in common.\r\n\r\nFuture Work and Distant Reading\r\n-------------------------------------\r\nJulieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls \"distant reading\" -- looking at books as objects and how they are read rather than the \"close reading\" of the text of individual books. \r\n\r\n[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti \r\n\r\nNow that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to questions we might. \r\n\r\nInstead we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers, their text as well as other historical and cultural factors about them.\r\n\r\nData and Code\r\n----------------\r\nFor the technically minded, I have made the code I used to do the visualisation is available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location.\r\n\r\n[github]: http://github.com/mreid/acrp/tree/master \r\n[SQL]: http://en.wikipedia.org/wiki/SQL\r\n[R]: http://www.r-project.org/\r\n[Processing]: http://processing.org/\r\n\r\nAccess to the data that the code acts upon is not mine to give, so the code is primarily to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here.\r\n\r\n','Visualising 19th Century Reading in Australia',0,'A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project.','inherit','open','open','','40-revision-2','','','2008-11-17 15:01:20','2008-11-17 05:01:20','',40,'http://conflate.net/inductio/2008/11/40-revision-2/',0,'revision','',0),(169,2,'2008-12-09 22:04:46','2008-12-09 12:04:46','_Update - 9 Dec 2008_: Julieanne and I presented a much improved version of this visualisation at the [Resourceful Reading][] conference held at the University of Sydney on the 5th of December. Those looking for the application I presented there: stay tuned, I will post the updated version here shortly.\r\n\r\n[Resourceful Reading]: http://conferences.arts.usyd.edu.au/index.php?cf=20\r\n\r\nI\'ve recently spent a bit of time collaborating with my wife on a research project. 
Research collaboration by couples is not new but given that Julieanne is a [lecturer in the English program][j] and I\'m part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual. \r\n\r\nThe rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. The tool itself is based on a simple application of linear Principal Component Analysis (PCA). I\'ll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool.\r\n\r\n[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php\r\n[csl]: http://csl.cecs.anu.edu.au/\r\n\r\nThe Australian Common Reader Project\r\n--------------------------------------------\r\nOne of Julieanne\'s research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers\' relationship with books and periodicals. \r\n\r\n[acrp]: http://www.api-network.com/hosted/acrp/\r\n\r\nEver on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions. \r\n\r\n[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/\r\n[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin\r\n[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor \r\n\r\nBooks and Borrowers\r\n------------------------\r\nAfter an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. \r\nThe full database contains 99,692 loans of 7,078 different books from 11 libraries by one of the 2,642 people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed one of these books.\r\nThis distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people. \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n
<table>
<caption>Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.</caption>
<tr><th rowspan="2">Book ID</th><th colspan="4">Borrower ID</th></tr>
<tr><th>1</th><th>2</th><th>...</th><th>2,473</th></tr>
<tr><td>1</td><td>1</td><td>0</td><td>...</td><td>1</td></tr>
<tr><td>2</td><td>1</td><td>1</td><td>...</td><td>0</td></tr>
<tr><td>3</td><td>0</td><td>0</td><td>...</td><td>1</td></tr>
<tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr>
<tr><td>1,616</td><td>1</td><td>1</td><td>...</td><td>1</td></tr>
</table>
\r\n\r\nConceptually, each cell in the table contains a 1 if the person associated with the cell\'s column borrowed the book associated with the cell\'s row. If there was no such loan between a given book and borrower the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473.\r\n\r\nBook Similarity\r\n-----------------\r\nThe table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this, a notion of what \"similar books\" means is required.\r\n\r\nMathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\\mathbf{b}_2 = (1,1,\\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex].\r\n\r\nA crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common. That is, the number of columns that have a `1` in the row for book 1 and the row for book 2. \r\n \r\nIn terms of the vector representation, this similarity measure is simply the \"[inner product][]\" between [tex]\\mathbf{b}_1[/tex] and [tex]\\mathbf{b}_2[/tex] and is written [tex]\\left<\\mathbf{b}_1,\\mathbf{b}_2\\right> = b_{1,1}b_{2,1} + \\cdots + b_{1,N}b_{2,N}[/tex] where N = 2,473 is the total number of borrowers.\r\n\r\n[inner product]: http://en.wikipedia.org/wiki/Inner_product_space\r\n\r\nIt turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to \"normalise\" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the \"size\" of each of the vectors for those books. \r\n\r\nMathematically, we will denote the size of a book vector [tex]\\mathbf{b}_i[/tex] as [tex]\\|\\mathbf{b}_i\\| = \\sqrt{\\left<\\mathbf{b}_i,\\mathbf{b}_i\\right>}[/tex]. The similarity between two books then becomes:\r\n
\r\n[tex]\\displaystyle\r\n \\text{sim}(\\mathbf{b}_i,\\mathbf{b}_j) \r\n = \\frac{\\left<\\mathbf{b}_i,\\mathbf{b}_j\\right>}{\\|\\mathbf{b}_i\\|\\|\\mathbf{b}_j\\|}\r\n[/tex]\r\n
\r\n\r\nPrincipal Component Analysis\r\n---------------------------------\r\nNow that we have a similarity measure between books the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart. \r\n\r\nA standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique aims to find a way of reducing the number of coordinates in each book vector in such a way that when the similarity between two books is computed using these smaller vectors it is as close as possible to the original similarity. That is, PCA creates a new table that represents books in terms of only two columns.\r\n\r\n[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n
<table>
<caption>Table 2: A portion of the book table after PCA. The values in the two new columns (PCA IDs) can be used to plot the books.</caption>
<tr><th rowspan="2">Book ID</th><th colspan="2">PCA ID</th></tr>
<tr><th>1</th><th>2</th></tr>
<tr><td>1</td><td>-8.2</td><td>2.3</td></tr>
<tr><td>2</td><td>0.4</td><td>-4.3</td></tr>
<tr><td>3</td><td>-1.3</td><td>-3.7</td></tr>
<tr><td>...</td><td>...</td><td>...</td></tr>
<tr><td>1,616</td><td>2.2</td><td>-5.6</td></tr>
</table>
\r\n\r\nTable 2 gives an example of the book table after a PCA that reduces the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be as easily interpreted as the borrower columns in Table 1, but the values in the columns are such that similarities computed between books using Table 2 are roughly the same as those computed using Table 1. That is, if [tex]\\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\\mathbf{c}_2=(0.4,-4.3)[/tex] are the vectors\r\nfor the first two rows of Table 2 then [tex]\\text{sim}(\\mathbf{c}_1,\\mathbf{c}_2)[/tex]\r\nwould be close to [tex]\\text{sim}(\\mathbf{b}_1,\\mathbf{b}_2)[/tex], the similarity of the\r\nfirst two rows in Table 1.[^1]\r\n\r\n[^1]: Technically, the guarantee of the \"closeness\" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair\'s\r\nsimilarity is estimated well.\r\n\r\nVisualising the Data\r\n----------------------\r\nFigure 1 shows a plot of the PCA-reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2]\r\n\r\n[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle\'s colour.\r\n\r\n
\r\n\"Plot\r\n

Figure 1: A PCA plot of all the books in the ACRP database coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.\r\n

\r\n\r\nOne immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. This means it is likely that two voracious readers that frequent the same library will read the same books. This, in turn, will mean the similarity of two books from a library will be higher than books from different libraries as there are very few borrowers that use more than one library.\r\n\r\nDrilling Down and Interacting\r\n---------------------------------\r\nTo get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners\' and Mechanics\' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data.\r\n\r\n[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales\r\n\r\nThere are a total of 789 books in the Lambton institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books.\r\n\r\nTo make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers. \r\n\r\nClicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet]. \r\n\r\n[applet]: /inductio/wp-content/public/acrp/\r\n\r\n
\r\n\"Click\r\n

Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.

\r\n
\r\n\r\nInstructions describing how to use the tool can be found below it. \r\nIn a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the \"Borrowers\" bar will only show books with at least that many borrowers; and altering the \"Similarity\" bar will only draw lines to books with at least that proportion of books in common.\r\n\r\nFuture Work and Distant Reading\r\n-------------------------------------\r\nJulieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls \"distant reading\" -- looking at books as objects and how they are read rather than the \"close reading\" of the text of individual books. \r\n\r\n[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti \r\n\r\nNow that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to questions we might. \r\n\r\nInstead we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers, their text as well as other historical and cultural factors about them.\r\n\r\nData and Code\r\n----------------\r\nFor the technically minded, I have made the code I used to do the visualisation is available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location.\r\n\r\n[github]: http://github.com/mreid/acrp/tree/master \r\n[SQL]: http://en.wikipedia.org/wiki/SQL\r\n[R]: http://www.r-project.org/\r\n[Processing]: http://processing.org/\r\n\r\nAccess to the data that the code acts upon is not mine to give, so the code is primarily to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here.\r\n\r\n','Visualising 19th Century Reading in Australia',0,'A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project.','inherit','open','open','','40-revision-3','','','2008-12-09 22:04:46','2008-12-09 12:04:46','',40,'http://conflate.net/inductio/2008/12/40-revision-3/',0,'revision','',0),(170,2,'2008-12-09 22:05:28','2008-12-09 12:05:28','----\r\n_Update - 9 Dec 2008_: Julieanne and I presented a much improved version of this visualisation at the [Resourceful Reading][] conference held at the University of Sydney on the 5th of December. Those looking for the application I presented there: stay tuned, I will post the updated version here shortly.\r\n----\r\n[Resourceful Reading]: http://conferences.arts.usyd.edu.au/index.php?cf=20\r\n\r\nI\'ve recently spent a bit of time collaborating with my wife on a research project. 
Research collaboration by couples is not new but given that Julieanne is a [lecturer in the English program][j] and I\'m part of the [computer sciences laboratory][csl], this piece of joint research is a little unusual. \r\n\r\nThe rest of this post describes the intersection of our interests --- data from the Australian Common Reader Project --- and the visualisation tool I wrote to explore it. The tool itself is based on a simple application of linear Principal Component Analysis (PCA). I\'ll attempt to explain it here in such a way that readers who have not studied this technique might still be able to make use of the tool.\r\n\r\n[j]: http://cass.anu.edu.au/humanities/school_sites/staff.php\r\n[csl]: http://csl.cecs.anu.edu.au/\r\n\r\nThe Australian Common Reader Project\r\n--------------------------------------------\r\nOne of Julieanne\'s research interests is the Australian audience of the late 19th and early 20th centuries. As part of her PhD, she made use of an amazing database that is part of the [Australian Common Reader Project][acrp] --- a project that has collected and entered library borrowing records from Australian libraries along with annotations about when books were borrowed, their genres, borrower occupations, author information, etc. This sort of information makes it possible for Australian literature and cultural studies academics to ask empirical questions about Australian readers\' relationship with books and periodicals. \r\n\r\n[acrp]: http://www.api-network.com/hosted/acrp/\r\n\r\nEver on the lookout for [interesting data sets][meta-index], I suggested that we apply some basic data analysis tools to the database to see what kind of relationships between books and borrowers we might find. When asked if we could have access to the database, [Tim Dolin][] graciously agreed and enlisted [Jason Ensor][] to help with our technical questions. \r\n\r\n[meta-index]: http://conflate.net/inductio/2008/02/a-meta-index-of-data-sets/\r\n[tim dolin]: http://www.humanities.curtin.edu.au/staff.cfm/t.dolin\r\n[jason ensor]: http://www.humanities.curtin.edu.au/staff.cfm/j.ensor \r\n\r\nBooks and Borrowers\r\n------------------------\r\nAfter an initial inspection, my first thought was to try to visualise the similarity of the books in the database as measured by the number of borrowers they have in common. \r\nThe full database contains 99,692 loans of 7,078 different books from 11 libraries by one of the 2,642 people. To make this more manageable, I focused on books that had at least 20 different borrowers and only considered people who had borrowed one of these books.\r\nThis distilled the database down to a simple table with each row representing one of 1,616 books and each column representing one of 2,473 people. \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n
<table>
<caption>Table 1: A portion of the book and borrower table. A 1 indicates that the borrower (column) borrowed the book (row) at least once. A 0 indicates that the borrower never borrowed the book.</caption>
<tr><th rowspan="2">Book ID</th><th colspan="4">Borrower ID</th></tr>
<tr><th>1</th><th>2</th><th>...</th><th>2,473</th></tr>
<tr><td>1</td><td>1</td><td>0</td><td>...</td><td>1</td></tr>
<tr><td>2</td><td>1</td><td>1</td><td>...</td><td>0</td></tr>
<tr><td>3</td><td>0</td><td>0</td><td>...</td><td>1</td></tr>
<tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr>
<tr><td>1,616</td><td>1</td><td>1</td><td>...</td><td>1</td></tr>
</table>
\r\n\r\nConceptually, each cell in the table contains a 1 if the person associated with the cell\'s column borrowed the book associated with the cell\'s row. If there was no such loan between a given book and borrower the corresponding cell contains a 0. For example, Table 1 shows that book 2 was borrowed (at least once) by borrower 1 but never by borrower 2,473.\r\n\r\nBook Similarity\r\n-----------------\r\nThe table view of the books and their borrowers does not readily lend itself to insight. The approach we took to get a better picture of this information was to plot each book as a point on a graph so that similar books are placed closer together than dissimilar books. To do this, a notion of what \"similar books\" means is required.\r\n\r\nMathematically, row [tex]i[/tex] of Table 1 can be represented as a vector [tex]\\mathbf{b}_i[/tex] of 1s and 0s. The value of the cell in the [tex]j[/tex]th column of that row will be denoted [tex]b_{i,j}[/tex]. For example, the 2nd row in the table can be written as the vector [tex]\\mathbf{b}_2 = (1,1,\\ldots,0)[/tex] and the value in its first column is [tex]b_{2,1} = 1[/tex].\r\n\r\nA crude measure of the similarity between book 1 and book 2 can be computed from this table by counting how many borrowers they have in common. That is, the number of columns that have a `1` in the row for book 1 and the row for book 2. \r\n \r\nIn terms of the vector representation, this similarity measure is simply the \"[inner product][]\" between [tex]\\mathbf{b}_1[/tex] and [tex]\\mathbf{b}_2[/tex] and is written [tex]\\left<\\mathbf{b}_1,\\mathbf{b}_2\\right> = b_{1,1}b_{2,1} + \\cdots + b_{1,N}b_{2,N}[/tex] where N = 2,473 is the total number of borrowers.\r\n\r\n[inner product]: http://en.wikipedia.org/wiki/Inner_product_space\r\n\r\nIt turns out that simply counting the number of borrowers two books have in common is not a great measure of similarity. The problem is that two very popular books, each with 100 borrowers, that only share 10% of their borrowers would be considered as similar as two books, each with 10 readers, that share all of their borrowers. An easy way to correct this is to \"normalise\" the borrower counts by making sure the similarity of a book with itself is always equal to 1. A common way of doing this is by dividing the inner product of two books by the \"size\" of each of the vectors for those books. \r\n\r\nMathematically, we will denote the size of a book vector [tex]\\mathbf{b}_i[/tex] as [tex]\\|\\mathbf{b}_i\\| = \\sqrt{\\left<\\mathbf{b}_i,\\mathbf{b}_i\\right>}[/tex]. The similarity between two books then becomes:\r\n
\r\n[tex]\displaystyle\r\n \text{sim}(\mathbf{b}_i,\mathbf{b}_j)\r\n = \frac{\left<\mathbf{b}_i, \mathbf{b}_j\right>}{|\mathbf{b}_i||\mathbf{b}_j|}\r\n[/tex]\r\n
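To see the effect of the normalisation on the earlier example: the two popular books share 10 borrowers and each has size [tex]\sqrt{100} = 10[/tex], giving a similarity of [tex]10/(10 \times 10) = 0.1[/tex], whereas the two 10-reader books that share all of their borrowers score [tex]10/(\sqrt{10}\sqrt{10}) = 1[/tex]. Below is a minimal sketch of this similarity computation in R (one of the languages used for this project); the tiny 0/1 matrix here is made up purely for illustration:

    # Toy version of Table 1: rows are books, columns are borrowers.
    # These 0/1 values are made up purely for illustration.
    books <- rbind(
      book1 = c(1, 0, 1, 1, 0),
      book2 = c(1, 1, 0, 1, 0),
      book3 = c(0, 0, 1, 0, 1)
    )

    # Normalised similarity: the inner product of two book vectors
    # divided by the product of their sizes.
    sim <- function(bi, bj) {
      sum(bi * bj) / (sqrt(sum(bi * bi)) * sqrt(sum(bj * bj)))
    }

    sim(books["book1", ], books["book2", ])  # 2 / (sqrt(3) * sqrt(3)) = 0.67
    sim(books["book1", ], books["book1", ])  # every book has similarity 1 with itself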
\r\n\r\nPrincipal Component Analysis\r\n---------------------------------\r\nNow that we have a similarity measure between books, the idea is to create a plot of points -- one per book -- so that similar books are placed close together and dissimilar books are kept far apart. \r\n\r\nA standard technique for doing this is [Principal Component Analysis][pca]. Intuitively, this technique aims to find a way of reducing the number of coordinates in each book vector in such a way that when the similarity between two books is computed using these smaller vectors it is as close as possible to the original similarity. That is, PCA creates a new table that represents each book in terms of only two columns.\r\n\r\n[pca]: http://en.wikipedia.org/wiki/Principal_components_analysis\r\n\r\n
Table 2: A portion of the book table after PCA. The values in the two new columns (PCA IDs) can be used to plot the books.\r\n
Book ID | PCA 1 | PCA 2
------- | ----- | -----
1       | -8.2  | 2.3
2       | 0.4   | -4.3
3       | -1.3  | -3.7
...     | ...   | ...
1,616   | 2.2   | -5.6
\r\n\r\nTable 2 gives an example of the book table after a PCA that reduces the book vectors (rows) from 2,473 entries to two. The PCA columns cannot be as easily interpreted as the borrower columns in Table 1, but their values are such that similarities computed between rows of Table 2 are roughly the same as those computed from the corresponding rows of Table 1. That is, if [tex]\mathbf{c}_1 = (-8.2,2.3)[/tex] and [tex]\mathbf{c}_2 = (0.4,-4.3)[/tex] are the vectors for the first two rows of Table 2 then [tex]\text{sim}(\mathbf{c}_1,\mathbf{c}_2)[/tex] would be close to [tex]\text{sim}(\mathbf{b}_1,\mathbf{b}_2)[/tex], the similarity of the first two rows in Table 1.[^1]\r\n\r\n[^1]: Technically, the guarantee of the \"closeness\" of the similarity measures only holds on average, that is, over all possible pairs of books. There is no guarantee any particular pair\'s similarity is estimated well.\r\n\r\nVisualising the Data\r\n----------------------\r\nFigure 1 shows a plot of the PCA-reduced book data. Each circle represents one of the 1,616 books, plotted according to the coordinates in a table like Table 2. The size of each circle indicates how many borrowers each book had and its colour indicates which library the book belongs to.[^2]\r\n\r\n[^2]: A book can belong to more than one library. In this case one library is chosen at random to determine a circle\'s colour.
[Image: PCA plot of the 1,616 books in the ACRP database]
Figure 1: A PCA plot of all the books in the ACRP database coloured according to which library they belong to. The size of each circle indicates the number of borrowers of the corresponding book.\r\n
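For readers curious about the mechanics, the reduction that produces coordinates like those in Table 2 can be sketched in a few lines of R. This is an illustration of the idea rather than the exact script I ran: it assumes the full 0/1 book-by-borrower matrix from Table 1 is already loaded as a matrix called `books`, and details such as centring or scaling the rows may differ from the actual preprocessing.

    # Reduce each book (row) from 2,473 borrower columns to two
    # principal component coordinates, then plot one point per book.
    pca <- prcomp(books)
    coords <- pca$x[, 1:2]   # the two new columns, as in Table 2

    plot(coords, xlab = "PC 1", ylab = "PC 2")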
\r\n\r\nOne immediate observation is that books are clustered according to which library they belong to. This is not too surprising since the books in a library limit what borrowers from that library can read. This means it is likely that two voracious readers who frequent the same library will read many of the same books. This, in turn, means the similarity of two books from the same library will be higher than that of books from different libraries, as there are very few borrowers who use more than one library.\r\n\r\nDrilling Down and Interacting\r\n---------------------------------\r\nTo get a better picture of the data, we decided to focus on books from a single library to avoid this clustering. The library we focused on was the [Lambton][] Miners\' and Mechanics\' Institute in New South Wales. This library had the largest number of loans (20,253) and so was most likely to have interesting similarity data.\r\n\r\n[lambton]: http://en.wikipedia.org/wiki/Lambton,_New_South_Wales\r\n\r\nThere are a total of 789 books in the Lambton Institute and 469 borrowers of those books. A separate PCA reduction was performed on this restricted part of the database to create a plot of only the Lambton books.\r\n\r\nTo make it easier to explore this data, I wrote a simple tool that allows a viewer to interact with the PCA plot. A screenshot from this tool is shown in Figure 2. Once again, larger circles represent books with a larger number of borrowers. \r\n\r\nClicking on the figure will open a new window and, after a short delay, the tool will run. The same page can also be accessed from [this link][applet]. \r\n\r\n[applet]: /inductio/wp-content/public/acrp/\r\n\r\n
[Image: screenshot of the interactive visualisation tool]
Figure 2: A screenshot of the ACRP visualisation tool showing books from the Lambton Institute. Click the image to run the tool in a new window.
\r\n
\r\n\r\nInstructions describing how to use the tool can be found below it. \r\nIn a nutshell: hovering over a circle will reveal the title of the book corresponding to that circle; clicking on a circle will draw lines to its most similar neighbours; altering the \"Borrowers\" bar will only show books with at least that many borrowers; and altering the \"Similarity\" bar will only draw lines to books with at least that proportion of borrowers in common.\r\n\r\nFuture Work and Distant Reading\r\n-------------------------------------\r\nJulieanne and I are still at the early stages of our research using the ACRP database. The use of PCA for visualisation was a first step in our pursuit of what [Franco Moretti][] calls \"distant reading\" -- looking at books as objects and how they are read rather than the \"close reading\" of the text of individual books. \r\n\r\n[Franco Moretti]: http://en.wikipedia.org/wiki/Franco_Moretti \r\n\r\nNow that we have this tool, we are able to quickly explore relationships between these books based on the reading habits of Australians at the turn of the century. Of course, there are many caveats that apply to any patterns we might see in these plots. For instance, the similarity between books is only based on the habits of a small number of readers and will be influenced by the peculiarities of the libraries and the books they choose to buy. For this reason, these plots are not intended to provide conclusive answers to questions we might ask. \r\n\r\nInstead we hope that exploring the ACRP database in this way will lead us to interesting questions about particular pairs or groups of books that can be followed up by a more thorough analysis of their readers and their text, as well as other historical and cultural factors about them.\r\n\r\nData and Code\r\n----------------\r\nFor the technically minded, I have made the code I used to do the visualisation available on [GitHub][]. It is a combination of [SQL][] for data preprocessing, [R][] for the PCA reduction and [Processing][] for creating the visualisation tool. You will also find a number of images and some notes at the same location.\r\n\r\n[github]: http://github.com/mreid/acrp/tree/master \r\n[SQL]: http://en.wikipedia.org/wiki/SQL\r\n[R]: http://www.r-project.org/\r\n[Processing]: http://processing.org/\r\n\r\nAccess to the data that the code acts upon is not mine to give, so the code is primarily to show how I did the visualisation rather than a way to let others analyse the data. If the founders of the [ACRP][] project decide to release the data to the public at a later date I will link to it from here.\r\n\r\n','Visualising 19th Century Reading in Australia',0,'A description of a visualisation of some 19th century Australian borrowing records from the Australian Common Readers Project.','inherit','open','open','','40-revision-4','','','2008-12-09 22:05:28','2008-12-09 12:05:28','',40,'http://conflate.net/inductio/2008/12/40-revision-4/',0,'revision','',0),(171,2,'0000-00-00 00:00:00','0000-00-00 00:00:00','I started using the social, \"micro-blogging\" service [Twitter][] in February this year simply because I had been seeing so much commentary about it — both good and bad. 
Since then, I\'ve posted [800+ updates], amassed over 100 [followers][] and [follow][] nearly that many myself.\r\n\r\n[twitter]: http://twitter.com/\r\n[follow]: http://twitter.com/mdreid/friends\r\n[followers]: http://twitter.com/mdreid/followers\r\n\r\nWhat has surprised me about Twitter is how many people I have found on there who are active, or at least interested, in machine learning and statistics.\r\n\r\nCollection of people research in or around statistics and machine learning on twitter:\r\n\r\n@arthegall\r\n@mja\r\n@nealrichter\r\n@dwf\r\n@brendan642\r\n@dtunkelang\r\n@ealdent\r\n@mikiobraun\r\n@lemire\r\n@SoloGen\r\n@markusweimer\r\n@pongba\r\n@moorejh\r\n@peteskomoroch\r\n@smolix\r\n@DataJunkie\r\n@filterfish\r\n@ansate','ML and Stats People on Twitter ',0,'','draft','open','open','','','','','2008-12-19 14:11:45','2008-12-19 04:11:45','',0,'http://conflate.net/inductio/?p=171',0,'post','',0),(172,2,'2008-12-18 07:45:40','2008-12-17 21:45:40','Collection of people research in or around statistics and machine learning on twitter:\n\n@arthegall\n@mja\n@nealrichter\n@dwf\n@brendan642\n@dtunkelang\n@ealdent\n@mikiobraun\n@lemire\n@SoloGen\n@markusweimer\n@pongba\n@moorejh\n@peteskomoroch\n@smolix\n@','ML and Stats People on Twitter ',0,'','inherit','open','open','','171-revision','','','2008-12-18 07:45:40','2008-12-17 21:45:40','',171,'http://conflate.net/inductio/2008/12/171-revision/',0,'revision','',0),(173,2,'2008-12-19 12:21:26','2008-12-19 02:21:26','I started using the social, \"micro-blogging\" service [Twitter][] in February this year simply because I had been seeing so much commentary about it — both good and bad. Since then, I\'ve posted [800+ updates], amassed over 100 followers and follow \n\n[twitter]: http://twitter.com/\n\nCollection of people research in or around statistics and machine learning on twitter:\n\n@arthegall\n@mja\n@nealrichter\n@dwf\n@brendan642\n@dtunkelang\n@ealdent\n@mikiobraun\n@lemire\n@SoloGen\n@markusweimer\n@pongba\n@moorejh\n@peteskomoroch\n@smolix\n@DataJunkie\n@filterfish\n@ansate','ML and Stats People on Twitter ',0,'','inherit','open','open','','171-revision-2','','','2008-12-19 12:21:26','2008-12-19 02:21:26','',171,'http://conflate.net/inductio/2008/12/171-revision-2/',0,'revision','',0),(174,2,'2008-12-19 12:55:29','2008-12-19 02:55:29','I started using the social, \"micro-blogging\" service [Twitter][] in February this year simply because I had been seeing so much commentary about it — both good and bad. 
Since then, I\'ve posted [800+ updates], amassed over 100 [followers][] and [follow][] nearly that many myself.\n\n[twitter]: http://twitter.com/\n[follow]: http://twitter.com/mdreid/friends\n[followers]: http://twitter.com/mdreid/followers\n\nWhat has surprised me about Twitter is how many people I have found on there who are active, or at least interested, in machine learning and statistics.\n\nCollection of people research in or around statistics and machine learning on twitter:\n\n@arthegall\n@mja\n@nealrichter\n@dwf\n@brendan642\n@dtunkelang\n@ealdent\n@mikiobraun\n@lemire\n@SoloGen\n@markusweimer\n@pongba\n@moorejh\n@peteskomoroch\n@smolix\n@DataJunkie\n@filterfish\n@ansate','ML and Stats People on Twitter ',0,'','inherit','open','open','','171-revision-3','','','2008-12-19 12:55:29','2008-12-19 02:55:29','',171,'http://conflate.net/inductio/2008/12/171-revision-3/',0,'revision','',0); -/*!40000 ALTER TABLE `wp_posts` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_term_relationships` --- - -DROP TABLE IF EXISTS `wp_term_relationships`; -CREATE TABLE `wp_term_relationships` ( - `object_id` bigint(20) NOT NULL default '0', - `term_taxonomy_id` bigint(20) NOT NULL default '0', - `term_order` int(11) NOT NULL default '0', - PRIMARY KEY (`object_id`,`term_taxonomy_id`), - KEY `term_taxonomy_id` (`term_taxonomy_id`) -) ENGINE=MyISAM DEFAULT CHARSET=utf8; - --- --- Dumping data for table `wp_term_relationships` --- - -LOCK TABLES `wp_term_relationships` WRITE; -/*!40000 ALTER TABLE `wp_term_relationships` DISABLE KEYS */; -INSERT INTO `wp_term_relationships` VALUES (13,2,0),(14,2,0),(12,2,0),(37,33,0),(2,1,0),(10,2,0),(8,2,0),(11,2,0),(15,2,0),(12,1,0),(37,54,0),(16,2,0),(17,2,0),(12,8,0),(12,9,0),(18,2,0),(19,2,0),(15,11,0),(16,16,0),(15,12,0),(15,13,0),(15,14,0),(15,15,0),(48,1,0),(40,17,0),(41,1,0),(38,33,0),(22,2,0),(23,2,0),(18,1,0),(18,25,0),(18,26,0),(18,27,0),(28,51,0),(20,15,0),(20,28,0),(20,29,0),(20,30,0),(20,31,0),(20,32,0),(21,33,0),(21,15,0),(21,34,0),(21,35,0),(21,36,0),(22,28,0),(22,29,0),(39,1,0),(20,2,0),(23,15,0),(24,33,0),(23,34,0),(23,38,0),(25,28,0),(25,25,0),(26,46,0),(21,2,0),(25,41,0),(25,42,0),(28,17,0),(25,43,0),(26,17,0),(28,46,0),(27,47,0),(27,28,0),(27,33,0),(27,15,0),(27,48,0),(27,49,0),(47,54,0),(27,50,0),(28,52,0),(28,53,0),(29,54,0),(30,1,0),(31,1,0),(32,1,0),(37,55,0),(33,33,0),(36,1,0),(37,56,0),(37,57,0),(37,58,0),(42,16,0),(43,1,0),(40,54,0),(40,33,0),(40,59,0),(40,60,0),(40,61,0),(40,56,0),(24,2,0),(44,28,0),(25,2,0),(45,16,0),(26,2,0),(46,28,0),(47,62,0),(47,63,0),(47,64,0),(49,1,0),(46,65,0),(46,66,0),(46,67,0),(46,68,0),(50,1,0),(51,1,0),(52,1,0),(53,1,0),(54,1,0),(55,1,0),(27,2,0),(56,1,0),(57,1,0),(58,1,0),(59,1,0),(60,1,0),(61,1,0),(62,1,0),(63,1,0),(64,1,0),(65,1,0),(66,1,0),(67,1,0),(68,1,0),(69,1,0),(70,1,0),(71,1,0),(72,1,0),(73,1,0),(74,1,0),(75,1,0),(76,1,0),(77,1,0),(78,1,0),(79,1,0),(80,1,0),(81,1,0),(82,1,0),(83,1,0),(84,1,0),(85,1,0),(86,1,0),(87,1,0),(88,1,0),(89,1,0),(90,1,0),(91,1,0),(92,1,0),(93,1,0),(94,1,0),(95,1,0),(96,1,0),(97,1,0),(98,1,0),(28,2,0),(100,1,0),(99,69,0),(101,1,0),(102,1,0),(103,1,0),(104,1,0),(105,1,0),(99,16,0),(99,33,0),(99,15,0),(99,70,0),(99,71,0),(106,1,0),(107,1,0),(110,1,0),(109,33,0),(111,1,0),(112,1,0),(113,1,0),(114,1,0),(115,1,0),(116,1,0),(118,1,0),(117,72,0),(117,33,0),(117,73,0),(117,74,0),(117,11,0),(119,1,0),(111,75,0),(120,1,0),(121,1,0),(122,1,0),(123,1,0),(126,1,0),(124,33,0),(125,1,0),(124,72,0),(127,1,0),(128,1,0),(129,1,0),(130,1,0),(131,1,0),(133,1,0),(132,1,0),(134,
33,0),(135,1,0),(136,1,0),(134,76,0),(134,77,0),(134,78,0),(137,1,0),(138,1,0),(139,1,0),(29,2,0),(141,1,0),(140,1,0),(142,1,0),(144,1,0),(143,34,0),(145,1,0),(146,1,0),(147,1,0),(148,1,0),(149,1,0),(150,1,0),(151,1,0),(152,1,0),(153,1,0),(154,1,0),(155,1,0),(156,1,0),(157,1,0),(143,54,0),(143,26,0),(143,25,0),(158,1,0),(159,1,0),(160,1,0),(161,1,0),(162,1,0),(163,1,0),(164,79,0),(165,1,0),(164,80,0),(166,1,0),(164,28,0),(167,1,0),(168,1,0),(169,1,0),(170,1,0),(171,28,0),(172,1,0),(173,1,0),(174,1,0); -/*!40000 ALTER TABLE `wp_term_relationships` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_term_taxonomy` --- - -DROP TABLE IF EXISTS `wp_term_taxonomy`; -CREATE TABLE `wp_term_taxonomy` ( - `term_taxonomy_id` bigint(20) NOT NULL auto_increment, - `term_id` bigint(20) NOT NULL default '0', - `taxonomy` varchar(32) NOT NULL default '', - `description` longtext NOT NULL, - `parent` bigint(20) NOT NULL default '0', - `count` bigint(20) NOT NULL default '0', - PRIMARY KEY (`term_taxonomy_id`), - UNIQUE KEY `term_id_taxonomy` (`term_id`,`taxonomy`) -) ENGINE=MyISAM AUTO_INCREMENT=81 DEFAULT CHARSET=utf8; - --- --- Dumping data for table `wp_term_taxonomy` --- - -LOCK TABLES `wp_term_taxonomy` WRITE; -/*!40000 ALTER TABLE `wp_term_taxonomy` DISABLE KEYS */; -INSERT INTO `wp_term_taxonomy` VALUES (1,1,'category','Posts about machine learning in general.',0,4),(2,2,'link_category','',0,21),(59,58,'post_tag','',0,1),(63,62,'post_tag','',0,0),(64,63,'post_tag','',0,0),(8,8,'post_tag','',0,1),(9,9,'post_tag','',0,1),(11,11,'post_tag','',0,1),(12,12,'post_tag','',0,1),(13,13,'post_tag','',0,1),(14,14,'post_tag','',0,1),(15,15,'category','Analysis of problems and algorithms',0,6),(16,16,'category','Of inference and induction',0,4),(17,17,'category','Uses of machine learning in the real world',0,3),(61,60,'post_tag','',0,1),(25,25,'post_tag','',0,3),(26,26,'post_tag','',0,2),(27,27,'post_tag','',0,1),(28,28,'category','Researchers, conferences, journals and institutions',0,6),(29,29,'post_tag','',0,1),(30,30,'post_tag','',0,1),(31,31,'post_tag','',0,1),(32,32,'post_tag','',0,1),(33,33,'category','Interesting articles I\'ve encountered ',0,10),(34,34,'post_tag','',0,3),(35,35,'post_tag','',0,1),(36,36,'post_tag','',0,1),(38,38,'post_tag','',0,1),(60,59,'post_tag','',0,1),(41,41,'post_tag','',0,1),(42,40,'category','The art of expressing ideas',0,1),(43,42,'post_tag','',0,1),(62,61,'post_tag','',0,0),(46,45,'post_tag','',0,2),(47,46,'post_tag','',0,1),(48,47,'post_tag','',0,1),(49,48,'post_tag','',0,1),(50,49,'post_tag','',0,1),(51,50,'post_tag','',0,1),(52,51,'post_tag','',0,1),(53,52,'post_tag','',0,1),(54,53,'category','Explaining things to others so I might better understand.',0,3),(55,54,'post_tag','',0,1),(56,55,'post_tag','',0,2),(57,56,'post_tag','',0,1),(58,57,'post_tag','',0,1),(65,64,'post_tag','',0,1),(66,65,'post_tag','',0,1),(67,66,'post_tag','',0,1),(68,67,'post_tag','',0,1),(69,68,'post_tag','',0,1),(70,69,'post_tag','',0,1),(71,70,'post_tag','',0,1),(72,71,'category','',0,1),(73,72,'post_tag','',0,0),(74,73,'post_tag','',0,0),(75,74,'post_tag','',0,1),(76,75,'post_tag','',0,1),(77,76,'post_tag','',0,1),(78,77,'post_tag','',0,1),(79,78,'category','',0,1),(80,79,'post_tag','',0,1); -/*!40000 ALTER TABLE `wp_term_taxonomy` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_terms` --- - -DROP TABLE IF EXISTS `wp_terms`; -CREATE TABLE `wp_terms` ( - `term_id` bigint(20) NOT NULL auto_increment, - `name` varchar(55) NOT NULL default '', - `slug` 
varchar(200) NOT NULL default '', - `term_group` bigint(10) NOT NULL default '0', - PRIMARY KEY (`term_id`), - UNIQUE KEY `slug` (`slug`) -) ENGINE=MyISAM AUTO_INCREMENT=80 DEFAULT CHARSET=utf8; - --- --- Dumping data for table `wp_terms` --- - -LOCK TABLES `wp_terms` WRITE; -/*!40000 ALTER TABLE `wp_terms` DISABLE KEYS */; -INSERT INTO `wp_terms` VALUES (1,'General','general',0),(2,'Blogroll','blogroll',0),(61,'prediction markets','prediction-markets',0),(58,'PCA','pca',0),(59,'books','books',0),(8,'introduction','introduction',0),(9,'biography','biography',0),(11,'classification','classification',0),(12,'biology','biology',0),(13,'foundations','foundations',0),(14,'assumptions','assumptions',0),(15,'Theory','theory',0),(16,'Philosophy','philosophy',0),(17,'Application','application',0),(50,'rss','rss',0),(51,'ruby','ruby',0),(63,'probability','probability',0),(60,'Processing','processing',0),(25,'maths','maths',0),(26,'proofs','proofs',0),(27,'humour','humour',0),(28,'Community','community',0),(29,'NIPS','nips',0),(30,'workshop','workshop',0),(31,'representations','representations',0),(32,'loss','loss',0),(33,'Reading','reading',0),(34,'convex analysis','convex-analysis',0),(35,'divergence','divergence',0),(36,'estimation','estimation',0),(62,'scoring rules','scoring-rules',0),(38,'representation','representation',0),(40,'Writing','writing',0),(41,'education','education',0),(42,'communication','communication',0),(64,'COLT','colt',0),(45,'data set','data-set',0),(46,'boosting','boosting',0),(47,'JMLR','jmlr',0),(48,'statistics','statistics',0),(49,'science','science',0),(52,'sql','sql',0),(53,'Exposition','exposition',0),(54,'ROC','roc',0),(55,'Visualisation','visualisation',0),(56,'Applet','applet',0),(57,'Duality','duality',0),(65,'ICML','icml',0),(66,'conference','conference',0),(67,'talks','talks',0),(68,'paradox','paradox',0),(69,'axiom of choice','axiom-of-choice',0),(70,'prediction','prediction',0),(71,'Data','data',0),(72,'AMT','amt',0),(73,'supervised','supervised',0),(74,'blogging','blogging',0),(75,'language','language',0),(76,'evolution','evolution',0),(77,'power laws','power-laws',0),(78,'Teaching','teaching',0),(79,'MLSS','mlss',0); -/*!40000 ALTER TABLE `wp_terms` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_usermeta` --- - -DROP TABLE IF EXISTS `wp_usermeta`; -CREATE TABLE `wp_usermeta` ( - `umeta_id` bigint(20) NOT NULL auto_increment, - `user_id` bigint(20) NOT NULL default '0', - `meta_key` varchar(255) default NULL, - `meta_value` longtext, - PRIMARY KEY (`umeta_id`), - KEY `user_id` (`user_id`), - KEY `meta_key` (`meta_key`) -) ENGINE=MyISAM AUTO_INCREMENT=21 DEFAULT CHARSET=utf8; - --- --- Dumping data for table `wp_usermeta` --- - -LOCK TABLES `wp_usermeta` WRITE; -/*!40000 ALTER TABLE `wp_usermeta` DISABLE KEYS */; -INSERT INTO `wp_usermeta` VALUES 
(1,1,'nickname','Mark'),(2,1,'rich_editing','true'),(3,1,'wp_capabilities','a:1:{s:13:\"administrator\";b:1;}'),(4,1,'wp_user_level','10'),(5,2,'first_name','Mark'),(6,2,'last_name','Reid'),(7,2,'nickname','mark'),(8,2,'rich_editing','false'),(9,2,'wp_capabilities','a:1:{s:13:\"administrator\";b:1;}'),(10,2,'wp_user_level','10'),(11,2,'wp_autosave_draft_ids','a:43:{i:-1189558527;i:3;i:-1189570080;i:5;i:-1190591697;i:13;i:-1190593673;i:14;i:-1192684203;i:17;i:-1192774850;i:18;i:-1193976135;i:19;i:-1195535502;i:20;i:-1198018429;i:21;i:-1202095733;i:23;i:-1202364536;i:24;i:-1202708087;i:25;i:-1203488822;i:26;i:-1204256636;i:27;i:-1205383436;i:28;i:-1207198205;i:29;i:-1207547619;i:30;i:-1207615221;i:31;i:-1207615432;i:32;i:-1207719465;i:33;i:-1207784337;i:34;i:-1207784346;i:35;i:-1207798714;i:36;i:-1208749846;i:37;i:-1211780597;i:38;i:-1212899675;i:40;i:-1213160238;i:42;i:-1214921360;i:44;i:-1216353538;i:45;i:-1216776601;i:46;i:-1216970232;i:47;i:-1219655454;i:99;i:-1220224211;i:108;i:-1220416490;i:109;i:-1221173349;i:111;i:-1221526092;i:117;i:-1222126200;i:124;i:-1222498155;i:132;i:-1224048802;i:134;i:-1226021882;i:140;i:-1226886616;i:143;i:-1226999496;i:164;i:-1229549677;i:171;}'),(12,2,'description','Mark is a post-doctoral research fellow in the Statistical Machine Learning group at the Australian National University.'),(13,2,'has_openid','1'),(14,2,'closedpostboxes_page','a:6:{i:0;s:14:\"pagepostcustom\";i:1;s:20:\"pagecommentstatusdiv\";i:2;s:15:\"pagepassworddiv\";i:3;s:11:\"pageslugdiv\";i:4;s:12:\"pageorderdiv\";i:5;s:13:\"pageauthordiv\";}'),(15,2,'closedpostboxes_post','a:5:{i:0;s:10:\"postcustom\";i:1;s:16:\"commentstatusdiv\";i:2;s:11:\"passworddiv\";i:3;s:7:\"slugdiv\";i:4;s:9:\"authordiv\";}'),(16,1,'first_name','Mark'),(17,1,'last_name','Reid'),(18,1,'admin_color','fresh'),(19,2,'closedpostboxes_link','a:1:{i:0;s:0:\"\";}'),(20,2,'admin_color','fresh'); -/*!40000 ALTER TABLE `wp_usermeta` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wp_users` --- - -DROP TABLE IF EXISTS `wp_users`; -CREATE TABLE `wp_users` ( - `ID` bigint(20) unsigned NOT NULL auto_increment, - `user_login` varchar(60) NOT NULL default '', - `user_pass` varchar(64) NOT NULL default '', - `user_nicename` varchar(50) NOT NULL default '', - `user_email` varchar(100) NOT NULL default '', - `user_url` varchar(100) NOT NULL default '', - `user_registered` datetime NOT NULL default '0000-00-00 00:00:00', - `user_activation_key` varchar(60) NOT NULL default '', - `user_status` int(11) NOT NULL default '0', - `display_name` varchar(250) NOT NULL default '', - PRIMARY KEY (`ID`), - KEY `user_login_key` (`user_login`), - KEY `user_nicename` (`user_nicename`) -) ENGINE=MyISAM AUTO_INCREMENT=3 DEFAULT CHARSET=utf8; - --- --- Dumping data for table `wp_users` --- - -LOCK TABLES `wp_users` WRITE; -/*!40000 ALTER TABLE `wp_users` DISABLE KEYS */; -INSERT INTO `wp_users` VALUES (1,'admin','b781ac8ecfeefa5fa28512db0e109251','admin','mark@conflate.net','http://conflate.net/inductio','2007-09-11 11:30:09','',0,'Mark Reid'),(2,'mark','$P$BqIsIbDrwmJeXawTQnlgn0XQwVj5Lx/','mark','mark@conflate.net','http://mark.reid.name','2007-09-11 11:45:57','',0,'Mark Reid'); -/*!40000 ALTER TABLE `wp_users` ENABLE KEYS */; -UNLOCK TABLES; -/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; - -/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; -/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; -/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; -/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; -/*!40101 SET 
CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; -/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; -/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; - --- Dump completed on 2008-12-23 11:28:54 diff --git a/inductio.sql.zip b/inductio.sql.zip deleted file mode 100644 index d0175949b9e989315941cb23269d19da0903e6d7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 228587 zcmV)ZK&!t{O9KQH00;mG0P(YzIRF3v000000000001N;S0BLSybz^jCZ!U9jY*knc z02RzoNfpdcN%aBs0qnhba~w%_C;0dFr|_ztYJ%##B5$Br2MB^Bsz8tpfW`8-}eTtJem4|Jqc78qCxqk{)d)FDmYVtpnpTh_x^`grB@yp`w$W{k`3T<9FVFe_65k znd;k@_Hej+^mzaHXrT*JzN(Lh`}^C+dk+ujw}XS-9sG(emJ~1caIpPg_ejLipqu~J z$a+8Cd$4=*vkwn;smt{D#kyIw8{e+gwraH-S)H2I%=&wLu=kzEyR`7#oe#0n!rvuP z{9Ien$=!#CyLP695AR@|$*(1m7T!2`c(VQY_~FS% z+xs*>+SQ-kJ$#s3q-D+Zb@1?b_sDd{zw6#~v7Us1^JCd_l?bCC?niN;#J~?k#AV-~ zPEMXYI=R1nbbp8b{XQCea!yuV+L+I}GJ?B3lb*cAY1+Mah@89APnF8vPOdB+=K6nWXY_aE+jsF$<2 zT=C>^?|3%>m0C?~@6+X3wfH+UHGH6Dvwy-X06$`Yn@)d(^iB2jna*Jt=28qtg_V>wfa`8y=B#J z+`RI{UIz&E{qb0NQFsaXq}7&yPWnxQjrsFr@6Pu{&lx&iv{A3!6tNdNLr>W$qzD;D zK0!u5P^e$Zx1A7$<_^%>vNFgMwi?LTjl_-GjY13RY~%;u7X&i1NS#MTSS{8%QDf=2 z8E9Lz+OiRr4R8_%xrX&?G~9c1%>i+>mRzvh4$%L~{EuKF2|BsZ!^vuX9+FN#LI;JHM05nptzrhCGk7O{;bJE=IG7x@?$Yi-`<)m>le`Tu>aU z;KZ@7R@t8@4|M^eyj82~K04fcuzmP3V^Tt!xhr)uMX-yTxv^NbSUtU38w*9N`_6&V zl0iy~5VzRXvL3SP3&eogqrC${HV?oD*E_8R`T#!jD%7Ef;^rwshP^JM3R%rdDMD5? zp$5&8)h*rF8A$SJ9x3tIlM|n01-iI> z@jyl+!dmpJBRwJ<^v@`YCR^2NoU2wz1(*uWpi1beQ?+VUYqQ$Nz-1>a%kqFntWpo{ zcExJ6D$Q=iYBn@A>XfZoxlX8{xmE9g8fB%{rm(tc>8;7T->XzAKPPBjV(b%L2`8%W z3}~)|Fg(?dNc0hr52T<1Nq?RZ>2bKYg7tZ{rr=b_NbGrbjB>GiPG$X(<4N=p1x_!H zh+sq`+OSZLCv=`sAic03IFm^D0};mk5p5+; z$#=CzrEWDVo9T|#Hp}&8@#B`)+zXRU%1jYp6D?tZ;N|h>l7oK|5E%zY9Fdn1zEj4}@OS1i`cdJT& z01B0dOJeK$GE$*>Z*CVK_|F~Jl~od9#Ri=X9i|_S-WHGF7LRCCsYEE+*AEi|EBmz3&zz33C1Mw$$<J-HkB<7M!(#WyIsG}0CKf>;$UPG9#LroC3* zo4m8x#Jy-$+O>++ZPT&luSG}H8e6T}R8@w;~RFuZ;HSMcE$kgw=UF5`=r*Sw(!n0OE@wZr99@U*w_8bkaSeTK-xqvhcpE-m9(3ln1HXPjT;f3@b%UCBz7Xg zNpaX$lgI%npl#~W9SVUsu+!iEc17&b#fe=y9h?@iY4LCr3D?(^)HxAG^gIQ&+T+1! 
zkLfby8jX}Yp&Qimp;H>qgfkw;o)gU~SHDSngdyF+T`J3X{yNddwXSfTn%Q+i3v-#w zX|_xCM(M^K={Ypsz8X6LZiLIfyf6FXiXRNKgPcyM6`Sjpbrq&5IH6{x(c$Sd>vIRT zxz!>PQ7CPSdOaE6^E!eq~?Sv9(Yg!=rd zRcTe~)D`hp&c5`9H;cQ{Vi^CLccrzJP}uygm{K5XVf%3}GJwfY#lRMbKB0HQ_#mc) zB5>bC5PJnguOYFg2ug>s<3_U##0exr!%&>1_f3`98dY${P(nEZ@LnP=l3P6NQe znF?llL7{?((O03EI?)Ksv|#M=lLn1CUzm@akhmxMa>l<@TsfBS4g})b z@-&&YZf;raVri|NmeQ_#OoWeEn4uqua7r-l5@L_&Qh*we%u5Fl(VZCjzOBWBkf`!c z7_?dz2ELK})1j5u+m%+Q(rTqEZIrEsu-aRV##RnixU|+vORrAm8nLhX)krSGKJaK)Q zKVbnVpqcs%3Uqm6={nCPx(vlc`H&|VbK2BJ<*Ang(lPa0Rx}_lC;ddy{u$l6up-^b zNP_1MD^73BmCyt&sw0BAmjy1oST&2+a3Sxmbd<-2k^~xWQP+LPCy*hYS9+%*OCL}{una_X!Iq63aAG7j#v0ctBM1J( zFkQ38Y{G{Q3A3KKR}uRX5*O78Cr-fd5UTfQa^(Biwu+34Xm2u_g-$=L#No`YC_APK z)n9bEk7~jlll0T6SL&_8Zj#W^#l>jM?R?!zOPL426`-AEq0kn`Dj?!|Ed3vS`nyEu z9pWa4g8<#dF4IXcH8(u-L{5l=mxKa);0y$q5sw<;_DtmrC*->R6f}Iim16)B5W-lI z^ydIa+PEua5@5`*Wj#(c*TbRe6X~Y|-h=wUi^K-8C6M`9qW~uNCS7qM2Q-G~`yS66 zE0CITAg+W&0gqUmS&n}p>4}HJl6Pa0^K3<(Jn&g1aWC-W3DHnYlt$F(fN;e&#t@A< zH60S7*%JejBLRB=TPI}rBU<9q-35Eae_#-5dhFA$ft-v2f&sCCPwJCc_k2@0kHAs5|q(%;7fMFFkb(U$5sPjHii$Uy)4Z43fGpz~4LC`EMO672i=4}ZdX+6^x zUk92)WOm5cEHM#z#Vd8j6F(sSn7A>53xXO3P}*NVz#{1E{eebuQ=C6Z?us@N!(&Y> zo}K}IX^TOTuxP(`1`H{Ninfhb0*L<6r~hU^_%Qs@r@ybc`9yZr^XKjykA@jT9G!{2 zOT!REvlQ!SwHvh=#&kb5B#!A;guI6YMxKJn0`&n|x>UGKm>bO*`lUBs6Z*~J-ZnE$ zIow<1QJPw2RjxyqRzi_r+{sSML%DBgAfXD$N2v)dGs8 ze;S3-CQxir$|zQjNrD+qSSRT-$r9VHI)8d2q}`@#>GTcWdHN>QY#Q=y6)~_#0=hSz zzFAqspCAwN+%-z5JJUOri9L8F^}#DqI;+-YQU{IHkz)M~-LFUf6bN;YQ&NSgx_X;n zIshcc%yMMMA#)!VmDAm7tyQfztJGmROhzhCFjCnclrtC%nWH4q2ri`3&RFCY%h&ZS zl9H@KX%mcdQkp8|_B;Y?Ct`u{_L3iPkxBjff51Q=;?-+K5BO7cq3@)B%vHzWReW z>`OxCd5Xs9itq#I2qT@>rSh&5uh^`YZcrs#E9ZF@7-IX}Az7ZLxHIx0YJSU&-d?6_ zEh<%)Y5&YwcdL_hlroJ{#<@$3g46EvDAiTMP`6y)WHn)9t5z?T*0R#Vs{|co%!tnk zL1YNu5qs$49zLX@hMq0{@JM-tD z3@l}tX95EGC?E#Rp^tM94g{Sna0LlpXFFk^V3y8Z+>&BMXy=`$Z<={SQXBg{s0gZ0 z-=sD!ee-*y$GbuBhG)|oWFviR&QkM zRW`Do-$)Sg;#OL^o)QrsL=vYM@IY35x=81@J zBBZ4mOMMZzcqTziz*uf_^6yCz>t>;6F~ZH`?Y(qvrLIkUL1OZW zGk$$&ux?qUCZWMYzefwcPt2+hc{>>KZ^Zq@5Tvs+)a8||yhCu(A=HPLklNh&G+3?L zm^+`Ww0J>S+6yF!fv<%F-UQcG;L1A0mv!@6ofN|LUUX}vMyiSy`*JK_vucOtXk~lU zEY~){LA7dIwN|mTmX*>T-A&&y!4AJ6jx|*!e%EH6NNO$rfDkIc zkD)H63bHrDZ?#%2(;{gURj<`6t#+kTsaq*Nua|3Wyhb-_Tg`61v;~!1QVJQv0T)Y> z3%|?|TQhW}tw@dq3fN19$7jBl=mRK14_Pb+-shIawt($56q5;CY5R*q`HUcNezAYXc?d^(orM9^DM;}>5U3$G! 
z(;NDVgtT2x%{P)A2EwlAULVZ9XQi~m_Z`6}S<10O+eR@%5Kd^@iy#Hk{R~-4*HZ>l z8f6VLQnc0J?un(eiSQ2iF`oe*D0u<_r5{59^GOVI5YP+MV7(}zy{Xb%5z2^O;Ks=C zlzWH9h9tL;_-o%4#A`|7hf%$(PnT^>q?YhW1kEktm{l^;wt+Cz+}wLJqZLfphJh1M zM_9`>_=LO>3AlvjiDfM_CV2@SArWc~htM(17Az)c-|zV2QyjgGpYKC2JGP6|6EeR59JK}6?7G>v4RwVlkkJ64C5xF za%26SstB>MZv;&#p59=!($gDS6~xLfBkr83%1uEs8|@F93{5Jc(K1AxkR-Gt;Swb- zln{GJbbYpXU_YGtLY41&$pY5u}RMCW3y|12LJ>_WX65&wm)f3&mBBBK+5 zyEZyCt&C;Sv!@~`pNmga`K!2luFc;juuUmJ2jj{f0gbInT7i)%y!GI)36p1pa#+m| z+`lDqCk=p4x1?I!v1$~0ukwIJtbX%t;kjkceXXp%LH6xV$!ZX?pK4KDJblv%0yPA3 zeEKF;od$AN>BSyhUiH4`^dp(Q$IJ=E-He`E{Fv3)!YOx)rM0Y-Uo7+kL~`YPtw`YQ zXBB;lq3H|=(b8#6M>B2mNhGo-(%I_w$xx|rXznH-8Vr?*okHDE5HTehGGHMWfW?Za z2{ByMSUhy;%)k&0V}7zAVk3=ia+kfdGm=K##gKTj#4K6t@hRf%lm7O~#M$}f-8k?kcyY<5 z#hKfN6glh%I~fwIzCWT~6;NTkSzRf-R#^;TDtgca5cOE~J4_6wVBhE(Fx ztX3ljJ{C(Ft@c)}T`aY-(p1uTLlfjs@)3q|mYC|`6tUaPP-(jy5AQ4BazPg!66B>1 zX6~%Sq8OV)Xzt62DmQb;WaHrgTs5Ec9(AgwiOhFKAPaW9C!xz`gI>HLNe1K4D9HvJ z=QqVW;{2c23%$Y!0|2a9ZmG>Jp7}jYWh}LNL2F2f{IX>e^z*b*|3WVO3Sck@E)=WU z7l>8XW}D@z-AuNAprVm)Yq9Tv5wiCaW$|m!C1hfJd8~&j(@vgORcOlQ-s3cJo4%6wA3Vp7ms^dmEXs^Y~KmTVFGD*@=JNs4`iQ?Z^s|0L{DJw zT%yLt3&Ae2rX!%YLvFS)(R)NRyLB_MY9I64kv3YEEU3|7#sI?~KsDibeV_1RKjQaR zyr&r@URKNsJ(%){Nf}$H^bx&;wxPv9uteX`X);qmN6-(R_a-cqoqL}B{>%x_W^oZ0 zOeW6;6ZLFtpN29)16|@K2op3~l}4tJtC#By91XD*HLF-!%Ss_M^zS@(AB#mr={P(K zKk=u7a6E~%LCO+sX}%IvrX}&cIB?%FU7YqN&t^mWEPUSUaThQ4qUCm07@JBJ#+_vGQ>okxeeM@J&@fBZQ% zc!tgZvI$=P3GF<+Q8S8OCuD>tMdIux_{j4FBb>ZNR$>V2Pk$2GytE z-zPLH!?II6eLTNUoQWy}i4|;~2*qq}RvLLhl9--a3s&-rZrCZagN^OHXqKxD%e>;;O@unSotjAncV z#VrTYo;Zj}RHO~ORdwDK8B1;=0K#M|muJZgQ@&rKVs!TWTwAbc?ZHRIymbYvj!4-m^boQWP2vqPRay`xVU1)c zym+es&j7GraZBJgSjg4FGPUwuyaYwbn?Z48y%e^CV7VG1nz0}>&+@dryV{UeC6qFC zu4L&fgjGqX4N<^GxdMhJ5dWo7ff@64OhCac66ltnS+pkz>uN%C^6>=v7Ukiw$b|HYd5$z>w?v{g{nNJW11|=_D+f2xoNl2=Vshcsxx=`>RNu93rGx)%PegRfpl114vaS{p|Z8;noMTI`9a_Tc= zo&|~j&?_9ou8X@h^@wOitj1LHk4= zXn=!eCAt>We2{oKBcSyZABcwsD{Ug<49C7>S3@F+=$L(`G*2?cb2BS*)l~ympegO7m zB)kz`(i%kE5XT!3Dvt;YjisxfP7_nbSJnZ1ZO?u6Myb&SxyEZg-+>(#Z^b_AR1&ik z+Zj5<|MJZ&aOn9DOU+W~{5n-orLc_d%UXIayXZc(MX6V;hE-`4V5rOQBb}VQhtdw~ zlsc)rr&;r3kYr_Nlv$A|lZWAx-()~rgiev<)| z*cy0*5(797qnN-v_y6erBZ7-w01KO|n8b9n0reds2vm)>p%=dR^wY2Yc*CgSPWx6W3xwbd%T)Un1gDNUj4AO7Z#zWlZS@x`Z~ zVIH6V^@}h5^2HZFc=5&G@|^zi#pi$VU;p-3zxu%+eD&}C;>$n#{c z^?d&K+$SwhcT3Ix{f|HW;>Bmb%FU^hKX~!kpS<`CbNGk9{ka ztAFt?zWU97izc*1pZz}fOzZl!fA~A<^S4Y_=#~n9>&Jim>tFrRe|quxXSt>QIjt8% zd`fHioCf;GfBonG_#v;zx-?b{r#kKtnOt*Z0%~Ylx_sc+azzb^UhRUT5G1Ic!+@^>6Un?xq9GP*5=2=Gmm>(kIMEiF!_{;=)qQ> zPE-+FHw$9JNZO3hLU`dlee+Ig|9K~8|4F1?3q%!hA6iqGfV0%b$}+YsXFCu>7@<{- zeBUi;?>Y1S7Q+^uV=oXdNBctjDbw*n(bs46)VPdz7fhat9!H&asowqC?AnrpZ*+2s zmh|A0($r9mwj^&;lRwDvaV)>x8ptFpe$W_$ro)5^l z3-R){9768A=S278-ZIXP`eqGR2)aFE&JJu?+8D-iSr0#Dr&wnLhOYJ-JB$Z$C>~*9 zxZ*bg{9T#ozu)&y2i~IdA8{?7IpnUzG+n>M<&6_-7W85*c(~;bu~Ea(CZ%|Gm&WCt zZ`{He=0{t04oj-xFy6~u>YWd_55Q=!os&N%9&x0+5Cs3jR2)uNfCCk2PY4N-kC^g+ z?N1Q$B_}*j@Q{l45N5Lj{~7UFArvjpOAOSMgh)SRJ*^6FRz!XRTCii;A1oRJ&0Oz< zdyQ~TvdWa?2mI8tA(JFwD^KK*T=5p8F~w}2*kGy*b$b)^Ca#&%~|%+U3FkQ)6- z>}nhccN$ak-_BeqpMWENGO8DU2D{d9iY3ApPg>kNjx)`;t$5 zyBA4l9^{!H>c>kcBcWPs)S$ht<=-u99bwhCU}DfMme#UT05j3mKt|>i!#4{1?}Q07 zn)gWhYwYk__ST)3*s z0%ljKT5H)$9lNu@{BZ)U4E$>n(+#Uj+%^5%s#Tiv``BbQx;3|tS?S7sd?HTjdE@wXFsbLaSI> z%SuyO@6ZVRWmbdW+G^Fvs^?0v5y@e=#d>!rjM#;hvAgTA5f?%>MLMWTV)K{!mKx;| zu`jT0`I(=&`P!UOtY(`Z;BFHDQ3V7(dVfI77DMvDSVOipE1MnIL$(|Fjp@Q%wb|aP z&6U=&(&V8Xq|i7Y&;1I`0vqILn_n1h!0^s}DYCKBK(s~w-@JfqSj`SY#7n_i=&XmzxO*CU8e;^@>L`vH+g)I~`pOiEVUX+&}wz~0qKCCc_D5zoy^ zXEX0ZM$7CP)ZdsZt!1UO_^b_S?%vshZoN>e~hgXZ|Bc>U4(Zh{^YQ7sJ>!H$0MkT~-w 
zdE}WY9b-$&=lYpXD@cjUoQ2@ZNiq);B|*XsCqdlUIt|Zh;8tIwje^zKX|{HYW4((fB;JgC zZKTO!#(qlaN@7RSz+eE{C2==Ua5wKqc(>!pyv~Ro1!_RIvR2^o=6Jk@=J5{YGo{WT zX3J+G@zAxxSp&CG$3W`v1}m^=Y`WUq+_J1EuS8>)I67PPO;|5Z6@iSKx;l%EJF4eMubdQ=YbxrG<}E^+b!}xTJr+6e<9+Gp zw!3c8#p!e_Z6c;tX7Z3kSkO@j;Q@L2{4_?P!d=y2@{;Z_Zr%L5(Y(r-dl z&}bG-objrZmA;aQdnGH~YYMsTnzqtCBx3$Cv2$29iT^YD^?jd6hO2ToaFgYF;vErq zDz`?s#A?NGe&rAvHq+9g^^ewrN$m}rSgcMQw(_Y|@>g)MvHQJ^^Pjs}5~I}L31w9D zPb}~@S>UBrt(JJ}jW!3l-4-7s3JH9;4ExScn#0#(6FA2#9 zBjTyF8))Y5mzfcTY}l?{mC~1gY1`Birj98O?S%c9ZKaHZKjAgl7W2t+G#qsbqd#9i zfh9f@!&nFOp-YN8%Hd&+w;OD;I4}(enD0btGOfL>xMO8Q8u!aKK{rQ~*Pa z9oY+5gBR$p?>h)%lLU1xz{>m=+$V5-eAk~lbYSCiTnJEIp%YguV3*%}D>iK2VU z#3T6{<(oQOG9W58i=|dp3VtC8oIQnG#hGFgNtoghrFyGA7O*D5gKj^Uc!4JtPf$I?M$gKvYG1TVP^fF>jFG_~L+Iih7TQ(w;m zP0fOh4RH#sPO;R=O2I$mlnV&kbz0y!acZ#GgARod>4O%yV3OU8y8$dN-@ zB13{EY_sUmHZ&;%QaS4N~iHU@)+^^$|XsRbe$y`jPwVcz6igs zOzE;DWf9jeW(B$^FAZ0RM0=h3&X|zHbs9wQGAN|VxugL}!)oXUv#VvY;U;vUI6xu+ z@^z+m<}iJ{EGN&$xh8}rRd7*yAuCroV0F?Jk`d{H*E5X(cweBuLKmkwCUy z>Xeql%MFh3&_`gD$n?eJGShdt738X#NZW*y=82pn{-7tuEsS6B7G`;DKIUfmkhoTd zM2p+R*?khLXnvx#RTC{x)+^5nxg5?bfj!`5)zS{sn8WT?9FJHrccQ||AgKPL4PiGX z)=>qTW@A<=65GZ#Y+-?B=E>q7Ik4`)GeFYKHJ8V;w9;A5WZ;LcJUiajIy_4R-{` z8K2lmCyHcl;|{JmhgLcVgv#ZS=g$wXLHEqZ+|Y-R?#v^s?HGjt6`evj1_QLHg;Lh-eKY)*| z>xVmdws`J~M?d^Qpq{_X15>pg6L5pQWx88D3{%8LBmE?vOZ}`P*SETv(NS0S?;&2(-@r+x zdYuU^;9Xvo3NGe963auL$G&n%DE5Xgb5I%PBs2g%)>F$6=b`>64(Mxcn3rG|>KU>e zJExA{3uL^AFdvZIYSJ#`bZNN}AE2$cjmFnr_T@6f>rh1KEUzbs4IMeKgS|v4u~>hJ zf?~0fR#wDu;9f^rajkr{tki&rTFSD4r>Ve`%8BGUCyFK$V+r%|!QOWs?`lCa=3Y%E z@J#+RrTDSYsNKwcNM^i|{(5<`*jGp;zT)`iWz!`Ga+#dEq{*c+>ykE!{A!izD{Lij zkz_;J?N+y0v-*wAMx!^7t&Y`d4EoJlt8VvN7v6>?qrRS-!R+>>_d(ZMdJFU?4g319 z>p(xS4cEU^H)Q%Q>gn3e)w&(ifZu7p&uL5+bvu|5QdC{0`kJY5nRX{zZz)|#sW8** z))eNyQLJ>??P+hmyf&wB#EW;v9N*sJS_s&rI=)UvvVwx!GXR`e`Ps+(KoK>VZSCPhh9I zWL0?Bo0}S!vxY02`&`eOnOG0@d~31nr2cEJu8He=}y7ituDxcCY1VH1xI$S;1` z%)TriH{W#C$oV?Oqg~Q{5pb4wzX)7Ooxt4R;xOHGb5?%`D6ROubcEs(7e`q9UL0ZN z7R})i#&fV>S@_Z^UQ~U-i-?A&WD_&m^GO&je$$UbiwLcmTWsOmFZ_Z=7>c8^tU%YO zu$xp^g{B)#M$`Y~=S!2Rbhyq_zs;49s+ z3+~cO!1+pGxf~X-kIa9j2^?25y{NWRsJ4>j|`TD6Di#o8zv3fa}D5+&shub^Zn&*I&*C%I-VVVKx3P$*`2TH zvnRDq?UPjEQ!v_w)E1<;h4osYj^6DPyhg_u8499YWh-A9s)3a?>+>~XD|SMlHk=#3 zQK(2qdBS%$@TcfCznprrc-p|rbDi-a!6Wk|@=wB%4?h`hkgLPz!t9=oS5BYqif1M9 zajp)Z1B`^SI>RkTMzzW#SBcLn`?&O8HuOm$&jgznrFufDQhf`) z8gEtK8WTz%dGVNj#SsAwyp_|`H25rs&z(u1Bl57jKyfO3k|TyIBAvx=okVwY0{vUa z$nEj*-Eucqr#rWlQ#CV)%JXmN1nWSWoUAnA|54I4{ooXNXVWiD`ht;g8?*jf z37bEil#}q<=3|a}G#R*%%f7IKO^tpH(l~+3bMd>zy^I48RW#GV1(SXSV^s+)&)4C{BsMXr=)FxlsvbVtspo1=jCn(47*^+t z*@8air#Nb!`-%m?wAk|j7ncu&y^3s}Vo#MU{RSrogdBx0 z>-ElJy@Y?r!M(oR&6J1X9o2|&Zaet?BPyV)zv(`6j8{$bNi$;HM~XLop-P^MIhi8j zDnfJ=kyVWk5cG`L3Pe+G#?6=x_mfwF;19LX|Dz^HwWieUgCT z^es=xTN=woWA_9`Xg9DS=TQ{}xk{a6GvV8Rs_nw_aNO$VVG#On>{ZGU=PGKRrFU|s z{SfF&w_eK~N}Kmwi%gnd13h74LbMl&a>C*%wymrI@eIuT5kiFEK|515J>}{%VhdG2 z-~-(`7>v9p&pZG8?vv*&|DUxS%Ri3a@{XS$>_?j)9{#h9$z($wcZVPB^*(5RYj(8r z$#)-o*t}C~-Fft3=yY?Wzn#7(XrZV-H_v8%iwfquRH>e;$VUe5=7fmWAfSaTvN`Q; za;~+(pxJ9SdbVY?<)+>3)N6LTyV>v7RL36Jz#|Wtcs!89iK7mWj-I{=y7lu~kez^f z1`L0-RBVX@L>oRKU>>vBLbB{HXlO-ICg7rtL>mLk97mDGpE|_s)T=<$D;?y$-|=1e z@u?qu`ukjqi6iX<|IvaJm0;J@egW1tHe@4z0!Y`6saBmcxuUBox9zg1nQGOjv(1<4 z?3;y7I$F>M=HR~@5LJ&yaY?bhla^@ol_;>D5D`?CWj9-gFYhrV($O zd>j$3{V$lw8&JqZ;yc+q(>BA{vGq)@w7mBw?=(A|tjpXxmswAj7nJ1u(;i?mKhAQ* zg8hS0B>TMK6p^zi3I%cvrJ0#4;x3$9IL4|!5nx?PVydzPw!mP@(~(%&|2pw*Gr{Y1 zo(x+l#A(SKPfbdNB*4AN@y)SHGSGtmU7FQ~qy2NHb}``Ko)uWU1k@`hWe^b(P9R2u z?c;eb%U+ZP#u%unLjD7%f2zYfB2yyw1#2Q~0X)IrImf;=DWDjHQ#^F#A!Lwkj$pF^ z)s0E(u-qPKSNJ{}!cq|%i=jk|Hw9q_i8fS` zq1^nAE 
zECi=?>aypv_T5TqrpLRL7Nsv5S$q-rXUZVpIQbm>#tx)@nYIIh1;jQsr)iwFIfICP z`!tbB&<%TMPfFrDG3_@WY$=J|j)`d#x{@HaC6}cFaH*5>vM(cmhdtCZ*3?dJe8lOz zOY^M|aRKQ&I3FrYKtkKH!l4fBQ$WKp=@m+$`9=*KdNMGPKV?7NGzuwVK|4D30l+a1 z*av}QvY1S`=3rtx&uO#)xNC@Pam{ti4(Ptf$Mz1?bP#fg-!6VQ@_zQE~bgnTwFYDA_G2qL@ z)}ysM&wX}a4t#%HGOOg{=051KVaq_9s*LkLU1*08Wf`eiQjqd!O-bkgJv*$*0CXO* z=M(E>IfdA>!b{RPBHAH>2*9yB^s~cZ0}cXgtcUZ`B)hP2mo~9~=8EOe56D66i;XOMD3Y(=(IaE|=Fo3$r9<8FdmKA<>e$hU@MgP7 z{EQeW%$WXcwDBjbg)*FaHfL)(x*K#4W|E_7lRQBc5q-^MDyg2AiLdRSPUV>@4_pFE z!o9bM1(jm~1rgN_1LB*soBWjqsm`^R2n&tX>BZ&xih@|_235Vxchhz6WfzSx6W?x* zD4F}BG4eSwd3i>q1V{c9TjhcXAW>+Z826AzVybK%f-BMpf8={U2X5&pKo{n3^95d@ zTLhw0=hT^yz^={D@mCezYbOauP5a2+Y{}oCV3j4ov{zEVZ32pfxY#*r82Ps3=;j%U z-IoZQh!`M!6(HHMJLBjFuHp|2dt<_xJxu}=5>Lk+>G)z8hjA$fN(7dWUd*<_@dVxu zD#vTcAq|j{e~yNLxU@FM=s~CkJ#9{`l;(*y-Dq$98E43J2MqU`6+&bKJ7nYlhwf}N z$LqT>8cJH9<3?r2OXsAc34&>X{U0m8c+Ofws!8gAX*QCD>r|WU6b=lRF$#fVu40yk zV@r1%nvGIQ9`#>L70d+ktkVKb%#LSc+vE!ZAxJq2lQ&jJe3)rgS0X@cgye2c7W@ra z+4|Jw7y%2?y~AmTzV8(|o`GYwls>rB$ERTg4{(3 zLyU0e#Ky@1IBp$GoggsOnG>cyV@f`I{EgJ8y;+irA;FoW26Na)2+S{T>u{ZfBX3pd zLz-3%slf1`(9dp7F1E%f9MW04gP)FJ(VE)lgj>a59o#w=ATOpF!a z%48nR-&>MHsTab_9O4J5tm0h$L$eZE*pM^-RSMLOBXycx<55CBNi)}+go{dautmy; zU_yLLm@^a`CU6Js;UYR@{VvDauWKakI7Bcyc3#2zQL#jy!da>%u}XfIlP zNIR&1AN4WcTUDCXOV5oKS|+4cu6N2QABkCTnn}8xU_S07A70OJn%607SroCy>YZdn z9n@Cb(PFAT{#PF~t$|fe5s-#B3KcRG6m(eGx61~_1wOA7hc>ZSPN*m9xtNJ!2ld6a zj#3R)u`uwZOPuhs=GY`HpH!b|HCb*cIo*ws`RgM(ckE3uP&*0Oo!ZM^(%;PBeRK1&g6AvK4UULHMvhR}o6plz> zWN!myOYb=euhoDB!!i(6hL_8On2#}&4a(WB;lBvu*?gH-nvWc0)pIDrzv(1npX zR+E|$yH2lP%g> zJk$bEj<4h1!8LMl>x9gyeI)-^~86!u3 zBw5#)Cvvo0Kx-Mr$Y5$O)@Rv{qdapW8de#dL2JUCR?5#AUKTuM8a~}teYy#@RxE}i z&64H#Q4YC7yU34p%&S7UNQ4Pa{XX$Id9kAt_p(CA*X$_JUdX{4V1mHP)@h>X9L6(f z-yFNh9!8vI$gD|A1RS}Jg>GnslPvd8!>}Zk<2W0YS}CtsOIUgwbPw7*qdyiNa>sF6 zRUJOJf~HzxW0hV{Wg3B`b1k&gE1PI85SG6y1n@kQ1- zpTK2Kw6e!U=|tE!!{M`dqD4BaA({eYBtq^rczY3huTm6;?lOkT3qiSU)akTXK?a{U zTq>B17`-XWYFc?}!mI*LSZ}EMxQ%+Hh_#;ZW%<_vs*gup65dC?q&=X^^alNoIAt2f zWSAWpfm#wyZBHo9E6ukroFOIr2)ab0Ldi{^AJ;gn1}8!RkieGI;Y_%^y))1-OweHa z*tTuGk8RtwZQHhO+qP}nwvGMv4>q~CHn_n}PS9PGOfXeF-E|reDChKr8ApsE1VDBJ zTnWy{e4-TT{PYEg(r(6@pcyuIutkhfFHa_tOg`>{hw3?K~F}fu*}OXLh3!Zd#(HpKypiKZHC zwF5cs+gQ2;nFe8VZ@3|EsX?s7x^p<0SU>}=+*=l`ps8L+P?mW=F-X(VUTQASG(0HC z9w;r@eMI~rAuoGZ#wFG~$n};-I?1Ic4-l8ov5>H;qhkrxp}!JBG&!TUE)D~L-*g$; zt$IoJ{0t)(z#Imxzw=snV4pM+)ED_UevJ2uvYww6q`(mbhxk*v?84F%6N|dH{dwq2 z$#ley5^}R{NsFF!p^jGchF4a^V%C}0<(klJem<{x0LB>r#?%=>C`SOV=A-5b$5}2$7pNu**$s)P2+vN)hr`gnE8)Y@;zN+Q*1vhd*hx2;TP{K!HN(u(cae`r zgCV^&q^RrQQ$W!ttG+bs9%c?w>&mqg!df+MGO_P&3g9emPcD?ia@_wHC=391#eKqj zQRR$8w?cC6Y+jWLJE=EZ=(E`{7Z`mQLYN>1JgJ!zYr!<6GY90qx&N2h36$zhIIw{ra3ture_XX zF^LE%>Gt)LXVNIfcaFrjb8dS`=bh1_z9sSm`OpLsygwj_s4*nWK{3p5kppS(^XQWc z&>ByN{kQP#r0pBWDl@+Cs(xg-Y_sMY>dlvL-Tmy#e6aS~@3Pvc@m@Ua*?uKr^j>A**dYUcZMxq~klA59S0VF_g1eV&dge{uSZ(l$X!gd8 z_8EM-^?+WiXlkDcT5(?Ih}G*sf}m1G-~4SRGnJ!X3j0B`-*MuPoTN%ecx5tP`Vdo6 zUIxVlX)#~o*A2)oVnc#@w&|g~W!35Ea9NJ*!5~HonrY#KUw`4g^?e*AXK7=dm;Dn$ zHJff^QRyiLgJY5@PEtW)yyv?`v~=>a#Cos(Sk<0}Vm3cR4OO6WHx#0#g+)GR*_<;+ z)Z=y6BH2bWkw$MnaOD%i{IpSVfZj$GaMO32;XAfKW zQB)z^5n3JUh^cW|;b2eH+13iEogo95Qc~tJuP|t(Y70|bqGzU`Xu-}^R{52B1?bQi zr_YdeS~Z!=GAU7v22NkZo9#2lhyQx5Od{&5B}rAny|uE7t8|m9%j5+Nk3(h`ENZW6AXxc;nKnkNyNVZA&2wHy7mRR5_RbD2n0!}J_H%>@)MH}Xw1T$8nOdQ zNca>NDQ`!-$)IgP>@cXh9B7sTn8)D42?(U4Ii z!48AMOoK7NEM$~>BDW(d&;cPJf)<6p@O}v2_u%VITk0x|n@p@88ods^k;@eWBKYr; zI8tJ-xfLlzh?xdihKBY*&m}>6v36x6rgzUyjb{vQ=7J?egv*qOBF{{PHJDSreiQUe zdEs)aT+GQNY2L!?W+&Z(ESMoae33}0x4?8rViZd?vL@kerKnmCoC||yK;FWm!^9YE 
zomKtb0Hf(3&|CQAPCITt8MOWWwVjf+oTQ_o9l|U&Bju1{DyDj--i0w`OR^Re-3lhE)s+T|Hk71+XvOH5gC-X>X+ zC?zTQB$eHyEi7BaV;CXDuMIFw5eiF2bc;9gCdH*iwn~4z#HvBQS7GD|`Lc4jEzxcH z9&#u8@w97%+H%DlM!ATU+ z;-b_M{IloEqsSb=D(d=9l9@>0B-}3N&hf_q`v@#!w1&Dsc9?1c`w$Yb^>`Dj4)Kds zLUia`5*@j=`|uH_bV-DZZq&`R-E_1;9EXu%?oQJ3*lq6fdl6~{AL~vUuWGK01H>Gq zMlnioIlsIGF(B^?BRXWH56BSlb{st5qa(tQ61odL!IvndR#W*6= z+jB)2d|}0!o90q(8fE*7j}7vmF%IRxm!>uIeBNVloe;3d6IZ1;680HND#I@9-vJ7} zN1Kdp93C>c>DJ{WM1S8&p1Rb#)T?!9R_V~J(4$(SMm9$bZwwn;8~*q91V`@KE{bQJ zmzwrAr>muv9G%x`P+h~nchcRv8={D%wT$RtR26V%WU%zxS$%7NO@3J5T6U$m67483#z{KDx9b@q24m)oL9I&o`#Qsq0 zUQMfPcQ~!+r24a49HWs`QSb!oZQl`;ZDM73#BR^sI;9OqSwZ_H!;7v7GfSJ{$hhSU z#lPe}%`uhpft=DWd#jS}TjE6EA=I}~;WXizwlp{dSL1z--33@kgUkR z!ugo3Ib&u2C7r~$G6GeK4{BOAq-OBGg-!-Fter<Q4j}iUOd=wrC1&BS;(FOW^>J$u(3Fs2p0CZN z{A%zp`ekAL?H`D%+G-@8u38{_7&lW)9Nl~{*Uu4#3q}ZzmS@1X%m)fpQYD0PAMyId zGx8XcRE8+6vd;?@E%1EOx$V2uUg!7Y)ni_SF#empi5d(>mGE}o`4p?-SLNGgdce11 zH}P<6!wx$DcYs?pcvhuU-GrJe8az_t0CBQAF9y>WYN#c^cCMXr90SnzW%^e%|u zl*!l`#(3;lK44i3z~RB|CjvkO23+@D(O^C!SWuUfe5oPYAxBxzmwiL zcrg)??)!pr+5r1DQ;|*Ub);3=E=M^yVFQ%xjx@*&I9l^S;?#OXiK)M$jQ-C%W9oW} z_A@s!@ue5!&adXXAx$wF+Qgs5)<uIup76)}=h?y(pr4B0*PB-KcudHTU=0oe3F=Zg*Jm&ikRTIx$bfOD-6vDOFsH7$tdJNS#8@J* zShiIdg~gDQidSa7ku{AE36OKPCEfwn)Iy0deylkJJBS~FM=0*0KZnshYQ<8{txRdH zNh#A&W%BFm4*1u}kQkSz0x;@}*k~RQTEma{TQml1!G*s*Nj<}nycmUlLDhPf5YvM> z?A*(^>oio2?sYEG*?uKN{F_9yJJfPa>`72NpgsdLI;g%YD)`BhWV) zjCL&qXh&%rACFjfX`Iz(duiMs^Yg-nWi4a(Rr>l+m?l@ylV5QQlVP3R@j&6l4oh=> z8l`Wl=AMh5+Mk+FViPCm#(%ygOYJWSI;Q{)!mhOjo45~4vfY&z$uaXb^_f?yMen6f z{!BZpphWHKW<-TRrcP2Q#^)bKv-SZ=CZs>S?)Z`<`F@g8< z^5*KQz`TEBSkf$=r;{QH%i<|@GzKXkQ&Q*#I`apkf%S$5Kg(X2pPc)L2u}r(r`H;i6v0^Hg2wjJO1}F1 z_vj$(2CLygY**uXrwf49bcQCT4A?4k(jZ3G032g%$M*~p??yiu!qM(UN>wLCwu?NO z{uh$};KXjF$My^ zgd_G_goX(G=@}r)*Pd=L|B&we2G-gSggr|okqDl+P^+TY8Nv$>OArY$ldW-wwE1cf zmP3hmFVwZtB;eW;H0mAvjR8))EUu@uR>n^u6inev$u8FUk(`l>LclsW_lA=(?6j4y zYF1iK!TWs3*LEx2v^w}8Rcb6>7ZiFmYP6<)H1@PNu6J)dYYm%rr{4@~tY7ab_@+e5 zbTGV4ntr}Xuv@-yy3nN;ChJNP_Tpc?t1O|^R6>WPMRSN~Vy7Cu!4F??V`gKSlcL4` zr5V)#XdGk*#@qEUpEwJF^ZWRu^edIu0=QEnveqEN ztQ~XmodD?iFUwHFbH2<VdfVN>Vp~v5a<88!4n@)?IQ%sUEYYW1vqnzwdX;5$#$|4UY zQZWZC5681d$A}i~u#UTVC-IP7;61}a5`GK}*BVwbytuSRMDP*PQP4ZXke!DJ`HnoCzYE;QH}J*&wG|otqgfC*=b8%kw0_a2q)`6b#KfxI>h#`tl1$ z9eu;CV6!nnx@EPE!7$40DZ+{T<0l52H#OUTPLL5QN+D5BQ$0*aRk3{mI@Gg3aH3sg z@6gf^M6OyeCowlG((t+>(|j@!d*>xHy8!lK#GGkD=qo_Rp$}_Gl*ww34ui2vl@&l9 z&d#5_dqQC523O z=7NO<+htya9t6v>SF80i)pzohNpwkGH^GIBlCz+jC9tAK47Yahh+_v+Z6{qs)x5Fg z(b2CGfL@-?0kr2MyP!B+*SPp4r`BwJkm$DR+40~^kO{2md!nDe)Wn3bUPS3<+=TXm z!N~a}D^x?;g|^0P$RiUA!M03S2ZKYfa<@uM589ZHwP(Y;R%9YtHAz>dhECWMN;zp` zXyxFv#VervoPu9YMq&sZJ#QC(YW`{UpJ^>WCc;4L? 
z>?XfuvJ5bV#S$w|<>2pTN)Z!=d>@%!+kf>)c@U$>(O!bhgPABr70Sc#Dv@><%po=4;S-R0$}DTLhb5B_ zmbhd@s&kx#hCuQuP)J{E?EE7+pCA&Q1s)q1yMF;I2wd9`s|AT660c7$0eD2iEl`{7 zt!#EFuaYan$lqZ zOF*#F>6W+460^t>A~`05#$9^X^gFCG?q^ZBvchDB+rO;>L`!);77ANfbXox__IV}4F$1x z#ig1QFKUBKm5958VE^!PZyS_~P6%2eHv^*s7C19@r+}~doZdWmMgn^S13Y=NoR}~T z1hY(KJT6dJ8gT-1rfKdY^TXs($y-ZT&6mUQ>Vo%l-(uO+kIfdt;3(7coD68gy ztg3!v59T9+P!{llHO!!O164#3JToA8V{p)GFgOdJPSoDJ1KtM)QA375~Gt}&xy8WTrmS1bpsf(dp zzLZSPbN<^|C!4jxx>y|IRqp#v(Vfv1LnLs6YvBX#uqN$46XufKMM|h}vU)sVMU*A4Jc4tv+41;QB~xgq zT_!)%#k-JAA3d^6J$ZZ4kB_%&>-|F4iS2De>o(ZV4(UyRT zzdZqC8~sLBdJXLK@8+JqI#}sdFfuFPr2lf8{r@Lb{r@4%rvpYdt7@eJlCde67`BCD z=Pq%cZbdTJ5DD0fIM}|ej>>pya@1M!qf4kfjrcsHN;geS+r6=F=~|0SRXRJd+34Je z4(cg`#6BO}D`}oSD#C%&nS&ZOc8X!`Y;aqUQ!E6zcukG$9S$rQ>U7x|?6ks(gdCjt zRTBlPTA_8L`I$lZFY$JU7Kr7}-+c=}x-o%zltd<*k~R~VF`^jJsNYX5*K=zg&=J6D zE-pW_EKiA>c(|JG*bU_V8m3nHZ{bJG*}uHP$VWk;l$~5L$_?>pevU^VSs?)KD&%z< zkBoM}1amU9=R-ESyRNGhd7%yW0`U*ng@dMK_-N>-c?0=^NY~}@tkrCBNt>p+~bxWj9-j?qyu?eAQ=Pbz<6d%j9x@>=@>d;8G;KI z3;T^-W?{YU=x}2vWVXMakj#ea-|s+zJ3Gd{IHa3f?@Yr@Fx)gcnjDm!TxpQ;a7A8% z=V3&t_))UtY-^okHoz9z1KW;>o~$%u^+}i{Oc}EMyojoP|F=kpNg`N5^}Y8mZAH>L z(grU^%wThz=qv%%n+`vM1N}IKA05y({nL3!YX`QnKhla#d4~&w$K1&j-9CbFA z4XYiqgAGQ1whv%1@EMFe+$wdo>@+fBAQe&(>^1n-ia6iT$tC?H)V=M0E@*Dgj1BMO z;8@Ih}0Y?LMab3#9FB2SuNGy zReDdY^QuunqJrITd9p?G@s1S?;Rj7D%jO)DB&l|leD7Hp#QN`C>B4w8T38XN(VX;W zzyiKa5KT~rxqlh_1akfeo%F;!|1ekHusZzHocz!+nd&%-^CaOcMEv(yq?%15R%sS8 zNi~~>7b9dJuG@Js0_08a;dv)5zdYjw2@Sa(T6>prYsTa z-v6@#uBCxG_@124te%Qks5T^YG1}v<_j!z(C+h=4>F=Gg#X&O4F7+k$r7U&FUv_L8kJgU(auKJ)TG+D2M_owTQWmE;TW;aVdh<2KH&9NuCL00E8v`)=qMGf znp?ZFe6{c1rET7MANF3yNGTy@bbl0!%3Ft;NRvNf%imTIy8unIimxq@2xo;1^%62D z%+aXMG@)V#^^*__O~P-Kme9WqA9H8rng*nEV$LC9gn9|7r7W3^6$l**yaE6dUs$e= z!DkbazfWBgFDgTw;%*7Z0^G2b_t)37oSz_%p^2E%vO_S_6$3=Nfub!&nZe!eK{ zTq$umigP4@Jl9b~DD9Z)Ve2c~h2haqdBsByG>HN?fgpIK3(=eVzqx^O2;QpcHzfFa zhvX+G=qFi!lG+RFY{o2fnJ}5addT>H=BODfsKSxS~dj;0Z)W1?rV8(rt-NRcjoiptJ7mI#iQ@IF);OF_>(Z`-ge z5%3QvpTppjmG}N3yr$`x#6s^KNi2JUya|~lR5iFKTfnle_AE^`z{B&BGJgp`#QRr6 zCzmchvNd6@U0RnBpAeeTgNgA*&=LUasXRQ+10S&XzSbX}0ZnSgV$Z@YED~;7eB7IE zA$ZMwALTIE5=OS&*sQ)VzItSmQi)J9hep}xa+pZl2)5q0A~`=jE-owL_dq|UELN24bR`NFH+#AU#;k(i$m6wT%#AhpU+dV! 
zch0g?5Z2z#gdtzUYloUK=Ga-MT%oY|#a-k!edp=}v{98yZX(Y0Fa}7Qy;T0s!^VKx zHp0GlEULufHmt3Baz>Y!-rbz5M47U-VYaSszGuLmWiYsTclx;ERk8-S+PlD072C#H zs*49=*v@X9%uonFLutEAopx)VdM9y7JEW{2l3l@4FN~Uf-WRzIv^0RVhr{9I9QqsTz)6US|IShNdwj5XETW$jqwn;&566Yxj!+0rZun^5T|JhN4Og)%hBTTA) zkTLq^dILs*QSdNefnRt^p6xh@uPT(Euq)iu*(!}vngLl4^c!Nh@BuSH@2VweLZF?s zVB!^`$iD=5av<1NR-E=F-5Pnr&Qg0rEzv`6=kFZQsUJ#AOOvx{pil_5lcV(v(dMjo z^&R6~KkWKeRyb;orK2lE{#GTY6H%q*vh!sb3x3qV`vjZv?AFJ|PKBq-FKmB&ue&Z2 zHpCj}KB&_hW?t7Afo-<0Gp#5!sm2pUsh$;Es~O=D^8O&=gRnq2+L?uF?1sAs-`eU0$EP%_T)YQ~=FD578XD?q&yXX!G4Qe=ClwGQx z9)d(?w}?4Izen$Its^d2O`i1^12a z=~>EjPv2md{`bRJT|dgS(c)mJoSq6SB$^zIt(xKM4(m<_TA6T8Lu4J12b?0H?E0Zw z41d;RqS}@yr;N))z2R&x$Z+OFO1<@=pwqxd`v^iAMi%khGl`+3PR9#7BVH5%nk!j{ z=(l}DH#@+CEcca#EH|}s#+>hE-){+YvC}ayN%^(1F?jjq@IGW|4_u_&*^N9#v#vBk zCRjMl186+5#R9yKk3$pE=^-bkk$r)kad?A-QCLNeA}pfr%cgro-2t~qj=fn~RMeqc!4e#zI;O`|>jY9Po|tF+!~WnW zpp6W2aVu|y928Q;-YO;eFUaDhDZs5Q;Ae1Eu4W$OY)Z(*+GYnz*0C;+j?g;2>Uc?C z)!UHmEDGL2zxYM1chujcs5MFkcd~2BJunNn6bC5qh@3E+l4HlgDt;0vQ$|<1tbdHS z^*JvW(EozcQd!*Nm*kQ@vd`ruC{UYH#th;fFK0IG29ap649ytaCZfYvm#S1O$=`)d z0#})XVPx>Laz${4@5^v@p;mF6?A*tMl@Oy`E=q(D+uKUg5@A#)D#aaFWtAzuBZc~# zm=0SKQYfJ6t+6ItOjl!JE)3~p;|l^#Uq$Ac9A#K^mM!>4O$OW+Q;vk`0+<48WSxEZ zfh_yz09*dHwkIy^Vs@A}7w=TH{_Dq=Da2vDl>Z-``_+1)zx)dskxmxn$fw5{=qd*whUyrd6z{hxLw$x zkLM8Ca_snmI+l5PFLL=lNAbKLkoqVW!#h6S;Nn|%O*m1wG%WU4S_I!?-|6;-kpF&* zPzZ9yCIz;uB?lA)WEfwJ7^*)=#n_Tq%2E)RbQ0pTQC67jX%Q)2q`WIq~Q$tu#GRU!O zq6^G@T1^Y}G>O%6okvV|0FSH%3!u4Qm~X0556F8wE-m%>Sh4TRArY19(77J`bK@_lA5V3}dbs)$xk&r|Uj2z+33F4P50`|c>`~W8Zs=myeC5i8;-(tzUbw)=#>T>J z@tK4>x*m6t?+r(Mrs*n_S%)mQgFce0W)}82Taf_ekmD-@38!>tk}z*fPV$_%xRT(t zBgKx4*rPw3Cmnr|3mfJ9TN4A_o|_E6$3sriIHyKU`zbXlH4WF|n7pVbmOf?y;LT&t|X(?3NGr*D2}sN#BQ3a1{#U$hT?h~ zB9VcBP6gP^7&cF!)Gtir+%y*n)_P&DSV5iU90Q-5h127{j@`V`_`nb&-=V`4>|67W?kTcqFZc3M8C0hi#Mm(=kwxGHsl388b#wsy z8kll?=kJwE4xUoDifRQ?15j|(>C$1gL(t-VN8&fYwu%F21-Ui{a5?sM&$P!`1e|kFVj{C{x;j+M3 zwf`y-hNg!`*F9Jzq%U?hDK~;fBq8O&1S@`K%PB6gyl$9?*9mf7RyrO~T%Vg^uLx@XDpgG4BQ8f=-P$z|#N{BPubs zlNPiqt#i-7?*WV4;^@Rzj0?wpDhDmqnb~u6Q42z?4X5Ko#X|og<~yc)w^Asc zOqVtM!0hja{Pq-^^)CgM71!N_$Dm1ZZyy4O+8}*fw8(S*GmeE)8Rv%mjgQhulBh%O z0}S0p0IQbkD51yL*{PJ3{#1nr*7!`1AT^N_$SYSd<|ujX3)39srN|m9UD%I6QG_bv z9l|sl7;>dIJ7M+O3PP_r7G~;$%aZP-F*kQpapMy@86Kb#1fwK(?e+8$kg<6b9}-&k z?C3~FJ6v|N1M*%K|IV4_IfXnY^?fM*1i^e1rwI%2Ir2ih3zgZcQ*lWY3#N8Q!c9L7 zbwyN_D#sq#bogLk=B%gvsVHn|?j0mGS*4b-B*-bNls&r5z7gS~6LC~}0DppDWrPO| zgsSO8if8I12>iqGJB`?H&YA1B3HNiGmM?6dW2bXs>~4SJ!W6SLM-MNo zb1^>`yd0Ppxer$>yk4w!{|a5=d&;a0&?u{g0xj+wqLR@uNF6sO>F5OOVf4NLoWR=} z^jQeIWGDIGM-&bFUUaMPP?tOo-=Gd1&j6pRhXxhUf z8k>W&J^M3wQP9uB!v6XF_}#6?PS9<>7x&}W(a~=>ugBOj`y1I4y*}MA(odQF+_u!k zxmlwNi_>L^inoe^%O~a09+-}XMBI2E**o0&5FWi~ba6=zV4o*RTjdmO+2UpMReFe5 z-QGo7uKSVjZX3_(wqtClSg8&i(Z1rI!k@WLBi`LJ2T{;L3?)oNg1T7df#@23>BE8BT0P4lF}qlJ z`z3m9PxV~?C9$H8uOWb2zQg$*$wo`}zbbwod2g>NjnAcX&ZwHiH=ig`-OMhqTpTjccN&K;S?m@d+HR8>?>z6S@YV{9B7nJJ!LF2tn$ri zsC3Ybl;Sn#Qey;-d~6Qj66$rB4t~0{I+qHg#aA~~FY(W$(p3eCjT~%7B&4*aaKPxe zji2zm;$dLl%IO=CX-?`TifKnxfOdMK3h#1&sDB2tq=Rz#Hd`cUX_l;8Y)UHSn^Vn* z>&yfTj@O8|1+ zEF@d*6Q%%>ERCqdWKyNAh+|n&kZOUd6$SIXYbq43@t|9$SE$8nWwn#VX1m9F*-9TK zN3V>gZDmzK5ZYs!Zx8@JnjI7H#V<)_&9tIR6=JPYn+dvnmAZVog_)O_J|7PpnAG#7 z+=!ua?Xy&y$NF8z6cF?wn-|{Df_pFoUV#>~Z9uK}!EZ@z&rQYe8%MM{wyHAZ1d#2+ zI1p%E|E*AyqZt?u$^Lg-@f47!3K4de;n+ig9!eZzZ`%gsY)Fp)Hm;zB-ip3ey0_#_ z_aPkLV@G8tE`kQV7JkEpT=}o);nKl8WKGD$u73a)d0&Aim^b&X%j1wrjeKn*e^d`l zQDKZ(8Vr{!vAACffk_*Jddd}cv+}&Ol%*0|S?-vryOYyz=mIdiStzf@8iOMchy=x4 zAwu=t2~^au-b`5G4xl1b=43J`(f>64K%vKovu;E9B_O`rL{X(-o~S=It! 
za96bHK@b%a(Zs2IOEx!UNH{}~-5$z~S-Q@{n@_ zeX((=Op<-BkMQShhRM-Wip(f2>`TK4%Vz;=rDI5=L5hi_cC%;%C(~L5oq6_nR-y=U z{;T#=zAEdIfB5$elmtvCGSwa>5!=jwYv4?9O$rlKp460bNdu0T>a;FYH~L@XC25W& zJOEGQB@_o>G1bX8ljt4?VUW0}m=moZIF#l~EHR8%@$!%nOzlomtU8Z#ft{^IjU@V1 zaFsGoZS-@Rk|-59<7hgg9)_flEao!WyI=>P4@1^?2Z1?b5hC%EikxD-NQAb=Sa}z! zb3i^JQtVJ7Vh9n{FG$AJ$--dbN#2vgD<-2_hc%}$b4n@{nrL*X0X~OVm)=QIW}v%` z+zJejp#@^bFtk3cqGw`s<$@E$Klo()haW`$^eFDT-%jRIS~!)IqH$rH#-t-HhC#VP zEb7nWQ-zBWgmYd+$^sB{6rbcrJaI(hU7+%!7IxaJ69{-NWhh7Hlb$ts-t7;bx4`{2 zWn*Pb#gIOxv-llz2H5w5m>x5j6c$4Lw2}3rl}#9Lho|Z)r5_*(_5v8oH;M`>1L^aq zG!*woc??HpVITDhFn|}c+}g}sH=Mp5kkulQ?Y9`O^{ z6|n*7*xHd; zzR7X^a_d@buVe4pl~}f4`O~lc1m!S!dA#i(@8bMhzXiI>7u|nhgY1T->x}6T{u;w} zf8zD+PfYs#c>7bzV7_!u_8my2pQtc+pBEDKLP!vh*(ubQnh0EZT9*T%_Vek+^GQqi zxn3BB=1K7uu@NJAKLw?<=90ZfN&2BBel+nGaJsIT`aeA)@V zWqmt^0pMIH#oD82k20!#UGlyr=W~FDl1p&t07gaEarA~lEdR5nFu|Jaxc*L&>x${X zbo0qwPzIL6n7?Mh5?Sc9<%d>~FK;mkPdG^E?9ez8-K)pvvq>qZx*$aV1c`;1SIX=a z6_xH`xZ97~=KZdkxDUZX_kmA2>S#F&lwLw4s9h7_hn32dWG5Lr^PhLK{Bwj)|0NIv ze)R{6-@JsKHm3$d9=S#D%piPdv$}5QVZWw{+_G*VX6MdP>cu41Fn&t!&HcuSIWw9jlw&dl;rE*B^vGrJsOUvZn0f;qY2yBA?K| z8oImElVgP7_Fp@PO#(@!92@M8&k|Pe^#STCqJEv}odsstCht>|S4qEi`?=@4F1oi= z1L?$BUq5>YGIcE{E08z6^9~E6sYg{WMk~(qz0OiK=9PTyN%)i+k4rOwY|zg+wgL4( zu{)1}e=mVr-h^3yO*Q3t@6NGxG-pn~4-2c0=0lp2d+_Sw7W!9InY_2n!J^H(t^RL% zY&CDjHk=_$i}sEAL3v1iHaXWT9)xw*p0BIUK!yDMm9v~x&_%v|Z;j=5Y;}?w0lG=Q zlL}LQwbc_9LYx$V8rdEFl?J{3_;RxS4^$^FA7mo$^}(sy-*09SG{SQ5OkyZG~fj??M0MuMTjnntf8fB0SdyI#7DX@x2;BS zaBLZ3l=+t!kQPZsZExH3<}4{CU;}=FwtJ7N2e#&6ixbuoq_04w_i*k*1-%A$CK=g4 z2`}QvJVXHpmQifs?K%y9SVCBchas$UE}6KEnH!Iq-dqV4Jflm4A5n;l62Hy<x=!Gt z1WtiCkEFiIY?)biWl0A^)o0)fF)x9oHJbmT7_OJswfjeYRRQuMtQ;;ZZFCQq zNU##5_fygEY@AbqueIFG$v);JZx&zDsg#&8Lr<`hK}*|B(Uj_h??t_FEyCE!5!s~b zbC&c2HO&=YXihIpKx@2g1l%(h?Ps;BU9Z#+OTAkv^~7CWYCc-C9QAEnQhVOgPEi-? z*KT|-ab04#$GYjoN*2|*eG|yEnQsFNT%*An+X@444(1ROyUzdd;v3@B_2;=waKc1k zP9`}7c6IqJH@%?B6d|?fF}Emn0>hPC9V0mbvZoH`+i`~N+rUUm-Ch%b?n571g5r*5uxr0H_~+bouF5@F0n7bou? 
zX4YF|?i<4;{6w;dQHyxuo)!hAU8zE-oQ+mZe-#HSEqiJaZzyt}_6`|7x&7=n^2*RdVak+MWCB(Lz0J*!+u_@i9#s#Sc_n;wQq`Gm&TC!IlBEu!+-zaa9J z4L3Md)#B1c6FUDITAq8BZtDlbbLJu|$jjw0iONikdisP|gVO({XwIaOMRLuOAgAcO zM6voj+0S7kAu@*7D%Ij4t^=RSR z#;ax2A%on(>EuC_Qzg~rw!X);X*YN3)uP16O^%{$v~vW1c{bw0Ea37|`_%XPy4E&@ ztFyUNl(aj53ylMSdi|x(?K&$wA;HGphRu}E0F3qjihVT}U|+yiJDYMt{Dc5Dl0YKB z?LT{mLzvDTGz~!Nw?k%$%gbbqgAnn}Mg(+aV?(Q zezmozb+ia}ZWQEUc)RI^_>tZ3BWj7})*45O;zZ7oklFZD}P zhfaJ%l5`m}XsG!e|2^$Fy zqnl9EmQZrx0#=G-xs=wDdL6>on{Y%_OFee!5o(TXL1GDVTn5;c;{&AKEjG zX}5NZY(*yE7Pj-`&rl3f&qc$5cPp4x35UofLMdoFlB5qwrkPEa-5fyoTh4K=JL8o1 zdA{NSC)jYx z&?UM|%(Q3&z1XxTaMdNgbRhEjo-{=%qhpaGqDK^S8Kj69^{cjl(0urgR&dot&43iK zVU+*wq&mw0!8>q1@IYI}V(hD)*Xo!l{vVZH6t8KYSM?sL2VpIB0@prJ{*1nkyDrB( zglRy(3J8BtAl*zcCDFGX3NHsYw>2)pl=1b~WcI(Vo1h;aD?5+N1+_irbC-|uH9K5< zXO#?{Pt&_{9`?pw0RwCgbH9~4Tnfw|lfIUd#B*3-#$o$iXyahRkTACaTuCdIb!1nEA}mfx~G z3HPxF3b|5+Fue(3C$Vjdzg5=8@~>2RC49~Oqrt(&b!0?DmzuvLk{>Q}C-avg+M}Rs z(-a~BN2^bgn-YNU2Fz$~8c_{G21|7T6#9pBRMOPEnQqOa3o@;Em(9Y#ZFH|DE;nzqgV!xCD zaEIYMmsJo3ldP!x%O=evwQN{Qsiax3(PQMaacTp-AU2PEbGKRS#%ZQ2+bse^AWfz+ z0z+L|BZf`+w^UVZ0*9$kq^^VKrMpK!9Y+fAChBw&yMH-vV)$+H)iZ@RZ- zw*1TEb&K|PRkyMEvj6u0_>%qhB*_v~MqmThIAWrtcN z8J06rS+Z_oJr)GWArYu1o+IOLb<|(i92HXBUVD|U0mt=hVlnb*%2C)$%N?uu#anru zS{vjA`@F=QFWg7Fxie~mxvc0shJT5-VDW+-YRhppcYce zGdt^ek*{-NrNR>~*??oDRGpQg4ZPyX+!@BfnZ7j2r9#iKzE6kfQAsXpr_1>?J*fg@ z{^rBsWJkG@UNcCDQwCNAIi<6r9;SKph6F#r^?jI5tnUH;uD*AfcF3Rl5CN zhDxFm@VK%u1)NTl;gp6DX1Dp6y#DLvSlREC!L?q6BXz&+Dd>~S(mlgdfcN1ZMQQBL z_&edAS-ZT5rmZJN)ahL zC$Nkxe*)_4AsEa5Z>P7ToWMOnU*y0(Ov7B_#zg!mW1V`A>h`uxl| zYu1LSfgjan`^#Jh{Dh!5Axl+vqcHub3@D=AcXgz^sw73jiSIIc+XQZPM-Dk4GU!pz zCgw)PY0dpx&B9xhn7B^)4z!b&82CB)c|jhS$dZ>3qC+e@YxX(a?;KKJrNFI|bYOYc zq9DW0&tHF3WgLQ+h=~M)Z-kfmoZD}ubwU^t%92n`NAb}{@=(*An!^0M$*6P@S;*h{ z>>0UXB1DZqb@vHzC1o@EBmIJjED?nSxWckfn36ez1&jG!% z^&#EKd);jf;q>8yXb8sKhiZf;h`>)5%tk(uOxiuQ9Oai~#^g}}BWhaO(L!5sF)vCJ z>zny5M5R6$0;BLPsFkx%52V;0PS^yi<_<_ zqykRj4Sr%&O;o2iY$Q&Kzt!Dk#iH9#Ernk7rwGRf3*cZgerENY#Le{ zBbWl1@^eG57K|fBp}-1CXAXK(jLB8DBx_-zG#-)(V~(-~oQA~L8GR%s$v08>X(CoO zCnJs1h@`e;N$qYM?0{}Wcvg<%AtNWSK)Kxj?^&R)x=Ha^?3k*kU!n29G*m(@HlO_Q z{|)m72iL_4Z@P-zkgL5>F(@zuwH^W2YPuc{>`UGT4D&7V%Pu?*_Fi^^F&}crFY|SK z9hBHxSkoZD`b1em5J+kM12<;EK)y1PI|f9pC<|1Z@jd&6{SZ0O*F6(eC{Rci0X;}h zr5_rkI`ca8O!1UBrve7j+)s1P2Rbta!YMA)9Z)_+C2kmHG%S5W=~l-;A0*0%RQov? 
[GIT binary patch data omitted: base85-encoded blob, apparently the contents of the deleted inductio.sql.zip]
zc#p$%R7lMOE%;1P|L{r^LoU`p3YgQE<)P2X=-b;g5s7~O1@ z9kOzlRxjMZjZ5MrIt#DZ)TINK^3%{mn*|QIo7_e>YHR_^ty*Xi<0rVVD4&%!dVKY) zJ>4*69)=w>yje`dOeX1OgkGZM^lUs)3jA{o5V-~8eKN!FnN;SrMwuOs|Y>}_$Hjsx zJKf!Ra`yJ3;Z)E(eY0yrMBk@p@TD^mi(n}!7IqUhapa}z-s?a7mn1>m=W{^Zr<2V4 z>lIm7bVB<^MelUewtAsJM3rW;HIMo@=?7k>+r@ILgJ5rWN2b(XSiHC3T5(cY#UiTw zXAG`eoAGp8hW&7N!V_&ujnhn+8QAq;GgF!U|%sZ)zh*P{8lT$hq2%7EGo5@pw01%a$@0YpghQK4xz2q&obz(T1ws_P6mjYLxsk!0$L~W@EYDV^xXE7KDpptZzXK1YV~H zy=`=)aKad2*W>(6-DSx6ty5Pcu+fsu=+tsdy>U*uZ&Sw-j4gn?Gi7`tanE(#OX-b% z3!9$(>^NZuF_vO}zvk5JgUihKyobEqt(wPeRU+jxm|= z-8oBTPUR$CUEgjeXiCb&wrxstwWiw;xCz2)D#~&>ztolI;ZmRwx%t#m!agM#VdGZI zSuU}co2=AxSh`|6#4>4C{2bc@7{%y4ZHQWsiW!Z=1yhuS8XO28u7xYn0P~gQY*lC= z7{+8FA#CF)AyPjt=Rz%T^K(8ajzPnxGZ>-6v>?rDxY$JrVyEJ|d8+@Gw&NM?5If}b zJ1ZTCAu4z%OuJ@Jb%JB)c4laLw_k@ul#c>PNOgT z2F|#Sq)1Oq6U0B_3pJ@a7e#OCSOIz@S08~i*4<2<<7Jd7Lf#-*2eAj7Z^-}<*X3h& z8e1DD$Ts)W?{<*m0ccQyV8`{fLjZs>oLz$(bip>>f);bbF*#%3xDC4@*+xG^$( z&p~KXc>1`9Ea8;&s%G$v_)xb`I2Veo6-u?dFCy0A^*J@@AEXpQr$?hV+)M}P z-_&Rt$*H%OhOz46**wm@7fD9eR{^H=MK~qxBla%hVm4auYck(O%iD5F4>*O@okn>Q zRrYd!qa^8mTHI|0bij$m9WtHk4d~wf4M%GYXm&m9v#Pp`lwFlVoa@|f)n^*Z1MM2$ zJFCu>o_2?+16(?Y%>eNPacYF^WQ!xwVO4KI(;htL)2(Q;ff9Vj!5wxoCf%}!|r*cFB2tn?Sz zALd9l&6-#+LPT_0$ZX-*6x0OiUP9_+6hdZ=Y%+BUF2icL8bVkbYp~MTT_Q{>l~dn- zMKM(ikklOo+G(a^dm>`OqU%lE7K^T0N?(^dQZ|G~ZVc&AC>=bpHabax%G9TBO2|pD z({^xc6(cFu8!90EAMs}{W@jaW0WygeUEPawh;2MfKO>p5;Y8BBU6{_K)PnF%`_V8O zuTTIIxU{yWqHrKzz5>W2t~wAo%^SMew7)B?^hJpHD*et)bt*S~DI%3iwbVS+QQ}CT zOq&O;2m{(>C z$Z57DuXPRonHJ6TmEp6TZvGYl#km0dHXy0LN=Ofd{Q4@pzG~1fqxZB)bD9nVe__fm zk|<5R^I1q5q4(e%=IkZnliYOHNex3tgD1N>K#YDN1LFJjaRZG<*1Rz$Qx z+j|;~Sr&M$!D+jF+Uk2RUSCvL+$?LJkhT1qS7a@pdSWF5^EHV_aG!dA>(eO8YA*h+ z-d_HokxE89%m00h_)Tq$TPtiAGc$aknGpst(mf&hywak-%i2Kma%A)h<_LwI)RSzn z7B2Cx|AKApGT+Xt}vDW}I3+H9Bpn`^44obMujUQ2IidPnoO4sk;LCqAw$;6{v?X z8I`E~Zi+%`o)#2)SwY|V9u@`{2-}}y3h$nL`NhSvFD~%s`NiAcJh?d6woGhZY-bst z`oG*%*W=SbaWS}d`GfWbkBv_|TPGZ!cGWhFHD_T?|BJ>i((D%x8xMkZr_pNsA}@+} zlfK`I!eOV^3)|6P(u(|O*q*jK)BdF2Znt~ymIeLH58AC(;QNDt8KdnFFb3Lny$i2N z>&W>*`rkS#j zPTm#q_tCo>ysF!B9}6lTn{7YVEP+eas%Vnz_-_ncetslBM$A)j|>SlXci4vmT4 zqt4bIxwO0>%_&-*l-InyJ?&e5QM)s^0$;KTWB2Ea=%%RH6Td4QQ(xykUwO^*X^eS9 z-=3quHFt0QP;=I>+X-6ZL3=Rt!{K-^2q!_rW-J=49)iPLda$eZ%gs=rPmaHC{n}dlY2268B zkKyzD%WrpgP}lhi)YT(iB>gMBRsg}F`7IF#O|k;oB9 z8mn=0JjR6AI9YG(&6e}To5qR8t=y!WgTLbUF!QQK85nz*r;*ZsUa)oQ{r2@M?;Lt| zxTs_^{nnnOAl7cPkg=Moxm%VS&a;Cis&r`jq8;3UCc1igbwZ71a8H65;GKfHBg~$o zVLS{v#ZlKJ`)9$NE=I_2`MSF!%*Nm$IhB0-a6w0ahoTa7sSVJARz)UT!^-qEjO zk`Q!akF#_lw8Wo4ihV_cG2FrUY8hTuGN4|q+i2rFt}$rQL~Jr^ey4|2v^7vDBbB7Z z0l=0+S}i86j(-QzVpoQ{(mFOoGr70^!)a3j7ZQbq0C)cO(+gtdj81_jKT{l} zfq_oaCfgc3qcG@Vq_|v=0om}LhH}H0Wo9Trb$p-OfPWCC;@A@&AUuF&-JV5ky=w^1n ztqm-kw3hgOq^S_;4Xa6yQH$m=LNKBBM@~57=`39%X(5juos)g==y8@^(L%1g$E)QM zTU%k`_4cGdvL>3#tQn?Sly-)LTj=oF;hF8!Y3Og9-jbtqc%U8uX=+aB)&cz_K_sW}SF%_RiKyf4yPh}qniMp;+>DvoX%-Xyz8 z*_@ZcCFUYZr&3bS+VS)fPi;1k>X08NkDxcyXJ^0t<`EIKqbI;CyKbz znOSopjXDPT)0rHovBo>k!Ln%{MGO-wba+l{SZZCMk_`7XObd+Eq)WG?k+eV_ws-i! 
zSReEQ?|C#*R2YynzMCIx=q{;N8q+imB8|;%VU1dgSF(>*4)H_70Q8;pf0bT3(fg|v zGj=r@_SA49G)ywy+w*e|VaQ$5_(zxuNUP3)4=TBdqG=H&SM=xAw~=;&HJ=~}bye+L z?eFgZ8_O`7zjcN?jM$6Ws^PVL@3U2+C=0~_^t!w7x6am*YS|l2=DtxMd}ucd>B5U7 zT&vg%EG(SJBQh9u?Ixr7dF`j-#0e$;x{85CAQbaNdHbKk+uy&JWK@BJCV~UazUE23YC%&^IV~d@``}5PJMtudb78f7 ziKcQ-y5Vlc(vnx922?~#PP}tz7cp8bP1vQHBie(FsgYS$w6$4~noy zNu}iJ?i&&=)FAqR2fbZFN3#YJvBDaBgdhDou?D}qmfG{BncUmQ1AOC!%@o_JSN}jcw81fl_T%Ny zs%0f%v3xhrCJbjJM8wR3q)iiDB8Dcl>6`o;3n&oL_?Ec8`Prb?@3y*rG#HMiy=V}P z!cNfbbVp&(8nlNK(vH;6ZR&2j-P$+7OsDP!!@;mW?6jRRd_6|DKN|OgZaC?6dxL(L zhU&I^?Qq)bx2BW9=padI75ASd$*a{>SbYC=9E~F-N**Wi;&E1;j<&ChaYSQ`vZZlm zD-ebq?!=Z;&N75o>Td4DU}GtDk_1z^uO#8j&gFXi{Z&XD`}>iooox3>T;0*Prk7H` z@5A1`Eyn??9g$~uO51DFLZ1G@^Ab1}Yy?M!BvFCR<;43 zypAtJg-VsK>g>lKbwamf4BDVWFjbJEK3qQ%)~9`PxOFj=3RXILw8v_=;6L5 zqTgj5zT_I3L{qN8<%B#BD~}Xl9z5d;vrY~VRpW?VYh~Y_y0C%^)f98VC0|fir8XCt zd>HXtfePU5C3>*n3V?*GW>7!v3Gc`-}_+&8}X^4!XQ|fU|?QycMjmNcKr0EaO?6Oh^xGNR2a>O&8f>CcQb3PH_`T z035ccAWr50q4XFhRL2tOfRP%S#LXy)I7^gE=3`FO`Q{hsB^eh=Q1L5ylj}lHKiSFd zX*P2UpXeqkmLJ7l-WdMQhO*=x1rF6BF|ACsn}%}foj}#8{Vs9!b)gM8LymCXhk$Z=>aOwL%f1!fgZPBB8T?M4 zkn;;5Ry&%Hb*)HoTx!{2)*aumIv!^F(XI3* z@pX(o@?spSLk2S38(}q*dA!SOcF^JLO-Qao{(>l&R2}d6%kuzhK$O2HpT2(f^8Di2 zTNN)2W8ieH*hsx!x(-HaPF78fq*+;OxWld#is+U!bVRf6;s)a)`l{vC@-~{%eR&)2 zDGM@z(Xe?Ag3cCO^sPy_ZZHT~&d}rP7=gF8WWYG}7=I0{891k(j8UynuILrEKHwKxwoQB`W^aCMHsatrCsP8#R^(67$a*F1?SlASU zHerZ03m&v7d7no1_I5bwwq+kYq568yn!Iml;+{rId$t-fOO%K3c5>Sf zq~oX`kd(p`$km#%2vR-_X_KllQGwGKUen`YZ7$G{I-MOwb4M@h(aZY7ds%CX@7ki} z=9w-;`5g&w-NWb2EL&zHMoy|=di&Pgt)pdA<>NIMdPH0Fv_p-TlHb6Vfz#~?x~IWi zWMI#%uhu`(y>~I^kP$Jtoy&4FZWEkz|FT>y#A(anMryO!8PLdeoG&Ftoo1{W)uh1w z5U;JTplhq<>)%JoJEAvLL3fQJJWtnF-YdAtLsgY&?kHT6y!>8?wROn)yK}tTSy(-X zYu&ecsV9DP&UG*EoTx8+THIbs)zWh3ECa8Lt~&Ttwbgg9vFl6OSn+?BuhMV|`Il!( zpVn#^xDl||pn2Vki?AA<%ZSa98c}Od!_E%i08X0q1MO^O)9=oulflL^YZWP)%7L7% zrZ7U`VL?6^vx=j~bbNg4^>#FMW~!;jE+X_4L#{%2%}uaq6IXenJV^%IhGY%IiKwOC zjHrOyG67Rs5V!Qe)q?M*J>gv?ZXNz-dWC*gvD9wDQNd79Y>mDWEXoSP-8WQFn8BKy zcx?0YcF^p0eqR4HXrFfPg7_tw{LbR)+1UMIP?!R^yMSuICY4}1=Yd0cOb{e?(n88E zSRmQ$k-Y4hl%0c)zpKoQ_4BKB#?1tY7En#eC6)I|5S-8eP5ei@FT1<&-UMr)>r^;) z5I?(9g=5dGqwd%-heNn6UuvKTy<56ina=|nF>s8$*OC3=rG_h#TIwez7+d4IJmf2O z6oLiV)HKQ#%CDV4dy=x+%F`e2dHM@yAfx!Ub_8Cux6)Mme;~*f)>5zbw(cKz!Asq3 z5?7292~%-5?hjHb#E{x%mIf1ByF3q1F4HWD!-fW~5noQjQ%6ivxiK>E3o3yK41*DaBU0YoPv*BOMZA;!Hj85do`6@t4MTJDZZ(NdQ&^klV+&Hd8cHts^cNMMeRNB2^e>CWghZBFGw$3bSXcxth zp0=X{SH+K8Z9fcy-mo=|T0v(x4hOv!ZLmKc1VKA$ePeP?&8~K(*^ngnbxac`+MIVz zSE9D+bJ)_~Int4Dwd&p1e1bNC?0Eu@Rmz|K(E`n+DkWrflLRoVN3HIoT#r6hqqFm( zuxR_7CuWW<=ns0|m`tzQH7Ov7sx`55;!U|>7o(rQk24N@Kbd8*YFk;iyne9X@&=D= zXRs!{s7WtshImmko6?H$qPms(gJI_zlQ&ekQtoimY1bbge(*S&Wy$2e}ouwtJmoH3Z3n| zbK2I7prgh)xLuDELK>e zTqJ7EU~PN4^@DN2o1w|LOp`TZ{@(4Q((P~`m0YQ`??O7VYbdoWKG{5CW$xRec@-}o z$sZ?+*+Nt@+TL!b-|6&9vbfK(ST$He29+JGY&R%{(f#d!>**PSJbwrDt|_U2Xz2uf zr`7uJ-ADb3N2lRDPS*6)H@`?$<9HH!MVJ=eDve?1eDjNjXTG@1iI?z4cm&Rdc1!x^Ihlpg3SL> znA6V~xSbvwXNljW{{-$&dd^Qm?n270X?lc_&@$589J=Wagl=w-+4e49C6Q7hS><9X z&$CgsEKZj1mp>N@ROR8(dl1mC&~so{x@A2HO|&L81F-xyCX|41jqm*3d#RKM{P zgsJpT)P}aT5&7r={`(KK0slbH;(x(EUByLhbSM!CwOps40geoqJh0u$xRk_%&l?{d+uvK#v6j6 z`-c1OO2Lm=B2)`K?QSbGB9_MWe*IgN@< zRU`8>_88gj(lWff%eib^S^D)Jd($ty=cBx$ReD7|?YDQ1U9;ciebThw-8Gb@erY-Q z=)vFqyGMVwFNf)tufGg&7uw%=>BoE@libGp>M7sf>qR^0e&bD=?%Vr}EA}OLv6@LMoqbAzfccMSu967OhuQ}F| z#K1qmM?T@PkdGvx}T-siW8Z zfcI}ugPK6F>rAwK=->TyRBwYvb5fQ zuld+~jN`($PvaxbfbB#w91kLz+B*ij?}>GHjQz2Xi4D-2cYnm!vJ+dZ`t98-Yo55H zTXw41yS5!@zpd)uK6tfa);2HtXzvh3`4&gY;P%05bU(9I+w63oDVLcg4_b=Oaoi`# zF6rIx;`Ch+CJ6I97=PaVZg!o%`sz2II8V1Z?gn>(esH}0oAy`f``^X?_TNq1VL8Ya 
zUWdyte>aJy;fkN013ENcyAN`RjixMvcexBruaruc}Ffj1g6Tdea z4+DSP8jiZX{>UFp+J4v#{ZVfec81mXT!RDRbNLk!NWN}o-fvRwDjp`@=TV+Uf?8=p z;%}o%U|M>wRC=KW(5Wfw8k9<8NcNR5UP{@dZi0YG;zq0FN2@$}G#CdxKN^hty-9l< zbo--l(hjHNX=m*F?Mc`Fi+LXDwA9nO7TY&BCpU31$>t|FQT+WpOw*Had=e**{>Lz1 zN3*L(D_Z?kl%FhAkZi9aYxUyaOXlU+fI(@K);VRy6;PWFN9WPt6b-;^vyzl;pEB7cU9S`1ICql{pu|;XcsvH%kU#3sefKZ z<5?Pix03eAbf1znr(AsXnv?HyMQ1vV)ubRun)kqQl*=4ijG@33TU^g|E|^MS&_C9r zsoH^XIg4n3Z|Sx^Sf!|4d?=8?r)b5Dgw#`_1NG57n#AGhBpa{hb)_&LbImH5qUz#G z0S=NVzIb*~e;=h|VB5KmErd}^g|~OOkJFAn>O_-aEL`M=tCZwl5FM^C2diTlMOO-1Kj}H1iIlqkc2(X{p?@+-@bvl zv0ZOft<|eRP^4_j&txWk;#g{QuVbxRb$lxv&~6;ucOD1Mi7?yLV5qh%ndRJk7}pD; z)m3xquO=yCCz|Ut`7WI1S(Bqh5LELU9M6ZQ_(6{6BRc&Rjwh#hpPE(oj?F>?LVNT{ z=7E%uB9SwQ56E>p7U-UXsCK;>V(q8l*7>7doTvV3g`*fvQaU%L3quLlIAb@noyY6f z8M6iw?K((oRz5?ODd0Y2-gVaSwu8cI-V9OFe|X@WwVVWZRD$gs_Ip3?oCV!57Q{}Y4zyAv^|}M9lzVV_nbM2+kU+9mrG7BU~C78L+dm%JJX@AA>896j)_*o+qEBaI(`y+Z=*@J zh)_Jj5ci^3B`H$k-XvK_nvj>}$YR<>NYRcOClFE$x1E0XSjCN_ykEKj{jfjGBJzFy&a zW9@Nni&^ZVEr2Si}rTdWI( z2YhNOiD`xCI8FT^Tt&XixBQeEpZma3*ulO2LkR!t5`rQo?z$7$qj5G(p_Y0lnhx~N z@_s!>!WtZMg}4E$dIVA}4HWH1~Jd(%nxKF)vOXgqY|(%1ny z6mpG>2KaxrPDIc$@yMCXug>XC99F(eZt7Fy7&6{gW09|xzrj85(2PIGJ@AN5e}#LX z&B_5Ee>&hK;`=sNfQzLfPXK5Mx!`*8PI#*@UnGI#JFggAp~x73F)O5I?+Wxx2Vsq3 zy#=Sm>t2SA>|*&rlt~>O`eIT<%K$*iz$W5yIMja;M0BX4Ih`AB7K=(F%;$e-Ra2HCIFu6H%|5pp@u3%q%A& zu6#M;1-MLRmlJU4&DE{wHEZoB~0=PUJj*&->3)^VR8ikn1^mw9lg@zM4?zup}G4-dL3>I zKVlJPH&gj{S{#K%-Niuq2d?%?UsOKE)? z5G)(TH1MyZj2TSppxtWphW*ZOz%Cw|#s^^+kLdJQU>76y1jKa5PQ&FAbP7u3nhJC) zy@SM{)O@{6qpQ%nri1dLxSbTKs3Bj3X}s`LAOkP{=l{37O4hS`4mx!6rVZiFBJX^RI}nHpMMSm;7&VXdhaE4{~T9(dn;nXgeGa_$Hb%eV#VF zr$w*~H^yvmdh*tLwGLxh_-~-g(*}`Fh)ajv5hSSMC){u=mtIQ)f(IOUpKhWYS~&I* z_3lWGj#A*Igi{t=VMrjAoU6Bx_^a0*08VK6L||74`Iq5_B;)#PI)13VxD4s=HoO-Y zmT4^mK;g8=GVcq>f)OKG%Kyr za+wkAWG??FueO2}?^I%T=25@cUdem0iq68dGP*89fc{KI=MSfyes|KJks{xnur@y# z2E&=(p7lF}X>hL!v`*0+Bl{bu=Dw|6l0y9a)~ z>koo@uhs9=N8MpipH5osVYhn_3@7d1fVn+1>kq=*9?|Kqp}=r<;B(;N=BXdck6Ees zv*vxowYI#Mb8`wURr4X+as_#>{nXaHIMa>xo&h;TOl96P5*Ic%ix6PoQ$Wbv!)jRY z-u5(W*E~9B$fhIWL@(zu;N42e|1uL+cm^p(FBwPKzm!EUQG zbf*W5sO`4H3R-|2>itYsKnpVsCc$jb@Arq@+327%?GF8EuXoU&whnstvH};(fk6O+ zWSP*?F8xVX+D==eroYPTn0%e*UdOcWc+!3gAKF6`{UE3R5uN@Tr+nN(M|IA3VQLi%_j0UY1UHY?D zt9KCk{_r4}9d!LpHyGW=QhHkk(kV5$D#S86FTyM*CfqzmL5XI&HSF~3XT)KC1J3c# zd_D;0ctoea2It_r`PfgFbYp(0Yiw>yCI$D(Oqi@PAuEx00k|LKj2O9IFC#Jg@AR`u zMUHivOh~fML{!g~3^yunP7R3>KkD~I1Sdr_y;rEJPRN zf&Fv!jm|U}27_=in{>K;f7I{HX8m@rigk0f3bNmV z2|P4~55fc<(dn<|{gC2!hInqEHs?TEl|mY9;h#ivk0Cmx)Q?o-K#&2BKnfGQ+?kUM zNd-h^X?Pv7myfSib`r#-PKQ$D2c2<8s*O`mkgXfuQC`a5q}u0XOH%Npw9G+n zz4lXvs4ovBfX@PrESjW=Pd8mLNuwzmQ(-z;tqT!?sY9HMV&+gmO5f%Y|L%)p>PXz1 z>vG~alOW5y%Ox~^{q*_8{X9hK#Scp!(jj4@M?&1sbdi!k9Qys)LA&4aTXYw813E7x z8FxGFU=-dP`PJ)Ic?g1R8pV3n2(#wDmc=~DqULIuWtZ79|BX^y9-7JrVIPm^^jBja za*GnJ)`9;=_opX;2Ow<)R5iMHAe|v)%RRBPzo{wW}ir%BX;XUkQw8I*~ ztU`1EnVRc`HM&WWg|lM$i_&NQ?bp*33E3&B;Fy7LHHl$0!J~vxpd4j_s1@Ax4+$yl zDuX#V?ETE7f=M{*`a$>LAnf(~lY>rsFq}+hlb|;k_FL`Iy$Xwa2IJ+L)@?^XX=~Vw zrmJP)qY`%xG~0uE>Q3{w5?vme*!_c{XLD_3M zlcB^jF`6l^yaikCuM>n;hhihVFWB?lf`#@E0w?+gd1Kpm9`g@8bW@_`{zC{gW93*j zz)3ukERgZ**0+B65F#9dZuy^SM;!XIZhP1bJHt+I)IS(@MuX{~+v!gSB<}Wm_Xb)H zwgWAvi-efg8XT5$eEgJVv<#chOfT(BFSlm;fdeicnBxaIf{*C**NBqB%A7fC@9DaG z=9Zk78K0RN)uWxn=daf!5+mN2jpufQ?u%{M5g}E6WE=(PN^WhZQ}-I*c{+ciS?#K( z)4QjF?NhVQP2Qg7*35T^!JYd^^8N;PCvV}QQxwbx_<*wW^ON6y{qn`D%IK?#S1P0cuDk^ah$Ty#kS3d!2rN-@b-#%J;ae>9^i8iRYkf{P6nC$?4IXe_+i#Z_kc(AHU&Q zIhsBD$MVOr_r3DZ`@i3(o%r(P<+IODHz#LDr{2-mZ(m)WyrhLXB@+LvgEC#uo*#Wp z3dHeOM{mxaz5T4nX9sqNo))XMiL!X-_Bh+SwesJ*IeGi+9W+zE`ILrOp_-GIXV2ce 
z^-f;CeYN%K-;Q2!~Jw1KfHUs2g6!-*R;?2|y_I5-%Q{3fpuMGSVf z-|D!(l%qZvw%T2OWkj>z^~P{C>J7E|mitPj`L_JBW9c&7g#H?MM)GRkokpdRL9KIl zW!u9MEyD0XUKzTtxF5G$wH@2u9gI4|-bmhP*LH4uyG`VLu;bfaZ#3$*q$H7mbqS5WtaHO<8~D0y7VBl=2b$p38Z=koWwwA7 z*^knIM54>dP2;=uT>2bt^$E7lRa3ysxTZ$m$cS7WL@x8rP^FVas24EoFbAI~y#3;a zUETA`Y-Mt@xw?eB{n`1G(ct`v$L-Ny4i3(rScwu777-kO`w;UM8K-3gJD#lQK`qP_ zqCYfS+_mQh@M$0#8f{rsRO06(A*FuxxuD9IQtg&(1zivFW3+-(V70dWY>PGSmn>H% zQ-6864Cz8#*pC>LHcRZ^SrEz=1#-$Sv$fRske_xJ!1+l?;h@&L%ZfXr{(z`N&P;pc zzT(nur&a5g1UBueGmspAY?V?tWeg9Qv zTB-=q521jFaRnyUP!`2%68n)TZ(UO{(T;)(grcE(yWhH{e=7|-=eOV5aenEc>gcc> z7pn(5A(8jCbI`e`6Z-XTC-gciqW?L6cg`OPNWES6PJh&=n@e8ly05tBk3^(i_d}e& zB~6!cgQ76A2a$xQYHyD_h~4&Gg6JKLe(pi+?K+6VgI|0Qx2OC|P9$e%R9gIyM{`GS zd`5?Z_d27)!GF%^?lVfaU$3`IC1{U2t**S%c3*MNC_V1=ck2_akqDObyYourW4fL1 z%G9`>iEKrt?v316+>Zw&(e6-{NFMLE4y5Nn+kK_-ai=!At3683gSNcVbzgBm9#YSD z7p&9jqi^kbRBN|(e?2&;{A52LbPtFr%B+WNw}(pKof;(dJ70HNg8^wP@=mw%PUZ7n zt$mjtcSsoOwOb+&4g2mZm5&FtcDwVdGvrZo9kCUt-nPtmINa{)QQWxK+QE(6{m!Gg zaqqz8#+{u){O}hF@mur#MVuHWQrp@G<-*mTz=28Ydmj!=um6Vw@8-ZmVhQcLwu=r? z?qJmI%R8gWJMOtWz@gs3kVf6^sM8S+e$cPHQ~8{F-(eH#3=R$kedXZ?qslw(=c5+= zw4?W4uixq%NbjR|<(IbiGTE@4=?*^BezcCpGFspZ2kN#e?LEYSXhGhKF9hsBc_d$8 zzIE)!N$fp6`SRrL(F^b7?_ZyJ{r=D97DSwV7_@tX_h%acJ#Dm!pqzg?bbwo((THyF zE{v6o$1wDf~_@S9GHgoxfatp2ZG*KHQ-Xj0T8zAIdxJ z$~*S+N4e&HEq`wI2xo3~pC>m{H}1*PibuAHfsYpk54&=u;N#=mx%>elw!Ve)GX%Np zdYS&I^Z$@p&i~u`{Bpj^e?b$-^s4 z5E)x!j+1-mA~GAy7SgW9UOn>DWF5vj9%j$)`w<4Y_Ot9JNrQ(zb3Q>di2M3G-Y4S1 z&~`2nJr9F>7te%m`~re7P5e4^g$o-QRre2h?WatCk2y04Ron`HxNi-KAc{+d-W9rR zw;v)*gd{ykb_uvzxmL6?Ti>(*MoerM z;G<6}Pup$#^wa3`6BV<(XSa&4>VMjdK1Xix_|#9YyfM~{Iy zI9`hWJvo1}@_cV>f1QQELP9f_eq3Bg{4hd{KlQyu8qPkmL4wn4*+{b}3mfzzl5ZP+ z(aaE|?lGsPH_uLAcwZtT_xz*J-9({nu%l0#{^uU2?gB&-y^W6wWJfN6ru5RJH))lf zDBoP{S&)9y^*-_$NcWk9Y3keg9ZL}0a#Qnos!@E6w92`k%bK{DPYkhO*UMyLbF#uR zNv=3iJ4cXp913hEP`_`0{tYN9$=LRz6*eSKyuI+68ZLlhxv!YFz^`HuNqR~fv_^k- z_K9W@<=DSi@FkEHMwdPb-3Z&}#Be{y4%0N2OKKB0rg!5b7JTMy!rZINROR=CzK(ga zt4$UzX990N^<(Y~$6-lNIL@-+ohBsB1SAPX%gjT*>IA|~fh^XXzW2t8;6C*b$YD3( z4i)@MLPqx+o~1JyT2jz3;~a}~6^3h|1~YZtAnlchjA}9%7G6d7q-#yDyrhuMkEzeE zUqn}B#|#KTPm3iz*GartArbH4CL)T--OF4 z5~hKwuz63W7l$`X`X=I(>eq|L(+2II9~4NJDFt5{Iy~l7&M@`PrjaCnzMut8kyEg_7!$X>APPbAj3V<+_E=H| z`m+0eFUQ84=%DiEG@NGci>6JmIlzc>7P>M!Wuwebf)YyyJWP^IY91ormCL3I3!H`s z3v(ec(0+-%brRFp2&6}P88dKVXyD6qfp3JD#yMxw0=f%bWl|o&49BF=Q}?7MErWuZ zJwzWn^WG%PzvkKGHQr`T*{cbp*o<-iOjVn-SZ<+sCewB&od7aG z(aUIY?%dy;kO2CMILe^`YH zZIw@7fS^gjxWbj<-zb$>WQf;`r*wtH5pf9ob?gc=7vl>gNR0nl;9c*FFi7x7a!s&r zEc}NljXf_SBp7<@MHmw=jb;1JKcbs5s+aj54evRfT+*6z6hVPBorO4ACH;d4>EM&v zmU^=^0gq*?qZbz|63e)JfWKNVH3Kx_3IHtgweOb45>C2+Ts2_h=!UzV7m8T{rHF(}`YUpJox6l{jH|7Nvl#%(Y!#ejLXyR`EM!djT*?^k z3EELhpBBBxPP54d`v!2j%3@FLPW{s4vjKSZ8 z*OA`9)>SWC6tmfq%MJIMGjf~{I5f`t#IlL-Y0tDTw4Gsp!?ebC;0BTea?VzOxUbX* z(j&*{6TnvrL`Yx3WDKQX;jf7e^Cb;&KpGG{e(XNbuym#QC(I5=71SH=r_r0t<{f&` zj6cx_0?`AR)6$dp; zFiUEB^$41y?4P#nUMal0=O0aqG|osGatG+?Zv|UA<5)a6_EGP5*$e39xNG%o$vm_Drf{;Lx$z6DtrxM)}!;q>&b0!~OxN5oM z@~Lw-MgRw<+Y}r~2r&+Lz-&+49yIS+MjKXYUq0l6D=Qy7sDS(SUDmc#1hJo?xvK`2 z)apnyh50gs5-qv9;dHfZ4H>2e4OFET+YmoC$b~H40^p`K@QX`3f=iNdxsC7Su9-lB zv@I;?#@RGbN@RK~ua&Y&`BD*27u@_`)}M9zG9_0=E%nVUT{8<;5CpPjwn%R1VrZppO`dPoz!7ERW9P-P3)tE$jE%hyC6iS>HT;yyj6E!=ACRKa9jc;nP;d3>oi@~- z{_wEZc~W`WDxZ>SeI1GnXH1>MSrX-sS{i5(txJ<9tqGsDAmRfhz?2roaz$rRP%&pl zuusxVb#t|=ik!Vv6kS|ifbY$4@zWp&3cgE1+0(QDf=$3Lew8fgM&;{=n9g0TRxx4= ze>s7s(wO?qe=YD$^EHPo$W(~lze6X2=Ojm&+Tg!^ehgLO7>WBNm>k8P^6bDAMPsb4 zD5EZ-A6!O&JGx}+(2w;Fk(p>!Ypm0j;hC@2os9@fr6!maG9l7}Id&6lP=TR%Z(bdH zQw+L6YjgtyvKq;6Fjw+V>t&MjcI-VT0U-2V8{qLxIT5LVrue115g(jmDk&5XY 
zW{AjI%`psT28@Z!gW)nyH{Kqe)I7S1uS~mQ$fmi~xQCF`X88^y#q2~?^=>`?Hd3|@ z0liK_D)9!ur%}stEpXAr8;-yT113#z^i)RW8eg~&UKER_a+khvx$>sf#DdFWlM2rw z71oiEzg8|UHe#oqJR zwYRVL$4&f2M#4kD(8~_FcBfqnUIO>vYE#pi_rhrjlOQ3KT>>zO(K2ONp-*u#!<6ki z{GoS#PC_z_P^i1%z3@Ks_S*IH^L4cEO}*!@y+3>S3m#7Q>HodDM??0sCvLK-ZSvm) zFQ1#9mcuc_7YSy@bZUiQ*23{7I?`{X3Rsk+Y88xlHsdqvdF80r`4gF{qB{adw8WZpGWzv=`^+GZJ-wEVtyRAMpp?ji1c+Y`sET^um3uA%m{$m07M+V$!;}G-zuv zqIH<3ezum?GKZnEYS6CZBo{t5M@$mQ(cTG(>(eXdU~>`Qs|x|F4Rb>+{iMhMXb^x%tLwAX^ag8tKJ z*SuDvFMsah&j<47A^tq-ixFV2U2E@4BSGZP*|JQFv5_5Dj$pWSIR!Zp?sY(jDq zK$NAEdnQ054sRCLk0Y?pB*OQ_BvG%j>?4=Hz-l}YpE*+O;Mv@AM#mhQL#oY2GlN5-PNL@M1qBNN{_@N_s2*G3B@(p{=Cv1v2^xErNBFklj_3<~KkOG$yb=EPBL$BU% zt#3b}n<{|UlwJ;Rz1An^AEA4%{0Ctc+K0+N&D%N#Q@7o+yFki{(wog7xrrrylZPw+ z-}uf?Yj=LqinOo$yMLZge^)cq9o+fzJWN(BQIObj-wW5_?R{OR{FRR1nX!hmel!HW zm}f#=KkD&c*`77o(BEGq;%d!ay56(Y~^nR$;p{pPJHCv59DoV#*h6%VT|ryx}|JMV;;@!ZDf+%u4uceEj_fC z_qR2fPf?}DY3si|=YQ_~2!^Li39EL#O5nV;-+a23|IlkmJpY{afJsVB5t={}%B`sO zzQ590^j`4%9uk69P>fIJnwHnm~C^P#96kf-=s;d8atoxd{I#r#rDXy^wL6j zyda&}YM_;HBe)J41rdaQBW8?gAg@u6Z0$y$8s-mSZY=xo$Tn#1<$#!3?6ZkJDSZi7 z9HumL1}<9$M-A(5YE;f}Sbd`&!NUsanRbR~YazA^i3WjqpQ*`0{7fXdh$Yv^)?83Y zeQ-t^WMz@sUttwtjYO1PCQ&0-a>}m4Exm*7;=Taql0abBo3y|~kJK3lv{2?EuAvMi zS4dcZts64vZqq=dZd2O#6^F7(ykKNZTsT`ON{xe*i{=V$A49YxQrB1$Rr89Q7c8}gQ&TuJfV>p7}D0WUWtX@$db|&ay_N<0? z)nlH=J7P7i%PH20^y<`copodGxgMA)R$|E>XS;mZa0+wmuxcmeXtTj~JVqb&vb%#V z8*TaYB(~da<6==RIgjeNYEk`9=mv3oNIkL6`xE*#;j*MB=TG1_W+!F>iy~S((J)Kv zOA`C3$K}ki(_>Qm@(e98<&FQ^@J>-jHAHD-x=9g|fXxMCSQoibcW%N7^J&~%VPKsV z4LS`mhP^;J0ooK;a(!Y5DPN1U-yDqa4W0J_M@!=o+M|Pkv5Sq*iZ~-WIrEp-h%Biw z^QBG9VIqTlo)x{DD1)|YLOv0J0j?5>sE`%R@2$^BW1-+upXQ>a$_Sb9Z1^2&3K*0) zt$gfkq!sM|JE>tFr_ts<_(f*EDZilU!r^#$9)Iq=O-cy#$d73iq(4)feidrWiQ$EG z$^+%`vjotrnTK{HoMk3vSp>J?UE-5$NK9_R)Vj;kkS2w-5Fa^Hi%c8suy2_CyQ1i^ zdlqMJRxCMqgFWC4k3?E2Ya50p`r!N{7`14`XlD;K;u7AFE0}2zi(K>O`~qrHWf7~f z`_|hkc0mydw8FxS376m)30k5XmJ9wstF3PS8)}o8GJQE?E!>~`QJm?2&GPVH zq#Fb&^Iw?`wLk?EKa(3TTIw)pY@riE#GY1(aS5&?fL*gW;d|yGzBLxo-(j2(ZOJ=Y zB^(1?!e3`%ax}*E30)UL{BbnZQanNgbjA4r5mS_j?F3RG?CZQf90{2x67g$BIZfdT z;$TA%nek{N{umblq&eRR$h@Mmn{2!DSrFk5?HbZraJ#s_BF#KwFDq}0)hohS zxCb8x{u&G=iPee8FYDzU-f|HlM3_r_(VR`AlO)&}H!JUw!_G>t-A-;RVU$%b6@4!Z z%=OjPxgrk{SekRijWCX-u3yF+t@#FDaEhwwZ3^}Sj`Agr*fjM>%ud`tHx znBwa&ZVJm|jt9q)IuJ`9`e+ZL`-vN*_{eMM;Tx~yip^?&%^la^fqMmOp;j!hU}w&5aO`#rRs7Ujl{zGa3TSR zORb|&?3Rx3OGB2(7WyPLKX_M5pkM9|CCBdPSBKT!Tk4g_NBy#a^L>RjB@Iy>BdOSrI!F$qS>Ki`a1%f zW9@lDT=_~wmCEmQ9H-IJc?7n5>=)C;rhYbE-1zAaG@MYbl8y7;8TMx3FK2Zm+k(l{ zu5oyrO&KUF(r0()2(P-ckh^Az8zhvG!zrGteW6@?yaaLa3Kb=V0L=GnNGioiQRJAY zq#CA)H(T@y~u_XGBoTG=oHO*~&iQD+{$XwI^?UeesOlFtT$EEba zDfJ*;5_7=OPAyZX-Z05W3oO16`Eg{i>V*(`cC@PsOrgQW{+vcMXGeFXu!`8B;oZV#UN)6v z8KiDy=O0N}h!g%+O%OKOF2*hm3ji3lH?Fk9=^0SPkdn0w`J8f?15U}bkk~02qZ3bW zB{>PR))KNDqs)rCZY;bIrUA)VVY5z>B1SJ0VXvI6E=?KdGERB(7P5I|U?%^)G1o%s z=1e`P+L=maWr?x{u_3mYX3|P^Ssplim3 z$&G97l2Use5|b%x$hOqL;z$C{NYqUyMcq&!CsOnB`kMxf#NzaVTMQw9q2@H^WqM&| zvq`KdljK$cckB1g6E@T6-5h1&5mO;=g$#|HT~8TZKbo&DY0WdYFW7ODT<6TGLP3g5s z$2HZGqBRT@o}?KCCRfQ!j|1?<(-v!;XWJCcu^N|rDci{5fH1=FIM`m7i`Yy2W*U|! 
z8aa3-(EmnUFmRYe0f$|hp0LAao#>CS3DHW3-7JQ-dP*v#&sv5P1h-7oJj|JXHW^C> z!wNZ*t_m#L)ifzraqUOxp&9AN-qAks-Pf=P{IM-rMMN+ujK8}EbN-Q72j$x@-u10D zbfJ-I8x_a_PW0}#{=3KKFi6WFkJL8~94vtioD_i$2L}#)&3BPmlIDn!eZfAbP)yY@ zIsUQL=(jjhb7OTghZ%IKXE;|);FhdX>@*8gXSHA6Im)zx{~ zHJ~%>3|5^XlEEX1hUkhmA}i)#tXY{eVpnf_$xIdoo5g8h;#sVz4}s`A2!!DIZMGLk z>Wn0r$aQO+g*3B{yJ9*S1isW6tlw{B__)by#CJDntT*DWw+IF6 z*RKA+w4)1pCxu9^qB*I0u`$H*)Yb*ie6p~zynZY(_O@yC^F^&3FRepX4!87Zipkx3 z5ej?=P2fb_`x_HhfFNa_R;w)`g=9}4;@wKix?N50inDUXjrPWbkoJ(8|s3%rKMVN0%(qiVsXynP- z^r|BSUDx%K5?yjf8Q!H|b8dd|#x@Kknhs zdw=$rcK^&SCj4bE+V?&qCfsg}TJo7$6W+3EvW~Wxe&joz`;atEejGyP+SS~i<} zq-XO+Hju4;%=@CuN{i}bE6E5F0k?W4;qJ_W%@|0sTQiD?NaAl; z(Rqi6XQgiAw=73cc7sj0j*vwR({;p&9n35<8|gKHtJ;Vrn!pQ*8ot;h-KfKo<#|~Pe5d(|Tf^O|*J(Mz%-8wnlz$CF@kiLK_MLkGTM7 zK$pMv;RY^{4;2`hTmc1aKGO>y_TwDz7r2O*4Io~O|Jl26Wjq^I>`KrV?J}n(cG_vxEJ28E{DI-nH|~zgN0RKm-0YYfU@o?|a%*HQoxoO{DOkka z6_&#ovBOX^=yYe`Hw00O!)D1zJe>1jEV@$%LZlQ0{LD&T%u7trn^m+M-b)$?-r?9Z z-PXHDhg$&`zYdL>3eb%#G@3p?#ASdt3p*@#dKJmlqBW?y-#LgkMyf{C#&To(WkpIu zjEe06o{%ObwzM(?@6P(dsV7o0-*F4$9kACYO#luSgpTU`f-;WQEKXYFi&=pb;xY@+ zZ2DTrcMbAgm+VN7#@4tu4Bn7NlY+UMi6>V5nY!gjtb<7Sq9=r3XJ^5_=x2-SV_3GOG_KwXG0a)3f+cX&F+PTTt z!X+MAgp?Ig&R#l;LSk;3(A(a)>4rnN%>Dw?=lrAnGU=9f^7|r6!_olhAwuBV7tu_a zSmoeKg6BrEZ4vY!hfRiCd#{bqCDW9`^K&+b`OBZmKav~~{iH`=?XGw0U8mr!(7hjL zkD3ujolSrTl`j%W2FsJmv`Th)D%St^5{mKfGm&QxY4?EUB2IQV1;eF;!Qwc8vjKdE zZ`T{xjG=al{E#h#+pAlCEsDWhEHIX7ihc4gaz%5FM8aOZOhjlubInRb5YsS%s}z1V zVyQmp#mSoRwDh+Q%J{U+uu1#3T;ZvIO~TacFpiVyl}hbn15V@9l5U6~FUD(mfrUcx z1i^%`Oy)j=YXC#&u6spj6VZ#DJMVfwb~$6{8WSM75>60v*;~*F;Jd=07wuaurnL8VB$-c3aKJM5`kvSv(?HT?ST%tthpKhhyFrgt$o3Zb+kr zV(LnDr)O`n3zyfT;i*+_7(P-WX43EmnuifC*bt{m6}!rJfP;!eqN*`C?>?aHG%N)*P)i zG|HV-7HEUj>!u+(HEkNvEgk^uYhIX7OIePV05JmMWQ5H=%;{2({r~y@(sCHgNOh}a z>(IZ_gS3cNp3in81NwCc8-pe=z|Ns|cpNnY$10rLoCU`B)FW9D`LH%ODmDUC6zlO9 zf%i4>uShH{=L@vZ8{edoI+i`yqt8TK(pR(4zU5e7v%7L<)m#`kmI&TE4pLz;$A%V* zElSZUR)T+tsP2Lq+*r|Qmp%u^rfsoRij&(Fn3K&#Hzm4&m5YrgMx$|T57tL?s_U=j zfp4N^@CZkf!+PIVw$UVx(X|KYhh)7F{7y-J32aUi*@;d!-rn_E;-IHE6xWN+IBK`7qR8fqg zc}V@Ss~o>N;JchfqF&X~gD?r%566yB*z|LFu(#2WOXsMGYfwlNyUIa0jWP}3Q%FzK z9x;Oi#!{cwWJ7~3RtWwgPLdlmPXWRbeX`-m^RT8| znTN@upuh?0g;1<%2e33>pLw$|V6!O?!i5tqXH#N@rGSis!`%dhSTesehYIO!|5|?c z9aiTaaIevTUx{?Sl@|4(ZRoDt$sycC6(SgUW86SQOH2{zyZ~P%8Ps20lTtPk2# z?s0gv+@TDr#}MIk05%qLx$2EK*Q?O$X%qx>?w(~TOAWrDrT7GPfJs!~&!BYGdH#e7{q1;ZtUGf*bH}7bM+&ZU@0;QeB3R(E8fNd!E7viz|PiBep zQNjg6!#s5W*s#BUV3Kux{8u1K`l|7WG; zDSOXZc4(TdF2H?WG%!(d5Ye3JHeW#cxmrS_$DGYNJ$^t6%V_W)q8-_Hl6lie{|UHRG+rXr;p0&?d{0w?X=2trZ%E% zkxtcd)2aArt9<&_FeoE9_$EzAlntMuQN_Uryk@?%ab>V!U3@7{1gQpRN>p}MOjeNu z4jKjQ<;m+aARG?8Z|F^uWjLu~{5r*r$H=PGi2)Z)XO3*u5nF%;|8p|x zo2}A1G+sNWx1{sygrFd^UpSc-nPil*1q82sOsesUA*iyCc^n^4E%T)(Tx&wC;f&>B z%Q#T$nYVWYgY%)7P#JKNqymPOa6xKw%_~X(vIJ}}5+?BZ_PcX6&P=sNM`l0a>(nW9 z^EGae2H2xnQ5j#1$8Jf2bJ*@lZ*twz<4pO$4^U<@H7yo#jbsR zX-QNEDi+Vlftdu;foRD@s7!x;PQx$Oh^JYH zCPs)qrz86fY$8i8THu6$E63O&xc|#gL0!*CJ>Wyx^i-U36zlsSi`lmAuQdD{g@_(g zWJp8K-I6m%#huc-#WV{zDxDi>hRZ8IZXz4pUxpUf_{PlCevVl-rqgD5yJeDCLJX9S z%Bq>6a4%e_>&5ynXF}4S84iqek4G`!N2GtOZ9B+dxo3&D(m@_~Q9XPo=8}8;s3Gh~ z1@5%VyR*YdzoWxW>#$c=D50kZUZ-oHI`^Ii=_^0we8DY%^0B@8xDo~*8Veveex{5B z+3glo*aofuIM76tFsMPaz>#Jq=t<><>+qHlW?^ZFRzGMNl$Y~jL!;wJ8|mBRGcMUT zz1lbk<_7KN*7`nJlgzFFq|f2fzzMX+_<54%1+ZwD)7ykeo}F3_1gf5WJwVCl$|A&5 zR(@$Te-YADNzSW3rx`E8rB)iUV*2>S^YW1t9{H-6C*IqHt_>7w;PZaOsco$91<`yi zx-$zdc$p(UAOZQgD}iZBFoky)Wq_}fxxgam&R&Tvr~!TMj(OYs^U!M zqyRpuS*$Mw{WC9Svp-noba!`3b&a&StYOZz7&@D%Je2Wa@3fjACwz0+8Xj~kZz#9n z!Nf335+y>vD>!k8Kd&sdw2-NM4qtE9_*j&!?MLCGA4XW5f 
zb~fXHwlQmT_T=G@%@}0c+IAG>dAny-Wg)p+&g(x;ksor5LW3OYAxq>PDIGkfCR}}r zHtJJu1G#)MsJd_4sBpf=v6)A6z6ZPC#!GK=&1jeHp}<`0u-`gtx#Iy6QO`a-(W_ec zhZvN2u^r>PR^5oX+(n>Pd^vJrT*vM1v#mkm01`&-fx%e8t>O(M4w_V@FfQ?1Q4~i^ z|Hf8X3G^_DpauxV1d6i6bi?ilNd7F};$)MwxnwINEdB7R`clCO?Mb~7jvPVpX%w~B8UClV8Xkqf)=b`v?QR_pfeVAgCZ?swd^|k zQntd%n3*Y9V)G5{(s0DVAL?~Xsb2rwU9nPbxa*~IKgYXPYPj>Nx-<=K0KiXqB~iH< zVRk5-=8+oTdbtiRygdoY#C@QW$W&g_Q`7&b#g^bSFDixqX{{Gv3zT z_#@YB;G~avPn`%X>MxCGyrm8A3s;KtZ=+=O4*79=@Q*85*)&cSt(t)admnLEx{lnh zgfX^;jjPS1kryFUOu9u@>vvTW*&g%9 zX1gu0xA*)bh@*hl(iGV32J2-ciY$-L7k|7J8)$Aq>DS3}<3{Q1xucQry?sNZbdf~J zO<^Oa*^_c<%M)FtxQcW#C8~_KYUU zK;Ve-H+NS)#>J`PGp+o0T6*mTOj&x*dEWRnqshKXCw}|`SQNr0_~K$F=@i#yZj)#} zd^NOJvhWN5ibxa!5xBPfvY9c8xw1xBspHB>w+`-VWkg$2{<1+$G{SuGLxZjq2~}vW z7Xc!y;KXaL6aKDTgXch$;}S18hK!r9Q71u#0vTF|c;Tv0@>TSrzj1Y!Bw1=%U=e2P zQZR(qh^wa|MVOb!ASst7KWXQI-dv`uJ=r2@<=TAgJ)6OEEFSQ1msVbu0jl(I^Y&txH0TbK#3=l>@2uE!VxFYwxgo22f7#dd_*zSg zp86Z#`?6T)zI*P4*6*(G2EeV??29k==7i`O;UBKxi9eT3Yr3$h2YLRa%7;uOoJ~)W zlAUHT_3)mLvz?2xiG1V zFNiJ>J>pJy3rje6l^DrdFujPh5A(Q2AE`CP0(pv_!Jt#L_7cXzF0mr=XcZ=Qn#M( zZT7iF3xo{(Wp6{*{4JiD{^Y9*y_Zu-LN|J&gWcn#H;FN7+Or=%3k)ZJO0SP@R$KI7EYFIg!se z7|*lvwA~&m1Uq-fw@mCR-`7%J}KG+J>MJF7P37<4VFi!G_PPzg%k%B^~W+%2lEpqZb$hI==Sv_WK$ zC+;ZS8)07x8gF=%W>}igW`MgbM^n<2t<|F;)qo5sS^#e}XT*i&Ml3E>lGrsG3!SQK zCpvQ$t%%$CamdajIL}K*?^hNj$6C<%o(OkY&>(2l+3d7%KC8IUhcJ(|MyMTVjArt5 z2&0*K`moVV@6vqJ+|}tW2SK;2EZC{Hda#ML28Zo~CzYqI@+m46i!;-Kz!sxAt|Ijo zAlhlQVGpvgS~Jn3Fl!FSBy>`i2&{#kt5wG?*FRCtfk1bVH=iSm7FqImi%{~pcT^Jj ztSsul^8PhSLi@GJz72`K^`cpsJ|hL3c|boBgRcGXlP7IzqK(`;pL)0S?}q;Uf&QK7 z`GOFJfGeTE&*eLlezR3@jq`4hrxece|3Uwqgmj#K^wZSe{Pg2=I{nFOdwlP z@K4m?pQuCr>kL=SpXfgu`Ogpg^qeFPZQ}f#D<}OANtmgm z_c{IjcE2XHDyDP|fn|7aKrv~6luj*Em|KVC8k)1q4MQq!7PGd{ki>k{9`8UAE}| z>0|^bk%GVT>wM0f?Bku3a_{*{E&k}Pqvrff|FrRcf1($sJOXU+i3f0XWLgh-ln5+*#^UK-p4ckV!s@=%St@MOY|_c z-c?fY8_wua8&^6nI5Sdke&Bsu`>7;=NtPUk%+U)Fo!CTI2;RK90_=65$h{`Bh2dkY zR8z?&WLOmqgXgRNN@y6LF&YVD=Q0CA_GLrdV?`dSPYnINVFl-LPBHqjZbw>A^~=hw zPFH6in2_DRtrKi6)K14feIHlYu1>dZ^&W2ZUgcIF0s*|&Ic)WxRGzlVrzd5IywNDd z?>jywQR?d#(UpmL=duqrDaSzXad?2hj7jzp%*Zo;6)ht_mBmNu7@)pFguO<27A|M1 z2%dlBOCA9!`ml&Vse^jQ>}T%BP=h!p-RJOAx_6{o#G0KL^sS5qbdF7(aVwLEwhy5q z>&3>jBhpE-ta;-vqRG=RyOK&5w#yO}jZ-Ic)r(062{o#mVF+>{iaNk(Zx}I2OW)8k zrkg|L3s-VWrNj@`38qHz`bnw-hl1iNAl6R`XNKHwyX;c-X-dX%@>U6YWXZIDYwN8yl_pvrO~&y{q_vbrrgF)1Yck%JAs z!fBJtlq4;~x1_EEu@B7ypuLjVEf~dZ@+4x%*IdV}LWN z16?>Q(^!(lAauf;1q2G5qfp}wHM7{yP0FBYs-Yk7ikKu!!v#gb*QL;}6G7s+W<+0( zOg2#S7552IXE{%%p?{DJo-O_B8fP|ge=K~LROV+k&jHjwrgnghg+0u}`Q~E0(IzCG zgqQ?NIOcqEgr0zwSJ(%~$0l5lH42wfY8&Sx5^F}4nM5ljUip$I3{c{%Rs#Er9Srrv ziSL<+Fo#;y(}q0j>o9c+uOzVxxA@A4a5ahPV_HH@VOZEWU&r?*@Mbto8|y5u8(m&N zybvrY_sq}=5>I&>{vD^_&B+;{RRr-#GJ=79>zR0WR#`VT)G#Z-T-ewUu|vW^z2Qw( zmeuf%h(>VQIlgwy0nXXCxJJRQq#W#$UoB%ytv=sd_`W*t9WdcwfJHVZcWn zdYE!-!lC!=z47ccdy)p+u=Z~owj-&CW4D%NZ5CR2c!mU+VvsyW|7F%CkiAR?5*Dj# zXqdXLFrtQ>q?i!Qj9w*uVhXH%i7fTqpaP4D>F$}6wam^wX8i6_Ib7H&<$^`y^0KDz zE%A?Kz*(VycH_H1^aFo|MKpC+P+OxZr&Hgxa6c~s`IEjPDS z&m}$J$sy@Mnbj-9tmHU3#13RZ+4e^Wf6DvnmiI|2s9jU8g{Dh(S0a3bF!r!SsIdIa z_Zi9#@bFIGl@kF~-ahOO4!f?fiuy4v`;^hyUik5xHcqmoQMWlpH5|$FQ>QhF5X&Xx zY+K<>i-2(?f@FPiGCK=&J}QC>R5(HuIH+MI;2VNC!Rge<(Fc6{pZ|RR(V=S@EaEtX zOkx7dlgX^erat2sI7K`dt`sf$n_7dC=XE zyJ#B*VuZs0(bLTp=VV%Zph>C_43e=Y2A<4#Uon%7$B96Q{EC*2lqvD}CyH1s0rALU z=RC(iHd8p#LZCJDhUS~p3f2)x!}K1qEx~{R>Kx_5_{;Jq#t7m@pMK-V#@NV>paq?r zi<^)8+T=U1am5;krHZ{~$D=5ZHb`=Zlvsd1Y3J48(sFdVh;YY}G#xQce2u%4yojqP zmO$K!PgCeC4wvF>@FQCfRy-T2BH2w*k=XTdaJ3GJkA_y1@QB1z4aYR&%R90nDeevL zOW5u!3t+cedV^uzV3G^^U8+#268bBZi+~hGGoz(QMZ#t;Gj9aGR^OuFy;*q60 
z*)&@^?vTY(*98I2xj3vai)Zm1ELW;88T=7G4)hwAbIIi3PKq=LV@+>Q0u5YZ=}e<* zjM!?ah4~i0Q34esaueSQPZ8WuSxIbxTBZ#@+n0sG-R`^2iX_bVG#Cy9$)8<2>`6f> z96?`6RNS-N50PSUPdHHc6{;~s3}$LP_w%iQd;=V&Xmfn^mswma1TR^TtxIHT1Z#aR zB@Oa$lHjD&4L#|)Da@%PZ>gkX@nEXscLR?klX|+q*u`4Y@Qg@h9+%4P%&Jf;q)9gM&J%P3M4Kd!uKh}~{4>Vun3bH#;YL%eJ&=>RKk8aGoAZf~r!4F)0VKO9 zv(F)*#tD*9sXL$brLoB*FOan^Kj^JiUS- zyz6D1na2d#PvuJ%*H-SA`0+fxXX;i;>#|tcf{(7#cApO5J~U4sUwr1C;q0?+G|tV% zm^j#cX+x4OaHoGMkC5;pF@ngNVO`7TtVB~iibMStB%%rvkORpgh$4U9EI6>COv2mh z%&Cfm=S)1p8UFJ9XKEe$v!Tg9V}5@IaFl7Tl6T~YZJHOUcUnQ?)2Ii=KsG6!TyVmw z0OA7=!dpdv;i#ujQ7a4#&$>u~ewT^K7AX%>r^1?6>}^ldg(s#GZ@av3S0Pq*JcaBFVH;aW1F^YDz@+$8@{V zl2F@Hkn49RDM^YZTtl&)_}WL{yG%rKc3C~Jom`Bsfouz=druQd| zp1=2BPoI{dJp8q#IQmP24rhbrR~G-lWF6=j&L*n7wOTgawTEB|Bnet`84^o3V`P|J zjB)eQZx^J0iA|R6kSq^vP3-(5g&eej2;5j0Fj8jdjm<_nOQEgvql?m@$UA_H5|Ns@ zopL*c+@!CYaMEO%k;RsBmCUfqh*)`m%fNj>dfxRGF~q!yUn@(6uHJ+&U|su5L!`1^ zm>$iDIMjuAMK=m2@$%&Ct!T=Xljyp#^i*;~Y%W1(Frf%DX-?@3A3IIUcP3rTcR-7;WihYi~6D>URRoLpvCddL@R)nn(Ya(R$WA1|MS) zB1BcZx#O~sUMmUK#y`@SkPi_nhauHW$ zQ9SQ68fazGWx~SPhWD^Ed@&AI$q&NAv@m&=@3?Tdscm-4TlG8szt_~LU^HPw>K0@` zFaMg<1?C#W3g0|^EitWgV<7!bi_o!%wV=dHy$~@rd*hg;CKEGh-Cn09r zyW<%FO};)lJ3e~hjbFt%Z%>)$b-Vz6RXnd$2$p54h^BbL+~wGSPc)A#xYy-LjB4!> zM|?IF@$J&|(ZnucZJW03E$)>Dv$BKorE-e)&15-kMBinNENbGFHzeP#9JA}Y=C`X2 zm%8308)8;2k`8=rVg8N-m1dc4trk;v`DLL2*H+uAlywlciB!4os+d0a4uiXM0Q9*W z^(tah2e-!9Vz#(Pwh$86=8XWt3S_Y^x7yC3vfUF>F0UY>>7YUX5D)2X#brpdEyy-{ zfc--@a9IQ)lekX}GL`BajM0}%5F=-K>#gftqkJx*@A4^_`kF@Ovnx%6HTbo$lOMdYJUa72l*~_tW zAT${R@XkFacW0U}zAs>j0#Z3!B5I!HtJ7r}0?W4&E1=~*=@<^Z%Bkdu)a5tcAbiF5 z1$!y@4=XP1TcLGHJUP2ch(wam*E-JYZ{J543D4)k8M0b-0zs zxJ&^KH?&Pgv#XVng(ng(NE#yF_8f|jCZ(Ktg&d!!VaT*qmbHF%%lLdmJC$81U-;(P z-!H~Dh!(R~McYSjN`+kKII{hHmMjClNKbN;*G?C#;!ugBWVzYC9-VTl zrsT2=$Dr%N!^=$-dhWlGgkh3x1@YJ66u zuqq|~W-EXQ__s#f|4#58Ic8Q|6M1q|M?H@#Hi!g>yk$68(#T*E=NhMxfG6{~l#5Iw z&UY(U&&}H;y;iU%u>j5anN2iK!v%6gu33{R$-|j;$<)eaYtj!*y-KQ=(EXpR z?K_n(R~X^W4+Idvd6$VVFO5L%>Pt)ov9^#dYm4}k%uG~O=IWFRwzWuZm}n4RsA|hLUy?yuyX-F+enYH3~+D*7> z>VDm{Q;)Jbx7_3i+f(cWnsIZVB|~HWA-ag7e#z!LNeAYAF$133+0Q%{b$#L%K+Muwv_+}G_Da_O+@25l| zbP@EkHEL~h7959N2(+RLga|ZY?V9uS!2}~jaiJs+3DhyvB$5u`iQr^?V#p+Plc4IC zs#iHa9v9jfr{Q-A_Bge3np~JAumh7D=4O#BHNZX1$^uPnpK#3`T(gBM?7W8I!zq=< zKH)4hB~T?;Kx9*awyxPe6&6hg$41!#hFdjJK@^*5F-7t7mb+EPVJW}7TY*VX;J1Y$3^uD`-xn!+za-*>S2N=fd7CjiO^aKb z^kA0)gp-DIKMgGGtG)x^nk9|_-;ZvMk?n2fs}mJkY$TF7BiJFks}; zNP!i_=fb}-a3^$pUL^A<=k<`fgynZ;oglBqNtA6g<=#7!iUmw8kz{IC%c5zTRx-ox zRNn=*l$SS5gc0@)3o|j!kD~-v?qtspKnwHz)EH@`4 zLpL}Q{fnbLoZ3Fh2}8phw-^VQ&NUZMGe<+izTbEV(kbiSIm-f-&~k~3amsIAut?#i zuAAdrmS}e8A|5j7JabqSG(D3f1Gy!4eSsSPz-qc`HASZB2y$jL_&*>uH?O(?t2E{W zI{PMQ!B&6b_&X`3k_o!FH$nU`Ymr}tDJLrvy-2aDu_BOLC~Rv(B|#`5aR{a(b>(uJ z1>h5Gs^RAzhmF`eIBP)_99rLw6A@%0 z&9{-s)G0MPLGFdtBZuz9;BPn;c0@$y^JO6C{pCDiv?C6gzvD@`8(}!vcp9sr`IIHg zM7*P6a2yG*r5G%8c8{AxDw>dTgf)uWDO-mof>_dbLf?_8lWp_wQd~*7wfkZzo1K2Q z-I3T8J;z((Hwr!(i;>|bhM9<6---LJnc|Fpa zAdm&C@7tEF#8&CizGGTqNp#dSZ`(gu6iX{5y-zz6`v=>Oib(WV3sU@E1G>Z}UY7J> z8}TNRKr8z?mU8h11(B3b zf%M6Z3?R<$lc@Yk(1)XC(id_naW0lz$N<#_(Y$@N7}3hW%Hs}S>O z%9UiFT`}2QjHCH=#8>i(#uTFLA;OH+h$AoonN57r4VIZ%kp|8y*iAV?G?gk%B7*pK zwYaK%5?ir=F+g6uIra8lF?3G-jRq>y-EvB+K=azC1>dm6+10@?aDxwHk)EH^cwR#1 zT1G!GF1x4vT@#LT3l7PqYmyMIBff}9$GA?SfK#5-QdJG<_j#^ZQCh=x4gcS%c}ID& zk^+mGTg$=uK$7D29SDx|CxMNZlM{!aIdm+w4(Lm)f*J1k48q}(0YnjsM=#dQIva}` z)0d#+Pq`>C6DPb-fVvVC?D5(E^&*mNB(no0A?mJIi%0RZGP<(WZFV|MI)lrwPG_NB zQV$|6u?cedV0Gehwrn3&~lMX0(- zJbo!FE2V=-y)XQ^O;AQ+zZ81lpFY(A9Uh(n1#|C^PuaeC)58!iWk_C-l#e|wX+hL; zc}48o^A06n%>2kPSoWVoARWKs=OSHJeuYR_{z`+*FKGnRt1x)SYr=S(H^SKd#rb1( 
z4W4lp-kn9zoWx&y8MfzH{gtg~@AzHuC*SdgD9URtPI~`B;ji1Pfq!5CisrGI2?*g5 z!Mp2^e{}_@ z0*@`1*|q7K6E>-EDD_lP28+ zT8fUd7&tGULkbp1+~%8;?|!K*IVt94{Tb2r!Fz3FuOg6wHC8WgG4e7GsF?T+jc@Y6dsajOfd%UjEKrlRl6>;*jZTa^lPnFO9cxqH^(NN+|ML? zFeZ*c%vaBYZKvsO&3cTLS;Og_Fh`5{_&oHzs%Tqh83Xx0mg)!JGx z&7D^YTOgjX36_8)vDa<%PA5(grq>*GK%07}%8uHh%Ge+pGK)VooLc84yN!-?DXHZ* zu~j=9&$u3hM*^0EW?AsKq>tFMq;6z0t4FpYhB%)KIe*yLV*l zrVzM~ME^-2p=h#fL_(Ca> ziPB2SCE7$~uv!%*lR=j)fT+nSL`B}9L~2ts15N&OMkW^z=bebAXOa~e(s{jvy%GHW zEU~(S#;E}wn{zIY(p83Xgx2eJYx5$o#vH!_BkG({e+B$kt*fRYTGavP6Sq8G6!l|2 zP3ahG$l(`0qNmZvM+TskrpC#z$i%AAsWE>#;p)WBHDW$sOx!FtGqY|LRPe}+3x#$< zH=^VfTN5phdhD+eLknx{R$>`{r-QcdtI=Dc74a4VuaSRM{$awOY}G4Z9W z*rXvCsguCm(vo5=rA~R}%h|XNSf62CCcvK{qXc;xF zZC1%5HQqEME`fN%YsPv>?xy3EUfbNkrlrAJi#D);wkq50E@CHMP@nyO4exZr`PV|t z#s}l8sK$_G2tcoEz#J^o1}r%mC54A`n&$Y3una|IbM7NBDvkz*%hOZnE=a})&k+g9C7Jg2vXy< z9fU%T#7Qng{6?!OG8mJ^ z5!Q@%lSl;-L8j)!DQ6jFxo<%sq7fK9GJAGM@rpXJ_csnJ}h!ABkDd?pC&a^;qqJ z0uV62T-Cg{e`BEOns*uI)62%bMW}YBR-VB&qo^pEXyX4ZGxRM+;U3+VXfQXCYaDc5 zucR~~K%sF%0uX6IjRG~owfsmP(v{JC6AB7Z*8E0*j{bY6rG+Z8X2~#Z>=CD^nNIO9 ziDw|$cdiB-;Lp-)zIA?y^Zri7is)*bf-F3rs zTkk86{w?z1s|dW;07&(oM$^!7Rs;;2N?RK4O4aW*5%WP)HFb9-Y3~rwi)Z+SkqaFs ztL8~1bcy@v|NMUtQ1G3gU>LAnjwdjnGkTCF(YzW+u6Jqa;NR7v3yHf3mur|`k(RUa z;|THVnL%%7Mbm)i+W>1}7Lh1{jCESm5E%#F@pA-?B+I}=c3i7--bl|TS(}~e0++I0 zAU`sU+58jurXxlfHjqE(AEB5J(kT6+t}Om;pj#f5&Tk3C(QrQ!CiBHcH0)9! z+MWtK&Q>%&y}6_pL5sKLjumIn>@D%+0;XhtWsp*=V_h?kWaSK+`v1?~+qJiG9qFQf zrMD)tO=bWP0KY`Wu^(BMZDy=kv6S(dO>(%KK$B<-XdvByXeN_+=4YIJdG5~LzBtcw zuFhTl$^4R2Z&j_2#s>+}R4huBOh#n0d-dw|QLC!nx2i-vqNZGp5_Cxk1e&@p;i3dk z9(7VS8F+^}!fvl3(woY$fG^!Pm?)$^&FVRv8nrISdt7;GA&I+==u3+*pqm`Fj#slI z@J}H4?Vy;dS)2goM{~_Cjy^11TO~AZuTs{BS0i zsc;yP#IKmlME4~5gdg4Ca#v$36nu<}acS)aVBYltBysOI;H^#mfUg3{zv5iD?nN}N zFMJO+Rc+Q+O1XuNN=tEP?ip(T51a(7)GROT;zRP*&4nJK+YQY*m=?12qpoh$qoABB zJWT%#CR;j`{p|ZS|LI62KD>4{)ff7hhT=r*`3-}pd3(6mG#s0*%4Hu80O@sKy@0F>``0h~ zp0gQbMa|a@;rVRpV4P>bfZ=-lHNEoJ@!QxgMcN%&Levs0&?vYp5;Gy*SNT;fnuT&) zjW4E3JJ14(li7K-1qyB}ttLNxjw$`^E0>drR3{|dD5w8D@Bg;5v%M2znr`Uusnx)x znZ008S##(dO0jz(QAw50;S8nL9CRU89V|^&4dQJ08s9kN6hBRoCjEwAmD4%7A_L4h zU?g@FA{|eX#PQ& zgV&C69R4od{5&2^-+TzD$IzYoY%WM7#^*K84)wXf`_2#(@FO`DYSjbeAA1GuI~)*-s_4J1lOqo(x-MFJFdIqcEc8J{^LPk|sUbCc>FgZPNrdRIkXjphAjeRWHQ=m6 z*32X(C+c0p<)KEYoUD)@L>+b6j)bfM{Y?D~{Ig$nIQq3v#CR$-B0;>zsB^+xPIcP@ z-@mAk*eB0Go4uAF{Bn^Pu~%e`WttRQvRScDMzUGmZ$2y*)g$k8*KFSDR7&v_@dvgz z7r+0#fx3etMj4^=BX+A&G?nlz7{kGjm_Knnd#@ zih4sNY>j`Y{dvKZz4go18ASS`GdgB)KZcTi4MW*o9Ln8JOmK8BbXQ+$@j;29aMHC> zU_!uSZ@~S-Km3C_kNe429KIn^<5gO{_9?i2P+T$g>#TAqGmRU$y1mvb95jv%xfV%N zO`8?h-SRFt@4e;cX=8r_7_)Lp_b}B|ufY@ixpEECWgQup4HW4bYNZ*JD5zLGNqNRv zZ-UoNqD`Q>!4^kX(^N$<~})Gy|%|U3@R$XXU6p^@P-L!NcJr$HUKCfRR7@G4#`ny5S5AA)P9B~eDI-n&Byku_= zZu?T%VK6LRG#Rf{8OG7)w7{==WVI2d$G!Xw zz_Sh7Z+oJE{~x*N#3V+tbC=uWN%8|n*3nY^T8k9^xi*o{NgHVXFvzEL(lmd_hrRJn z^hfWGJ3YP}U0rd^A8au7F_7iqw^GA}n{8^W6(ZFnjJA+kGQc;Txt#tKg6q9+I!ejH z-&Ic!uqyZYmZo>u>h5)R4z~BX0q*TvKfisj-)YAi>#5g^-PLf#ty$6ewD5L`g1ZnM zZvSu;)tnqbu-pxDimRh&Lg%95#3=rqn#MO#4t=d09&<-(9(E15m!YqGhUWP*?3PW; z{Si!gp+Azv8U!$(B)i^rWbS`;CeDId7d2rtdm=e}NnKM~JGjR%8?w^|f6j|^LOKkM zGqSI_03q6L=pmMRq>_A{jsovT5fze-hO7cS{6oyfZw zKwJ=cVE-{^xd(K*%u|m+{MUc}pD-Md4)wqO^M8pGF?v^v|I30-n^7Lx|VX|S3xtl#zAbKqmBr|6OoaampGf4UF0f7g~KOz*_WK;(BO8*Sy-}ouwGmMRG-;QnwPqaBkncx zvz(b{U+WT6*vVP$*Y%o{Xqj-{rR04q$i_q_)vPQz zx@b5IW5^sf9(PU?F&mCb`QIc4|jLxG{BC(`bmQn@O5?0?h>7I zyQXs<~nNYS;CVVo;X-dcRPfz*(u=K`# z@Im;}RqBe?l&}b}5M341DDwpSt*{4a-8rMJmAvqHo-11~nd%sB$Li32HBmwB4QF1t z_5so;(1gT34UtD`VrPA?PD4vK1Gl$B;V*wn1JtmO&T)ar!f>Pki^FvBzav-X* 
z(%Qv&rJjUI6Yfwo(TW*;=iqvRJzueF>$2cOnmx7Z^4|FUH!ojpau*zzDX?5vWMn^0 zOWM(Jy^e%S`==Qjh4iWO$&myH5l1r;*<2;m@lDAfhcDDC+xBc@Rwdb42&RalvTCEyGTRS^D zTi?)^lMJr5cpMIP&Nu_yKX%)BMgNcE=Uwm~lN31Pxq^+O?2sfO50`uiX7~5sy-GCf z*DFqp`^-%x7D7pnh*85HZB{z8)*Bpw-<3>IlNq8@0teWkoPw^z=T+%h(^v(clMX!` zHuUV}!`?YPVe3noJm(KYHb}567-+f5n8`znQ(gdzRY<~DTp)stB)vn-*}2DpjoV;1 z7ISba;T?iQb*1Mwm++)M@zKpm1m0vGXhq-t8-LtF%d9zl?FLKJ?KFJy`02pKAlC<; z{`$?Q)$IJUHfoigf9A&hb2_iSn6XF9QM_lBe)X011_K28fBbI6Q&+ZI?@`BM2*XWWD~i*RkA2WvR?<3=a?>t& zK()0ha(QIll&Ts!#ViZdnP;0tF;f~0Se zY+0N}U!||o`qh;Rh@*>DgRSzoAvj^!OFT;4`Ds*sgJhE>X$ zg8LyRJLoARUlowE#dBJPcn?EoLA6R?4vgj_l!i9&it#LxC!-8%Tzy2PMJ<-Je6TiN zegDU2jpX~IufKWq`DSu|LtTG&=%5#{-_c?B-xwW95H5)9Rf2iBxME)YY)5jSM|Pd* z*hkeCe-dJ+I}R?#(Y&$TQ~{2hGb%JAjY#KHd9WNx79yd(NI_TqBq?XZP@w>4m(%z( zFdm1@8RFZ$kRM*3Mag95jwc5-EgQ~|TO~+Eq0^)An&3{(6b7rEOT6Zq2BklC%3yty zMqchQ7wHTY(q1|cx3TbnE)J*mA^Esma@sD)o(NT}n5|Q5iW8EJ+5b>)&aVtPUbHki z9ad7zoTG$_OHI=5sy~HQBVh;OHsKPI4MC+v#MGW)hZ-k9W2YJDr6dyQ^;lx$P}+e?+M~cAMQju1dLk zxD$qmueQV0Cq91%N6C87?EfCUF0FtG&C`-9_dXPH-W(KJw`aOpq7929)d7dQ?)*>li7W_X^W;W(prA$|a#l^=P8^>Hq zl8ru$!tn?Jx5LxpH+iqy)HZ>p3Wj5}K)yNDnQn&No z>#a$7*_!?oGVAhW=^1hj-XXi!eHA1v-!;f|cV2Misw^lbNQ#Z9Z=7=9j4x<0^u0U! z`0Ky@zx{03NS*;TC=r#1{G*rIxh4SxzYO*fe=Cg^r$dA_gs^?aP&Bz^?(pVIWvFTv zzbR*3(R7a?tVe$paz*9KUy8<)CNBnGE>AA@iqG4k$F5G0wSte11(x%v_OmI+rt`55 z*NG}DEDTUp&DEd%A9V84(SEf7Fzxx?yxp)_$N08qjz6^a@M-&Qk-)T}R_1$L9M-pw9*&W1VV z&}w~|`06FwXei3)JpWvD6@jWQf$G#}wr649oQ$Bhb6 zHLm<0`w$zWOyB zff7G3;SdI1ZV78vE{iVmTDjJp(W8bF*Ku{edcWxyO+Dw5X^ED&M6!+#YfPtB2%=FsZp0AdBh6Y&EQaI*5yc16Pg0V`F7K(*lXESFK|98To4vVN9L)JS&`0b1P{Y$Z6LLx0m*U({#kx`O_aRXwmabuW(cs?1#V zc{z{2$_qIGbzc5CD@b{}cJw%xFzL$~%8aF~KDm@vN1~s~$>$C!7}L%ExF<&v9}ytH zIQ9nimBLFA36&Hflql^h%-Bb64=dyDpY!EE>B3QCDd>HX&f>C4Lx^l|@M& zikinM2OzFv<2a;Vwdp& zIJFLfQ){o;?sB5s-NW6zPij}&;p&UQmJtK#b zs6^_paSQ|Jrujb0``ojCg>5_ksdYrf}8qZUTl8NH{Z<&u|uYT@ksYGKsY`IJxa z?Kh6v)ugHU1h-}rWTkR-T=uq>bfWd+L?Yoy@Ta2I;6p~VGq!%5WS8Pc+-b){-r2`I zcG`#AJD=39w!_t5(|@p6&;B^+BZojUIY_?D;H7ClK08c)Hyh?;H5IP%gi2qoNy4TR z67G-R)bor@MXN^oiCpVyXlA2hmoEU?(D!oqAU#1;WP%GN=Tn!*tUt`&|?w)NE{h)Wp8-De?mh$Dc6$c%&Cjx?3~Sa;%#M>!J@zBJc!N-`Lh|TY!39iO5fILSC{w=- zc{W}7ipsGV;C=91W=Mu0>6^`_rmu6k0iVo3IvBze!S2FB6HF(X=zCjQZ2_<&Pz64Jbkk1 zl0Pj?5teAhdgo#1RAbD2p$MJvc>Dm(i?HN*B(l02Sh#kTWV10xps$r{#@_11*@j|$5;&9)M zd7GA`ar#cYtA0O7_}+rPpj@V?2VTGRAvUxmItNoBXi5#ZD9-}ZiJpX-i%^bU48BAZ zq@w6L$Y^?${yEo*tenizW%a6UyxT^D^y6MRIhEDyq=r+v;$)WGU;fg*Phd?uxD=#W z$}nnb&FGzSCj+G+Ybj$mE~Q}c-tma_p6+}XA6*J?1qmf@gQ*&UTvPO@o@5qQLDP)Kud_T2MVi~2Z0M?09)rwZ!ztYXRD^7r<1&?9u3ye2LMd_Te#8h zi)S27B0?KyB|5|zKjVZDQQrTMQe@{6I3v?W4 zHZBaSo{G&mwZbU48Lxs<3LXuO{iY#@Gv$JVaRc8Zl#+_y-$;&M<)h@wG09V?A9Q5( zav>L_WGMSJ4yBxaIsP59|DF}Ja&Y@p{J%0yI6Z*qR}sAvdpDFfi@_}yFr;F!+; zq|be?O@EM}Eiz|mBArYW5!$;LZB1MJoXBGSsEj}AZQJ>=#n1U~{KC)D7jBkr60VFi zG94HZ?RXcLzmm*_*t4T+^&z^1`d8vK&|HS@kkwM8A&NUfX1y~qq`0kI*rO*jhMIL@ zb+MC95%NTwD%uJ0gcij7KxosBnr>3IZh<2#xYS9S_4fBsrxu-xpD&%I*PhbgZ5c_9&+M^ZoAe8_k}*2vy1UU1z}-U z$;t0GTR-YvrQ_Q2ghParGfc#oBV(V`a{4Vc$u#d|p)nrtPq*B+$TyaseLCfltR`vk z+mlD#lSdL@K|f0x`$wN)SAKe4Zhdz2<)>TI0Y5>#)+ctBKA~N4Pi)bPq8EPFaSeUe z^*@oh9j;BoPcZ=k!RfzHwf+RGc*_6@Y})`Re+_1ka#d0{ND z(%1f+`QB`W6)ya`LTM&&gvzD?9Io(u7$@SB0ebkhPG5-@mU#lgL4XonDWu(*Zb~o z)OI9?E%}#r*yhze7+vQj-HK1TC=15YQ_crcai<)56Aq@3Gdjp*A7R2Sd6c%xgeVHr zkaHGQLQ#GtPK|PmdA`adBR^ogj6DS65u4;byivLILDkixbOMl`7mLm(J11djv;8S_ z0+(4+k671#ghUHDk5g74zV%YF)^c1loeQ-G_$=#>r7LNac2Col)ozx2LM3RfWyFPi z?z7IxGRGOhX*ctbkku^$%@XC042i2qlQ$VSwsE%3^NYWn|Me_tq9e1@)>saSBGAc; zHx;+>qdfO%HJf}k9Uz=y@|oK)j{bmtD&m>+Go%&i%fshqf5{s^`Xs2e)y&EOH(T_z 
zS+rB%(DIRv^P-%PRiD^Rew%D`8lA?;$*@13R*m+iezbR7NWetFgkDj;^i%T6_qf~W z{FPejXsf9iLrTd42cF3e5AcZz#T&VBaJgM)U9 zw)QG20ut$X@;xz-kS71L^FQ6VVu)c1MSWbj+!Z||QF4|~loXA99imcaRx}iDkmM?i zC*e?#1*WA)VGRfX=Z0-zK0DFuElzgyM?`4r64CpH*~RqJT3e0U!zU-4uktgRXPR+Z zGkN z-tJ1zWIiT4?RI!)L!M$)yc$5NcG~}_tEqN4T)Dz*k;0z(Ma6sH^}hZx=U~t`$x9Fi zy!aAQPYa2C)5-%b2G!;CLxSW-TF9#c#iypb_M~3vIhS4&_HMb%JWDl~Ij2SQj?Z*> z1%ig8)T0-5pe)O7_6gE89zr@p?_u)3o6*|(aXBevI?vq(tMWf9Gf8qd$Gg^Xh{oQ^ zNmiXaSD8x)P>iSU6aSn=A9ve1lUD9;|6cnu{Z3owraWutl23W@X^)+GpNVUMyF~{$Cn26*hjLgaCy1N)3$_2(jn$p{aWfRO!lzu6 zY47$=`O$yMUzUu?+$FmC-~Y;;^$EJkI11mu1i2d=}RQN7*~o>v@L{hOYP=nlWo|EBHs9YJyk1cT0*O+7H{Vo=SVt|!u=Z+-A*_qM_TqH zFFsa%eP`Q?z44ec!Aylb7W7fw zL(5PKACGtATF+1P*Q<-?R!YVD%b~X>EdLGAI63#dz`b ziB2=?wqU~v79xO(>H<~dlX5zP6x6yJJG~@I#@UF5^3$It9QfIr_5Nbh;e7rJRZ3&7ODU##r`*bZF5Hk<9;RsY_$E zkyIe0A)WQadL`Z%yrz`YG zDTl#^_12(uwz*QLGPQLyH!NHq=-??A-f$ufuq@j{mK5>g6W=e31Eb%@M=TZox&NBBV&?MPknq`OS6ed-onri z&P5T@MBt&5Pk5T5ULYP?!a?9;%Z33){}+Wn+;NJe4y&~q{N*dI^6dR%K_8jwMw7j& zM!68IpRps6`TX%Y7U30cBi#+t}xtUIiB#HT7Om=UOJ&#A7WMQ72AgS&i* z?H{xSMm;pJTG9qbPfA#vL#n+rI?sSZLsg+g)QuKV_Y{XC-y;VanxJtb)LzVER*uq&XwW)$cNir7m9L6W47&$DfjUx)+4uzDnufsI1v)EQS z37jXe=9`W%8k#hJaDpiU;&2~c@_NOf$U`kBk7n?D+t0|ovjPbQNoGw-kqV(Kfgifm zw=+bNMe)6cFAMb^+-*JOC+LeLcuKTo$v_hMJjM#3dj=8z>7vvqOO~$)33Y9=0nMCg z|0gdn`9v((jnj}D@#PSh`;v6Q`6apki{ z%8u}(m&MSs3pPCdHXWT$$4r-aPO^(hEBL_Y892sy55FUyZ9YY{37SAu(|tLfk+S{+ z3Kr=ya&K{Qt2CXQI5O-u0nsk+5S$Cijs6Hx!-gSvduRygO23l6t9?Gv*39UYcuL!Fv@hzX;lu^ucZ`&qbu@DW!X70V*vL-VU(aP z&ZVUBO45LEX^%0KGcNYqj!_ zCWA7&`0dHs)bv{OpOEfG0}4&yj9R=8RfO8DX&^isSUNoPb0l&eh__TUI;?1Xx|Pl; z8|(pzR-o=7)-INic(EMaVIS2EcbmyezE+}N07oEt!0|z|A}o)*zM{xvpM18cJsidL z+?BuVATTYU?psv1i-apdLAYXqv0_vqN|MAMEEZnt5|^fmj~>TwvQ+dfp8?b>%XN)s zs=i8j{5LcMVGGq8?U(75){=)fn5qzWmU4mY<>v_Y_>IKeI+Nz)Q6Q2%LK+I)j{=n- zgf80V4%62F_t-}GjbzxbHyy$E^KIAryYeNdA{(4%Ui>Tm=;rV%i4FY$5vG#ApObO8 z5P`My0jCqp2V$_lg2~tFt`G(>z2H(}s9^(nVzPID*0pDmP)a0*WQLcZzWA@C$ST|4 zT(*op6HQNO&Snyhz?$UoS7hMyI0F-MKyXk;id2v%kNW!A8f-{o-fa3fji3fsz91mJ zP)lh@&>?GzWf~X;krf{`>nP`_7AN7dfQSGo%|SZzoxMdbkh{ICWK& zg~SW}kUbO7I*W*B3$AYN`iIlI62d~R^$Qj}H+4`XS5J!nrtlr9<|Pg%w6h>M_D>c0 zdeEv+mwL1z@UlFl42abEB2bl-BcPeOv8!j-(tXvXz=LhXALsLkLVYPz!*=DVk*l0E zX9=3G#5fp79B(&hVYMA;@;@vS)G3l0f4mZw?n+C3Guf7Zd}%kBD~%MD)cIRhMi)eSA&< z)Q|o;pMEntC&}gV+;9<5)Jk@O{`ln|zJ+9C)C_%|K=0&$ahi`ta{RsfYt66blL;KZ z@He{~doJ_cVh1^NCiz#?3uieG)losUuw7Wk&`L$%;A`Qc*ybKKlKHMV;-hF|k1ii&GqO!9^%l zqK1=|U+sjeYlTDa%vC7c zL2PcP7Mt5nx}8JLEBHz6YA0NE_6f+C=WZQ|5#;R=_EdT;T94FBBemEwiH2~z{7XEQ zGT-&4=9|IOHwQ~YPAtZhukmFxBDam=8b;%)cxd?(hP+!F@-ENgcY!o7xD zh|l}DfH3{cv6Q`Ap9c_dlB_!pcvCyy?a4F;P$RXxkAlJTGfFH=9| zbO#ui3uiS>2c^pr&JZxic+=-Ut@WY)%{~5N$Hsk0YAAK4pFAsi*FDbBkf&_a_)$AO zuCnpuG)nB@U04yKE!AR_9DchQ%!LeW!4ospd9w@1)3~Vyp`EcoB%X0CEDBb zFrnQr3pYZ&Q>GlBE8`ftu|88P58^uR3fooe@1VhQlxHEl`n1?CS3RdzdNY3QR{QZV z^}P7(C)D0Rri;AaN0|{C_tRwK+h@-=Bb=O5`8nOkB~H$AKA`>Q!7T9~+o~CBjy>tX zY3A8=|N3wKCr6}9zED@`v*${0l4Pb!-{FaLN%HIDDbDfEA(Q}tO8henuRBXob0*ZL zO+lM0t$fIROhLj8oIuw@G`#F1eGj_agPT9I-iY+i*Ou~cU;VfzBP{D6tN{c@UX7P6 zJy|;Z@L$3U{<7p7LJAN||Mjw%gSZ@ARobs?E{F_84zU{?3z#AW!z&^9{u@o{5q?bvrFc>!2X!?HyZFPiwbS<@k58G zxJoBNFvPLw@4X-!G;{fr*+DZHvonFq;)r4YKX871f=?NZQeSmD@f7 z^pMy7T)F#b21HgiB7E%;zbNNw&vU8~$vuYkm=&T^j9rdXuFpBhIOUflF{G8}wzUh= zij)?ti+)x}LD%8X^T+hTbEd__=P48mSC3BJZGjGdj3z?4I;ggJfs~oL0yQNZZ&ps( zfrsMIv1-QN(eU+)8FCx($}KaOlr)E9cv7a9{9RsexkZhw1te+dE;%M>e~r|#TegPN zGB7}~mN=13H5)<;q4R&p*<@d%V&hO!x-y?PpC(dK0~Re_Itmdqp@yaw%tiRyPvMe# zp*ZT#L9~%-o3zU`zhA@4=QKH{MKGKWB+$X94wc-I&h&L_`kJk*6(b+$CA4fy*k6lTGnL24JB^_^TlZ7+BPi~hcR>=zA7MEwEgN;KOc@Sw_JGyZ-uvax2^`) 
zO&sk_S~afLJVVm6fau`qq(sI!ce+@rL7&ELP!WT&%5&1qO|V0117bv0m29Dkh+ zFUAF@&weFR>2%}9p$-q6sTkXoop!sT%$bFI00XeY8xCM7HRW{B9o5;WDcf1!LcSZ- zJ*&FP9~H~?73DdoQ;gKD^Mp=w_V>hXx zraudsqqrg5dw+|wc>6UI`*ik_&hFvPUOhn{U2TV}>-q7zi)jsaw&!!~br0M7hn@CZ zj=gXdnGK&lVc3o~_O}kD;--HF2lmo*>N_ z90@4%KzcP4f}^L;zpZncrVIos(4ww?RxO9GqmoKs^-P47HHpJpze!o?K};PSfKuW@ z%7tlaSjeZOx~7hfDi_)nchIi@_xw zrJ^=uI)1w=m5g2urKF%Qud{1ZQ~8Zhcctj+fLRLBM@$Eo0pmJr`%7F9*N_2ge+Q^#H1+USvQ<>nDsI^)^asd6@yBf^|$f zr(YH?Tz30u+za{&2Hh%$s!u&Zhlg6%D19+Afm27yB^8*{68~{1K?9r>`Q($ncyxj7 z_a)C<&f&VgZ>r+v!i5j-loVKe9BGpCD3hg|DQZ)~Z#Y09`9Zr1!oYxONXg}xPd(HT zs;-UNxg;-gJ_@T!>Zws z;NvgHuV+8dPV;&Kwq2{5&>AE`l^0%BIPBA%_+it97G+4In|X6VMkq>T7eTU<;f(I3 z*D}=!1uskRUzC1%9IaBh^TyuDEr;Qus5CePn%Axdw4nwposo{WyRUb2SKYC_E7~(Q z^oq1(^d~?f5QbldPQT1iK>Sv{0Z%tWx0@9|RzvLOyZp)9N&odu@SXYz&IiqIe9&|| zj8{EA>>PYjyV?m?qq0aO&Dbb&)E+q+WaZImz{2C0#|%xuy4S)+dcTk~@<}?)JbVF$ zSVi&#!uc6u#sqGCu60ka=$@;s?`p`O9%mz(Ii?npnEU~}^K_K_vlz}~Ej}Hf65f~P z3}7prey5_e9{-+{GZMO4^1F2MdVH1l{tU)LFdk{zr00H7;1aI1dR2?gnF<-VTNk-# z_}q({{lFV_Hl~CADsA=BEsRC`@eeQM{=&WfNB{a8pZ*VeK%VUJ2ld?t-TYKf?~C$* zLQK1Hra9s%5_X#^o}tDzjB-Ae_5R9dC4Tbc$&qRXIS2EgoC?=5%LV4iuDx@`m924Q z9u^812*cX@4ZshR$m_|}1U9k)AbqpBDualROru)hl6v9D} zg;kA7q~rK=z5S?)|EQ&azXbh3_llqa;?SBpu2y3dK6My zlA(o~1aM#T2Ag{<@clVr6Z%Bvw3MTWX@yKpbiVcvktMRo{+xVC8eEDrk!+YjKRkJK z{GF=ra9mAF1OydYWa4G?Ew3bXY_Vjsr}ZaKfx#X>>tjy8O|SHAT50Bm!V<7IP;3|# zs8bHzjbzJHE=-gopd;0rP93njC_6YnjlB}kJW$A>Pu3rDZe&Qv4rP>EMb#l+mg$R> z3lMuU;FAKzn-pvNuEgFL|AeXxav8;M!pX)%*_WZL=Qssni}w2#-?HTh^0mQPPaMf) zZvE~|X3SNrKhAhmhcv3Sc1Za3Mx17N>pwF3tG?Qe-CMo$*%?imf1w+Iw`$_)nVTNh z4zq+YD1PBh$FttRC6tbGF7Rf_tC50L$5KU3t6dayF!aXfQacq*iH3dh2un-KkmieJ zNKYP}9*bGSl}1mU+ne=ZpORRUOpY&*j)=8DU*-i$Qcw={@?w!+UVsbxpOoH0O5ds8 zOUqq$qvqQn1FfO>>Vg!`o#~KIxTp^Ibc6GepnwNw->4vs23%`uTm*ME>)oNYNh(#O zLikIWG1Dv6F;wcf2GSz+>2TN_ji>pW)a6x{itF4h&LVQboOv;VDp&l1^J}sjEE)-X zHu%`Hsh%5AJi4Zf%c2w$@no%)m3D_qRo3gshOENaGk)E1C3(#d>^2C4SH5-|Qq9ps zMyZwJ!%o+Z07aa~gRYiR%?&1<;JUs$c?M^S)Lcb7iR&^AW_>Sx}Q1nShlj zDv<&pnFK}x*;VdbadxTZZjqh=!7lnK*QE^g)j4)iV^N}ZofR3k&`9J2RUZ)(t}7|P z4H*719r>y=NQunQudhFWx->}J!^vqhM)KLq7s=Q3lwx{w=}^3K=R{CV!E^L_`8HDN z@iKfzsAcCBK;e*8u3uqSXVrweT|Z0cDlXPYwZ3o?pY?w0dIt6Rc!-oktmQhgE%VW% z?31xlw?w_oS;ip%ZNgzkT!WE0WPkQ^BFZ=X*<|gfL)Dqb!|~-cw2>m^I#ThzjtY}c zg$LiFcN|6U;JkUg6k#n_O1kMY2Ym5S72#@!(K(E$A*mk0;5&?X8fhW4;TahW6mD{p z!P&?pPM1l-Qo6=PYNGxos)B$);51M4_i6mqs86S7Q^675c?LPJtzTU+Uw;4OZaMsOe$b^* zWpRlnUK`%DsvOqNCzaN2OeKNC49WXR97s(guD7WsfnWdL!5SGIB=b@UiVsT;;h{mI7Z-`~Asv&AwMegi`<^={pk7<+h2PyX!bm8l8F!LgS z@37CgQgq2E_w!mn@QaMj5oYa0wJ2q-VxNcxEA#S)x;MR+{Ij~ea2tpC4zsD|5yxO) z)%Vl`NW+aBBu-0os}kpVSSb9Ryi~Mc%aurfZwHe6>T{Z1<^(bS1gB^ZB25yIU{gt^ zR0;OFo@fycM}Fw{T~ThS8;PZjyfHLn8=R0$a}Z=BR$Nk7XO>HRiRP$h&XLwJ=!4?F z83nIR2K}%am*M=aSy5fNr2YRPz1T|o)8_C0_C6rJ60Um3+@}ERE>-uwgYcZo`EmWt&Xi46YJ}0+!S>sm6(5J3?vKYRa0b zx|T{0EnMQ8ugAb!QHV|-BFO+52`*@ATvPBapLY&4Dsxd`}#;@Xc4 z%5lzXDdk$uojXFm^uy##4cg)HDWS>E_*_#|B9sA9IDSp9F8iuc?=I287rrj(RpcU$ zKFu91DlrtNWJJbghVZ^<98ljl|Lx7r`|yUQzM)CoZK{gb9Qy9A9f7mdeEj6?yzuKe z{CDdP{|+4fdo_puK3=ucKHS;+q;|Cvt}?>s%ZzcYCda&@PqXecg!{JU9snHBC8lJ!3odp@Q(9)l%~G$^#@NYqFHc)#*bkBiAUuAKTyb=YD} z#A2340pIk?${Ty*;WYM*45zDZ6`v&rd#Q=O1NY!{-!IIXUZtg}uCO9mS>_#AZ5e;(ei2H_rFltWtJ! 
ziD+@jW^`9Evdt^1%9KDY5ip3#?4{G*;3g@!#QJoRPeLbmJMh@2%LixU^feZHS-U=$tMZ2uk!DMa<^3?v0tYcf)**&!hjPmntUG4ouxAvPAO z*?FZo#=@!Xi-+@MX1%=PM8z$kZD{$}Mcx1#Eaw^5KVzGc%YBY@ny*+^OTe52+b?{V zW?ADvT^v7=FfMt-1~OtnM)2>gONA8%BywxJ<@oEr{a@##C44)(B!iKSIU15MLva+i zk_^OGT1-o~F*v0J73hV3`b@&R)NlI3mRw+4%(n?sb#H@6ezBUC{%bRd(x?hQQK_54$B;Mhm}t8 zkZw57E?0wP|7Ah)2WInyjP2AV1YmlYBvu<{KhYsm)SS>8BZymWhC{fOkLbRpB2)B> z*pcF5i}s5S@_UC}mw&D9@_+xC7aEn68+p^8n-Yg8gnl6P$I zntwS-5iZVsax>GEeJvl(DMI4nu<-{E*~IprEZ@J8wJuwT?fx|XjL%Jk0J6X4k;Zt( zf9U0Z`+w*!n?Ak2x9qS9WA|TuqUUQ;yG92bQB1LOq6(qu;iC=o}tzOq=CHdiMVzH|KdCy z0?iqK^YA)51wQiyUur-P!ZjYQALtY&#l(5O(I?qi71PTI$#qB7Uv?$d}Q|^ zKyB;$@z@^D?<{6tNyw}cvIbB?zxn?-e z^HkTbw9r?{{QJC+i2DK4Z<`L&*DsMgtI(e>-~a>JckEJ%A*eBR5g;Q?sJpl>xP%-W zxY_*duv<>1sG;l&smA$}RF2^CtD>pdEfl^K9dq{qwmw=wJKy1*W%FT(__*bS;FG}#5S5Z1Coa{6ADt_y9>8n_Y_C1eFqY23ZiNcy! zIafY`Y3olaY2l4@FcF(TOe)2t$Giz#EU$cqFrrh=pcuTvtf#S}U3w2`cyBEiUfA-y zY38Sp(=|DzMvwW_!&0hJu(;ujdtB+;`|aEdv83r_si0y**cRG(X0EdZlFE4#Mmruu zY>w|X3Rq7`vB@AK5Z$lo3EC4Z+#~~t7cC_M;XTX3x7w6^c&=IAUS6&BCo7!eoJz{) zzN=NB(tA~;g&80pW6E}E7*hS zQw(B1(=mE2N{v8ESXMe55y3*F%er!HyW8Xy97{D%yb#~$(Y(9K=`l{tX)s4(rZ%@S z=;SB|MPsCdwI}C@s76#XMts-`%)R5MVmmn#74;&@*M~_>x<2t>40gm9;PXtAZ)q2k zLi^o>CP+COBwyW&PFbMJfYSW7)YYb*@z7jG^h@f|{f5$B(85?G3RBnYLxdKEsOTV5_ zz*JX|N{aAp<(D&7czk*RK5CicP)sF5;~RAK5_*6_N@4v}W!@j449MDR=wPS$(mjNA zB)Ry)EUbOTcDI{;k;DrU!J<&786x_IRMH{T()B_^oqKu^Jr)Im-jhpndA!I@XUQL~ zCQZ_ndan^q)S3(?a$^khJWkpNhn<7N?Sne_)7|!0*Tb3imf%c}YdF(x(%EP5YUh*M z)pocFGh~Hdw(h9p!~5rYUK`iNKf;dB$nwHXJQuQQ@X*uH@|IY$e z-f+T-n(J$M)`FW3c>T$v7ognzsv}I-q~!9>1@ekM9~b@f8Zht?A3rCbaDB}7KT3o& zDp+t(T$&3RHt91&p6PGyU5OkTBoF8%^mWvdYcW~Km9b5&BWOWvHElX`g`%WE2v(rK z364`YrDO?{SnE>8QA_VWo7*ZT%dIhHw`#k!E3JPT#=#7 zLeiah#XxQ0H8T|rkfgH9ITDto>d`F{M8%OBWU|Cl#{(+Tzy0qw0Z`Xdvzlb7GXMtp zNVDll(cmGSZQsz^mZYyJ*`e-v=pqdJ!`UmR)OeH$eN31dvVx*@I-Z4cgGl9q$i&Yj zeH-Wg7254|;A^QPAr1}#tpphcg!v3o$WU)(W`*39vjxUeQO(aqB}8&R6{y(?hTw(E zq|5G2WHt(Hmx4j7$f9Ft!a3YM1DCa)Tzvge{ZnmAh}U#dPu1^5BXu2$FNXecjmk7< zKMPOgdT3J*p-Sx>e1swVHHA&7d z(>K(^7gBG3C2cNtfhd=81{k*2QF0DmU!Ru7fd$}ALM!NJX(g&?}7p(rwrJ zLH-5+#I13^KS24Q7$k0QZ|Tj=v{}J1RyBh>kX!=Qv`If+=twr9%hAn_@YbA?4L8MG z`2*)-W-gI7jG1WoD=#r5UkDQCxhyrO+}w~qVQ0T7xp~ILT=s)q9dOJFHJ8uCi9K5=DD+RECbmEv~7%01In-UV6ZolsQ@ zqp1|{mZDM^J>{3Ar$qde;@y%=Gpl( z-**3e&wr$()Qg@fSY}>9ZW!biE+kESsGjs2mne&@F@BEP~KJm@9BI3!+ zXYHM_ckC6ckF~QMN(jY>nT3MA5=zH{+A>HsYCE(XgM2g(8or<3 z#E3g4d4 z&-;Nbhy-EP?vyg{ij|IJo;Uf%c*xndpYNd=ONi^`hrZNQ#pm>b5#{^K<4c)0NBX|3 z8e+E)#01gn<9fls#`VO}NqyE9xnQ#L;{i{IX7L-m;G1twvZ$?T#me9g&aN+ z{d3w@*&~Iu)dh$3v6*vdc?r<9q6D~7Ub5ykmbxrGXm-+gz}M|9$*aR|JzP#*S2&u= z@(CpgCLiuLGW-VpevVL$eugq{yT<0nSpC!f$aHVr*v}W4IsX0PYnqYi6G^e$n$4JN zgzH3uIlCv<_8qBBsN#9gC6_(PV;H+yUok$zZZD zC?jUZA|4-;A|OKfX$e(Y6n6x09)|E``+dENSBo+*q6*U9c-&p+&dQzWY#A{Jmjt+M z+_7MHo67bzSO#Lb6Gv-IqR5@;fRV;Wjnka_?kpW~PZ^^ZCVrGRyuJZ`Md}w#ZL)?1 zwwO{YZAl@vu(Vb;?N|-e=Mzt+eKrEqMjRm+wKXCf7s@1QS#VGqU(%xM0%WfYa~WI1 z4@Yn}2MDH;)>IEi#z~)RXELS31_>P0i5Q-ZVh%7p?P3lts2Pl74v4S>cJ^QBnQ`GX z00FK&)^rAIOn7h_{tiuJ6L7lz3y+Z}8HQZ0@I^j?iug%Qoqy6FaEBaQkJoeen%Td( z2#9?jIOt?j8BoZ0fS7B}vLSuU%HbsUaR8@e4e+Yz{*&CSK@{nYOGkwv(y$Laf>PW z&tj8rKOvrB5Dca*0Ut4h3`;nSsz=J0Xtz(k)C-jzPs?|tp_XA&(uXI-QkWE{kC-1nN_4^ zJlt^j@&rNj=WDu{OMfX8ejI0OzefZkLv>w&lQ&NS=yk=eWC2S){=+1KkA$8=^zH0e z0qYg#@nk5?4O>`UNaoDzudtMsF2P<}$loAUDJ6$0Uk_%Z@#v-JWVBJi$ta@euFwIp zk|DI8ZJ52*&bWT%fdc!GQjrqUwPLJZXXMzq2TufIoV|tZgJ#_C5m^|u0Hm~uqAy1yg@JbU#;mBL#&&8BFA@DQLeuch_w z&gW@v-1lr|E>%=+oV)nL3xAv%)`s4k<|@es5rh^?2v+>~fO7a|LRM3^(OhjBXi_S^ ztM#B#DY7)z=%h#EaxXg<_npl3Oq}ePhz(;}eaU%BOK|YpQ`HnWH%3H!HZ_I02pUg_ 