initial commit

commit 736c3a6b9d9976521016653aa064786389cf4c22 0 parents
Aurelian Oancea authored
2  .gitignore
@@ -0,0 +1,2 @@
+__git__
+*.swp
20 MIT-LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2009, 2010 Aurelian Oancea, <oancea [at] gmail.com>
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
45 README.textile
@@ -0,0 +1,45 @@
+h3. Grapi: Client to access the unofficial Google Reader API
+
+h4. Dependencies
+
+h5. Client
+
+* "Curb":http://github.com/taf2/curb (gem install curb)
+
+h5. ReadingList Parser
+
+* "Nokogiri":http://github.com/tenderlove/nokogiri (gem install nokogiri)
+* "Loofah":http://github.com/flavorjones/loofah (gem install loofah)
+
+h4. Synopsis
+
+<pre>
+require "rubygems"
+require "grapi"
+
+reader = Grapi::Reader.new
+reader.login USERNAME, PASSWORD
+
+require "grapi/parser"
+list= Grapi::Parser::ReadingList.parse reader.reading_list
+
+</pre>
+
+h4. API methods
+
+* login USERNAME, PASSWORD
+* reading_list continuation= nil
+* subscribe feed_url, label= "test"
+* unsubscribe feed_url
+* mark_as_read entry_id
+
+h4. License: see MIT-LICENSE
+
+h4. Contact / Ideas / Patches: please use the GitHub infrastructure
+
+h4. Links
+
+* "Friends of the Unofficial Google Reader API":http://groups.google.com/group/fougrapi
+* "pyrfeed":http://code.google.com/p/pyrfeed/wiki/GoogleReaderAPI
+* "Using the Google Reader API – Part 2":http://blog.martindoms.com/2009/10/16/using-the-google-reader-api-part-2/
+
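A minimal sketch of the subscription methods listed in the README above, to complement the reading-list synopsis. The feed URL, label, and entry id are made-up placeholders, and ~/.gdata.yml is the same credentials file example.rb (next) reads:

<pre>
$LOAD_PATH<< "lib"

require "rubygems"
require "grapi"
require "yaml"

config = YAML.load_file File.expand_path("~/.gdata.yml")

reader = Grapi::Reader.new
reader.login config["username"], config["password"]

# subscribe under a label, then drop the subscription again
reader.subscribe "http://blog.example.com/atom.xml", "ruby"
reader.unsubscribe "http://blog.example.com/atom.xml"

# mark a single entry as read; entry ids come from the reading list
# ("tag:google.com,2005:reader/item/..." in the Atom <id> elements)
reader.mark_as_read "tag:google.com,2005:reader/item/0123456789abcdef"
</pre>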
34 example.rb
@@ -0,0 +1,34 @@
+$LOAD_PATH<< "lib"
+
+require "rubygems"
+require "grapi"
+require "grapi/parser"
+require "yaml"
+
+def print_list(list)
+ puts "~> Got: #{list.gid} updated at: #{list.updated_at}\n\t contains: #{list.entries.size} items. continuation: #{list.continuation}"
+ list.entries.each do | entry |
+ puts "~> #{entry.title}"
+ puts "\tpublished at: #{entry.published_at}"
+ puts "\tcategories: \n\t\t#{entry.categories.map{|k| "label= #{k[:label]} | term= #{k[:term]}"}.join("\n\t\t")}"
+ puts "\tsource: #{entry.source[:title]}"
+ puts "\tauthor: #{entry.author}"
+ puts "\turl: #{entry.link}"
+ puts "\tsummary: #{entry.summary}"
+ puts "======================================================================="
+ end
+end
+
+config= YAML.load_file File.expand_path("~/.gdata.yml")
+
+reader= Grapi::Reader.new(true)
+reader.login config["username"], config["password"]
+
+continuation= nil
+loop do
+ list= Grapi::Parser::ReadingList.parse(reader.reading_list(continuation))
+ print_list list
+ continuation= list.continuation
+ break if continuation.nil?
+end
+
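The example reads its credentials from ~/.gdata.yml; given the config["username"] and config["password"] lookups above, that file only needs two keys (values are placeholders):

<pre>
username: someone@gmail.com
password: secret
</pre>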
17 lib/core_ext/string.rb
@@ -0,0 +1,17 @@
+require 'zlib'
+require 'stringio'
+
+class String
+ def uncompress
+ begin
+ gz = Zlib::GzipReader.new(StringIO.new(self))
+ xml = gz.read
+ gz.close
+ rescue Zlib::GzipFile::Error
+ # Maybe this is not gzipped?
+ xml = self
+ end
+ xml
+ end
+end
+
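String#uncompress sniffs by rescue: try to inflate, and fall back to the raw string when Zlib::GzipReader rejects it as not gzipped. A quick round-trip illustrating both paths:

<pre>
$LOAD_PATH<< "lib"

require "zlib"
require "stringio"
require "core_ext/string"

io = StringIO.new
gz = Zlib::GzipWriter.new(io)
gz << "<feed/>"
gz.close

io.string.uncompress     # => "<feed/>"
"plain text".uncompress  # => "plain text" (not gzipped, returned unchanged)
</pre>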
2  lib/grapi.rb
@@ -0,0 +1,2 @@
+require 'core_ext/string'
+require 'grapi/reader'
55 lib/grapi/parser.rb
@@ -0,0 +1,55 @@
+require "nokogiri"
+require "loofah"
+require "time"
+
+module Grapi
+
+ module Parser
+
+ class Entry
+ attr_accessor :crawled_at, :summary, :gid, :categories, :published_at, :updated_at, :author, :source, :title, :link
+
+ def initialize
+ @categories= []
+ yield self
+ end
+ end
+
+ class ReadingList
+ attr_accessor :entries, :gid, :updated_at, :continuation
+
+ def initialize
+ @entries= []
+ yield self
+ end
+
+ def self.parse(xml)
+ doc= Nokogiri::XML xml
+ Grapi::Parser::ReadingList.new do | list |
+ list.gid = doc.search("id").first.inner_text
+ list.updated_at = Time.parse(doc.search("updated").first.inner_text)
+ list.continuation = doc.at_xpath("//gr:continuation").inner_text rescue nil
+ doc.search("entry").each do | entry |
+ list.entries << Grapi::Parser::Entry.new do | e |
+ e.gid = entry.search("id").first.inner_text
+ e.title = entry.search("title").first.inner_text
+ e.published_at = Time.parse(entry.search("published").first.inner_text)
+ e.updated_at = Time.parse(entry.search("updated").first.inner_text)
+ e.link = entry.search("link").attr("href").value
+ e.crawled_at = Time.at(entry["crawl-timestamp-msec"].to_i/1000.0).utc
+ e.summary = Loofah.fragment(entry.search("summary").inner_text).scrub!(:strip).text
+ e.author = entry.search("author/name").inner_text
+ e.source = {
+ :id => entry.search("source/id").inner_text,
+ :title => entry.search("source/title").inner_text,
+ :link => entry.search("source/link").attr("href").value
+ }
+ entry.search("category").each { | category | e.categories << {:term=>category.attr("term"), :label=>category.attr("label")} }
+ end
+ end
+ end
+ end
+ end
+ end
+end
+
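The parser leans on the shape of the Reader Atom feed: a gr:continuation element for paging, a gr:crawl-timestamp-msec attribute per entry, and nested source metadata. A hand-written fragment (all values made up, and assuming Nokogiri resolves the default Atom namespace it registers from the root element for the unprefixed selectors above) shows the minimum it expects:

<pre>
$LOAD_PATH<< "lib"

require "rubygems"
require "grapi/parser"

xml = <<-ATOM
<?xml version="1.0"?>
<feed xmlns="http://www.w3.org/2005/Atom"
      xmlns:gr="http://www.google.com/schemas/reader/atom/">
  <id>user/123/state/com.google/reading-list</id>
  <updated>2010-01-01T00:00:00Z</updated>
  <gr:continuation>CJjh9876</gr:continuation>
  <entry gr:crawl-timestamp-msec="1262304000000">
    <id>tag:google.com,2005:reader/item/00000001</id>
    <title>Hello</title>
    <published>2010-01-01T00:00:00Z</published>
    <updated>2010-01-01T00:05:00Z</updated>
    <link href="http://blog.example.com/hello"/>
    <summary>&lt;p&gt;Hi there&lt;/p&gt;</summary>
    <author><name>Jane Doe</name></author>
    <source>
      <id>feed/http://blog.example.com/atom.xml</id>
      <title>Example Blog</title>
      <link href="http://blog.example.com/"/>
    </source>
    <category term="user/123/label/ruby" label="ruby"/>
  </entry>
</feed>
ATOM

list = Grapi::Parser::ReadingList.parse(xml)
list.continuation          # => "CJjh9876"
list.entries.first.title   # => "Hello"
list.entries.first.author  # => "Jane Doe"
</pre>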
89 lib/grapi/reader.rb
@@ -0,0 +1,89 @@
+require 'curl'
+
+module Grapi
+
+ class Reader
+
+ def initialize(verbose= false)
+ @client = ::Curl::Easy.new do | easy |
+ easy.headers= {
+ "User-Agent" => "Grapi::Reader /0.2 +gzip",
+ "Accept-Encoding" => "gzip, deflate",
+ "GData-Version" => 2
+ }
+ easy.follow_location= true
+ easy.verbose= true if verbose
+ end
+ @token = nil
+ end
+
+ def get(url)
+ make_request(url){|c| c.http_get }
+ end
+
+ def post(url, params)
+ curl_post_params= params.inject([]){|p, e| p << ::Curl::PostField.content(e[0],e[1])}
+ make_request(url){|c| c.http_post(*curl_post_params)}
+ end
+
+ def post_with_token(url, params)
+ request_token if @token.nil?
+ params["T"]= @token
+ post url, params
+ end
+
+ def login(username, password)
+ post "https://www.google.com/accounts/ClientLogin", {
+ "Email" => username,
+ "Passwd" => password,
+ "source" => @client.headers["User-Agent"],
+ "service" => "reader",
+ "accountType" => "HOSTED_OR_GOOGLE"
+ }
+ @client.body_str.uncompress =~ /^SID=(.*)\n/
+ @client.headers['Cookie']= "SID=#{$1}"
+ self
+ end
+
+    def unsubscribe(feed_url)
+      edit_subscription feed_url, "unsubscribe"
+    end
+
+    def subscribe(feed_url, label= "test")
+      edit_subscription feed_url, "subscribe", {"a" => "user/-/label/#{label}"}
+    end
+
+ def mark_as_read(entry_id)
+ post_with_token "http://www.google.com/reader/api/0/edit-tag", {"i" => entry_id, "a" => "user/-/state/com.google/read"}
+ end
+
+ def reading_list(continuation= nil, dump_data= false)
+ get "http://www.google.com/reader/atom/user/-/state/com.google/reading-list?xt=user/-/state/com.google/read&ck=#{Time.now.to_i*1000}&n=1000&c=#{continuation}"
+ response= @client.body_str.uncompress
+ File.open("/tmp/#{Time.now.to_i}-reading_list.atom", "w"){|f|f<<response} if dump_data
+ response
+ end
+
+ private
+
+ def edit_subscription(feed_url, action, params={})
+ post_with_token "http://www.google.com/reader/api/0/subscription/edit", { "s" => "feed/#{feed_url}", "ac" => action }.update(params)
+ response = @client.body_str.uncompress
+ unless response == "OK"
+ $stderr<< "WARN: [#{__FILE__}:#{__LINE__}] ~> response is not OK. probably token has expired!\n#{response}\n\n"
+ end
+ end
+
+ def request_token
+ get "http://www.google.com/reader/api/0/token"
+ @token = @client.body_str.uncompress
+ end
+
+    def make_request(url)
+      @client.url= url
+      # http_get / http_post already perform the request
+      yield @client
+      self
+    end
+ end
+end
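For reference, login drives Google's documented ClientLogin protocol: one form-encoded POST answered by three key=value lines, of which this client keeps only SID and replays it as a cookie (token values shortened here):

<pre>
POST /accounts/ClientLogin HTTP/1.1
Host: www.google.com
Content-Type: application/x-www-form-urlencoded

Email=...&Passwd=...&service=reader&accountType=HOSTED_OR_GOOGLE&source=...

HTTP/1.1 200 OK
Content-Type: text/plain

SID=DQAAAGgA...
LSID=DQAAAGsA...
Auth=DQAAAGgA...
</pre>

Every later call then sends Cookie: SID=...; state-changing endpoints additionally fetch a short-lived token from /reader/api/0/token, which post_with_token passes along as the "T" parameter.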