Showing 1 changed file with 44 additions and 22 deletions.
@@ -1,24 +1,46 @@
# This is a template for a Python scraper on morph.io (https://morph.io)
# including some code snippets below that you should find helpful
###############################################################################
# START HERE: Tutorial 2: Basic scraping and saving to the data store.
# Follow the actions listed in BLOCK CAPITALS below.
###############################################################################

# import scraperwiki
# import lxml.html
#
# # Read in a page
# html = scraperwiki.scrape("http://foo.com")
#
# # Find something on the page using css selectors
# root = lxml.html.fromstring(html)
# root.cssselect("div[align='left']")
#
# # Write out to the sqlite database using scraperwiki library
# scraperwiki.sqlite.save(unique_keys=['name'], data={"name": "susan", "occupation": "software developer"})
#
# # An arbitrary query against the database
# scraperwiki.sql.select("* from data where name='peter'")
import scraperwiki
html = scraperwiki.scrape('http://inmo.ie/6022')
print("Click on the ...more link to see the whole page")
print(html)

# -----------------------------------------------------------------------------
# 1. Parse the raw HTML to get the interesting bits - the part inside <td> tags.
# -- UNCOMMENT THE 6 LINES BELOW (i.e. delete the # at the start of the lines)
# -- CLICK THE 'RUN' BUTTON BELOW
# Check the 'Console' tab again, and you'll see how we're extracting
# the HTML that was inside <td></td> tags.
# We use lxml, a Python library specifically for parsing HTML.
# -----------------------------------------------------------------------------

import lxml.html
root = lxml.html.fromstring(html)     # turn our HTML into an lxml object
tds = root.cssselect('td')            # get all the <td> tags
for td in tds:
    print(lxml.html.tostring(td))     # the full HTML tag
    print(td.text)                    # just the text inside the HTML tag
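
# Note that td.text only returns the text appearing before the first child
# element. A minimal sketch of two common lxml alternatives - the 'td a'
# selector is illustrative, not an element known to be on this page:
#
# for link in root.cssselect('td a'):
#     print(link.text_content())   # all text inside the tag, children included
#     print(link.get('href'))      # read an attribute; None if it is missing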

# -----------------------------------------------------------------------------
# 2. Save the data in the ScraperWiki datastore.
# -- UNCOMMENT THE THREE LINES BELOW
# -- CLICK THE 'RUN' BUTTON BELOW
# Check the 'Data' tab - here you'll see the data saved in the ScraperWiki store.
# -----------------------------------------------------------------------------

for td in tds:
    record = { "td" : td.text }                 # column name and value
    try:
        scraperwiki.sqlite.save(["td"], record)     # save the records one by one
    except Exception:
        record = { "td" : "NO ENTRY" }              # fall back to a placeholder value
        scraperwiki.sqlite.save(["td"], record)
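
# A detail worth knowing about scraperwiki.sqlite.save: the first argument
# names the unique key columns, so saving another record with the same "td"
# value updates the existing row instead of inserting a duplicate. A minimal
# sketch - the "seen" column is illustrative, not part of this scraper:
#
# scraperwiki.sqlite.save(["td"], {"td": "hello", "seen": 1})
# scraperwiki.sqlite.save(["td"], {"td": "hello", "seen": 2})  # updates the same row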

# -----------------------------------------------------------------------------
# Go back to the Tutorials page and continue to Tutorial 3 to learn about
# more complex scraping methods.
# -----------------------------------------------------------------------------

# You don't have to use the ScraperWiki and lxml libraries.
# You can use whatever libraries you want: https://morph.io/documentation/python
# All that matters is that your final data is written to an SQLite database
# called "data.sqlite" in the current working directory, with at least a table
# called "data".