Update scraper.py
CharlotteMaher committed Jul 23, 2019
1 parent 3faa7da commit cf4ad1a
Showing 1 changed file with 47 additions and 22 deletions.
69 changes: 47 additions & 22 deletions scraper.py
@@ -1,24 +1,49 @@
# This is a template for a Python scraper on morph.io (https://morph.io)
# including some code snippets below that you should find helpful
###############################################################################
# START HERE: Tutorial 3: More advanced scraping. Shows how to follow 'next'
# links from page to page: use functions, so you can call the same code
# repeatedly. SCROLL TO THE BOTTOM TO SEE THE START OF THE SCRAPER.
###############################################################################

# import scraperwiki
# import lxml.html
#
# # Read in a page
# html = scraperwiki.scrape("http://foo.com")
#
# # Find something on the page using css selectors
# root = lxml.html.fromstring(html)
# root.cssselect("div[align='left']")
#
# # Write out to the sqlite database using scraperwiki library
# scraperwiki.sqlite.save(unique_keys=['name'], data={"name": "susan", "occupation": "software developer"})
#
# # An arbitrary query against the database
# scraperwiki.sql.select("* from data where 'name'='peter'")
import scraperwiki
import urlparse
import lxml.html

# You don't have to do things with the ScraperWiki and lxml libraries.
# You can use whatever libraries you want: https://morph.io/documentation/python
# All that matters is that your final data is written to an SQLite database
# called "data.sqlite" in the current working directory which has at least a table
# called "data".
# scrape_table function: gets passed an individual page to scrape
def scrape_table(root):
    rows = root.cssselect("table.wikitable tr")  # selects all <tr> rows within <table class="wikitable">
    for row in rows:
        # Set up our data record - we'll need it later
        record = {}
        table_cells = row.cssselect("td")
        if table_cells:
            record['Release date'] = table_cells[0].text
            record['Artist'] = table_cells[1].text
            record['Album'] = table_cells[2].text
            record['Genre'] = table_cells[3].text
            record['Label'] = table_cells[4].text
            # Print out the data we've gathered
            print record, '------------'
            # Finally, save the record to the datastore - 'Artist' is our unique key
            scraperwiki.sqlite.save(["Artist"], record)
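# Note (assumption based on the morph.io scraperwiki library): save() upserts
# on the unique keys, so a later row with the same 'Artist' value replaces the
# earlier one in the default "data" table of data.sqlite.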

## scrape_and_look_for_next_link function: calls the scrape_table
# function, then hunts for a 'next' link: if one is found, calls itself again
#def scrape_and_look_for_next_link(url):
#    html = scraperwiki.scrape(url)
#    print html
#    root = lxml.html.fromstring(html)
#    scrape_table(root)
#    next_link = root.cssselect("a.next")
#    print next_link
#    if next_link:
#        next_url = urlparse.urljoin(base_url, next_link[0].attrib.get('href'))
#        print next_url
#        scrape_and_look_for_next_link(next_url)

# ---------------------------------------------------------------------------
# START HERE: define your starting URL - then
# call a function to scrape the first page in the series.
# ---------------------------------------------------------------------------
base_url = 'https://en.wikipedia.org/wiki/List_of_2019_albums'
#starting_url = urlparse.urljoin(base_url, 'scraping-for-everyone/webpages/example_table_1.html')
#scrape_and_look_for_next_link(starting_url)
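# ---------------------------------------------------------------------------
# A minimal sketch of one way to scrape just the first page, bypassing the
# pagination helper above. Illustrative only: it assumes the wikitables on
# base_url match the five-column layout that scrape_table expects.
# ---------------------------------------------------------------------------
#html = scraperwiki.scrape(base_url)
#root = lxml.html.fromstring(html)
#scrape_table(root)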
