From 552c7b4ea10459d70331547386d767f50087cd24 Mon Sep 17 00:00:00 2001
From: clabrow <30670646+clabrow@users.noreply.github.com>
Date: Wed, 4 Oct 2017 16:30:33 -0400
Subject: [PATCH] Revises to scrape Trolley Watch

---
 scraper.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/scraper.py b/scraper.py
index a7c80a9..9085e62 100644
--- a/scraper.py
+++ b/scraper.py
@@ -10,20 +10,20 @@
 
 # scrape_table function: gets passed an individual page to scrape
 def scrape_table(root): # root variable defined in scrape_and_look_for_next_link: parses xml from url
-    rows = root.cssselect("table.data tr") # selects all <tr> blocks within <table class="data">
+    rows = root.cssselect("table.Trolley.table tr") # selects all <tr> blocks within <table class="Trolley table">
     for row in rows:
         # Set up our data record - we'll need it later
         record = {}
         table_cells = row.cssselect("td") # extract cell in the table as you loop through it
         if table_cells: # if there are any cells
-            record['Artist'] = table_cells[0].text # put the text between each <td> tag in a variable called record, unique key is artist
-            record['Album'] = table_cells[1].text
-            record['Released'] = table_cells[2].text
-            record['Sales m'] = table_cells[4].text
+            record['Date'] = table_cells[0].text # put the text between each <td> tag in a variable called record, unique key is hospital
+            record['Hospital'] = table_cells[1].text
+            record['Region'] = table_cells[2].text
+            record['Trolley total'] = table_cells[4].text
             # Print out the data we've gathered
             print record, '------------'
-            # Finally, save the record to the datastore - 'Artist' is our unique key
-            scraperwiki.sqlite.save(["Artist"], record)
+            # Finally, save the record to the datastore - 'Hospital' is our unique key
+            scraperwiki.sqlite.save(["Hospital"], record)
 
 # scrape_and_look_for_next_link function: calls the scrape_table
 # function, then hunts for a 'next' link: if one is found, calls itself again
@@ -48,8 +48,8 @@ def scrape_and_look_for_next_link(url):
 
 # ---------------------------------------------------------------------------
 # START HERE: define your starting URL - then
-# call a function to scrape the first page in the series.
+# call a function to scrape it
 # ---------------------------------------------------------------------------
-base_url = 'https://paulbradshaw.github.io/'
-starting_url = urlparse.urljoin(base_url, 'scraping-for-everyone/webpages/example_table_1.html')
+starting_url = 'http://inmo.ie/6022'
+# urlparse breaks up the url by /. urlparse.urljoin combines two urls together.
 scrape_and_look_for_next_link(starting_url)
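
For context, here is what scraper.py plausibly looks like once the patch is applied, condensed into a standalone sketch. The diff only shows scrape_table and the starting URL, so the fetch-and-parse wiring (scraperwiki.scrape plus lxml.html.fromstring) is an assumption based on the classic ScraperWiki idiom; the sketch targets Python 2, matching the print statement in the hunks above.

    # Sketch only, assuming Python 2 and the classic ScraperWiki runtime;
    # scraperwiki.scrape and lxml.html do not appear in the diff itself.
    import scraperwiki  # gives us scrape() and the sqlite datastore
    import lxml.html    # parses HTML so we can call .cssselect()

    def scrape_table(root):
        # every row of <table class="Trolley table"> on the INMO page
        rows = root.cssselect("table.Trolley.table tr")
        for row in rows:
            record = {}
            table_cells = row.cssselect("td")
            if table_cells:  # header rows carry <th> cells, so they are skipped
                record['Date'] = table_cells[0].text
                record['Hospital'] = table_cells[1].text
                record['Region'] = table_cells[2].text
                record['Trolley total'] = table_cells[4].text  # index 3 skipped, as in the patch
                print record, '------------'
                # 'Hospital' is the unique key: saving a record with an existing
                # Hospital value updates that row rather than appending a new one
                scraperwiki.sqlite.save(["Hospital"], record)

    html = scraperwiki.scrape('http://inmo.ie/6022')
    scrape_table(lxml.html.fromstring(html))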
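
The body of scrape_and_look_for_next_link is unchanged by this patch, so the diff elides it; the surviving comment says it scrapes the page and then follows a 'next' link recursively. A hypothetical sketch of that pattern - the a.next selector and the variable names are illustrative, not taken from the repository:

    import urlparse  # Python 2 module; Python 3 moved it to urllib.parse

    def scrape_and_look_for_next_link(url):
        html = scraperwiki.scrape(url)     # fetch the page
        root = lxml.html.fromstring(html)  # parse it for cssselect
        scrape_table(root)                 # harvest this page's table
        next_links = root.cssselect("a.next")  # hypothetical selector
        if next_links:
            # resolve a possibly relative href against the current page
            next_url = urlparse.urljoin(url, next_links[0].attrib['href'])
            scrape_and_look_for_next_link(next_url)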
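
The last hunk hard-codes the starting URL, which makes the removed urlparse.urljoin call redundant even though the new comment still describes it. For reference, this is what the deleted join evaluated to (urlparse is Python 2's standard module):

    import urlparse

    base_url = 'https://paulbradshaw.github.io/'
    # urljoin resolves a relative path against a base URL
    print urlparse.urljoin(base_url, 'scraping-for-everyone/webpages/example_table_1.html')
    # prints: https://paulbradshaw.github.io/scraping-for-everyone/webpages/example_table_1.html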